Index: /issm/trunk/configure.ac
===================================================================
--- /issm/trunk/configure.ac	(revision 28012)
+++ /issm/trunk/configure.ac	(revision 28013)
@@ -2,5 +2,5 @@
 
 #AUTOCONF
-AC_INIT([Ice-sheet and Sea-level System Model (ISSM)],[4.22],[issm@jpl.nasa.gov],[issm],[http://issm.jpl.nasa.gov]) #Initializing configure
+AC_INIT([Ice-sheet and Sea-level System Model (ISSM)],[4.23],[https://issm.ess.uci.edu/forum/],[issm],[http://issm.jpl.nasa.gov]) #Initializing configure
 AC_CONFIG_AUX_DIR([./aux-config])	# Put config files in aux-config
 AC_CONFIG_MACRO_DIR([m4])			# m4 macros are located in m4
Index: /issm/trunk/etc/environment.sh
===================================================================
--- /issm/trunk/etc/environment.sh	(revision 28012)
+++ /issm/trunk/etc/environment.sh	(revision 28013)
@@ -275,12 +275,12 @@
 # Libraries / binaries
 #############################
-MPI_ROOT="${ISSM_EXT_DIR}/mpich/install"
-if [ -d "${MPI_ROOT}" ]; then
-	export MPI_DIR=${MPI_ROOT}
-	export MPI_HOME=${MPI_ROOT} # Used in installation of Dakota
-	export MPI_INC_DIR="${MPI_ROOT}/include"
-	path_prepend "${MPI_ROOT}/bin"
-	cpath_prepend "${MPI_ROOT}/include"
-	ld_library_path_append "${MPI_ROOT}/lib"
+MPI_ROOT_TEMP="${ISSM_EXT_DIR}/mpich/install"
+if [ -d "${MPI_ROOT_TEMP}" ]; then
+	export MPI_DIR=${MPI_ROOT_TEMP}
+	export MPI_HOME=${MPI_ROOT_TEMP} # Used in installation of Dakota
+	export MPI_INC_DIR="${MPI_ROOT_TEMP}/include"
+	path_prepend "${MPI_ROOT_TEMP}/bin"
+	cpath_prepend "${MPI_ROOT_TEMP}/include"
+	ld_library_path_append "${MPI_ROOT_TEMP}/lib"
 fi
 
@@ -484,4 +484,5 @@
 if [ -d "${GSL_ROOT}" ]; then
 	export GSL_HOME="${GSL_ROOT}" # Used in installation of Dakota
+	cpath_prepend "${GSL_ROOT}/include"
 	ld_library_path_append "${GSL_ROOT}/lib"
 fi
@@ -675,6 +676,9 @@
 fi
 
-VALGRIND_ROOT="${ISSM_EXT_DIR}/valgrind/install"
-path_prepend "${VALGRIND_ROOT}/bin"
+VALGRIND_ROOT="${ISSM_DIR}/externalpackages/valgrind/install"
+if [ -d "${VALGRIND_ROOT="${ISSM_DIR}/valgrind/install"
+}" ]; then
+	path_prepend "${VALGRIND_ROOT}/bin"
+fi
 
 DOXYGEN_ROOT="${ISSM_EXT_DIR}/doxygen/install"
Index: /issm/trunk/examples/EsaGRACE/runme.m
===================================================================
--- /issm/trunk/examples/EsaGRACE/runme.m	(revision 28012)
+++ /issm/trunk/examples/EsaGRACE/runme.m	(revision 28013)
@@ -125,10 +125,10 @@
 		set(0,'DefaultAxesFontSize',18,'DefaultAxesLineWidth',1,'DefaultTextFontSize',18,'DefaultLineMarkerSize',8)
 		figure1=figure('Position', [100, 100, 1000, 500]);
-		gcf; load coast; cla;
+		gcf; load coastlines; cla;
 		pcolor(lon_grid,lat_grid,sol_grid); shading flat; hold on;
 		if (kk==1)
-			geoshow(flipud(lat),flipud(long),'DisplayType','polygon','FaceColor','white');
+			geoshow(flipud(coastlat),flipud(coastlon),'DisplayType','polygon','FaceColor','white');
 		end
-		plot(long,lat,'k'); hold off;
+		plot(coastlon,coastlat,'k'); hold off;
 		c1=colorbar;
 		colormap('haxby');
Index: /issm/trunk/examples/StISSM/DomainOutline.exp
===================================================================
--- /issm/trunk/examples/StISSM/DomainOutline.exp	(revision 28013)
+++ /issm/trunk/examples/StISSM/DomainOutline.exp	(revision 28013)
@@ -0,0 +1,101 @@
+## Name:
+## Icon:0
+# Points Count Value
+95 25000.000000
+# X pos Y pos
+-1712113.0179281300 -349656.0205056490
+-1711509.1917759699 -349354.1074295690
+-1709240.6586108401 -349354.1074295690
+-1707080.1508345299 -348922.0058743060
+-1705459.7700022999 -347193.5996532570
+-1703083.2114483600 -345249.1426545770
+-1701138.7544496800 -344168.8887664210
+-1698438.1197292900 -343952.8379887900
+-1698006.0181740201 -341900.3556012940
+-1693793.0280102200 -340279.9747690600
+-1691092.3932898301 -339631.8224361670
+-1688391.7585694401 -339415.7716585360
+-1686447.3015707601 -340063.9239914290
+-1683530.6160727399 -341036.1524907690
+-1682558.3875734000 -343304.6856558960
+-1682134.7209454600 -344182.1487658350
+-1682126.2860181299 -345681.2442098390
+-1681072.0444963200 -345932.4393879640
+-1680634.4718407800 -346682.5639403050
+-1680571.9614614199 -348557.8753211580
+-1677446.4424933300 -348370.3441830730
+-1675508.6207331200 -349495.5310115840
+-1676616.9911885399 -353567.0975933760
+-1672991.8601872099 -353799.6175753630
+-1668839.1631938200 -353999.1991486380
+-1667132.2298986400 -355121.4651541420
+-1666069.5534494901 -357496.8595698890
+-1662944.0344814099 -358309.4945015920
+-1660756.1712037399 -358747.0671571240
+-1657922.9836098100 -361682.8074238540
+-1656579.2228470000 -364003.8487410000
+-1653004.8841628900 -364748.0635758530
+-1653442.4568184200 -361622.5446077650
+-1651129.5727820301 -355684.0585683980
+-1648929.2928810001 -350845.9692010000
+-1646260.7698359799 -347802.2804785470
+-1644571.8968503401 -341785.6704672340
+-1643503.3064999001 -340869.0986596620
+-1643190.7546030900 -336680.9032424240
+-1642690.6715682000 -335243.1645171040
+-1641913.0916588283 -332341.0814454452
+-1640683.9739821283 -330436.6112348017
+-1637816.0327364956 -328532.1410241582
+-1635241.2508976001 -328652.8685499910
+-1631765.4107996500 -327017.1790921330
+-1625934.5618617306 -322342.6128395668
+-1620608.3852626982 -320438.1426289233
+-1616101.6204481323 -320438.1426289233
+-1611594.8556335662 -320914.2601815842
+-1607907.5026034669 -321866.4952869060
+-1602991.0318966676 -324723.2006028712
+-1599713.3847588014 -327579.9059188365
+-1593977.5022675355 -328532.1410241582
+-1590699.8551296694 -334721.6692087495
+-1589259.8391499999 -338300.0840570000
+-1588627.0898328500 -343572.9950327750
+-1588056.6000060199 -345432.3563530700
+-1586993.9235568701 -347432.6884926460
+-1589259.8391499999 -349315.9832070000
+-1585493.6744521901 -352996.1122558430
+-1585118.6121760199 -354496.3613605250
+-1585861.9157163899 -356240.3342513760
+-1586368.8197632500 -358747.0671571240
+-1587889.0972700799 -360996.6006787950
+-1586681.3716600600 -363935.4286441500
+-1585806.2263489999 -365748.2296456410
+-1586743.8820394201 -368373.6655788350
+-1586056.2678664399 -370123.9562009640
+-1585493.6744521901 -371124.1222707520
+-1586243.7990045301 -373562.0270658610
+-1414257.6683934701 -327460.5482541260
+-1412220.4351378100 -325972.8942023180
+-1404515.9240538401 -296165.1737279960
+-1387984.9332285200 -267002.0106682320
+-1360067.9373810000 -107884.1934940000
+-1423274.3497420000 -81217.9259650000
+-1402446.7987670000 -45916.9921070000
+-1478343.8065599999 60338.8188030000
+-1503760.4789370000 36334.1837800000
+-1528509.5619775900 63667.6809196406
+-1623055.4918765700 62841.1506965373
+-1725734.5897359999 -61372.6193030000
+-1719614.6457630000 -69022.5492690000
+-1759088.2843859999 -114004.1374660000
+-1755416.3180020000 -117064.1094530000
+-1761230.2647760001 -126856.0198080000
+-1758170.2927900001 -130527.9861920000
+-1762148.2563720001 -135423.9413700000
+-1764902.2311590000 -131445.9777880000
+-1769798.1863370000 -134199.9525750000
+-1775000.1387139999 -131445.9777880000
+-1794277.9622269999 -153477.7760890000
+-1712576.7101950001 -225081.1205660000
+-1729939.5891459100 -335652.4567604720
+-1712113.0179281300 -349656.0205056490
+
Index: /issm/trunk/examples/StISSM/PigStISSM.par
===================================================================
--- /issm/trunk/examples/StISSM/PigStISSM.par	(revision 28013)
+++ /issm/trunk/examples/StISSM/PigStISSM.par	(revision 28013)
@@ -0,0 +1,117 @@
+
+% Name and Coordinate system
+md.miscellaneous.name = 'PigStISSM';
+md.mesh.epsg          = 3031;
+
+% NetCdf Loading
+disp('   Loading SeaRISE data from NetCDF');
+ncdata ='../Data/Antarctica_5km_withshelves_v0.75.nc';
+x1     = ncread(ncdata,'x1');
+y1     = ncread(ncdata,'y1');
+usrf   = ncread(ncdata,'usrf')';
+topg   = ncread(ncdata,'topg')';
+temp   = ncread(ncdata,'presartm')';
+smb    = ncread(ncdata,'presprcp')';
+gflux  = ncread(ncdata,'bheatflx_fox')';
+
+% Geometry
+disp('   Interpolating surface and ice base');
+md.geometry.base    = InterpFromGridToMesh(x1,y1,topg,md.mesh.x,md.mesh.y,0);
+md.geometry.surface = InterpFromGridToMesh(x1,y1,usrf,md.mesh.x,md.mesh.y,0);
+clear usrf, topg;
+
+disp('   Constructing thickness');
+md.geometry.thickness = md.geometry.surface-md.geometry.base;
+
+% Ensure hydrostatic equilibrium on ice shelf: 
+di = md.materials.rho_ice/md.materials.rho_water;
+
+% Get the node numbers of floating nodes
+pos = find(md.mask.ocean_levelset<0); 
+
+% Apply a flotation criterion on the precedingly defined nodes and
+% redefine base and thickness accordingly
+md.geometry.thickness(pos)    = 1/(1-di)*md.geometry.surface(pos);
+md.geometry.base(pos)         = md.geometry.surface(pos)-md.geometry.thickness(pos);
+md.geometry.hydrostatic_ratio = ones(md.mesh.numberofvertices,1); %For Dakota
+
+% Set min thickness to 1 meter
+pos0 = find(md.geometry.thickness<=1);
+md.geometry.thickness(pos0) = 1;
+md.geometry.surface         = md.geometry.thickness+md.geometry.base;
+md.geometry.bed             = md.geometry.base;
+md.geometry.bed(pos)        = md.geometry.base(pos)-1000;
+
+% Initialization parameters
+disp('   Interpolating temperatures');
+Temp_change = 0;
+md.initialization.temperature = InterpFromGridToMesh(x1,y1,temp,md.mesh.x,md.mesh.y,0)+273.15+Temp_change;
+clear temp;
+
+disp('   Loading velocities data from NetCDF');
+nsidc_vel = '../Data/Antarctica_ice_velocity.nc';
+xmin      = ncreadatt(nsidc_vel,'/','xmin');
+ymax      = ncreadatt(nsidc_vel,'/','ymax');
+spacing   = ncreadatt(nsidc_vel,'/','spacing');
+nx        = double(ncreadatt(nsidc_vel,'/','nx'));
+ny        = double(ncreadatt(nsidc_vel,'/','ny'));
+velx      = double(ncread(nsidc_vel,'vx'));
+vely      = double(ncread(nsidc_vel,'vy'));
+% Read coordinates
+xmin = strtrim(xmin);  
+xmin = str2num(xmin(1:end-2)); 
+ymax = strtrim(ymax);  
+ymax = str2num(ymax(1:end-2));  
+spacing = strtrim(spacing);
+spacing = str2num(spacing(1:end-2));  
+% Build the coordinates
+x2 = xmin+(0:1:nx)'*spacing;
+y2 = (ymax-ny*spacing)+(0:1:ny)'*spacing;
+
+disp('   Set observed velocities')
+md.initialization.vx  = InterpFromGridToMesh(x2,y2,flipud(velx'),md.mesh.x,md.mesh.y,0);
+md.initialization.vy  = InterpFromGridToMesh(x2,y2,flipud(vely'),md.mesh.x,md.mesh.y,0);
+md.initialization.vz  = zeros(md.mesh.numberofvertices,1);
+md.initialization.vel = sqrt(md.initialization.vx.^2+md.initialization.vy.^2);
+clear velx vely;
+
+disp('   Set Pressure');
+md.initialization.pressure = md.materials.rho_ice*md.constants.g*md.geometry.thickness;
+
+disp('   Construct ice rheological properties');
+md.materials.rheology_n = 3*ones(md.mesh.numberofelements,1);
+md.materials.rheology_B = paterson(md.initialization.temperature);
+
+%Forcings
+disp('   Interpolating surface mass balance');
+mass_balance = InterpFromGridToMesh(x1,y1,smb,md.mesh.x,md.mesh.y,0);
+md.smb.mass_balance = mass_balance*md.materials.rho_water/md.materials.rho_ice;
+clear smb;
+
+disp('   Set geothermal heat flux');
+md.basalforcings.geothermalflux = InterpFromGridToMesh(x1,y1,gflux,md.mesh.x,md.mesh.y,0);
+clear gflux;
+
+% Friction and inversion set up
+disp('   Construct basal friction parameters');
+friction_coefficient    = 10.0;
+md.friction.coefficient = friction_coefficient*ones(md.mesh.numberofvertices,1);
+md.friction.p           = ones(md.mesh.numberofelements,1);
+md.friction.q           = ones(md.mesh.numberofelements,1);
+
+% No friction applied on floating ice
+pos = find(md.mask.ocean_levelset<0);
+md.friction.coefficient(pos) = 0;
+md.groundingline.migration   = 'SubelementMigration';
+
+md.inversion         = m1qn3inversion();
+md.inversion.vx_obs  = md.initialization.vx;
+md.inversion.vy_obs  = md.initialization.vy;
+md.inversion.vel_obs = md.initialization.vel;
+
+disp('   Set boundary conditions');
+md.basalforcings.floatingice_melting_rate = zeros(md.mesh.numberofvertices,1);
+md.basalforcings.groundedice_melting_rate = zeros(md.mesh.numberofvertices,1);
+md                                        = SetMarineIceSheetBC(md);
+md.thermal.spctemperature                 = md.initialization.temperature;
+md.masstransport.spcthickness             = NaN*ones(md.mesh.numberofvertices,1);
Index: /issm/trunk/examples/StISSM/runme.m
===================================================================
--- /issm/trunk/examples/StISSM/runme.m	(revision 28013)
+++ /issm/trunk/examples/StISSM/runme.m	(revision 28013)
@@ -0,0 +1,288 @@
+
+%%%
+% Tutorial for StISSM
+%%%
+
+steps = [1];
+
+if any(steps==1)
+   % Mesh parameters 
+   domain    = ['./DomainOutline.exp'];
+   hinit     = 5000; % element size for the initial mesh
+   hmax      = 40000; % maximum element size of the final mesh
+   hmin      = 4000; % minimum element size of the final mesh
+   gradation = 1.7; % maximum size ratio between two neighboring elements
+   err       = 8; % maximum error between interpolated and control field
+
+   % Generate an initial uniform mesh (resolution = hinit m)
+   md = bamg(model,'domain',domain,'hmax',hinit);
+
+   % Get necessary data to build up the velocity grid
+   nsidc_vel  = '../Data/Antarctica_ice_velocity.nc';
+   xmin       = strsplit(ncreadatt(nsidc_vel,'/','xmin')); 
+   xmin       = str2num(xmin{2});
+   ymax       = strsplit(ncreadatt(nsidc_vel,'/','ymax'));
+   ymax       = str2num(ymax{2});
+   spacing    = strsplit(ncreadatt(nsidc_vel,'/','spacing')); 
+   spacing    = str2num(spacing{2});
+   nx         = double(ncreadatt(nsidc_vel,'/','nx'));
+   ny         = double(ncreadatt(nsidc_vel,'/','ny'));
+   vx         = double(ncread(nsidc_vel,'vx'));
+   vy         = double(ncread(nsidc_vel,'vy'));
+
+   % Build the coordinates
+   x = xmin+(0:1:nx)'*spacing;
+   y = (ymax-ny*spacing)+(0:1:ny)'*spacing;
+
+   % Interpolate velocities onto coarse mesh
+   vx_obs   = InterpFromGridToMesh(x,y,flipud(vx'),md.mesh.x,md.mesh.y,0);
+   vy_obs   = InterpFromGridToMesh(x,y,flipud(vy'),md.mesh.x,md.mesh.y,0);
+   vel_obs  = sqrt(vx_obs.^2+vy_obs.^2);
+   clear vx vy x y;
+
+   % Adapt the mesh to minimize error in velocity interpolation
+   md = bamg(md,'hmax',hmax,'hmin',hmin,'gradation',gradation,'field',vel_obs,'err',err);
+
+   % Plot and save model
+   plotmodel(md,'data','mesh')
+   save ./Models/PIG_StISSM_Mesh_generation md;
+   
+end
+
+if any(steps==2) %Masks #2 
+   md = loadmodel('./Models/PIG_StISSM_Mesh_generation');
+
+   % Load SeaRISe dataset for Antarctica  http://websrv.cs.umt.edu/isis/index.php/Present_Day_Antarctica
+   searise = '../Data/Antarctica_5km_withshelves_v0.75.nc';
+
+   % Read thickness mask from SeaRISE
+   x1      = double(ncread(searise,'x1'));
+   y1      = double(ncread(searise,'y1'));
+   thkmask = double(ncread(searise,'thkmask'));
+
+   % Interpolate onto our mesh vertices
+   groundedice                 = double(InterpFromGridToMesh(x1,y1,thkmask',md.mesh.x,md.mesh.y,0));
+   groundedice(groundedice<=0) = -1;
+   clear thkmask;
+
+   % Fill in the md.mask structure
+   md.mask.ocean_levelset = groundedice; %ice is grounded for mask equal one
+   md.mask.ice_levelset   = -1*ones(md.mesh.numberofvertices,1); %ice is present when negatvie
+
+   plotmodel(md,'data',md.mask.ocean_levelset,'title','grounded/floating','data',md.mask.ice_levelset,'title','ice/no-ice')
+
+   save ./Models/PIG_StISSM_SetMask md;
+end
+
+if any(steps==3) %Parameterization #3 
+   
+   md = loadmodel('./Models/PIG_StISSM_SetMask');
+   md = setflowequation(md,'SSA','all');
+   md = parameterize(md,'./PigStISSM.par');
+
+   save ./Models/PIG_StISSM_Parameterization md;
+end
+
+if any(steps==4) %Stochastic SMB
+    md = loadmodel('./Models/PIG_StISSM_Parameterization');
+
+    % Create the different subdomains for SMB %
+    ymax  = max(md.mesh.y);
+    ymin  = min(md.mesh.y);
+    xmax  = max(md.mesh.x);
+    xmin  = min(md.mesh.x);
+    idsmb = zeros(md.mesh.numberofelements,1); %subdomain ID is defined for each element
+    iid1  = find(md.mesh.x>=(xmax-2/3*(xmax-xmin))); %vertices in subdomain 1
+    iid2  = find(md.mesh.x<(xmax-2/3*(xmax-xmin))); %vertices in subdomain 2
+    for ii=1:md.mesh.numberofelements
+        for vertex=1:3
+            if any(iid1==md.mesh.elements(ii,vertex)) %one vertex in subdomain 1
+                idsmb(ii) = 1;
+            end
+        end
+        if idsmb(ii)==0 %no vertex was found in subdomain 1
+            idsmb(ii) = 2;
+        end
+    end
+    
+    % SMBarma implementation 
+    md.smb = SMBarma();
+    md.smb.num_basins = 2; %we use two different subdomains
+    md.smb.basin_id   = idsmb; %element subdomain IDs
+    md.smb.num_breaks = 1; %1 breakpoint in the piecewise polynomial
+    md.smb.datebreaks = [5;5]; %breakpoint occurs at year 5 in both subdomains
+    md.smb.num_params = 2; %use a constant and a linear trend for the piecwise polynomial    
+    constsmb    = [0.5,0.2;0.3,0.5]; %constant SMB term for subdomains pre- and post-breakpoint [m yr^-1] 
+    trendsmb    = [0,0;0.01,0.001]; %trend is SMB for subdomains pre- and post-breakpoint [m yr^-2] 
+    md.smb.polynomialparams = cat(3,constsmb,trendsmb); %concatenating const and trend along a 3rd dimension
+    md.smb.ar_order         = 1; %first-order AR
+    md.smb.ma_order         = 1; %first-order MA
+    md.smb.arlag_coefs      = [0.3;0]; %AR coefficients in each subdomain
+    md.smb.malag_coefs      = zeros(md.smb.num_basins,md.smb.ma_order); %all zeros is equivalent to MA order 0
+    md.smb.arma_timestep    = 1.0; %yearly ARMA  
+    md.smb.elevationbins    = [300,1000;300,1000]; %elevations separating different lapse rate values [m]
+    md.smb.lapserates       = 1e-2*[0.03,0.01,-1e-4;0.02,0.02,-1e-5]; %lapse rate values [m ice eq. m^-1 yr^-1]
+    md.smb.refelevation     = [500,500]; %elevations at which the SMBarma calculated values apply (i.e., before using lapse rates)
+    
+    % Set-up the covariance matrix
+    sdevSMB1         = 0.01; %low standard deviation in subdomain 1 [m ice eq. yr^-1]
+    sdevSMB2         = 0.2; %higher variability in subdomain 2 [m ice eq. yr^-1]
+    correlationSMB   = [1.0,0.5;0.5,1.0]; %moderate correlation between the subdomains
+    covarianceSMB    = diag([sdevSMB1,sdevSMB2])*correlationSMB*diag([sdevSMB1,sdevSMB2]); %covariance matrix [(m ice eq. yr^-1)^2]
+    
+    % Stochasticforcing implementation
+    md.stochasticforcing.isstochasticforcing = 1; %activate stochasticity
+    md.stochasticforcing.fields              = [{'SMBarma'}];
+    md.stochasticforcing.covariance          = covarianceSMB; %prescribe the SMB covariance
+    md.stochasticforcing.stochastictimestep  = 1.0; %yearly stochasticity
+    
+    save ./Models/PIG_StISSM_StochSMB md;
+    
+end
+
+if any(steps==5) % Transient Run #1 
+
+   md = loadmodel('./Models/PIG_StISSM_StochSMB');
+
+   md.inversion.iscontrol         = 0;
+   md.transient.ismasstransport   = 1;
+   md.transient.isstressbalance   = 1;
+   md.transient.isgroundingline   = 1;
+   md.transient.ismovingfront     = 0;
+   md.transient.isthermal         = 0;
+   md.verbose.solution            = 1;
+   md.timestepping.time_step      = 0.1;
+   md.timestepping.final_time     = 10;
+   md.transient.requested_outputs = {'default','SmbMassBalance','MaskIceLevelset'};
+
+   md        = solve(md,'Transient');
+   timing    = cell2mat({md.results.TransientSolution(:).time});
+   fullsmb   = cell2mat({md.results.TransientSolution(:).SmbMassBalance});
+   timemeansmb = mean(fullsmb,2);
+   timesdevsmb = std(fullsmb,0,2);
+   maxsmb      = max(fullsmb,[],'all');
+   minsmb      = min(fullsmb,[],'all');
+   
+   
+   plotmodel(md,'figure',1,...
+       'data',md.results.TransientSolution(1).SmbMassBalance,'title','SMB at timestep 1','caxis#1',[minsmb,maxsmb],...
+       'data',md.results.TransientSolution(end).SmbMassBalance,'title','SMB at last timestep','caxis#2',[minsmb,maxsmb])
+   plotmodel(md,'figure',2,...
+       'data',timemeansmb,'title','SMB mean over simulation',...
+       'data',timesdevsmb,'title','SMB standard deviation over simulation')
+       
+   seriesSMB1  = fullsmb(1,:);
+   seriesSMB71 = fullsmb(71,:);
+   figure(3);
+   plot(timing,seriesSMB1,'b');
+   hold on
+   plot(timing,seriesSMB71,'r');
+   title('SMB at two different vertices');
+   hold off
+   
+   save ./Models/PIG_StISSM_Transient1 md;
+   
+end
+
+if any(steps==6) % Set up stochastic calving also
+    
+    md = loadmodel('./Models/PIG_StISSM_Transient1');
+
+    md.calving.calvingrate         = 20*ones(md.mesh.numberofvertices,1);
+    md.frontalforcings.meltingrate = zeros(md.mesh.numberofvertices,1);
+    md.levelset.spclevelset        = NaN(md.mesh.numberofvertices,1);
+    md.levelset.migration_max      = 100.0; %avoid too fast advance/retreat of the front
+    md.transient.ismovingfront     = 1;
+    
+    % Parameterize stochastic calving
+    idbasinsCalving = md.smb.basin_id; %same subdomains as for SMB
+    sdevClv1         = 0.5; %low standard deviation in subdomain 1 [m yr^-1]
+    sdevClv2         = 5; %higher variability in subdomain 2 [m yr^-1]
+    correlationClv   = [1.0,0.0;0.0,1.0]; %no correlation between the subdomains
+    covarianceClv    = diag([sdevClv1,sdevClv2])*correlationClv*diag([sdevClv1,sdevClv2]); %covariance matrix [(m yr^-1)^2]
+    
+    % Adjust stochastic forcing class
+    md.stochasticforcing.fields     = [{'SMBarma'},{'DefaultCalving'}];
+    oldcovarianceSMB                = md.stochasticforcing.covariance;
+    covarianceGlobal                = blkdiag(oldcovarianceSMB,covarianceClv); %independence between SMB and calving
+    md.stochasticforcing.covariance = covarianceGlobal;
+    md.stochasticforcing.defaultdimension = md.smb.num_basins; %2 basins for stochastic default (used for calving, same as SMB)
+    md.stochasticforcing.default_id       = idbasinsCalving;
+    
+    save ./Models/PIG_StISSM_StochCalving md;  
+       
+end
+
+if any(steps==7) % Transient Run #2
+
+   md = loadmodel('./Models/PIG_StISSM_StochCalving');
+
+   md.verbose.solution            = 1;
+   md.timestepping.start_time     = md.timestepping.final_time;
+   md.timestepping.time_step      = 0.1;
+   md.timestepping.final_time     = md.timestepping.start_time+5;
+   md.transient.requested_outputs = {'default','SmbMassBalance','CalvingCalvingrate'};
+
+   % Set up initial conditions from previous transient results
+   md.geometry.thickness        = md.results.TransientSolution(end).Thickness;
+   md.initialization.vx         = md.results.TransientSolution(end).Vx;
+   md.initialization.vy         = md.results.TransientSolution(end).Vy;
+   md.initialization.vel        = md.results.TransientSolution(end).Vel;
+   md.mask.ocean_levelset       = md.results.TransientSolution(end).MaskOceanLevelset;
+   md.initialization.pressure   = md.results.TransientSolution(end).Pressure;
+   md.geometry.base             = md.results.TransientSolution(end).Base;
+   md.geometry.surface          = md.results.TransientSolution(end).Surface;
+   md.mask.ice_levelset         = md.results.TransientSolution(end).MaskIceLevelset;   
+   
+   md        = solve(md,'Transient');
+   md        = loadresultsfromcluster(md);
+   timing    = cell2mat({md.results.TransientSolution(:).time});
+   fullsmb   = cell2mat({md.results.TransientSolution(:).SmbMassBalance});
+   fullclv   = cell2mat({md.results.TransientSolution(:).CalvingCalvingrate});
+
+   timemeansmb = mean(fullsmb,2);
+   timesdevsmb = std(fullsmb,0,2);
+   maxsmb      = max(fullsmb,[],'all');
+   minsmb      = min(fullsmb,[],'all');
+   timemeanclv = mean(fullclv,2);
+   timesdevclv = std(fullclv,0,2);
+   maxclv      = max(fullclv,[],'all');
+   minclv      = min(fullclv,[],'all');
+   
+   plotmodel(md,'figure',1,...
+       'data',md.results.TransientSolution(1).SmbMassBalance,'title','SMB at timestep 1','caxis#1',[minsmb,maxsmb],...
+       'data',md.results.TransientSolution(end).SmbMassBalance,'title','SMB at last timestep','caxis#2',[minsmb,maxsmb])
+   plotmodel(md,'figure',2,...
+       'data',timemeansmb,'title','SMB mean over simulation',...
+       'data',timesdevsmb,'title','SMB standard deviation over simulation')
+   plotmodel(md,'figure',3,...
+       'data',timemeanclv,'title','Calving mean over simulation',...
+       'data',timesdevclv,'title','Calving standard deviation over simulation')  
+   
+   seriesSMB1  = fullsmb(1,:);
+   seriesSMB71 = fullsmb(71,:);
+   figure(4);
+   plot(timing,seriesSMB1,'b');
+   hold on
+   plot(timing,seriesSMB71,'r');
+   title('SMB at two different vertices');
+   hold off 
+   
+   seriesclv1  = fullclv(1,:);
+   seriesclv71 = fullclv(71,:);
+   figure(5);
+   plot(timing,seriesclv1,'b');
+   hold on
+   plot(timing,seriesclv71,'r');
+   title('Calving at two different vertices');
+   hold off 
+   
+end
+
+
+
+
+
+
+
+
Index: /issm/trunk/externalpackages/adjoinablempi/install-pleaides-gcc.sh
===================================================================
--- /issm/trunk/externalpackages/adjoinablempi/install-pleaides-gcc.sh	(revision 28012)
+++ /issm/trunk/externalpackages/adjoinablempi/install-pleaides-gcc.sh	(revision 28013)
@@ -10,5 +10,5 @@
 
 #Download from ISSM server
-$ISSM_DIR/scripts/DownloadExternalPackage.sh 'https://issm.ess.uci.edu/files/externalpackages/adjoinablempi' 'adjoinablempi.tar.gz'
+$ISSM_DIR/scripts/DownloadExternalPackage.sh 'https://issm.ess.uci.edu/files/externalpackages/adjoinablempi.tar.gz' 'adjoinablempi.tar.gz'
 
 #Untar ADOL-C
Index: /issm/trunk/externalpackages/adjoinablempi/install-pleaides.sh
===================================================================
--- /issm/trunk/externalpackages/adjoinablempi/install-pleaides.sh	(revision 28012)
+++ /issm/trunk/externalpackages/adjoinablempi/install-pleaides.sh	(revision 28013)
@@ -10,5 +10,5 @@
 
 #Download from ISSM server
-$ISSM_DIR/scripts/DownloadExternalPackage.sh 'https://issm.ess.uci.edu/files/externalpackages/adjoinablempi' 'adjoinablempi.tar.gz'
+$ISSM_DIR/scripts/DownloadExternalPackage.sh 'https://issm.ess.uci.edu/files/externalpackages/adjoinablempi.tar.gz' 'adjoinablempi.tar.gz'
 
 #Untar ADOL-C
Index: /issm/trunk/externalpackages/adolc/install-withampi-pleiades-gcc.sh
===================================================================
--- /issm/trunk/externalpackages/adolc/install-withampi-pleiades-gcc.sh	(revision 28012)
+++ /issm/trunk/externalpackages/adolc/install-withampi-pleiades-gcc.sh	(revision 28013)
@@ -9,5 +9,5 @@
 
 #Download from ISSM server
-$ISSM_DIR/scripts/DownloadExternalPackage.sh 'https://issm.ess.uci.edu/files/externalpackages/ADOL-C' 'ADOL-C.tar.gz'
+$ISSM_DIR/scripts/DownloadExternalPackage.sh 'https://issm.ess.uci.edu/files/externalpackages/ADOL-C.tar.gz' 'ADOL-C.tar.gz'
 
 #Untar ADOL-C
Index: /issm/trunk/externalpackages/adolc/install-withampi-pleiades.sh
===================================================================
--- /issm/trunk/externalpackages/adolc/install-withampi-pleiades.sh	(revision 28012)
+++ /issm/trunk/externalpackages/adolc/install-withampi-pleiades.sh	(revision 28013)
@@ -9,5 +9,5 @@
 
 #Download from ISSM server
-$ISSM_DIR/scripts/DownloadExternalPackage.sh 'https://issm.ess.uci.edu/files/externalpackages/ADOL-C' 'ADOL-C.tar.gz'
+$ISSM_DIR/scripts/DownloadExternalPackage.sh 'https://issm.ess.uci.edu/files/externalpackages/ADOL-C.tar.gz' 'ADOL-C.tar.gz'
 
 #Untar ADOL-C
Index: /issm/trunk/externalpackages/adolc/install.sh
===================================================================
--- /issm/trunk/externalpackages/adolc/install.sh	(revision 28012)
+++ /issm/trunk/externalpackages/adolc/install.sh	(revision 28013)
@@ -9,5 +9,5 @@
 
 #Download from ISSM server
-$ISSM_DIR/scripts/DownloadExternalPackage.sh 'https://issm.ess.uci.edu/files/externalpackages/ADOL-C' 'ADOL-C.tar.gz'
+$ISSM_DIR/scripts/DownloadExternalPackage.sh 'https://issm.ess.uci.edu/files/externalpackages/ADOL-C.tar.gz' 'ADOL-C.tar.gz'
 
 #Untar ADOL-C
Index: /issm/trunk/externalpackages/boost/install-1.55-pleiades.sh
===================================================================
--- /issm/trunk/externalpackages/boost/install-1.55-pleiades.sh	(revision 28012)
+++ /issm/trunk/externalpackages/boost/install-1.55-pleiades.sh	(revision 28013)
@@ -7,4 +7,8 @@
 #symlinks in externalpackages/python to what boost is expecting. Ther is NO WAY 
 #to get the boost library to include python support without doing that. 
+
+export BOOST_ROOT="${ISSM_DIR}/externalpackages/boost"
+export CXXFLAGS="-D_INTEL_LINUX_ -std=c++11"
+export CFLAGS="-D_INTEL_LINUX_"
 
 #Some cleanup
@@ -23,11 +27,12 @@
 
 patch src/boost/mpl/aux_/config/adl.hpp ./configs/1.55/adl.hpp.patch
+# Copy customized source and configuration files to 'src' directory
+cp configs/1.55/boost/multi_index/ordered_index.hpp src/boost/multi_index
 
 #Configure and compile
-cd src 
+cd src
 ./bootstrap.sh \
-	--prefix="$ISSM_DIR/externalpackages/boost/install" \
-	--with-python=python3.2 \
-	--with-python-root="$ISSM_DIR/externalpackages/python/install" 
+	--prefix=${BOOST_ROOT}/install \
+	--with-python=python2.7
 
 #Compile boost
Index: /issm/trunk/externalpackages/boost/install-1.7-linux-static.sh
===================================================================
--- /issm/trunk/externalpackages/boost/install-1.7-linux-static.sh	(revision 28012)
+++ /issm/trunk/externalpackages/boost/install-1.7-linux-static.sh	(revision 28013)
@@ -39,3 +39,3 @@
 # TODO: Reconfigure so that dynamic libraries are not compiled at all
 #
-rm -f $(find ${BOOST_ROOT}/install/lib -name *.so*)
+rm -f $(find ${PREFIX}/lib -name *.so*)
Index: /issm/trunk/externalpackages/boost/install-1.7-linux-valgrind.sh
===================================================================
--- /issm/trunk/externalpackages/boost/install-1.7-linux-valgrind.sh	(revision 28012)
+++ /issm/trunk/externalpackages/boost/install-1.7-linux-valgrind.sh	(revision 28013)
@@ -10,5 +10,5 @@
 
 # Download source
-$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/boost_${VER}.tar.gz" "boost_${VER}.tar.gz"
+${ISSM_DIR}/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/boost_${VER}.tar.gz" "boost_${VER}.tar.gz"
 
 # Unpack source
@@ -23,6 +23,6 @@
 rm -rf boost_${VER}
 
-# Copy customized source and config files to 'src' directory
-cp ./configs/1.73/boost/math/tools/user.hpp ./src/boost/math/tools/
+# Copy customized source and configuration files to 'src' directory
+cp ./configs/1.73/boost/math/tools/user.hpp ./src/boost/math/tools
 
 # Configure
Index: /issm/trunk/externalpackages/boost/install-1.7-mac-valgrind.sh
===================================================================
--- /issm/trunk/externalpackages/boost/install-1.7-mac-valgrind.sh	(revision 28013)
+++ /issm/trunk/externalpackages/boost/install-1.7-mac-valgrind.sh	(revision 28013)
@@ -0,0 +1,66 @@
+#!/bin/bash
+#set -eu # Do not `run set -eu` because it causes some targets to fail
+
+
+## Constants
+#
+VER="1_73_0"
+
+PREFIX="${ISSM_DIR}/externalpackages/boost/install" # Set to location where external package should be installed
+
+## Environment
+#
+export LDFLAGS="-Wl,-headerpad_max_install_names"
+
+# Download source
+${ISSM_DIR}/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/boost_${VER}.tar.gz" "boost_${VER}.tar.gz"
+
+# Unpack source
+tar -zxvf boost_${VER}.tar.gz
+
+# Cleanup
+rm -rf ${PREFIX} src
+mkdir -p ${PREFIX} src
+
+# Move source into 'src' directory
+mv boost_${VER}/* src
+rm -rf boost_${VER}
+
+# Copy customized source and configuration files to 'src' directory
+cp ./configs/1.73/boost/math/tools/user.hpp ./src/boost/math/tools
+
+# Configure
+cd src
+./bootstrap.sh \
+	--prefix=${PREFIX} \
+	--with-python=python2.7
+
+# Modify project config to enable MPI
+printf "\n# Enable MPI\nusing mpi ;\n" >> project-config.jam
+
+# Compile and install
+./b2 install link=shared runtime-link=shared
+
+# Set install_name for all shared libraries
+#
+# NOTE: The install_name_tool prints an error message on some installations, 
+#		but this is not a shell error, and it does not seem to affect the 
+#		Boost libraries called by ISSM. For now, we are simply redirecting the 
+#		error to null.
+#
+# TODO:
+# - Modify the source to apply absolute paths to the library ids so that 
+#	patching it after the fact with install_name_tool is not necessary.
+#
+cd ${PREFIX}/lib
+for name in *.dylib; do
+	install_name_tool -id ${PREFIX}/lib/${name} ${name} 2>/dev/null
+done
+
+if [ "${VER}" == "1_79_0" ]; then
+	## Patch install names for certain libraries
+	#
+	# TODO: Figure out how to reconfigure source to apply these install names at compile time
+	#
+	install_name_tool -change @rpath/libboost_atomic.dylib ${PREFIX}/lib/libboost_atomic.dylib libboost_filesystem.dylib
+fi
Index: /issm/trunk/externalpackages/chaco/configs/mac/code/util/smalloc.c.patch
===================================================================
--- /issm/trunk/externalpackages/chaco/configs/mac/code/util/smalloc.c.patch	(revision 28013)
+++ /issm/trunk/externalpackages/chaco/configs/mac/code/util/smalloc.c.patch	(revision 28013)
@@ -0,0 +1,4 @@
+6c6
+< #include <malloc.h>
+---
+> #include <malloc/malloc.h>
Index: /issm/trunk/externalpackages/chaco/configs/win/msys2/mingw64/code/Makefile
===================================================================
--- /issm/trunk/externalpackages/chaco/configs/win/msys2/mingw64/code/Makefile	(revision 28013)
+++ /issm/trunk/externalpackages/chaco/configs/win/msys2/mingw64/code/Makefile	(revision 28013)
@@ -0,0 +1,202 @@
+DEST_DIR = 	../exec
+DEST=		${DEST_DIR}/chaco
+CC = 		gcc
+IFLAG =		-Imain -I/mingw64/x86_64-w64-mingw32/include
+CFLAGS =	-fPIC -fno-omit-frame-pointer -pthread -fexceptions -g -Wno-implicit-function-declaration -D_MSYS2_
+OFLAGS =	-O2
+#AR =             /usr/ccs/bin/ar rcv   # for solaris 2
+#AR =             /usr/bin/ar rcv
+AR =             ar cr
+#RANLIB =         /usr/ccs/bin/ranlib   # for solaris 2
+#RANLIB =         /usr/bin/ranlib
+RANLIB =         ranlib
+
+FILES.c=	main/user_params.c main/interface.c main/main.c \
+		submain/balance.c submain/divide.c submain/submain.c \
+		input/input_assign.c \
+		input/check_input.c input/input.c input/input_geom.c \
+		input/input_graph.c input/read_params.c input/reflect_input.c \
+		input/read_val.c \
+		graph/check_graph.c graph/free_graph.c \
+		graph/reformat.c graph/subgraph.c graph/graph_out.c \
+		inertial/eigenvec2.c inertial/eigenvec3.c inertial/inertial.c \
+		inertial/inertial1d.c inertial/inertial2d.c \
+		inertial/inertial3d.c inertial/make_subgeom.c \
+		klspiff/buckets.c klspiff/buckets_bi.c klspiff/buckets1.c \
+		klspiff/bilistops.c klspiff/coarsen_kl.c klspiff/count_weights.c \
+		klspiff/compress_ewgts.c klspiff/kl_init.c klspiff/kl_output.c \
+		klspiff/klspiff.c klspiff/make_bndy_list.c \
+		klspiff/make_kl_list.c klspiff/nway_kl.c \
+		klvspiff/bpm_improve.c klvspiff/bucketsv.c \
+		klvspiff/clear_dvals.c klvspiff/coarsen_klv.c \
+		klvspiff/countup_vtx_sep.c klvspiff/find_bndy.c klvspiff/flow.c \
+		klvspiff/klv_init.c klvspiff/klvspiff.c klvspiff/make_bpgraph.c \
+		klvspiff/make_sep_list.c klvspiff/matching.c klvspiff/nway_klv.c \
+		klvspiff/flatten.c \
+		coarsen/coarsen.c coarsen/interpolate.c coarsen/makefgraph.c \
+		coarsen/makeccoords.c \
+		coarsen/coarsen1.c coarsen/makev2cv.c \
+		coarsen/maxmatch.c coarsen/maxmatch1.c coarsen/maxmatch2.c \
+		coarsen/maxmatch3.c coarsen/maxmatch4.c coarsen/maxmatch5.c \
+		connect/add_edges.c connect/connected.c connect/find_edges.c \
+		eigen/bidir.c eigen/bisect.c eigen/checkeig.c \
+		eigen/checkeig_ext.c \
+		eigen/checkorth.c eigen/cksturmcnt.c eigen/mkeigvecs.c\
+		eigen/eigensolve.c eigen/get_extval.c eigen/get_ritzvals.c \
+		eigen/lanczos_FO.c eigen/lanczos_SO.c eigen/lanczos_SO_float.c \
+		eigen/lanczos_ext.c eigen/lanczos_ext_float.c eigen/lanc_seconds.c\
+		eigen/lanpause.c eigen/makeorthlnk.c eigen/mkscanlist.c \
+		eigen/orthog1.c eigen/orthogonalize.c eigen/orthogvec.c \
+		eigen/ql.c eigen/rqi.c eigen/rqi_ext.c eigen/scale_diag.c \
+		eigen/scanmax.c eigen/scanmin.c eigen/solistout.c \
+                eigen/sorthog.c eigen/splarax.c eigen/sturmcnt.c \
+		eigen/Tevec.c eigen/tri_solve.c eigen/warnings.c \
+		symmlq/aprod.c symmlq/msolve.c symmlq/pow_dd.c \
+		symmlq/symmlq.c symmlq/symmlqblas.c \
+		tinvit/tinvit.c tinvit/pythag.c tinvit/epslon.c \
+		optimize/determinant.c optimize/func2d.c \
+		optimize/func3d.c optimize/opt2d.c optimize/opt3d.c \
+		assign/assign.c assign/assign_out.c assign/mapper.c \
+		assign/median.c assign/merge_assign.c \
+		assign/rec_median.c assign/rotate.c assign/y2x.c \
+		bpmatch/checkbp.c bpmatch/inits2d.c bpmatch/inits3d.c \
+		bpmatch/genvals2d.c bpmatch/genvals3d.c bpmatch/map2d.c \
+		bpmatch/map3d.c bpmatch/movevtxs.c \
+		bpmatch/sorts2d.c bpmatch/sorts3d.c \
+		refine_map/compute_cube_edata.c refine_map/compute_cube_vdata.c \
+		refine_map/refine_cube.c refine_map/update_cube_edata.c \
+		refine_map/update_cube_vdata.c refine_map/find_edge_cube.c \
+		refine_map/init_cube_edata.c refine_map/compute_mesh_edata.c \
+		refine_map/compute_mesh_vdata.c refine_map/find_edge_mesh.c \
+		refine_map/init_mesh_edata.c refine_map/refine_mesh.c \
+		refine_map/update_mesh_edata.c refine_map/update_mesh_vdata.c \
+		refine_map/refine_map.c refine_map/make_comm_graph.c \
+		refine_part/refine_part.c refine_part/kl_refine.c \
+		refine_part/make_maps_ref.c refine_part/make_terms_ref.c \
+		internal/force_internal.c internal/improve_internal.c \
+		internal/check_internal.c \
+		misc/define_subcubes.c misc/define_submeshes.c \
+		misc/divide_procs.c misc/merge_goals.c misc/make_term_props.c \
+		misc/count.c misc/countup.c misc/countup_cube.c \
+		misc/countup_mesh.c misc/make_subgoal.c \
+		misc/find_maxdeg.c misc/make_maps.c misc/make_setlists.c \
+		misc/sequence.c misc/perturb.c misc/simple_part.c \
+		misc/time_kernels.c misc/timing.c \
+		util/affirm.c util/array_alloc_2D.c util/bit_reverse.c \
+		util/checkpnt.c util/cpvec.c util/dot.c \
+		util/doubleout.c util/input_int.c util/gray.c \
+		util/machine_params.c util/makevwsqrt.c util/mkvec.c util/norm.c \
+                util/normalize.c util/mergesort.c \
+                util/randomize.c util/smalloc.c util/bail.c \
+		util/scadd.c util/seconds.c util/setvec.c util/shell_sort.c \
+		util/strout.c util/tri_prod.c util/true_or_false.c \
+		util/update.c  util/vecout.c util/vecran.c \
+                util/vecscale.c 
+
+
+FILESMINUSBLAS.c=	main/user_params.c main/interface.c main/main.c \
+		submain/balance.c submain/divide.c submain/submain.c \
+		input/input_assign.c \
+		input/check_input.c input/input.c input/input_geom.c \
+		input/input_graph.c input/read_params.c input/reflect_input.c \
+		input/read_val.c \
+		graph/check_graph.c graph/free_graph.c \
+		graph/reformat.c graph/subgraph.c graph/graph_out.c \
+		inertial/eigenvec2.c inertial/eigenvec3.c inertial/inertial.c \
+		inertial/inertial1d.c inertial/inertial2d.c \
+		inertial/inertial3d.c inertial/make_subgeom.c \
+		klspiff/buckets.c klspiff/buckets_bi.c klspiff/buckets1.c \
+		klspiff/bilistops.c klspiff/coarsen_kl.c klspiff/count_weights.c \
+		klspiff/compress_ewgts.c klspiff/kl_init.c klspiff/kl_output.c \
+		klspiff/klspiff.c klspiff/make_bndy_list.c \
+		klspiff/make_kl_list.c klspiff/nway_kl.c \
+		klvspiff/bpm_improve.c klvspiff/bucketsv.c \
+		klvspiff/clear_dvals.c klvspiff/coarsen_klv.c \
+		klvspiff/countup_vtx_sep.c klvspiff/find_bndy.c klvspiff/flow.c \
+		klvspiff/klv_init.c klvspiff/klvspiff.c klvspiff/make_bpgraph.c \
+		klvspiff/make_sep_list.c klvspiff/matching.c klvspiff/nway_klv.c \
+		klvspiff/flatten.c \
+		coarsen/coarsen.c coarsen/interpolate.c coarsen/makefgraph.c \
+		coarsen/makeccoords.c \
+		coarsen/coarsen1.c coarsen/makev2cv.c \
+		coarsen/maxmatch.c coarsen/maxmatch1.c coarsen/maxmatch2.c \
+		coarsen/maxmatch3.c coarsen/maxmatch4.c coarsen/maxmatch5.c \
+		connect/add_edges.c connect/connected.c connect/find_edges.c \
+		eigen/bidir.c eigen/bisect.c eigen/checkeig.c \
+		eigen/checkeig_ext.c \
+		eigen/checkorth.c eigen/cksturmcnt.c eigen/mkeigvecs.c\
+		eigen/eigensolve.c eigen/get_extval.c eigen/get_ritzvals.c \
+		eigen/lanczos_FO.c eigen/lanczos_SO.c eigen/lanczos_SO_float.c \
+		eigen/lanczos_ext.c eigen/lanczos_ext_float.c eigen/lanc_seconds.c\
+		eigen/lanpause.c eigen/makeorthlnk.c eigen/mkscanlist.c \
+		eigen/orthog1.c eigen/orthogonalize.c eigen/orthogvec.c \
+		eigen/ql.c eigen/rqi.c eigen/rqi_ext.c eigen/scale_diag.c \
+		eigen/scanmax.c eigen/scanmin.c eigen/solistout.c \
+                eigen/sorthog.c eigen/splarax.c eigen/sturmcnt.c \
+		eigen/Tevec.c eigen/tri_solve.c eigen/warnings.c \
+		symmlq/aprod.c symmlq/msolve.c symmlq/pow_dd.c \
+		symmlq/symmlq.c  \
+		tinvit/tinvit.c tinvit/pythag.c tinvit/epslon.c \
+		optimize/determinant.c optimize/func2d.c \
+		optimize/func3d.c optimize/opt2d.c optimize/opt3d.c \
+		assign/assign.c assign/assign_out.c assign/mapper.c \
+		assign/median.c assign/merge_assign.c \
+		assign/rec_median.c assign/rotate.c assign/y2x.c \
+		bpmatch/checkbp.c bpmatch/inits2d.c bpmatch/inits3d.c \
+		bpmatch/genvals2d.c bpmatch/genvals3d.c bpmatch/map2d.c \
+		bpmatch/map3d.c bpmatch/movevtxs.c \
+		bpmatch/sorts2d.c bpmatch/sorts3d.c \
+		refine_map/compute_cube_edata.c refine_map/compute_cube_vdata.c \
+		refine_map/refine_cube.c refine_map/update_cube_edata.c \
+		refine_map/update_cube_vdata.c refine_map/find_edge_cube.c \
+		refine_map/init_cube_edata.c refine_map/compute_mesh_edata.c \
+		refine_map/compute_mesh_vdata.c refine_map/find_edge_mesh.c \
+		refine_map/init_mesh_edata.c refine_map/refine_mesh.c \
+		refine_map/update_mesh_edata.c refine_map/update_mesh_vdata.c \
+		refine_map/refine_map.c refine_map/make_comm_graph.c \
+		refine_part/refine_part.c refine_part/kl_refine.c \
+		refine_part/make_maps_ref.c refine_part/make_terms_ref.c \
+		internal/force_internal.c internal/improve_internal.c \
+		internal/check_internal.c \
+		misc/define_subcubes.c misc/define_submeshes.c \
+		misc/divide_procs.c misc/merge_goals.c misc/make_term_props.c \
+		misc/count.c misc/countup.c misc/countup_cube.c \
+		misc/countup_mesh.c misc/make_subgoal.c \
+		misc/find_maxdeg.c misc/make_maps.c misc/make_setlists.c \
+		misc/sequence.c misc/perturb.c misc/simple_part.c \
+		misc/time_kernels.c misc/timing.c \
+		util/affirm.c util/array_alloc_2D.c util/bit_reverse.c \
+		util/checkpnt.c util/cpvec.c util/dot.c \
+		util/doubleout.c util/input_int.c util/gray.c \
+		util/machine_params.c util/makevwsqrt.c util/mkvec.c util/norm.c \
+                util/normalize.c util/mergesort.c \
+                util/randomize.c util/smalloc.c util/bail.c \
+		util/scadd.c util/seconds.c util/setvec.c util/shell_sort.c \
+		util/strout.c util/tri_prod.c util/true_or_false.c \
+		util/update.c  util/vecout.c util/vecran.c \
+                util/vecscale.c 
+
+FILES.o=	$(FILES.c:.c=.o) 
+FILESMINUSBLAS.o=	$(FILESMINUSBLAS.c:.c=.o) 
+
+
+${DEST}:	${FILES.c} chaco.a Makefile
+		${CC} ${OFLAGS} chaco.a -lm -o ${DEST}
+
+chaco.a:	${FILES.o}
+		${AR} $@ ${FILES.o} ; ${RANLIB} $@
+
+chacominusblas.a:	${FILESMINUSBLAS.o}
+		${AR} $@ ${FILESMINUSBLAS.o} ; ${RANLIB} $@
+
+lint:
+		lint ${IFLAG} ${FILES.c} -lm
+
+alint:
+		alint ${IFLAG} ${FILES.c} -lm
+
+clean:
+		rm -f */*.o ${DEST_DIR}/core
+
+.c.o:
+		${CC} -c ${IFLAG} ${CFLAGS} -o $*.o $*.c
Index: /issm/trunk/externalpackages/chaco/configs/win/msys2/mingw64/code/main/interface.c
===================================================================
--- /issm/trunk/externalpackages/chaco/configs/win/msys2/mingw64/code/main/interface.c	(revision 28013)
+++ /issm/trunk/externalpackages/chaco/configs/win/msys2/mingw64/code/main/interface.c	(revision 28013)
@@ -0,0 +1,234 @@
+/* This software was developed by Bruce Hendrickson and Robert Leland   *
+ * at Sandia National Laboratories under US Department of Energy        *
+ * contract DE-AC04-76DP00789 and is copyrighted by Sandia Corporation. */
+
+#include <stdio.h>
+#include "defs.h"
+#include "structs.h"
+
+int       Using_Main = FALSE;	/* Is main routine being called? */
+
+int       interface(nvtxs, start, adjacency, vwgts, ewgts, x, y, z,
+		              outassignname, outfilename,
+		              assignment,
+		              architecture, ndims_tot, mesh_dims, goal,
+		              global_method, local_method, rqi_flag, vmax, ndims,
+		              eigtol, seed)
+int       nvtxs;		/* number of vertices in full graph */
+int      *start;		/* start of edge list for each vertex */
+int      *adjacency;		/* edge list data */
+int      *vwgts;		/* weights for all vertices */
+float    *ewgts;		/* weights for all edges */
+float    *x, *y, *z;		/* coordinates for inertial method */
+char     *outassignname;	/* name of assignment output file */
+char     *outfilename;		/* output file name */
+short    *assignment;		/* set number of each vtx (length n) */
+int       architecture;		/* 0 => hypercube, d => d-dimensional mesh */
+int       ndims_tot;		/* total number of cube dimensions to divide */
+int       mesh_dims[3];		/* dimensions of mesh of processors */
+double   *goal;			/* desired set sizes for each set */
+int       global_method;	/* global partitioning algorithm */
+int       local_method;		/* local partitioning algorithm */
+int       rqi_flag;		/* should I use RQI/Symmlq eigensolver? */
+int       vmax;			/* how many vertices to coarsen down to? */
+int       ndims;		/* number of eigenvectors (2^d sets) */
+double    eigtol;		/* tolerance on eigenvectors */
+long      seed;			/* for random graph mutations */
+{
+    extern char *PARAMS_FILENAME;	/* name of file with parameter updates */
+    extern int MAKE_VWGTS;	/* make vertex weights equal to degrees? */
+    extern int MATCH_TYPE;      /* matching routine to use */
+    extern int FREE_GRAPH;	/* free graph data structure after reformat? */
+    extern int DEBUG_PARAMS;	/* debug flag for reading parameters */
+    extern int DEBUG_TRACE;	/* trace main execution path */
+    extern double start_time;	/* time routine is entered */
+    extern double reformat_time;/* time spent reformatting graph */
+    FILE     *params_file;	/* file for reading new parameters */
+    struct vtx_data **graph;	/* graph data structure */
+    double    vwgt_sum;		/* sum of vertex weights */
+    double    time;		/* timing variable */
+    float   **coords;		/* coordinates for vertices if used */
+    int      *vptr;		/* loops through vertex weights */
+    int       flag;		/* return code from balance */
+    int       nedges;		/* number of edges in graph */
+    int       using_vwgts;	/* are vertex weights being used? */
+    int       using_ewgts;	/* are edge weights being used? */
+    int       nsets_tot;	/* total number of sets being created */
+    int       igeom;		/* geometric dimension for inertial method */
+    int       default_goal;	/* using default goals? */
+    int       i;		/* loop counter */
+    double    seconds();
+    double   *smalloc_ret();
+    int       sfree(), submain(), reformat();
+    void      free_graph(), read_params(), strout();
+
+    if (DEBUG_TRACE > 0) {
+	printf("<Entering interface>\n");
+    }
+
+    flag = 0;
+    graph = NULL;
+    coords = NULL;
+
+    if (!Using_Main) {		/* If not using main, need to read parameters file. */
+	start_time = seconds();
+	params_file = fopen(PARAMS_FILENAME, "r");
+	if (params_file == NULL && DEBUG_PARAMS > 1) {
+	    printf("Parameter file `%s' not found; using default parameters.\n",
+		   PARAMS_FILENAME);
+	}
+	read_params(params_file);
+    }
+
+    if (goal == NULL) {	/* If not passed in, default goals have equal set sizes. */
+	default_goal = TRUE;
+	if (architecture == 0)
+	    nsets_tot = 1 << ndims_tot;
+	else if (architecture == 1) 
+	    nsets_tot = mesh_dims[0];
+	else if (architecture == 2) 
+	    nsets_tot = mesh_dims[0] * mesh_dims[1];
+	else if (architecture > 2) 
+	    nsets_tot = mesh_dims[0] * mesh_dims[1] * mesh_dims[2];
+
+	if (MAKE_VWGTS && start != NULL) {
+	    vwgt_sum = start[nvtxs] - start[0] + nvtxs;
+	}
+	else if (vwgts == NULL) {
+	    vwgt_sum = nvtxs;
+	}
+	else {
+	    vwgt_sum = 0;
+	    vptr = vwgts;
+	    for (i = nvtxs; i; i--)
+		vwgt_sum += *(vptr++);
+	}
+
+	vwgt_sum /= nsets_tot;
+	goal = (double *) smalloc_ret((unsigned) nsets_tot * sizeof(double));
+	if (goal == NULL) {
+	    strout("\nERROR: No room to make goals.\n");
+	    flag = 1;
+	    goto skip;
+	}
+	for (i = 0; i < nsets_tot; i++)
+	    goal[i] = vwgt_sum;
+    }
+    else {
+	default_goal = FALSE;
+    }
+
+    if (MAKE_VWGTS) {
+	/* Generate vertex weights equal to degree of node. */
+	if (vwgts != NULL) {
+	    strout("WARNING: Vertex weights being overwritten by vertex degrees.");
+	}
+	vwgts = (int *) smalloc_ret((unsigned) nvtxs * sizeof(int));
+	if (vwgts == NULL) {
+	    strout("\nERROR: No room to make vertex weights.\n");
+	    flag = 1;
+	    goto skip;
+	}
+	if (start != NULL) {
+	    for (i = 0; i < nvtxs; i++)
+	        vwgts[i] = 1 + start[i + 1] - start[i];
+	}
+	else {
+	    for (i = 0; i < nvtxs; i++)
+	        vwgts[i] = 1;
+	}
+    }
+
+    using_vwgts = (vwgts != NULL);
+    using_ewgts = (ewgts != NULL);
+
+    if (start != NULL || vwgts != NULL) {	/* Reformat into our data structure. */
+	time = seconds();
+	flag = reformat(start, adjacency, nvtxs, &nedges, vwgts, ewgts, &graph);
+
+	if (flag) {
+	    strout("\nERROR: No room to reformat graph.\n");
+	    goto skip;
+	}
+
+	reformat_time += seconds() - time;
+    }
+    else {
+	nedges = 0;
+    }
+
+    if (FREE_GRAPH) {		/* Free old graph data structures. */
+	sfree((char *) start);
+	sfree((char *) adjacency);
+	if (vwgts != NULL)
+	    sfree((char *) vwgts);
+	if (ewgts != NULL)
+	    sfree((char *) ewgts);
+	start = NULL;
+	adjacency = NULL;
+	vwgts = NULL;
+	ewgts = NULL;
+    }
+
+
+    if (global_method == 3 ||
+        (MATCH_TYPE == 5 && (global_method == 1 || 
+			     (global_method == 2 && rqi_flag)))) {
+	if (x == NULL) {
+	    igeom = 0;
+	}
+	else {			/* Set up coordinate data structure. */
+	    coords = (float **) smalloc_ret((unsigned) 3 * sizeof(float *));
+	    if (coords == NULL) {
+		strout("\nERROR: No room to make coordinate array.\n");
+		flag = 1;
+		goto skip;
+	    }
+	    /* Minus 1's are to allow remainder of program to index with 1. */
+	    coords[0] = x - 1;
+	    igeom = 1;
+	    if (y != NULL) {
+		coords[1] = y - 1;
+		igeom = 2;
+		if (z != NULL) {
+		    coords[2] = z - 1;
+		    igeom = 3;
+		}
+	    }
+	}
+    }
+    else {
+	igeom = 0;
+    }
+
+    /* Subtract from assignment to allow code to index from 1. */
+    assignment = assignment - 1;
+    flag = submain(graph, nvtxs, nedges, using_vwgts, using_ewgts, igeom, coords,
+		   outassignname, outfilename,
+		   assignment, goal,
+		   architecture, ndims_tot, mesh_dims,
+		   global_method, local_method, rqi_flag, vmax, ndims,
+		   eigtol, seed);
+
+skip:
+    if (coords != NULL)
+	sfree((char *) coords);
+
+    if (default_goal)
+	sfree((char *) goal);
+
+    if (graph != NULL)
+	free_graph(graph);
+
+    if (flag && FREE_GRAPH) {
+	sfree((char *) start);
+	sfree((char *) adjacency);
+	sfree((char *) vwgts);
+	sfree((char *) ewgts);
+    }
+
+    if (!Using_Main && params_file != NULL)
+	fclose(params_file);
+
+    return (flag);
+}
Index: /issm/trunk/externalpackages/chaco/configs/win/msys2/mingw64/code/util/bail.c
===================================================================
--- /issm/trunk/externalpackages/chaco/configs/win/msys2/mingw64/code/util/bail.c	(revision 28013)
+++ /issm/trunk/externalpackages/chaco/configs/win/msys2/mingw64/code/util/bail.c	(revision 28013)
@@ -0,0 +1,27 @@
+/* This software was developed by Bruce Hendrickson and Robert Leland   *
+ * at Sandia National Laboratories under US Department of Energy        *
+ * contract DE-AC04-76DP00789 and is copyrighted by Sandia Corporation. */
+
+#include	<stdlib.h>
+#include	<stdio.h>
+#include	<string.h>
+#include	"defs.h"
+
+/* Wrapper for exit() - print message and exit with status code. Exit code
+   of 0 indicates normal termination. Exit code of 1 indicates early 
+   termination following detection of some problem. Call with bail(NULL,status) 
+   to suppress message. */ 
+void      bail(msg, status)
+char     *msg;
+int       status;
+{
+    extern FILE *Output_File;		/* Output file or NULL */
+
+    if (msg != NULL && (int) strlen(msg) > 0) {
+        printf("%s\n", msg);
+	if (Output_File != NULL) {
+            fprintf(Output_File, "%s\n", msg);
+	}
+    }
+    exit(status);
+}
Index: /issm/trunk/externalpackages/chaco/configs/win/msys2/mingw64/code/util/seconds.c
===================================================================
--- /issm/trunk/externalpackages/chaco/configs/win/msys2/mingw64/code/util/seconds.c	(revision 28013)
+++ /issm/trunk/externalpackages/chaco/configs/win/msys2/mingw64/code/util/seconds.c	(revision 28013)
@@ -0,0 +1,35 @@
+/* This software was developed by Bruce Hendrickson and Robert Leland   *
+ * at Sandia National Laboratories under US Department of Energy        *
+ * contract DE-AC04-76DP00789 and is copyrighted by Sandia Corporation. */
+
+#if defined(_INTEL_WIN_) || defined(_MSYS2_)
+#include   <time.h>
+#else
+#include   <sys/time.h>
+#include   <sys/resource.h>
+#endif
+
+double    seconds()
+{
+    double    curtime;
+
+#ifdef RUSAGE_SELF
+
+/* This timer is faster and more robust (if it exists). */
+    struct rusage rusage;
+    int getrusage();
+ 
+    getrusage(RUSAGE_SELF, &rusage);
+    curtime = ((rusage.ru_utime.tv_sec + rusage.ru_stime.tv_sec) +
+	    1.0e-6 * (rusage.ru_utime.tv_usec + rusage.ru_stime.tv_usec));
+
+#else
+
+/* ANSI timer, but lower resolution & wraps around after ~36 minutes. */
+
+    curtime = clock()/((double) CLOCKS_PER_SEC);
+
+#endif
+
+    return (curtime);
+}
Index: /issm/trunk/externalpackages/chaco/install-linux.sh
===================================================================
--- /issm/trunk/externalpackages/chaco/install-linux.sh	(revision 28013)
+++ /issm/trunk/externalpackages/chaco/install-linux.sh	(revision 28013)
@@ -0,0 +1,60 @@
+#!/bin/bash
+set -eu
+
+
+## Constants
+#
+VER=2.2
+
+PREFIX="${ISSM_DIR}/externalpackages/chaco/install" # Set to location where external package should be installed
+
+## Environment
+#
+export CFLAGS="-Wno-error=implicit-function-declaration"
+
+# Cleanup
+rm -rf ${PREFIX} src
+mkdir -p ${PREFIX} src
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/Chaco-${VER}.tar.gz" "Chaco-${VER}.tar.gz"
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/docs/chaco_guide.pdf" "chaco_guide.pdf"
+
+# Unpack source
+tar -xvzf Chaco-${VER}.tar.gz
+
+# Move source to 'src' directory
+mv Chaco-${VER}/* src
+rm -rf Chaco-${VER}
+
+# Apply patches
+patch -R -p0 < chaco.patch # Written by diff -rc src ~/Libs/Chaco-${VER} > chaco.patch
+patch src/code/Makefile < patches/Makefile.patch
+
+# Compile
+cd src/code
+if [ $# -eq 0 ]; then
+	make
+else
+	make -j $1
+fi
+make chacominusblas.a
+
+# Clean up objects (but not library or executable)
+make clean
+cd ../..
+
+# Install
+cp -p src/exec/README ${PREFIX}
+cp -p src/exec/User_Params ${PREFIX}
+cp -p src/exec/*.coords ${PREFIX}
+cp -p src/exec/*.graph ${PREFIX}
+mkdir ${PREFIX}/include
+cp -p src/code/main/defs.h ${PREFIX}/include/defs.h
+cp -p src/code/main/params.h ${PREFIX}/include/params.h
+cp -p chaco.h ${PREFIX}/include/chaco.h
+mkdir ${PREFIX}/lib
+mv src/code/chaco.a ${PREFIX}/lib/libchaco.a
+mv src/code/chacominusblas.a ${PREFIX}/lib/libchacominusblas.a
+mkdir ${PREFIX}/exec
+mv src/exec/chaco ${PREFIX}/exec
Index: sm/trunk/externalpackages/chaco/install-mac-sierra.sh
===================================================================
--- /issm/trunk/externalpackages/chaco/install-mac-sierra.sh	(revision 28012)
+++ 	(revision )
@@ -1,55 +1,0 @@
-#!/bin/bash
-set -eu
-
-# Some cleanup
-rm -rf Chaco-2.2
-rm -rf src 
-rm -rf install 
-mkdir src install 
-
-#Download from ISSM server
-$ISSM_DIR/scripts/DownloadExternalPackage.sh 'https://issm.ess.uci.edu/files/externalpackages/Chaco-2.2.tar.gz' 'Chaco-2.2.tar.gz'
-$ISSM_DIR/scripts/DownloadExternalPackage.sh 'https://issm.ess.uci.edu/files/externalpackages/docs/chaco_guide.pdf' 'chaco_guide.pdf'
-
-# Untar 
-tar -xvzf Chaco-2.2.tar.gz
-
-# Move chaco to src directory
-mv Chaco-2.2/* src
-rm -rf Chaco-2.2
-
-# Apply patches (all at once)
-# (written by diff -rc src ~/Libs/Chaco-2.2 > chaco.patch)
-patch -R -p0 < chaco.patch
-
-# Patch src/code/Makefile
-patch ./src/code/Makefile ./patches/Makefile.mac-sierra.patch
-
-
-# Build chaco
-cd src/code
-if [ $# -eq 0 ]; then
-	make
-else
-	make -j $1
-fi
-make chacominusblas.a
-
-# Clean up objects (but not library or executable)
-make clean
-cd ../..
-
-# Populate install directory
-cp -p src/exec/README install
-cp -p src/exec/User_Params install
-cp -p src/exec/*.coords install
-cp -p src/exec/*.graph install
-mkdir install/include
-cp -p src/code/main/defs.h install/include/defs.h
-cp -p src/code/main/params.h install/include/params.h
-cp -p chaco.h install/include/chaco.h
-mkdir install/lib
-mv src/code/chaco.a install/lib/libchaco.a
-mv src/code/chacominusblas.a install/lib/libchacominusblas.a
-mkdir install/exec
-mv src/exec/chaco install/exec
Index: /issm/trunk/externalpackages/chaco/install-mac.sh
===================================================================
--- /issm/trunk/externalpackages/chaco/install-mac.sh	(revision 28013)
+++ /issm/trunk/externalpackages/chaco/install-mac.sh	(revision 28013)
@@ -0,0 +1,61 @@
+#!/bin/bash
+set -eu
+
+
+## Constants
+#
+VER=2.2
+
+PREFIX="${ISSM_DIR}/externalpackages/chaco/install" # Set to location where external package should be installed
+
+## Environment
+#
+export CFLAGS="-Wno-error=implicit-function-declaration"
+
+# Cleanup
+rm -rf ${PREFIX} src
+mkdir -p ${PREFIX} src
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/Chaco-${VER}.tar.gz" "Chaco-${VER}.tar.gz"
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/docs/chaco_guide.pdf" "chaco_guide.pdf"
+
+# Unpack source
+tar -xvzf Chaco-${VER}.tar.gz
+
+# Move source to 'src' directory
+mv Chaco-${VER}/* src
+rm -rf Chaco-${VER}
+
+# Apply patches
+patch -R -p0 < chaco.patch # Written by diff -rc src ~/Libs/Chaco-${VER} > chaco.patch
+patch src/code/Makefile < patches/Makefile.patch
+patch src/code/util/smalloc.c < configs/mac/code/util/smalloc.c.patch
+
+# Compile
+cd src/code
+if [ $# -eq 0 ]; then
+	make
+else
+	make -j $1
+fi
+make chacominusblas.a
+
+# Clean up objects (but not library or executable)
+make clean
+cd ../..
+
+# Install
+cp -p src/exec/README ${PREFIX}
+cp -p src/exec/User_Params ${PREFIX}
+cp -p src/exec/*.coords ${PREFIX}
+cp -p src/exec/*.graph ${PREFIX}
+mkdir ${PREFIX}/include
+cp -p src/code/main/defs.h ${PREFIX}/include/defs.h
+cp -p src/code/main/params.h ${PREFIX}/include/params.h
+cp -p chaco.h ${PREFIX}/include/chaco.h
+mkdir ${PREFIX}/lib
+mv src/code/chaco.a ${PREFIX}/lib/libchaco.a
+mv src/code/chacominusblas.a ${PREFIX}/lib/libchacominusblas.a
+mkdir ${PREFIX}/exec
+mv src/exec/chaco ${PREFIX}/exec
Index: /issm/trunk/externalpackages/chaco/install-win-msys2-mingw.sh
===================================================================
--- /issm/trunk/externalpackages/chaco/install-win-msys2-mingw.sh	(revision 28012)
+++ /issm/trunk/externalpackages/chaco/install-win-msys2-mingw.sh	(revision 28013)
@@ -24,13 +24,12 @@
 rm -rf Chaco-${VER}
 
-# Apply patches (all at once)
-# (written by diff -rc src ~/Libs/Chaco-${VER} > chaco.patch)
-patch -R -p0 < chaco.patch
+# Apply patches
+patch -R -p0 < chaco.patch  # Written by diff -rc src ~/Libs/Chaco-${VER} > chaco.patch
 
 # Copy customized source and configuration files to 'src' directory
-cp configs/win/msys2/mingw64/src/code/Makefile src/code
-cp configs/win/msys2/mingw64/src/code/main/interface.c src/code/main
-cp configs/win/msys2/mingw64/src/code/util/bail.c src/code/util
-cp configs/win/msys2/mingw64/src/code/util/seconds.c src/code/util
+cp configs/win/msys2/mingw64/code/Makefile src/code
+cp configs/win/msys2/mingw64/code/main/interface.c src/code/main
+cp configs/win/msys2/mingw64/code/util/bail.c src/code/util
+cp configs/win/msys2/mingw64/code/util/seconds.c src/code/util
 
 # Compile
Index: sm/trunk/externalpackages/chaco/install.sh
===================================================================
--- /issm/trunk/externalpackages/chaco/install.sh	(revision 28012)
+++ 	(revision )
@@ -1,61 +1,0 @@
-#!/bin/bash
-set -eu
-
-
-## Constants
-#
-VER=2.2
-
-PREFIX="${ISSM_DIR}/externalpackages/chaco/install" # Set to location where external package should be installed
-
-## Environment
-#
-export CFLAGS="-Wno-error=implicit-function-declaration"
-
-# Cleanup
-rm -rf ${PREFIX} src
-mkdir -p ${PREFIX} src
-
-# Download source
-$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/Chaco-${VER}.tar.gz" "Chaco-${VER}.tar.gz"
-$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/docs/chaco_guide.pdf" "chaco_guide.pdf"
-
-# Unpack source
-tar -xvzf Chaco-${VER}.tar.gz
-
-# Move source to 'src' directory
-mv Chaco-${VER}/* src
-rm -rf Chaco-${VER}
-
-# Apply patches (all at once)
-# (written by diff -rc src ~/Libs/Chaco-${VER} > chaco.patch)
-patch -R -p0 < chaco.patch
-patch src/code/Makefile patches/Makefile.patch
-
-# Compile
-cd src/code
-if [ $# -eq 0 ]; then
-	make
-else
-	make -j $1
-fi
-make chacominusblas.a
-
-# Clean up objects (but not library or executable)
-make clean
-cd ../..
-
-# Install
-cp -p src/exec/README ${PREFIX}
-cp -p src/exec/User_Params ${PREFIX}
-cp -p src/exec/*.coords ${PREFIX}
-cp -p src/exec/*.graph ${PREFIX}
-mkdir ${PREFIX}/include
-cp -p src/code/main/defs.h ${PREFIX}/include/defs.h
-cp -p src/code/main/params.h ${PREFIX}/include/params.h
-cp -p chaco.h ${PREFIX}/include/chaco.h
-mkdir ${PREFIX}/lib
-mv src/code/chaco.a ${PREFIX}/lib/libchaco.a
-mv src/code/chacominusblas.a ${PREFIX}/lib/libchacominusblas.a
-mkdir ${PREFIX}/exec
-mv src/exec/chaco ${PREFIX}/exec
Index: /issm/trunk/externalpackages/codipack/install.sh
===================================================================
--- /issm/trunk/externalpackages/codipack/install.sh	(revision 28012)
+++ /issm/trunk/externalpackages/codipack/install.sh	(revision 28013)
@@ -8,5 +8,5 @@
 
 #Download development version
-svn co https://github.com/SciCompKL/CoDiPack.git/trunk install
+git clone https://github.com/SciCompKL/CoDiPack.git install
 
 ## Download source
Index: /issm/trunk/externalpackages/dakota/install-6.2-linux-static.sh
===================================================================
--- /issm/trunk/externalpackages/dakota/install-6.2-linux-static.sh	(revision 28012)
+++ /issm/trunk/externalpackages/dakota/install-6.2-linux-static.sh	(revision 28013)
@@ -13,8 +13,11 @@
 # TODO:
 # - Move this to etc/environment.sh
+# - Test if -static-libgfortran flag will avoid all of this.
+# - Otherwise, refactor this to work with other gfortran installations.
 #
-LIBGFORTRAN=$(find /usr -name libgfortran* | egrep -n libgfortran.a | sed "s/[0-9]*://g" | head -1)
+echo "Finding libgfortran..."
+LIBGFORTRAN=$(find /usr -name libgfortran* 2>/dev/null | egrep -n libgfortran.a | sed "s/[0-9]*://g" | head -1)
 LIBGFORTRAN_ROOT=${LIBGFORTRAN%/*}
-LIBGCC=$(find ${LIBGFORTRAN_ROOT} -name libgcc* | egrep -n libgcc.a | sed "s/[0-9]*://g" | head -1
+LIBGCC=$(find ${LIBGFORTRAN_ROOT} -name libgcc* 2>/dev/null | egrep -n libgcc.a | sed "s/[0-9]*://g" | head -1)
 
 ## Environment
Index: /issm/trunk/externalpackages/dakota/install-6.2-linux.sh
===================================================================
--- /issm/trunk/externalpackages/dakota/install-6.2-linux.sh	(revision 28012)
+++ /issm/trunk/externalpackages/dakota/install-6.2-linux.sh	(revision 28013)
@@ -9,11 +9,20 @@
 PREFIX="${ISSM_DIR}/externalpackages/dakota/install" # Set to location where external package should be installed
 
+# Find libgfortran so that we do not have to hardcode it.
+#
+# TODO:
+# - Move this to etc/environment.sh
+#
+echo "Finding libgfortran..."
+LIBGFORTRAN=$(find /usr -name libgfortran* 2>/dev/null | egrep -n libgfortran.a | sed "s/[0-9]*://g" | head -1)
+LIBGFORTRAN_ROOT=${LIBGFORTRAN%/*}
+
 ## Environment
 #
-export BLAS_LIBS="-L${BLAS_ROOT}/lib -lfblas -L/usr/lib/x86_64-linux-gnu -lgfortran" # Need to export BLAS_LIBS *and* pass it as an option to CMake to ensure that external packages also find it
+export BLAS_LIBS="-L${BLAS_ROOT}/lib -lfblas -L${LIBGFORTRAN_ROOT} -lgfortran" # Need to export BLAS_LIBS *and* pass it as an option to CMake to ensure that external packages also find it
 export DAK_BUILD=${ISSM_DIR}/externalpackages/dakota/build # DO NOT CHANGE THIS
 export DAK_INSTALL=${PREFIX} # DO NOT CHANGE THIS
 export DAK_SRC=${ISSM_DIR}/externalpackages/dakota/src # DO NOT CHANGE THIS
-export LAPACK_LIBS="-L${LAPACK_ROOT}/lib -lflapack -L/usr/lib/x86_64-linux-gnu -lgfortran" # Need to export LAPACK_LIBS *and* pass it as an option to CMake to ensure that external packages also find it
+export LAPACK_LIBS="-L${LAPACK_ROOT}/lib -lflapack -L${LIBGFORTRAN_ROOT} -lgfortran" # Need to export LAPACK_LIBS *and* pass it as an option to CMake to ensure that external packages also find it
 
 # Cleanup
Index: /issm/trunk/externalpackages/dakota/install-6.2-mac-static.sh
===================================================================
--- /issm/trunk/externalpackages/dakota/install-6.2-mac-static.sh	(revision 28012)
+++ /issm/trunk/externalpackages/dakota/install-6.2-mac-static.sh	(revision 28013)
@@ -9,17 +9,15 @@
 PREFIX="${ISSM_DIR}/externalpackages/dakota/install" # Set to location where external package should be installed
 
-# Find libgfortran and libgcc so we do not have to hardcode them.
-#
-# Should retrieve a copy of gfortran that is compiled from source before 
-# returning one that is installed via package manager.
+# Find libgfortran and libgcc so we do not have to hardcode them
 #
 # TODO:
 # - Move this to etc/environment.sh
-# - Test if -static-libgfortran flag will avoid all of this
-# - Otherwise, refactor this to work with other gfortran installations
+# - Test if -static-libgfortran flag will avoid all of this.
+# - Otherwise, refactor this to work with other gfortran installations.
 #
-LIBGFORTRAN=$(mdfind -onlyin /usr -name libgfortran | egrep -n libgfortran.a | egrep -v i386 | sed "s/[0-9]*://g" | head -1)
+echo "Finding libgfortran..."
+LIBGFORTRAN=$(find /usr /opt -name libgfortran* 2>/dev/null | egrep -n libgfortran.a | egrep -v i386 | sed "s/[0-9]*://g" | head -1)
 LIBGFORTRAN_ROOT=${LIBGFORTRAN%/*}
-LIBGCC=$(mdfind -onlyin ${LIBGFORTRAN_ROOT} -name libgcc | egrep -n libgcc.a | egrep -v i386 | sed "s/[0-9]*://g" | head -1)
+LIBGCC=$(find ${LIBGFORTRAN_ROOT} -name libgcc* 2>/dev/null | egrep -n libgcc.a | egrep -v i386 | sed "s/[0-9]*://g" | head -1)
 
 ## Environment
@@ -30,4 +28,5 @@
 export DAK_SRC=${ISSM_DIR}/externalpackages/dakota/src # DO NOT CHANGE THIS
 export LAPACK_LIBS="-L${LAPACK_ROOT}/lib -lflapack ${LIBGFORTRAN_ROOT}/libgfortran.a ${LIBGFORTRAN_ROOT}/libquadmath.a ${LIBGCC}" # Need to export LAPACK_LIBS *and* pass it as an option to CMake to ensure that external packages also find it
+export LDFLAGS="-framework CoreFoundation"
 
 # Cleanup
@@ -75,4 +74,10 @@
 
 # Configure
+#
+# NOTE:
+# - The -w option has been added to CMAKE_C_FLAGS, CMAKE_CXX_FLAGS, and 
+#	CMAKE_Fortran_FLAGS. This should be removed for more recent versions of 
+#	Dakota.
+#
 cd ${DAK_BUILD}
 cmake \
@@ -80,10 +85,10 @@
 	-DBUILD_STATIC_LIBS=ON \
 	-DCMAKE_C_COMPILER=${MPI_HOME}/bin/mpicc \
-	-DCMAKE_C_FLAGS="-fPIC -Wno-error=implicit-function-declaration" \
+	-DCMAKE_C_FLAGS="-fPIC -Wno-error=implicit-function-declaration -w" \
 	-DCMAKE_CXX_COMPILER=${MPI_HOME}/bin/mpicxx \
-	-DCMAKE_CXX_FLAGS="-fPIC -fdelayed-template-parsing" \
+	-DCMAKE_CXX_FLAGS="-fPIC -fdelayed-template-parsing -w" \
 	-DCMAKE_CXX_STANDARD="11" \
 	-DCMAKE_Fortran_COMPILER=${MPI_HOME}/bin/mpif77 \
-	-DCMAKE_Fortran_FLAGS="-fPIC -fallow-argument-mismatch" \
+	-DCMAKE_Fortran_FLAGS="-fPIC -fallow-argument-mismatch -w" \
 	-DBoost_NO_BOOST_CMAKE=TRUE \
 	-DHAVE_ACRO=OFF \
Index: /issm/trunk/externalpackages/dakota/install-6.2-mac.sh
===================================================================
--- /issm/trunk/externalpackages/dakota/install-6.2-mac.sh	(revision 28012)
+++ /issm/trunk/externalpackages/dakota/install-6.2-mac.sh	(revision 28013)
@@ -11,11 +11,9 @@
 # Find libgfortran so that we do not have to hardcode it.
 #
-# Should retrieve a copy of gfortran that is compiled from source before 
-# returning one that is installed via package manager.
-#
 # TODO:
 # - Move this to etc/environment.sh
 #
-LIBGFORTRAN=$(mdfind -onlyin /usr -name libgfortran | egrep -n libgfortran.a | egrep -v i386 | sed "s/[0-9]*://g" | head -1)
+echo "Finding libgfortran..."
+LIBGFORTRAN=$(find /usr /opt -name libgfortran* 2>/dev/null | egrep -n libgfortran.a | egrep -v i386 | sed "s/[0-9]*://g" | head -1)
 LIBGFORTRAN_ROOT=${LIBGFORTRAN%/*}
 
@@ -27,4 +25,5 @@
 export DAK_SRC=${ISSM_DIR}/externalpackages/dakota/src # DO NOT CHANGE THIS
 export LAPACK_LIBS="-L${LAPACK_ROOT}/lib -lflapack -L${LIBGFORTRAN_ROOT} -lgfortran" # Need to export LAPACK_LIBS *and* pass it as an option to CMake to ensure that external packages also find it
+export LDFLAGS="-framework CoreFoundation"
 
 # Cleanup
@@ -59,5 +58,15 @@
 sed -i'' -e 's|SET(PythonInterp_FIND_VERSION|#SET(PythonInterp_FIND_VERSION|' ${DAK_SRC}/packages/teuchos/cmake/tribits/package_arch/TribitsFindPythonInterp.cmake
 
+echo "Debug:"
+echo "GSL_HOME : ${GSL_HOME}"
+echo "CPATH : ${CPATH}"
+
 # Configure
+#
+# NOTE:
+# - The -w option has been added to CMAKE_C_FLAGS, CMAKE_CXX_FLAGS, and 
+#	CMAKE_Fortran_FLAGS. This should be removed for more recent versions of 
+#	Dakota.
+#
 cd ${DAK_BUILD}
 cmake \
@@ -65,6 +74,7 @@
 	-DBUILD_STATIC_LIBS=OFF \
 	-DCMAKE_C_COMPILER=${MPI_HOME}/bin/mpicc \
+	-DCMAKE_C_FLAGS="-w" \
 	-DCMAKE_CXX_COMPILER=${MPI_HOME}/bin/mpicxx \
-	-DCMAKE_CXX_FLAGS="-fdelayed-template-parsing" \
+	-DCMAKE_CXX_FLAGS="-fdelayed-template-parsing -w" \
 	-DCMAKE_CXX_STANDARD="11" \
 	-DCMAKE_Fortran_COMPILER=${MPI_HOME}/bin/mpif77 \
@@ -101,5 +111,6 @@
 ## Patch install names for certain libraries
 #
-# TODO: Figure out how to reconfigure source to apply these install names at compile time
+# TODO: Figure out how to reconfigure source to apply these install names at 
+# 		compile time
 #
 install_name_tool -change libdakota_src_fortran.dylib ${DAK_INSTALL}/lib/libdakota_src_fortran.dylib libdakota_src.dylib
@@ -120,2 +131,11 @@
 install_name_tool -change libteuchos.dylib ${DAK_INSTALL}/lib/libteuchos.dylib libpecos_src.dylib
 install_name_tool -change libsurfpack_fortran.dylib ${DAK_INSTALL}/lib/libsurfpack_fortran.dylib libsurfpack.dylib
+
+## Add LIBGFORTRAN_ROOT to rpath for libraries that need it
+#
+# TODO: Figure out how to reconfigure source to add to rpath at compile time
+#
+install_name_tool -add_rpath ${LIBGFORTRAN_ROOT} libpecos.dylib
+install_name_tool -add_rpath ${LIBGFORTRAN_ROOT} libteuchos.dylib
+install_name_tool -add_rpath ${LIBGFORTRAN_ROOT} liboptpp.dylib
+
Index: sm/trunk/externalpackages/dakota/install-6.2-macosx64-catalina.sh
===================================================================
--- /issm/trunk/externalpackages/dakota/install-6.2-macosx64-catalina.sh	(revision 28012)
+++ 	(revision )
@@ -1,76 +1,0 @@
-#!/bin/bash
-set -eu
-
-#Some cleanup
-rm -rf Dakota
-rm -rf src 
-rm -rf build 
-rm -rf install 
-mkdir src build install 
-
-#Download from ISSM server
-#$ISSM_DIR/scripts/DownloadExternalPackage.sh 'https://issm.ess.uci.edu/files/externalpackages/dakota-6.2-public.src.tar.gz' 'dakota-6.2-public-src.tar.gz'
-
-#Untar 
-tar -zxvf dakota-6.2-public-src.tar.gz
-
-#Move Dakota to src directory
-mv dakota-6.2.0.src/* src
-rm -rf dakota-6.2.0.src
-
-#GSL: 
-export GSL_HOME=$HOME/externalpackages/gsl/install
-
-#Set up Dakota cmake variables and config
-export DAK_SRC=$HOME/externalpackages/dakota/src
-export DAK_BUILD=$HOME/externalpackages/dakota/build
-export MPIHOME=$HOME/externalpackages/mpich/install
-cp $DAK_SRC/cmake/BuildDakotaTemplate.cmake $DAK_SRC/cmake/BuildDakotaCustom.cmake
-patch $DAK_SRC/cmake/BuildDakotaCustom.cmake configs/6.2/BuildDakotaCustom.cmake.mac.patch
-patch $DAK_SRC/cmake/DakotaDev.cmake configs/6.2/DakotaDev.cmake.patch
-patch $DAK_SRC/CMakeLists.txt configs/6.2/CMakeLists.txt.patch
-
-#Apply patches
-patch src/src/NonDSampling.cpp configs/6.2/NonDSampling.cpp.patch
-patch src/src/NonDLocalReliability.cpp configs/6.2/NonDLocalReliability.cpp.patch
-patch src/packages/pecos/src/pecos_global_defs.hpp configs/6.2/pecos_global_defs.hpp.patch
-patch src/packages/VPISparseGrid/src/sandia_rules.cpp configs/6.2/sandia_rules.cpp.patch
-
-export BOOST_ROOT=$HOME/externalpackages/boost/install
-
-
-#Configure dakota
-# Set your local gcc compiler here
-cd $DAK_BUILD
-cmake -DBoost_NO_BOOST_CMAKE=TRUE \
-	-DBoost_NO_SYSTEM_PATHS=TRUE \
-	-DBOOST_ROOT:PATHNAME=$BOOST_ROOT \
-	-DBoost_LIBRARY_DIRS:FILEPATH=${BOOST_ROOT}/lib \
-	-D CMAKE_C_COMPILER=$HOME/externalpackages/mpich/install/bin/mpicc \
-	-D CMAKE_CXX_COMPILER=$HOME/externalpackages/mpich/install/bin/mpicxx \
-	-D CMAKE_Fortran_COMPILER=$HOME/externalpackages/mpich/install/bin/mpif77 \
-	-D CMAKE_CXX_FLAGS=-fdelayed-template-parsing \
-	-DHAVE_ACRO=off \
-	-DHAVE_JEGA=off \
-	-DDAKOTA_HAVE_GSL=on \
-	-DHAVE_QUESO=on \
-	-C $DAK_SRC/cmake/BuildDakotaCustom.cmake \
-	-C $DAK_SRC/cmake/DakotaDev.cmake \
-	$DAK_SRC
-cd ..
-
-# Snowleopard: Mpi should be made with these compilers
-#-DCMAKE_CXX_COMPILER=/usr/bin/g++ -DCMAKE_CC_COMPILER=/usr/bin/gcc \
-#-DCMAKE_Fortran_COMPILER=/usr/local/gfortran/bin/x86_64-apple-darwin10-gfortran \
-
-#Compile and install dakota
-cd $DAK_BUILD
-if [ $# -eq 0 ];
-then
-	make
-	make install
-else
-	make -j $1
-	make -j $1 install
-fi
-cd ..
Index: sm/trunk/externalpackages/dakota/install-6.2-macosx64-highsierra.sh
===================================================================
--- /issm/trunk/externalpackages/dakota/install-6.2-macosx64-highsierra.sh	(revision 28012)
+++ 	(revision )
@@ -1,70 +1,0 @@
-#!/bin/bash
-set -eu
-
-#Some cleanup
-rm -rf Dakota
-rm -rf src 
-rm -rf build 
-rm -rf install 
-mkdir src build install 
-
-#Download from ISSM server
-$ISSM_DIR/scripts/DownloadExternalPackage.sh 'https://issm.ess.uci.edu/files/externalpackages/dakota-6.2-public.src.tar.gz' 'dakota-6.2-public-src.tar.gz'
-
-#Untar 
-tar -zxvf dakota-6.2-public-src.tar.gz
-
-#Move Dakota to src directory
-mv dakota-6.2.0.src/* src
-rm -rf dakota-6.2.0.src
-
-#Set up Dakota cmake variables and config
-export DAK_SRC=$ISSM_DIR/externalpackages/dakota/src
-export DAK_BUILD=$ISSM_DIR/externalpackages/dakota/build
-export MPIHOME=$ISSM_DIR/externalpackages/mpich/install
-cp $DAK_SRC/cmake/BuildDakotaTemplate.cmake $DAK_SRC/cmake/BuildDakotaCustom.cmake
-patch $DAK_SRC/cmake/BuildDakotaCustom.cmake configs/6.2/BuildDakotaCustom.cmake.mac.patch
-patch $DAK_SRC/cmake/DakotaDev.cmake configs/6.2/DakotaDev.cmake.patch
-patch $DAK_SRC/CMakeLists.txt configs/6.2/CMakeLists.txt.patch
-
-#Apply patches
-patch src/src/NonDSampling.cpp configs/6.2/NonDSampling.cpp.patch
-patch src/src/NonDLocalReliability.cpp configs/6.2/NonDLocalReliability.cpp.patch
-patch src/packages/pecos/src/pecos_global_defs.hpp configs/6.2/pecos_global_defs.hpp.patch
-patch src/packages/VPISparseGrid/src/sandia_rules.cpp configs/6.2/sandia_rules.cpp.patch
-
-export BOOST_ROOT=$ISSM_DIR/externalpackages/boost/install
-
-#Configure dakota
-# Set your local gcc compiler here
-cd $DAK_BUILD
-cmake -DBoost_NO_BOOST_CMAKE=TRUE \
-	-DBoost_NO_SYSTEM_PATHS=TRUE \
-	-DBOOST_ROOT:PATHNAME=$BOOST_ROOT \
-	-DBoost_LIBRARY_DIRS:FILEPATH=${BOOST_ROOT}/lib \
-	-D CMAKE_C_COMPILER=$ISSM_DIR/externalpackages/mpich/install/bin/mpicc \
-	-D CMAKE_CXX_COMPILER=$ISSM_DIR/externalpackages/mpich/install/bin/mpicxx \
-	-D CMAKE_Fortran_COMPILER=$ISSM_DIR/externalpackages/mpich/install/bin/mpif77 \
-	-D CMAKE_CXX_FLAGS=-fdelayed-template-parsing \
-	-DHAVE_ACRO=off \
-	-DHAVE_JEGA=off \
-	-C $DAK_SRC/cmake/BuildDakotaCustom.cmake \
-	-C $DAK_SRC/cmake/DakotaDev.cmake \
-	$DAK_SRC
-cd ..
-
-# Snowleopard: Mpi should be made with these compilers
-#-DCMAKE_CXX_COMPILER=/usr/bin/g++ -DCMAKE_CC_COMPILER=/usr/bin/gcc \
-#-DCMAKE_Fortran_COMPILER=/usr/local/gfortran/bin/x86_64-apple-darwin10-gfortran \
-
-#Compile and install dakota
-cd $DAK_BUILD
-if [ $# -eq 0 ];
-then
-	make
-	make install
-else
-	make -j $1
-	make -j $1 install
-fi
-cd ..
Index: sm/trunk/externalpackages/dakota/install-6.2-macosx64-snowleopard.sh
===================================================================
--- /issm/trunk/externalpackages/dakota/install-6.2-macosx64-snowleopard.sh	(revision 28012)
+++ 	(revision )
@@ -1,69 +1,0 @@
-#!/bin/bash
-set -eu
-
-#Some cleanup
-rm -rf Dakota
-rm -rf src 
-rm -rf build 
-rm -rf install 
-mkdir src build install 
-
-#Download from ISSM server
-$ISSM_DIR/scripts/DownloadExternalPackage.sh 'https://issm.ess.uci.edu/files/externalpackages/dakota-6.2-public.src.tar.gz' 'dakota-6.2-public-src.tar.gz'
-
-#Untar 
-tar -zxvf dakota-6.2-public-src.tar.gz
-
-#Move Dakota to src directory
-mv dakota-6.2.0.src/* src
-rm -rf dakota-6.2.0.src
-
-#Set up Dakota cmake variables and config
-export DAK_SRC=$ISSM_DIR/externalpackages/dakota/src
-export DAK_BUILD=$ISSM_DIR/externalpackages/dakota/build
-export MPIHOME=$ISSM_DIR/externalpackages/mpich/install
-cp $DAK_SRC/cmake/BuildDakotaTemplate.cmake $DAK_SRC/cmake/BuildDakotaCustom.cmake
-patch $DAK_SRC/cmake/BuildDakotaCustom.cmake configs/6.2/BuildDakotaCustom.cmake.mac.patch
-patch $DAK_SRC/cmake/DakotaDev.cmake configs/6.2/DakotaDev.cmake.patch
-patch $DAK_SRC/CMakeLists.txt configs/6.2/CMakeLists.txt.patch
-
-#Apply patches
-patch src/src/NonDSampling.cpp configs/6.2/NonDSampling.cpp.patch
-patch src/src/NonDLocalReliability.cpp configs/6.2/NonDLocalReliability.cpp.patch
-patch src/packages/pecos/src/pecos_global_defs.hpp configs/6.2/pecos_global_defs.hpp.patch
-
-export BOOST_ROOT=$ISSM_DIR/externalpackages/boost/install
-
-#Configure dakota
-# Set your local gcc compiler here
-cd $DAK_BUILD
-cmake -DBoost_NO_BOOST_CMAKE=TRUE \
-	-DBoost_NO_SYSTEM_PATHS=TRUE \
-	-DBOOST_ROOT:PATHNAME=$BOOST_ROOT \
-	-DBoost_LIBRARY_DIRS:FILEPATH=${BOOST_ROOT}/lib \
-	-D CMAKE_C_COMPILER=$ISSM_DIR/externalpackages/mpich/install/bin/mpicc \
-	-D CMAKE_CXX_COMPILER=$ISSM_DIR/externalpackages/mpich/install/bin/mpicxx \
-	-D CMAKE_Fortran_COMPILER=$ISSM_DIR/externalpackages/mpich/install/bin/mpif77 \
-	-DHAVE_ACRO=off \
-	-DHAVE_JEGA=off \
-	-D LDFLAGS="-L/usr/lib/ -lstdc++ -L/usr/local/gfortran/lib/gcc/x86_64-apple-darwin10/4.6.2/ -lgfortran" \
-	-C $DAK_SRC/cmake/BuildDakotaCustom.cmake \
-	-C $DAK_SRC/cmake/DakotaDev.cmake \
-	$DAK_SRC
-cd ..
-
-# Snowleopard: Mpi should be made with these compilers
-#-DCMAKE_CXX_COMPILER=/usr/bin/g++ -DCMAKE_CC_COMPILER=/usr/bin/gcc \
-#-DCMAKE_Fortran_COMPILER=/usr/local/gfortran/bin/x86_64-apple-darwin10-gfortran \
-
-#Compile and install dakota
-cd $DAK_BUILD
-if [ $# -eq 0 ];
-then
-	make
-	make install
-else
-	make -j $1
-	make -j $1 install
-fi
-cd ..
Index: sm/trunk/externalpackages/dakota/install-6.2-macosx64-yosemite.sh
===================================================================
--- /issm/trunk/externalpackages/dakota/install-6.2-macosx64-yosemite.sh	(revision 28012)
+++ 	(revision )
@@ -1,63 +1,0 @@
-#!/bin/bash
-set -eu
-
-#Some cleanup
-rm -rf Dakota
-rm -rf src 
-rm -rf build 
-rm -rf install 
-mkdir src build install 
-
-#Download from ISSM server
-$ISSM_DIR/scripts/DownloadExternalPackage.sh 'https://issm.ess.uci.edu/files/externalpackages/dakota-6.2-public.src.tar.gz' 'dakota-6.2-public-src.tar.gz'
-
-#Untar 
-tar -zxvf dakota-6.2-public-src.tar.gz
-
-#Move Dakota to src directory
-mv dakota-6.2.0.src/* src
-rm -rf dakota-6.2.0.src
-
-#Set up Dakota cmake variables and config
-export DAK_SRC=$ISSM_DIR/externalpackages/dakota/src
-export DAK_BUILD=$ISSM_DIR/externalpackages/dakota/build
-export MPIHOME=$ISSM_DIR/externalpackages/mpich/install
-cp $DAK_SRC/cmake/BuildDakotaTemplate.cmake $DAK_SRC/cmake/BuildDakotaCustom.cmake
-patch $DAK_SRC/cmake/BuildDakotaCustom.cmake configs/6.2/BuildDakotaCustom.cmake.yosemite.patch
-patch $DAK_SRC/cmake/DakotaDev.cmake configs/6.2/DakotaDev.cmake.patch
-patch $DAK_SRC/CMakeLists.txt configs/6.2/CMakeLists.txt.patch
-
-#Apply patches
-patch src/src/NonDSampling.cpp configs/6.2/NonDSampling.cpp.patch
-patch src/src/NonDLocalReliability.cpp configs/6.2/NonDLocalReliability.cpp.patch
-patch src/packages/pecos/src/pecos_global_defs.hpp configs/6.2/pecos_global_defs.hpp.patch
-
-#Configure dakota
-cd $DAK_BUILD
-
-cmake -D CMAKE_C_COMPILER=$ISSM_DIR/externalpackages/mpich/install/bin/mpicc \
-	   -D CMAKE_CXX_COMPILER=$ISSM_DIR/externalpackages/mpich/install/bin/mpicxx \
-	   -D CMAKE_Fortran_COMPILER=$ISSM_DIR/externalpackages/mpich/install/bin/mpif77 \
-		-DHAVE_ACRO=off \
-		-DHAVE_JEGA=off \
-		-DBLAS_LIBS=$ISSM_DIR/externalpackages/petsc/install/lib/libfblas.a \
-		-DLAPACK_LIBS=$ISSM_DIR/externalpackages/petsc/install/lib/libflapack.a \
-		-C $DAK_SRC/cmake/BuildDakotaCustom.cmake \
-		-C $DAK_SRC/cmake/DakotaDev.cmake \
-		$DAK_SRC
-cd ..
-
-#Make sure to include --download-fblaslapack=1 \ in petsc configure script to use the petsc blas and dlapack libs
-#-DBLAS_LIBS=$ISSM_DIR/externalpackages/petsc/install/lib/libfblas.a -DLAPACK_LIBS=$ISSM_DIR/externalpackages/petsc/install/lib/libflapack.a
-
-#Compile and install dakota
-cd $DAK_BUILD
-if [ $# -eq 0 ];
-then
-	make
-	make install
-else
-	make -j $1
-	make -j $1 install
-fi
-cd ..
Index: /issm/trunk/externalpackages/dakota/install-6.2-pleiades_toss4.sh
===================================================================
--- /issm/trunk/externalpackages/dakota/install-6.2-pleiades_toss4.sh	(revision 28013)
+++ /issm/trunk/externalpackages/dakota/install-6.2-pleiades_toss4.sh	(revision 28013)
@@ -0,0 +1,78 @@
+#!/bin/bash
+set -eu
+
+
+## Constants
+#
+VER="6.2"
+
+PREFIX="${ISSM_DIR}/externalpackages/dakota/install" # Set to location where external package should be installed
+
+## Environment
+#
+export BLAS_LIBS="-L/nasa/intel/Compiler/2018.3.222/compilers_and_libraries_2018.3.222/linux/mkl/lib/intel64/ -lmkl_intel_lp64 -lmkl_sequential -lmkl_core" # Need to export BLAS_LIBS *and* pass it as an option to CMake to ensure that external packages also find it; should upate to /nasa/intel/Compiler/2021.4.0/mkl/2021.4.0/lib/intel64
+export CXXFLAGS='-std=c++98'
+export DAK_BUILD=${ISSM_DIR}/externalpackages/dakota/build # DO NOT CHANGE THIS
+export DAK_INSTALL=${PREFIX} # DO NOT CHANGE THIS
+export DAK_SRC=${ISSM_DIR}/externalpackages/dakota/src # DO NOT CHANGE THIS
+export LAPACK_LIBS="-L/nasa/intel/Compiler/2018.3.222/compilers_and_libraries_2018.3.222/linux/mkl/lib/intel64/lib/intel64/ -lmkl_intel_lp64 -lmkl_sequential -lmkl_core" # Need to export LAPACK_LIBS *and* pass it as an option to CMake to ensure that external packages also find it; should upate to /nasa/intel/Compiler/2021.4.0/mkl/2021.4.0/lib/intel64
+
+# Cleanup
+rm -rf ${DAK_BUILD} ${DAK_INSTALL} ${DAK_SRC}
+mkdir -p ${DAK_BUILD} ${DAK_INSTALL} ${DAK_SRC}
+
+# Download source
+${ISSM_DIR}/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/dakota-${VER}-public.src.tar.gz" "dakota-${VER}-public-src.tar.gz"
+
+# Unpack source
+tar -zxvf dakota-${VER}-public-src.tar.gz
+
+# Move source to 'src' directory
+mv dakota-${VER}.0.src/* ${DAK_SRC}
+rm -rf dakota-${VER}.0.src
+
+# Copy customized source and configuration files to 'src' directory
+cp configs/${VER}/packages/DDACE/src/Analyzer/MainEffectsExcelOutput.cpp ${DAK_SRC}/packages/DDACE/src/Analyzer
+cp configs/${VER}/packages/queso/src/misc/src/1DQuadrature.C ${DAK_SRC}/packages/queso/src/misc/src
+cp configs/${VER}/packages/surfpack/src/surfaces/nkm/NKM_KrigingModel.cpp ${DAK_SRC}/packages/surfpack/src/surfaces/nkm
+cp configs/${VER}/packages/VPISparseGrid/src/sandia_rules.cpp ${DAK_SRC}/packages/VPISparseGrid/src
+cp configs/${VER}/src/DakotaInterface.cpp ${DAK_SRC}/src
+cp configs/${VER}/src/NonDLocalReliability.cpp ${DAK_SRC}/src
+cp configs/${VER}/src/NonDSampling.cpp ${DAK_SRC}/src
+
+# Copy customized source and configuration files specific to Linux to 'src' directory
+cp configs/${VER}/linux/cmake/BuildDakotaCustom.pleiades.cmake ${DAK_SRC}/cmake/BuildDakotaCustom.cmake
+cp configs/${VER}/linux/cmake/DakotaDev.cmake ${DAK_SRC}/cmake
+
+# Configure
+cd ${DAK_BUILD}
+cmake \
+	-DBUILD_SHARED_LIBS=ON \
+	-DBUILD_STATIC_LIBS=OFF \
+	-DCMAKE_C_COMPILER=mpicc \
+	-DCMAKE_C_FLAGS="-Wno-error=implicit-function-declaration" \
+	-DCMAKE_CXX_COMPILER=mpicxx \
+	-DCMAKE_Fortran_COMPILER=gfortran \
+	-DBoost_NO_BOOST_CMAKE=TRUE \
+	-DHAVE_ACRO=OFF \
+	-DHAVE_JEGA=OFF \
+	-DHAVE_QUESO=ON \
+	-DDAKOTA_HAVE_GSL=ON \
+	-C${DAK_SRC}/cmake/BuildDakotaCustom.cmake \
+	-C${DAK_SRC}/cmake/DakotaDev.cmake \
+	${DAK_SRC}
+
+# Compile and install
+if [ $# -eq 0 ]; then
+	make
+	make install
+else
+	make -j $1
+	make -j $1 install
+fi
+
+cd ${DAK_INSTALL}
+
+# Comment out definition of HAVE_MPI in Teuchos config header file in order to
+# avoid conflict with our definition
+sed -i -e "s/#define HAVE_MPI/\/* #define HAVE_MPI *\//g" include/Teuchos_config.h
Index: sm/trunk/externalpackages/gdal/install-1.10-debian-netcdf.sh
===================================================================
--- /issm/trunk/externalpackages/gdal/install-1.10-debian-netcdf.sh	(revision 28012)
+++ 	(revision )
@@ -1,49 +1,0 @@
-#!/bin/bash
-set -eu
-
-
-# Constants
-#
-VER="1.10.0"
-HDF5_ROOT="${ISSM_DIR}/externalpackages/petsc/install"
-NETCDF_ROOT="${ISSM_DIR}/externalpackages/petsc/install"
-PROJ_ROOT="${ISSM_DIR}/externalpackages/proj/install"
-
-# Cleanup
-rm -rf install src
-mkdir install src
-
-# Download source
-$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/gdal-${VER}.tar.gz" "gdal-${VER}.tar.gz"
-
-# Unpack source
-tar -zxvf gdal-${VER}.tar.gz
-
-# Move source into 'src' directory
-mv gdal-${VER}/* src
-rm -rf gdal-${VER}
-
-# Copy customized source files to 'src' directory
-cp configs/1.10/linux/debian/frmts/wms/dataset.cpp src/frmts/wms
-cp configs/1.10/linux/debian/ogr/ogrsf_frmts/vfk/vfkfeature.cpp src/ogr/ogrsf_frmts/vfk
-cp configs/1.10/linux/debian/port/cplkeywordparser.cpp src/port
-
-# Configure
-cd src
-./configure \
-	--prefix="${ISSM_DIR}/externalpackages/gdal/install" \
-	--with-hdf5="${HDF5_ROOT}" \
-	--with-netcdf="${NETCDF_ROOT}" \
-	--with-proj="${PROJ_ROOT}"
-
-# Compile and install
-if [ $# -eq 0 ]; then
-	make
-	make install
-else
-	make -j $1
-	make -j $1 install
-fi
-
-# Return to initial directory
-cd ..
Index: sm/trunk/externalpackages/gdal/install-1.10-linux64-nopython.sh
===================================================================
--- /issm/trunk/externalpackages/gdal/install-1.10-linux64-nopython.sh	(revision 28012)
+++ 	(revision )
@@ -1,45 +1,0 @@
-#!/bin/bash
-set -eu
-
-#WARNING: you need to have python installed in externalpackages
-
-#Some cleanup
-rm -rf src
-rm -rf install
-rm -rf gdal-1.10.0
-mkdir src install
-
-#Download from ISSM server
-$ISSM_DIR/scripts/DownloadExternalPackage.sh 'https://issm.ess.uci.edu/files/externalpackages/gdal-1.10.0.tar.gz' 'gdal-1.10.0.tar.gz'
-
-#Untar 
-tar -zxvf  gdal-1.10.0.tar.gz
-
-#Move gdal into src directory
-mv gdal-1.10.0/* src
-rm -rf gdal-1.10.0
-
-export CFLAGS=-D_HAVE_STRNDUP
-export CXXFLAGS=-D_HAVE_STRNDUP
-
-#Configure gdal
-cd src
-./configure \
-	--prefix="$ISSM_DIR/externalpackages/gdal/install" \
-	--with-python=no \
-	--with-netcdf=no \
-	--with-jasper=no \
-	--without-hdf5
-
-#Compile and install gdal
-if [ $# -eq 0 ]; then
-	make
-else
-	make -j $1
-fi
-make install
-
-#For some reasons, on thwaites, one needs to do the following to get the python bindings:
-#cd src/swig/python/
-# python setup.py build
-# python setup.py install
Index: sm/trunk/externalpackages/gdal/install-1.10-linux64.sh
===================================================================
--- /issm/trunk/externalpackages/gdal/install-1.10-linux64.sh	(revision 28012)
+++ 	(revision )
@@ -1,45 +1,0 @@
-#!/bin/bash
-set -eu
-
-#WARNING: you need to have python installed in externalpackages
-
-#Some cleanup
-rm -rf src
-rm -rf install
-rm -rf gdal-1.10.0
-mkdir src install
-
-#Download from ISSM server
-$ISSM_DIR/scripts/DownloadExternalPackage.sh 'https://issm.ess.uci.edu/files/externalpackages/gdal-1.10.0.tar.gz' 'gdal-1.10.0.tar.gz'
-
-#Untar 
-tar -zxvf  gdal-1.10.0.tar.gz
-
-#Move gdal into src directory
-mv gdal-1.10.0/* src
-rm -rf gdal-1.10.0
-
-export CFLAGS=-D_HAVE_STRNDUP
-export CXXFLAGS=-D_HAVE_STRNDUP
-
-#Configure gdal
-cd src
-./configure \
-	--prefix="$ISSM_DIR/externalpackages/gdal/install" \
-	--with-python=yes \
-	--with-netcdf=no \
-	--with-jasper=no \
-	--without-hdf5
-
-#Compile and install gdal
-if [ $# -eq 0 ]; then
-	make
-else
-	make -j $1
-fi
-make install
-
-#For some reasons, on thwaites, one needs to do the following to get the python bindings:
-#cd src/swig/python/
-# python setup.py build
-# python setup.py install
Index: sm/trunk/externalpackages/gdal/install-1.10-macosx64.sh
===================================================================
--- /issm/trunk/externalpackages/gdal/install-1.10-macosx64.sh	(revision 28012)
+++ 	(revision )
@@ -1,40 +1,0 @@
-#!/bin/bash
-set -eu
-
-GDAL_VER="1.10.0"
-
-# Some cleanup
-rm -rf src
-rm -rf install
-rm -rf gdal-${GDAL_VER}
-mkdir src install
-
-#Download from ISSM server
-$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/gdal-${GDAL_VER}.tar.gz" "gdal-${GDAL_VER}.tar.gz"
-
-# Untar
-tar -zxvf gdal-${GDAL_VER}.tar.gz
-
-# Move gdal into src directory
-mv gdal-${GDAL_VER}/* src
-rm -rf gdal-${GDAL_VER}
-
-# Configure gdal
-cd src
-./configure \
-	--prefix="$ISSM_DIR/externalpackages/gdal/install" \
-	--with-python \
-	--with-netcdf=no \
-	--with-libiconv-prefix="/usr/lib/" \
-	--without-hdf5 \
-	--with-expat=$ISSM_DIR/externalpackages/expat/install
-#Note:
-# look for libiconv in /usr/lib/ because there seemed to be a conflict with port's library
-
-#Compile and install gdal
-if [ $# -eq 0 ]; then
-	make
-else
-	make -j $1
-fi
-make install
Index: sm/trunk/externalpackages/gdal/install-1.11.2-macosx64.sh
===================================================================
--- /issm/trunk/externalpackages/gdal/install-1.11.2-macosx64.sh	(revision 28012)
+++ 	(revision )
@@ -1,38 +1,0 @@
-#!/bin/bash
-set -eu
-
-GDAL_VER="1.11.2"
-
-# Some cleanup
-rm -rf src
-rm -rf install
-rm -rf gdal-${GDAL_VER}
-mkdir src install
-
-#Download from ISSM server
-$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/gdal-${GDAL_VER}.tar.gz" "gdal-${GDAL_VER}.tar.gz"
-
-# Untar
-tar -zxvf gdal-${GDAL_VER}.tar.gz
-
-# Move gdal into src directory
-mv gdal-${GDAL_VER}/* src
-rm -rf gdal-${GDAL_VER}
-
-# Configure gdal
-# NOTE: Look for libiconv in /usr/lib/ because there seemed to be a conflict with port's library
-cd src
-./configure \
-	--prefix="$ISSM_DIR/externalpackages/gdal/install" \
-	--with-python \
-	--with-netcdf=no \
-	--with-libiconv-prefix="/usr/lib/" \
-	--without-hdf5
-
-#Compile and install gdal
-if [ $# -eq 0 ]; then
-	make
-else
-	make -j $1
-fi
-make install
Index: /issm/trunk/externalpackages/gdal/install-3-python-static.sh
===================================================================
--- /issm/trunk/externalpackages/gdal/install-3-python-static.sh	(revision 28012)
+++ /issm/trunk/externalpackages/gdal/install-3-python-static.sh	(revision 28013)
@@ -9,12 +9,12 @@
 ## Constants
 #
-VER="3.5.1"
+VER="3.5.3"
 
 ## Environment
 #
 export CC=mpicc
+export CXXFLAGS="-std=c++11"
 export CXX=mpicxx
-export LDFLAGS="-L${HDF5_ROOT}/lib" # Need to do this so HDF5 symbols referenced in NETCDF library are found at link time
-export LIBS="-lhdf5_hl -lhdf5" # Need to do this so HDF5 symbols referenced in NETCDF library are found at link time
+export LIBS="-lsqlite3 -lhdf5_hl -lhdf5"
 export PREFIX="${ISSM_DIR}/externalpackages/gdal/install" # Need this to properly set destination root for Python libraries on macOS (should not affect Linux build; do not need for this configuration, but including it for consistency)
 
@@ -42,5 +42,5 @@
 	--enable-static \
 	--with-pic \
-	--with-python=python3 \
+	--with-python="python3" \
 	--with-curl="${CURL_ROOT}/bin/curl-config" \
 	--with-hdf5="${HDF5_ROOT}" \
Index: /issm/trunk/externalpackages/gdal/install-3-python.sh
===================================================================
--- /issm/trunk/externalpackages/gdal/install-3-python.sh	(revision 28012)
+++ /issm/trunk/externalpackages/gdal/install-3-python.sh	(revision 28013)
@@ -9,5 +9,5 @@
 ## Constants
 #
-VER="3.5.1"
+VER="3.5.3"
 
 ## Environment
@@ -34,5 +34,6 @@
 	--prefix="${PREFIX}" \
 	--enable-fast-install \
-	--with-python=python3 \
+	--with-python="python3" \
+	--with-curl="${CURL_ROOT}/bin/curl-config" \
 	--with-hdf5="${HDF5_ROOT}" \
 	--with-libz="${ZLIB_ROOT}" \
Index: /issm/trunk/externalpackages/gdal/install-3-static.sh
===================================================================
--- /issm/trunk/externalpackages/gdal/install-3-static.sh	(revision 28012)
+++ /issm/trunk/externalpackages/gdal/install-3-static.sh	(revision 28013)
@@ -9,12 +9,12 @@
 ## Constants
 #
-VER="3.5.1"
+VER="3.5.3"
 
 ## Environment
 #
 export CC=mpicc
+export CXXFLAGS="-std=c++11"
 export CXX=mpicxx
-export LDFLAGS="-L${HDF5_ROOT}/lib" # Need to do this so HDF5 symbols referenced in NETCDF library are found at link time
-export LIBS="-lhdf5_hl -lhdf5" # Need to do this so HDF5 symbols referenced in NETCDF library are found at link time
+export LIBS="-lsqlite3 -lhdf5_hl -lhdf5"
 export PREFIX="${ISSM_DIR}/externalpackages/gdal/install" # NOTE: Need to export this to properly set destination root for Python libraries on macOS (should not affect Linux build). Set to location where external package should be installed.
 
Index: /issm/trunk/externalpackages/gdal/install-3.sh
===================================================================
--- /issm/trunk/externalpackages/gdal/install-3.sh	(revision 28012)
+++ /issm/trunk/externalpackages/gdal/install-3.sh	(revision 28013)
@@ -5,5 +5,5 @@
 ## Constants
 #
-VER="3.5.1"
+VER="3.5.3"
 
 ## Environment
@@ -30,5 +30,4 @@
 	--prefix="${PREFIX}" \
 	--enable-fast-install \
-	--with-hdf5="${HDF5_ROOT}" \
 	--with-libz="${ZLIB_ROOT}" \
 	--with-netcdf="${NETCDF_ROOT}" \
Index: /issm/trunk/externalpackages/gmsh/configs/4.10.5/mac/CMakeLists.txt.patch
===================================================================
--- /issm/trunk/externalpackages/gmsh/configs/4.10.5/mac/CMakeLists.txt.patch	(revision 28013)
+++ /issm/trunk/externalpackages/gmsh/configs/4.10.5/mac/CMakeLists.txt.patch	(revision 28013)
@@ -0,0 +1,4 @@
+284c284
+<       set(CMAKE_INSTALL_RPATH "@executable_path/../lib")
+---
+>       set(CMAKE_INSTALL_RPATH "@executable_path/../lib;${LIBGFORTRAN_ROOT}")
Index: /issm/trunk/externalpackages/gmsh/configs/4.10.5/static/CMakeLists.txt.patch
===================================================================
--- /issm/trunk/externalpackages/gmsh/configs/4.10.5/static/CMakeLists.txt.patch	(revision 28013)
+++ /issm/trunk/externalpackages/gmsh/configs/4.10.5/static/CMakeLists.txt.patch	(revision 28013)
@@ -0,0 +1,9 @@
+858,861c860,862
+<     find_library(METIS_LIB metis PATH_SUFFIXES lib)
+<     find_path(METIS_INC "metis.h" PATH_SUFFIXES include)
+<     if(ENABLE_SYSTEM_CONTRIB AND METIS_LIB AND METIS_INC)
+<       message(STATUS "Using system version of METIS")
+---
+>     if(METIS_ROOT)
+>       find_library(METIS_LIB metis PATHS ${METIS_ROOT} PATH_SUFFIXES lib)
+>       find_path(METIS_INC "metis.h" PATHS ${METIS_ROOT} PATH_SUFFIXES include)
Index: /issm/trunk/externalpackages/gmsh/configs/4.10.5/static/mac/CMakeLists.txt.patch
===================================================================
--- /issm/trunk/externalpackages/gmsh/configs/4.10.5/static/mac/CMakeLists.txt.patch	(revision 28013)
+++ /issm/trunk/externalpackages/gmsh/configs/4.10.5/static/mac/CMakeLists.txt.patch	(revision 28013)
@@ -0,0 +1,4 @@
+207a208
+>   set(CMAKE_FIND_LIBRARY_SUFFIXES ".a" ".so")
+226a228
+>   set(CMAKE_FIND_LIBRARY_SUFFIXES ".a" ".so")
Index: /issm/trunk/externalpackages/gmsh/install-4-linux-static.sh
===================================================================
--- /issm/trunk/externalpackages/gmsh/install-4-linux-static.sh	(revision 28012)
+++ /issm/trunk/externalpackages/gmsh/install-4-linux-static.sh	(revision 28013)
@@ -24,7 +24,8 @@
 # - Move this to etc/environment.sh
 #
-LIBGFORTRAN=$(find /usr -name libgfortran* | egrep -n libgfortran.a | sed "s/[0-9]*://g" | head -1)
+echo "Finding libgfortran..."
+LIBGFORTRAN=$(find /usr -name libgfortran* 2>/dev/null | egrep -n libgfortran.a | sed "s/[0-9]*://g" | head -1)
 LIBGFORTRAN_ROOT=${LIBGFORTRAN%/*}
-LIBGCC=$(find ${LIBGFORTRAN_ROOT} -name libgcc* | egrep -n libgcc.a | sed "s/[0-9]*://g" | head -1)
+LIBGCC=$(find ${LIBGFORTRAN_ROOT} -name libgcc* 2>/dev/null | egrep -n libgcc.a | sed "s/[0-9]*://g" | head -1)
 
 # Cleanup
@@ -41,4 +42,7 @@
 mv gmsh-${VER}-source/* src
 rm -rf gmsh-${VER}-source
+
+# Apply patches
+patch src/CMakeLists.txt < configs/${VER}/static/CMakeLists.txt.patch
 
 # Configure
@@ -57,4 +61,5 @@
 cmake \
 	-DCMAKE_INSTALL_PREFIX="${PREFIX}" \
+	-DCMAKE_BUILD_TYPE=Release \
 	-DENABLE_BUILD_LIB=1 \
 	-DBLAS_LAPACK_LIBRARIES="-L${LAPACK_ROOT}/lib -lflapack -L${BLAS_ROOT}/lib -lfblas ${LIBGFORTRAN_ROOT}/libgfortran.a ${LIBGFORTRAN_ROOT}/libquadmath.a ${LIBGCC}" \
Index: /issm/trunk/externalpackages/gmsh/install-4-linux.sh
===================================================================
--- /issm/trunk/externalpackages/gmsh/install-4-linux.sh	(revision 28012)
+++ /issm/trunk/externalpackages/gmsh/install-4-linux.sh	(revision 28013)
@@ -24,5 +24,6 @@
 # - Move this to etc/environment.sh
 #
-LIBGFORTRAN=$(find /usr -name libgfortran* | egrep -n libgfortran.a | sed "s/[0-9]*://g" | head -1)
+echo "Finding libgfortran..."
+LIBGFORTRAN=$(find /usr -name libgfortran* 2>/dev/null | egrep -n libgfortran.a | sed "s/[0-9]*://g" | head -1)
 LIBGFORTRAN_ROOT=${LIBGFORTRAN%/*}
 
@@ -56,4 +57,6 @@
 cmake \
 	-DCMAKE_INSTALL_PREFIX="${PREFIX}" \
+	-DCMAKE_INSTALL_RPATH="${PREFIX}/lib" \
+	-DCMAKE_BUILD_TYPE=Release \
 	-DENABLE_BUILD_DYNAMIC=1 \
 	-DENABLE_BUILD_SHARED=1 \
Index: /issm/trunk/externalpackages/gmsh/install-4-mac-static.sh
===================================================================
--- /issm/trunk/externalpackages/gmsh/install-4-mac-static.sh	(revision 28012)
+++ /issm/trunk/externalpackages/gmsh/install-4-mac-static.sh	(revision 28013)
@@ -19,8 +19,5 @@
 PREFIX="${ISSM_DIR}/externalpackages/gmsh/install" # Set to location where external package should be installed
 
-
 # Find libgfortran and libgcc so we do not have to hardcode them
-#
-# Should retrieve a copy of gfortran that is compiled from source before returning one that is installed via package manager
 #
 # TODO:
@@ -29,7 +26,8 @@
 # - Otherwise, refactor this to work with other gfortran installations.
 #
-LIBGFORTRAN=$(mdfind -onlyin /usr -name libgfortran | egrep -n libgfortran.a | egrep -v i386 | sed "s/[0-9]*://g" | head -1)
+echo "Finding libgfortran..."
+LIBGFORTRAN=$(find /usr /opt -name libgfortran* 2>/dev/null | egrep -n libgfortran.a | egrep -v i386 | sed "s/[0-9]*://g" | head -1)
 LIBGFORTRAN_ROOT=${LIBGFORTRAN%/*}
-LIBGCC=$(mdfind -onlyin ${LIBGFORTRAN_ROOT} -name libgcc | egrep -n libgcc.a | egrep -v i386 | sed "s/[0-9]*://g" | head -1)
+LIBGCC=$(find ${LIBGFORTRAN_ROOT} -name libgcc* 2>/dev/null | egrep -n libgcc.a | egrep -v i386 | sed "s/[0-9]*://g" | head -1)
 
 # Cleanup
@@ -47,6 +45,7 @@
 rm -rf gmsh-${VER}-source
 
-# Copy customized source and config files to 'src' directory
-cp configs/${VER}/static/CMakeLists.txt src
+# Apply patches
+patch src/CMakeLists.txt < configs/${VER}/static/CMakeLists.txt.patch
+patch src/CMakeLists.txt < configs/${VER}/static/mac/CMakeLists.txt.patch
 
 # Configure
@@ -65,4 +64,5 @@
 cmake \
 	-DCMAKE_INSTALL_PREFIX="${PREFIX}" \
+	-DCMAKE_BUILD_TYPE=Release \
 	-DENABLE_BUILD_LIB=1 \
 	-DBLAS_LAPACK_LIBRARIES="-L${LAPACK_ROOT}/lib -lflapack -L${BLAS_ROOT}/lib -lfblas ${LIBGFORTRAN_ROOT}/libgfortran.a ${LIBGFORTRAN_ROOT}/libquadmath.a ${LIBGCC}" \
@@ -84,8 +84,2 @@
 	make -j $1 install
 fi
-
-# Make necessary link on RHEL
-if [[ -d ${PREFIX}/lib64 && ! -d ${PREFIX}/lib ]]; then
-	cd ${PREFIX}
-	ln -s ./lib64 ./lib
-fi
Index: /issm/trunk/externalpackages/gmsh/install-4-mac.sh
===================================================================
--- /issm/trunk/externalpackages/gmsh/install-4-mac.sh	(revision 28012)
+++ /issm/trunk/externalpackages/gmsh/install-4-mac.sh	(revision 28013)
@@ -21,11 +21,9 @@
 # Find libgfortran so that we do not have to hardcode it.
 #
-# Should retrieve a copy of gfortran that is compiled from source before 
-# returning one that is installed via package manager.
-#
 # TODO:
 # - Move this to etc/environment.sh
 #
-LIBGFORTRAN=$(mdfind -onlyin /usr -name libgfortran | egrep -n libgfortran.a | egrep -v i386 | sed "s/[0-9]*://g" | head -1)
+echo "Finding libgfortran..."
+LIBGFORTRAN=$(find /usr /opt -name libgfortran* 2>/dev/null | egrep -n libgfortran.a | egrep -v i386 | sed "s/[0-9]*://g" | head -1)
 LIBGFORTRAN_ROOT=${LIBGFORTRAN%/*}
 
@@ -44,4 +42,7 @@
 rm -rf gmsh-${VER}-source
 
+# Apply patches
+patch src/CMakeLists.txt < configs/${VER}/mac/CMakeLists.txt.patch
+
 # Configure
 #
@@ -59,7 +60,11 @@
 cmake \
 	-DCMAKE_INSTALL_PREFIX="${PREFIX}" \
+	-DCMAKE_MACOSX_RPATH=ON \
+	-DCMAKE_INSTALL_RPATH="${PREFIX}/lib" \
+	-DCMAKE_BUILD_TYPE=Release \
 	-DENABLE_BUILD_DYNAMIC=1 \
 	-DENABLE_BUILD_SHARED=1 \
 	-DBLAS_LAPACK_LIBRARIES="-L${LAPACK_ROOT}/lib -lflapack -L${BLAS_ROOT}/lib -lfblas -L${LIBGFORTRAN_ROOT} -lgfortran" \
+	-DLIBGFORTRAN_ROOT="${LIBGFORTRAN_ROOT}" \
 	-DENABLE_BLAS_LAPACK=1 \
 	-DENABLE_EIGEN=0 \
@@ -79,8 +84,2 @@
 	make -j $1 install
 fi
-
-# Make necessary link on RHEL
-if [[ -d ${PREFIX}/lib64 && ! -d ${PREFIX}/lib ]]; then
-	cd ${PREFIX}
-	ln -s ./lib64 ./lib
-fi
Index: /issm/trunk/externalpackages/gmt/configs/6/mac/cmake/modules/ConfigCMake.cmake
===================================================================
--- /issm/trunk/externalpackages/gmt/configs/6/mac/cmake/modules/ConfigCMake.cmake	(revision 28013)
+++ /issm/trunk/externalpackages/gmt/configs/6/mac/cmake/modules/ConfigCMake.cmake	(revision 28013)
@@ -0,0 +1,250 @@
+#
+#
+# Useful CMake variables.
+#
+# There are five configuration files:
+#
+#   1) "ConfigDefault.cmake" - is version controlled and used to add new default
+#      variables and set defaults for everyone.
+#   2) "ConfigUser.cmake" in the source tree - is not version controlled
+#      (currently listed in .gitignore) and used to override basic default settings on
+#      a per-user basis.
+#   3) "ConfigUser.cmake" in the build tree - is used to override
+#      "ConfigUser.cmake" in the source tree.
+#   4) "ConfigUserAdvanced.cmake" in the source tree - is not version controlled
+#      (currently listed in .gitignore) and used to override advanced default settings on
+#      a per-user basis.
+#   5) "ConfigUserAdvanced.cmake" in the build tree - is used to override
+#      "ConfigUserAdvanced.cmake" in the source tree.
+#
+# NOTE: If you want to change CMake behaviour just for yourself,
+#       copy "ConfigUserTemplate.cmake" to "ConfigUser.cmake" and then edit
+#       "ConfigUser.cmake" for basic settings. For advanced settings,
+#       copy "ConfigUserAdvancedTemplate.cmake" to "ConfigUserAdvanced.cmake" and edit it.
+#       DO NOT EDIT "ConfigDefault.cmake" or the CMake template files.
+#
+include ("${CMAKE_SOURCE_DIR}/cmake/ConfigDefault.cmake")
+
+# A "ConfigUser.cmake" in the source tree overrides the advanced defaults.
+if (EXISTS "${CMAKE_SOURCE_DIR}/cmake/ConfigUser.cmake")
+	include ("${CMAKE_SOURCE_DIR}/cmake/ConfigUser.cmake")
+endif (EXISTS "${CMAKE_SOURCE_DIR}/cmake/ConfigUser.cmake")
+
+# If you've got a 'ConfigUser.cmake' in the build tree then that overrides the
+# one in the source tree.
+if (EXISTS "${CMAKE_BINARY_DIR}/cmake/ConfigUser.cmake")
+	include ("${CMAKE_BINARY_DIR}/cmake/ConfigUser.cmake")
+endif (EXISTS "${CMAKE_BINARY_DIR}/cmake/ConfigUser.cmake")
+
+# A "ConfigUserAdvanced.cmake" in the source tree overrides the advanced defaults.
+if (EXISTS "${CMAKE_SOURCE_DIR}/cmake/ConfigUserAdvanced.cmake")
+	include ("${CMAKE_SOURCE_DIR}/cmake/ConfigUserAdvanced.cmake")
+endif (EXISTS "${CMAKE_SOURCE_DIR}/cmake/ConfigUserAdvanced.cmake")
+
+# If you've got a 'ConfigUserAdvanced.cmake' in the build tree then that overrides the
+# one in the source tree.
+if (EXISTS "${CMAKE_BINARY_DIR}/cmake/ConfigUserAdvanced.cmake")
+	include ("${CMAKE_BINARY_DIR}/cmake/ConfigUserAdvanced.cmake")
+endif (EXISTS "${CMAKE_BINARY_DIR}/cmake/ConfigUserAdvanced.cmake")
+
+###########################################################
+# Do any needed processing of the configuration variables #
+###########################################################
+
+# Set default build type to 'Release'
+if (NOT CMAKE_BUILD_TYPE)
+	set (CMAKE_BUILD_TYPE Release)
+endif (NOT CMAKE_BUILD_TYPE)
+
+# Here we change it to add the git commit hash for non-public releases
+set (GMT_PACKAGE_VERSION_WITH_GIT_REVISION ${GMT_PACKAGE_VERSION})
+
+# Check if it's a git repository or not
+if (EXISTS ${GMT_SOURCE_DIR}/.git)
+	set (HAVE_GIT_VERSION TRUE)
+endif (EXISTS ${GMT_SOURCE_DIR}/.git)
+
+# Add the last git commit hash and date to the package version if this is a non-public release.
+# A non-public release has a FALSE 'GMT_PUBLIC_RELEASE' variable in 'ConfigDefault.cmake'.
+if (GIT_FOUND AND HAVE_GIT_VERSION AND NOT GMT_PUBLIC_RELEASE)
+	# Get the last git commit hash
+	execute_process (
+		COMMAND ${GIT_EXECUTABLE} describe --abbrev=7 --always --dirty
+		WORKING_DIRECTORY ${GMT_SOURCE_DIR}
+		RESULT_VARIABLE GIT_RETURN_CODE
+		OUTPUT_VARIABLE GIT_COMMIT_HASH
+		OUTPUT_STRIP_TRAILING_WHITESPACE)
+
+	if (GIT_RETURN_CODE)
+		message (STATUS "Unable to determine git commit hash for non-public release - ignoring.")
+	else (GIT_RETURN_CODE)
+		if (GIT_COMMIT_HASH)
+			# For non-public release, add the last git commit hash and date
+			execute_process (
+				COMMAND ${GIT_EXECUTABLE} log -1 --date=short --pretty=format:%cd
+				WORKING_DIRECTORY ${GMT_SOURCE_DIR}
+				RESULT_VARIABLE GIT_DATE_RETURN_CODE
+				OUTPUT_VARIABLE GIT_COMMIT_DATE
+				OUTPUT_STRIP_TRAILING_WHITESPACE)
+			string(REPLACE "-" "." GIT_COMMIT_DATE "${GIT_COMMIT_DATE}")
+			set (GMT_PACKAGE_VERSION_WITH_GIT_REVISION "${GMT_PACKAGE_VERSION}_${GIT_COMMIT_HASH}_${GIT_COMMIT_DATE}")
+		endif (GIT_COMMIT_HASH)
+	endif (GIT_RETURN_CODE)
+endif (GIT_FOUND AND HAVE_GIT_VERSION AND NOT GMT_PUBLIC_RELEASE)
+
+# apply license restrictions
+if (LICENSE_RESTRICTED) # on
+	if (LICENSE_RESTRICTED STREQUAL GPL)
+		# restrict to GPL
+	elseif (LICENSE_RESTRICTED STREQUAL LGPL)
+		# restrict to LGPL
+	else (LICENSE_RESTRICTED STREQUAL GPL)
+		# unknown license
+		message (WARNING "unknown license: ${LICENSE_RESTRICTED}")
+	endif (LICENSE_RESTRICTED STREQUAL GPL)
+	# restrictions that apply to any of the above licenses
+else (LICENSE_RESTRICTED) # off
+	# no restrictions at all
+endif (LICENSE_RESTRICTED)
+
+# reset list of extra license files
+set (GMT_EXTRA_LICENSE_FILES)
+
+# location of GNU license files
+set (COPYING_GPL ${GMT_SOURCE_DIR}/COPYINGv3)
+set (COPYING_LGPL ${GMT_SOURCE_DIR}/COPYING.LESSERv3)
+
+# GMT paths used in the code
+if (NOT GMT_DATADIR)
+	# do not reset user setting
+	if (GMT_INSTALL_TRADITIONAL_FOLDERNAMES)
+		set (GMT_DATADIR "share")
+	else(GMT_INSTALL_TRADITIONAL_FOLDERNAMES)
+		set (GMT_DATADIR "share/gmt${GMT_INSTALL_NAME_SUFFIX}")
+	endif(GMT_INSTALL_TRADITIONAL_FOLDERNAMES)
+endif (NOT GMT_DATADIR)
+
+# Install path GMT_DOCDIR
+if (NOT GMT_DOCDIR)
+	# do not reset user setting
+	if (GMT_INSTALL_TRADITIONAL_FOLDERNAMES)
+		set (GMT_DOCDIR "${GMT_DATADIR}/doc")
+	else(GMT_INSTALL_TRADITIONAL_FOLDERNAMES)
+		set (GMT_DOCDIR "share/doc/gmt${GMT_INSTALL_NAME_SUFFIX}")
+	endif(GMT_INSTALL_TRADITIONAL_FOLDERNAMES)
+endif (NOT GMT_DOCDIR)
+
+# Install path GMT_MANDIR
+if (NOT GMT_MANDIR)
+	# do not reset user setting
+	if (GMT_INSTALL_TRADITIONAL_FOLDERNAMES)
+		set (GMT_MANDIR "${GMT_DATADIR}/man")
+	else(GMT_INSTALL_TRADITIONAL_FOLDERNAMES)
+		set (GMT_MANDIR "${GMT_DOCDIR}/man")
+	endif(GMT_INSTALL_TRADITIONAL_FOLDERNAMES)
+endif (NOT GMT_MANDIR)
+
+# Install path for GMT binaries, headers and libraries
+include (GNUInstallDirs) # defines CMAKE_INSTALL_LIBDIR (lib/lib64)
+if (NOT GMT_LIBDIR)
+	set (GMT_LIBDIR ${CMAKE_INSTALL_LIBDIR})
+endif(NOT GMT_LIBDIR)
+
+if (NOT GMT_BINDIR)
+	set (GMT_BINDIR bin)
+endif(NOT GMT_BINDIR)
+
+if (NOT GMT_INCLUDEDIR)
+	set (GMT_INCLUDEDIR include/gmt${GMT_INSTALL_NAME_SUFFIX})
+endif(NOT GMT_INCLUDEDIR)
+
+if (GMT_DATA_URL) # Backwards compatibility with old ConfigUser.cmake files
+	message (WARNING "CMake variable GMT_DATA_URL is deprecated and will be removed in the future releases. Use GMT_DATA_SERVER instead.")
+	set (GMT_DATA_SERVER ${GMT_DATA_URL})
+endif (GMT_DATA_URL)
+
+# use, i.e. don't skip the full RPATH for the build tree
+set (CMAKE_SKIP_BUILD_RPATH FALSE)
+
+# when building, don't use the install RPATH already
+# (but later on when installing)
+set (CMAKE_BUILD_WITH_INSTALL_RPATH FALSE)
+
+# set the RPATH to be used when installing
+if (NOT DEFINED GMT_INSTALL_RELOCATABLE)
+	set (GMT_INSTALL_RELOCATABLE FALSE)
+endif (NOT DEFINED GMT_INSTALL_RELOCATABLE)
+if (GMT_INSTALL_RELOCATABLE)
+	# make executables relocatable on supported platforms (relative RPATH)
+	if (UNIX AND NOT CYGWIN)
+		# find relative libdir from executable dir
+		file (RELATIVE_PATH _rpath /${GMT_BINDIR} /${GMT_LIBDIR})
+		# remove trailing /
+		string (REGEX REPLACE "/$" "" _rpath "${_rpath}")
+		if (APPLE)
+			# relative RPATH on osx
+			# CMP0042: CMake 3.0: MACOSX_RPATH is enabled by default
+			set (CMAKE_MACOSX_RPATH ON)
+			set (CMAKE_INSTALL_NAME_DIR @rpath)
+			set (CMAKE_INSTALL_RPATH "@rpath;@executable_path/${_rpath};${LIBGFORTRAN_ROOT}")
+		else (APPLE)
+			# relative RPATH on Linux, Solaris, etc.
+			set (CMAKE_INSTALL_RPATH "\$ORIGIN/${_rpath}")
+		endif (APPLE)
+	endif (UNIX AND NOT CYGWIN)
+else (GMT_INSTALL_RELOCATABLE)
+	# set absolute RPATH
+	if (APPLE)
+		# CMP0042: CMake 3.0: MACOSX_RPATH is enabled by default
+		set (CMAKE_MACOSX_RPATH OFF)
+		set (CMAKE_INSTALL_NAME_DIR "${CMAKE_INSTALL_PREFIX}/${GMT_LIBDIR}")
+	else (APPLE)
+		# the RPATH to be used when installing, but only if it's not a
+		# system directory
+		list (FIND CMAKE_PLATFORM_IMPLICIT_LINK_DIRECTORIES
+			"${CMAKE_INSTALL_PREFIX}/${GMT_LIBDIR}" isSystemDir)
+		if ("${isSystemDir}" STREQUAL "-1")
+			set (CMAKE_INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/${GMT_LIBDIR}")
+		endif ("${isSystemDir}" STREQUAL "-1")
+	endif (APPLE)
+endif (GMT_INSTALL_RELOCATABLE)
+
+# add the automatically determined parts of the RPATH
+# which point to directories outside the build tree to the install RPATH
+set (CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE)
+
+# When running examples/tests with CTest (out-of-source) we need support for
+# running GMT programs from within ${GMT_BINARY_DIR}:
+if (DO_EXAMPLES OR DO_TESTS AND NOT SUPPORT_EXEC_IN_BINARY_DIR)
+	message (WARNING "Enabling SUPPORT_EXEC_IN_BINARY_DIR (required for "
+	"testing). Please disable testing on release builds.")
+	set (SUPPORT_EXEC_IN_BINARY_DIR ON)
+endif (DO_EXAMPLES OR DO_TESTS AND NOT SUPPORT_EXEC_IN_BINARY_DIR)
+
+# Some tests are known to fail, and can be excluded from the test by adding
+# the comment "# GMT_KNOWN_FAILURE".
+if (NOT DEFINED GMT_ENABLE_KNOWN2FAIL)
+	set (GMT_ENABLE_KNOWN2FAIL ON)
+endif (NOT DEFINED GMT_ENABLE_KNOWN2FAIL)
+
+# Make GNU, Intel, Clang and AppleClang compilers default to C99
+if (CMAKE_C_COMPILER_ID MATCHES "(GNU|Intel|Clang)" AND NOT CMAKE_C_FLAGS MATCHES "-std=")
+	set (CMAKE_C_FLAGS "-std=gnu99 ${CMAKE_C_FLAGS}")
+endif ()
+
+# Suppress MSVC deprecation and security warnings
+if (MSVC)
+    set (CMAKE_C_FLAGS "/D_CRT_SECURE_NO_WARNINGS /D_CRT_SECURE_NO_DEPRECATE ${CMAKE_C_FLAGS}")
+    set (CMAKE_C_FLAGS "/D_CRT_NONSTDC_NO_DEPRECATE /D_SCL_SECURE_NO_DEPRECATE ${CMAKE_C_FLAGS}")
+endif (MSVC)
+
+# Handle the special developer option GMT_DOCS_DEPEND_ON_GMT
+# Normally this is ON.
+if (NOT DEFINED GMT_DOCS_DEPEND_ON_GMT)
+	set (GMT_DOCS_DEPEND_ON_GMT TRUE)
+endif (NOT DEFINED GMT_DOCS_DEPEND_ON_GMT)
+if (GMT_DOCS_DEPEND_ON_GMT)
+	add_custom_target (gmt_for_img_convert DEPENDS gmt)
+else (GMT_DOCS_DEPEND_ON_GMT)
+	add_custom_target (gmt_for_img_convert)
+endif (GMT_DOCS_DEPEND_ON_GMT)
Index: /issm/trunk/externalpackages/gmt/configs/6/static/cmake/modules/FindGDAL.cmake
===================================================================
--- /issm/trunk/externalpackages/gmt/configs/6/static/cmake/modules/FindGDAL.cmake	(revision 28012)
+++ /issm/trunk/externalpackages/gmt/configs/6/static/cmake/modules/FindGDAL.cmake	(revision 28013)
@@ -148,8 +148,8 @@
 		NAMES ${_extralib}
 		HINTS
+		${HDF5_ROOT}
+		$ENV{HDF5_ROOT}
 		${NETCDF_ROOT}
 		$ENV{NETCDF_ROOT}
-		${HDF5_ROOT}
-		$ENV{HDF5_ROOT}
 		${ZLIB_ROOT}
 		$ENV{ZLIB_ROOT}
@@ -166,5 +166,5 @@
 # find all manually-supplied libs
 if (GDAL_EXTRA_LIBS)
-	# Ensure -l is precedeced by whitespace to not match
+	# Ensure -l is preceded by whitespace to not match
 	# '-l' in '-L/usr/lib/x86_64-linux-gnu/hdf5/serial'
 	string (REGEX MATCHALL "(^| )-l[^ ]+" _gdal_extra_lib_dashl ${GDAL_EXTRA_LIBS})
Index: /issm/trunk/externalpackages/gmt/configs/6/static/cmake/modules/FindNETCDF.cmake
===================================================================
--- /issm/trunk/externalpackages/gmt/configs/6/static/cmake/modules/FindNETCDF.cmake	(revision 28012)
+++ /issm/trunk/externalpackages/gmt/configs/6/static/cmake/modules/FindNETCDF.cmake	(revision 28013)
@@ -59,5 +59,5 @@
 			OUTPUT_VARIABLE NETCDF_CONFIG_LIBS)
 		if (NETCDF_CONFIG_LIBS)
-			# Ensure -l is precedeced by whitespace to not match
+			# Ensure -l is preceded by whitespace to not match
 			# '-l' in '-L/usr/lib/x86_64-linux-gnu/hdf5/serial'
 			string (REGEX MATCHALL "(^| )-l[^ ]+" _netcdf_dashl ${NETCDF_CONFIG_LIBS})
Index: /issm/trunk/externalpackages/gmt/configs/6/static/linux/cmake/ConfigUser.static.cmake
===================================================================
--- /issm/trunk/externalpackages/gmt/configs/6/static/linux/cmake/ConfigUser.static.cmake	(revision 28012)
+++ /issm/trunk/externalpackages/gmt/configs/6/static/linux/cmake/ConfigUser.static.cmake	(revision 28013)
@@ -191,5 +191,5 @@
 # Create position independent code on all targets [auto] (needed for static
 # build on non-x86):
-set (CMAKE_POSITION_INDEPENDENT_CODE TRUE)
+#set (CMAKE_POSITION_INDEPENDENT_CODE TRUE)
 
 # Build GMT shared lib with supplemental modules [TRUE]:
Index: /issm/trunk/externalpackages/gmt/configs/6/static/mac/cmake/ConfigUser.static.cmake
===================================================================
--- /issm/trunk/externalpackages/gmt/configs/6/static/mac/cmake/ConfigUser.static.cmake	(revision 28012)
+++ /issm/trunk/externalpackages/gmt/configs/6/static/mac/cmake/ConfigUser.static.cmake	(revision 28013)
@@ -191,5 +191,5 @@
 # Create position independent code on all targets [auto] (needed for static
 # build on non-x86):
-set (CMAKE_POSITION_INDEPENDENT_CODE TRUE)
+#set (CMAKE_POSITION_INDEPENDENT_CODE TRUE)
 
 # Build GMT shared lib with supplemental modules [TRUE]:
Index: /issm/trunk/externalpackages/gmt/install-6-linux-static.sh
===================================================================
--- /issm/trunk/externalpackages/gmt/install-6-linux-static.sh	(revision 28012)
+++ /issm/trunk/externalpackages/gmt/install-6-linux-static.sh	(revision 28013)
@@ -5,5 +5,5 @@
 ## Constants
 #
-VER="6.4.0"
+VER="6.0.0"
 
 # Find libgfortran and libgcc so we do not have to hardcode them
@@ -11,8 +11,11 @@
 # TODO:
 # - Move this to etc/environment.sh
+# - Test if -static-libgfortran flag will avoid all of this.
+# - Otherwise, refactor this to work with other gfortran installations.
 #
-LIBGFORTRAN=$(find /usr -name libgfortran* | egrep -n libgfortran.a | sed "s/[0-9]*://g" | head -1)
+echo "Finding libgfortran..."
+LIBGFORTRAN=$(find /usr -name libgfortran* 2>/dev/null | egrep -n libgfortran.a | sed "s/[0-9]*://g" | head -1)
 LIBGFORTRAN_ROOT=${LIBGFORTRAN%/*}
-LIBGCC=$(find ${LIBGFORTRAN_ROOT} -name libgcc* | egrep -n libgcc.a | sed "s/[0-9]*://g" | head -1)
+LIBGCC=$(find ${LIBGFORTRAN_ROOT} -name libgcc* 2>/dev/null | egrep -n libgcc.a | sed "s/[0-9]*://g" | head -1)
 
 GDAL_EXTRA_LIBS="-lstdc++" # Determined by running `$GDAL_ROOT/bin/gdal-config --dep-libs` then removing duplicate libs
@@ -64,5 +67,4 @@
 	-DCURL_LIBRARY="${CURL_ROOT}/lib/libcurl.a" \
 	-DGDAL_EXTRA_LIBS="${GDAL_EXTRA_LIBS}" \
-	-DHDF5_ROOT="${HDF5_ROOT}" \
 	-DLAPACK_LIBRARIES="${LAPACK_ROOT}/lib/libflapack.a;${LIBGFORTRAN_ROOT}/libgfortran.a;${LIBGFORTRAN_ROOT}/libquadmath.a;${LIBGCC}" \
 	-DNETCDF_EXTRA_LIBS="${NETCDF_EXTRA_LIBS}" \
Index: /issm/trunk/externalpackages/gmt/install-6-linux.sh
===================================================================
--- /issm/trunk/externalpackages/gmt/install-6-linux.sh	(revision 28012)
+++ /issm/trunk/externalpackages/gmt/install-6-linux.sh	(revision 28013)
@@ -12,5 +12,6 @@
 # - Move this to etc/environment.sh
 #
-LIBGFORTRAN=$(find /usr -name libgfortran* | egrep -n libgfortran.a | sed "s/[0-9]*://g" | head -1)
+echo "Finding libgfortran..."
+LIBGFORTRAN=$(find /usr -name libgfortran* 2>/dev/null | egrep -n libgfortran.a | sed "s/[0-9]*://g" | head -1)
 LIBGFORTRAN_ROOT=${LIBGFORTRAN%/*}
 
Index: /issm/trunk/externalpackages/gmt/install-6-mac-static.sh
===================================================================
--- /issm/trunk/externalpackages/gmt/install-6-mac-static.sh	(revision 28012)
+++ /issm/trunk/externalpackages/gmt/install-6-mac-static.sh	(revision 28013)
@@ -5,9 +5,7 @@
 ## Constants
 #
-VER="6.4.0"
+VER="6.0.0"
 
 # Find libgfortran and libgcc so we do not have to hardcode them
-#
-# Should retrieve a copy of gfortran that is compiled from source before returning one that is installed via package manager
 #
 # TODO:
@@ -16,10 +14,11 @@
 # - Otherwise, refactor this to work with other gfortran installations.
 #
-LIBGFORTRAN=$(mdfind -onlyin /usr -name libgfortran | egrep -n libgfortran.a | egrep -v i386 | sed "s/[0-9]*://g" | head -1)
+echo "Finding libgfortran..."
+LIBGFORTRAN=$(find /usr /opt -name libgfortran* 2>/dev/null | egrep -n libgfortran.a | egrep -v i386 | sed "s/[0-9]*://g" | head -1)
 LIBGFORTRAN_ROOT=${LIBGFORTRAN%/*}
-LIBGCC=$(mdfind -onlyin ${LIBGFORTRAN_ROOT} -name libgcc | egrep -n libgcc.a | egrep -v i386 | sed "s/[0-9]*://g" | head -1)
+LIBGCC=$(find ${LIBGFORTRAN_ROOT} -name libgcc* 2>/dev/null | egrep -n libgcc.a | egrep -v i386 | sed "s/[0-9]*://g" | head -1)
 
 GDAL_EXTRA_LIBS="-lc++" # `$GDAL_ROOT/bin/gdal-config --dep-libs` does not report need to link to libc++ (see also customized configuration file ./configs/6/static/cmake/modules/FindGDAL.cmake)
-NETCDF_EXTRA_LIBS="-lsqlite3 -lpthread -ldl -liconv" # `$NETCDF_ROOT/bin/nc-config --libs` does not report certain dependencies of certain static libraries (see also customized configuration file ./configs/6/static/cmake/modules/FindNETCDF.cmake)
+NETCDF_EXTRA_LIBS="-lpthread -ldl -liconv" # `$NETCDF_ROOT/bin/nc-config --libs` does not report certain dependencies of certain static libraries (see also customized configuration file ./configs/6/static/cmake/modules/FindNETCDF.cmake)
 
 # Environment
@@ -28,5 +27,5 @@
 export CURL_INCLUDE_DIRS="${CURL_ROOT}/include"
 export CURL_LIBRARIES="${CURL_ROOT}/lib/libcurl.a;${ZLIB_ROOT}/lib/libz.a"
-export LDFLAGS="-framework CoreFoundation -framework Security"
+export LDFLAGS="-lsqlite3 -framework CoreFoundation -framework Security"
 export PREFIX="${ISSM_DIR}/externalpackages/gmt/install" # NOTE: Need to export this to be picked up by customized ConfigUser.cmake (see below). Set to location where external package should be installed.
 
@@ -68,5 +67,4 @@
 	-DCURL_LIBRARY="${CURL_ROOT}/lib/libcurl.a" \
 	-DGDAL_EXTRA_LIBS="${GDAL_EXTRA_LIBS}" \
-	-DHDF5_ROOT="${HDF5_ROOT}" \
 	-DLAPACK_LIBRARIES="${LAPACK_ROOT}/lib/libflapack.a;${LIBGFORTRAN_ROOT}/libgfortran.a;${LIBGFORTRAN_ROOT}/libquadmath.a;${LIBGCC}" \
 	-DNETCDF_EXTRA_LIBS="${NETCDF_EXTRA_LIBS}" \
Index: /issm/trunk/externalpackages/gmt/install-6-mac.sh
===================================================================
--- /issm/trunk/externalpackages/gmt/install-6-mac.sh	(revision 28012)
+++ /issm/trunk/externalpackages/gmt/install-6-mac.sh	(revision 28013)
@@ -9,11 +9,9 @@
 # Find libgfortran so that we do not have to hardcode it.
 #
-# Should retrieve a copy of gfortran that is compiled from source before 
-# returning one that is installed via package manager.
-#
 # TODO:
 # - Move this to etc/environment.sh
 #
-LIBGFORTRAN=$(mdfind -onlyin /usr -name libgfortran | egrep -n libgfortran.a | egrep -v i386 | sed "s/[0-9]*://g" | head -1)
+echo "Finding libgfortran..."
+LIBGFORTRAN=$(find /usr /opt -name libgfortran* 2>/dev/null | egrep -n libgfortran.a | egrep -v i386 | sed "s/[0-9]*://g" | head -1)
 LIBGFORTRAN_ROOT=${LIBGFORTRAN%/*}
 
@@ -39,4 +37,5 @@
 # Copy custom configuration files
 cp ./configs/6/mac/cmake/ConfigUser.cmake ./src/cmake
+cp ./configs/6/mac/cmake/modules/ConfigCMake.cmake ./src/cmake/modules
 
 # Configure
@@ -57,4 +56,5 @@
 	-DCURL_LIBRARY="-L${CURL_ROOT}/lib;-lcurl" \
 	-DLAPACK_LIBRARIES="-L${LAPACK_ROOT}/lib;-lflapack;-L${LIBGFORTRAN_ROOT};-lgfortran" \
+	-DLIBGFORTRAN_ROOT="${LIBGFORTRAN_ROOT}" \
 	..
 
Index: /issm/trunk/externalpackages/hdf5/install-1-parallel-static.sh
===================================================================
--- /issm/trunk/externalpackages/hdf5/install-1-parallel-static.sh	(revision 28013)
+++ /issm/trunk/externalpackages/hdf5/install-1-parallel-static.sh	(revision 28013)
@@ -0,0 +1,47 @@
+#!/bin/bash
+set -eu
+
+
+## Constants
+#
+VER="1.10.9"
+
+PREFIX="${ISSM_DIR}/externalpackages/hdf5/install" # Set to location where external package should be installed
+
+## Environnment
+#
+export CC=mpicc
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/hdf5-${VER}.tar.gz" "hdf5-${VER}.tar.gz"
+
+# Untar source
+tar -zxvf hdf5-${VER}.tar.gz
+
+# Cleanup
+rm -rf install src
+mkdir install src
+
+# Move source to 'src' directory
+mv hdf5-${VER}/* src/
+rm -rf hdf5-${VER}
+
+# Configure
+cd src
+./configure \
+	--prefix="${PREFIX}" \
+	--disable-dependency-tracking \
+	--disable-shared \
+	--enable-parallel \
+	--with-zlib="${ZLIB_ROOT}" \
+	--enable-hl
+
+# Compile and install
+#
+if [ $# -eq 0 ]; then
+	make
+	make install
+else
+	make -j $1
+	make -j $1 install
+fi
Index: /issm/trunk/externalpackages/hdf5/install-1-parallel-with_tests.sh
===================================================================
--- /issm/trunk/externalpackages/hdf5/install-1-parallel-with_tests.sh	(revision 28012)
+++ /issm/trunk/externalpackages/hdf5/install-1-parallel-with_tests.sh	(revision 28013)
@@ -3,7 +3,7 @@
 
 
-# Constants
+## Constants
 #
-VER="1.14.0"
+VER="1.10.9"
 
 PREFIX="${ISSM_DIR}/externalpackages/hdf5/install" # Set to location where external package should be installed
@@ -31,4 +31,6 @@
 ./configure \
 	--prefix="${PREFIX}" \
+	--disable-dependency-tracking \
+	--disable-static \
 	--enable-parallel \
 	--with-zlib="${ZLIB_ROOT}" \
Index: /issm/trunk/externalpackages/hdf5/install-1-parallel.sh
===================================================================
--- /issm/trunk/externalpackages/hdf5/install-1-parallel.sh	(revision 28012)
+++ /issm/trunk/externalpackages/hdf5/install-1-parallel.sh	(revision 28013)
@@ -3,7 +3,7 @@
 
 
-# Constants
+## Constants
 #
-VER="1.14.0"
+VER="1.10.9"
 
 PREFIX="${ISSM_DIR}/externalpackages/hdf5/install" # Set to location where external package should be installed
@@ -31,4 +31,6 @@
 ./configure \
 	--prefix="${PREFIX}" \
+	--disable-dependency-tracking \
+	--disable-static \
 	--enable-parallel \
 	--with-zlib="${ZLIB_ROOT}" \
Index: /issm/trunk/externalpackages/hdf5/install-1-with_tests.sh
===================================================================
--- /issm/trunk/externalpackages/hdf5/install-1-with_tests.sh	(revision 28012)
+++ /issm/trunk/externalpackages/hdf5/install-1-with_tests.sh	(revision 28013)
@@ -5,5 +5,5 @@
 ## Constants
 #
-VER="1.14.0"
+VER="1.10.9"
 
 PREFIX="${ISSM_DIR}/externalpackages/hdf5/install" # Set to location where external package should be installed
@@ -27,4 +27,6 @@
 ./configure \
 	--prefix="${PREFIX}" \
+	--disable-dependency-tracking \
+	--disable-static \
 	--with-zlib="${ZLIB_ROOT}" \
 	--enable-hl
Index: /issm/trunk/externalpackages/hdf5/install-1.sh
===================================================================
--- /issm/trunk/externalpackages/hdf5/install-1.sh	(revision 28012)
+++ /issm/trunk/externalpackages/hdf5/install-1.sh	(revision 28013)
@@ -5,5 +5,5 @@
 ## Constants
 #
-VER="1.14.0"
+VER="1.10.9"
 
 PREFIX="${ISSM_DIR}/externalpackages/hdf5/install" # Set to location where external package should be installed
@@ -27,4 +27,6 @@
 ./configure \
 	--prefix="${PREFIX}" \
+	--disable-dependency-tracking \
+	--disable-static \
 	--with-zlib="${ZLIB_ROOT}" \
 	--enable-hl
Index: /issm/trunk/externalpackages/m1qn3/install.sh
===================================================================
--- /issm/trunk/externalpackages/m1qn3/install.sh	(revision 28012)
+++ /issm/trunk/externalpackages/m1qn3/install.sh	(revision 28013)
@@ -28,8 +28,11 @@
 if which ifort >/dev/null; then
 	FC="ifort"
+	FFLAGS="-traceback -check all" # -O2 is default 
 else
 	FC="gfortran"
 	if [ `uname` == "Darwin" ]; then
-		FC="gfortran -arch x86_64"
+		FFLAGS="-arch $(uname -m)"
+	else
+		FFLAGS=""
 	fi
 fi
@@ -41,4 +44,5 @@
 LIB_EXT=a
 FC=$FC
+FFLAGS=$FFLAGS
 install: libm1qn3.\$(LIB_EXT)
 	cp libm1qn3.\$(LIB_EXT) ${PREFIX}
@@ -60,4 +64,5 @@
 LIB_EXT=a
 FC=$FC
+FFLAGS=$FFLAGS
 install: libddot.\$(LIB_EXT)
 	cp libddot.\$(LIB_EXT) ${PREFIX}
Index: /issm/trunk/externalpackages/medipack/install.sh
===================================================================
--- /issm/trunk/externalpackages/medipack/install.sh	(revision 28012)
+++ /issm/trunk/externalpackages/medipack/install.sh	(revision 28013)
@@ -6,3 +6,3 @@
 
 #Download development version
-svn co https://github.com/SciCompKL/MeDiPack/trunk install
+git clone https://github.com/SciCompKL/MeDiPack.git install
Index: /issm/trunk/externalpackages/msmpi/install-static.sh
===================================================================
--- /issm/trunk/externalpackages/msmpi/install-static.sh	(revision 28013)
+++ /issm/trunk/externalpackages/msmpi/install-static.sh	(revision 28013)
@@ -0,0 +1,77 @@
+#!/bin/bash
+#
+# Creates a local MS-MPI directory to be used for supplying MPI headers files 
+# and libraries to ISSM configuration and certain external packages.
+#
+# Assumes that Microsoft MPI and MPI SDK have been installed. To do so,
+# - Navigate to https://docs.microsoft.com/en-us/message-passing-interface/microsoft-mpi
+# - Under the 'MS-MPI Downloads' heading, click the link for 
+#	'MS-MPI v<version>', where <version> is the latest version available (as of 
+#	this writing, 10.1.2)
+# - Click the 'Download' button
+# - Make sure both boxes are checked
+# - Click the 'Save File' button in each prompt
+# - When the downloads are complete, run each installer
+#
+# TODO:
+# - Commit Microsoft MPI and Microsoft SDK installers or source code to 
+#	external packages source repository, then update this documentation to note 
+#	that they are available
+# - Attempt to download Microsoft MPI and Microsoft SDK installers or source 
+#	code and (compile and) install with this script
+# - Alternatively, instruct users to install MSYS2 MinGW 64-bit MS-MPI package 
+#	with,
+#
+#		pacman -S mingw-w64-x86_64-msmpi
+#
+# remove this script, its parent directory, and references to it from 
+# configuration files in $ISSM_DIR/jenkins directory and documentation
+#
+
+
+## Constants
+#
+PREFIX="${ISSM_DIR}/externalpackages/msmpi/install"
+
+MSMPI_BIN_DIR=$(cygpath -u $(cygpath -ms "/c/Program Files/Microsoft MPI/Bin"))
+MSMPI_INC_DIR=$(cygpath -u $(cygpath -ms "/c/Program Files (x86)/Microsoft SDKs/MPI/Include"))
+MSMPI_LIB="/c/Windows/System32/msmpi.dll"
+MSMPI_LIC_DIR=$(cygpath -u $(cygpath -ms "/c/Program Files (x86)/Microsoft SDKs/MPI/License"))
+
+# Cleanup
+rm -rf ${PREFIX}
+mkdir -p ${PREFIX} ${PREFIX}/bin ${PREFIX}/include ${PREFIX}/lib ${PREFIX}/license
+
+# Copy MS-MPI binaries to 'bin' directory
+cp -R ${MSMPI_BIN_DIR}/* ${PREFIX}/bin
+
+# Copy MS-MPI header files to 'include' directory
+cp ${MSMPI_INC_DIR}/mpi.h ${PREFIX}/include
+cp ${MSMPI_INC_DIR}/mpi.f90 ${PREFIX}/include
+cp ${MSMPI_INC_DIR}/mpif.h ${PREFIX}/include
+cp ${MSMPI_INC_DIR}/mpio.h ${PREFIX}/include
+cp ${MSMPI_INC_DIR}/x64/mpifptr.h ${PREFIX}/include
+
+# Generate static copy of MS-MPI library
+gendef ${MSMPI_LIB}
+dlltool -d msmpi.def -l libmsmpi.a -D ${MSMPI_LIB}
+
+# Copy MS-MPI library to 'lib' directory
+cp libmsmpi.a ${PREFIX}/lib
+cp libmsmpi.a ${PREFIX}/lib/msmpi.a
+cp libmsmpi.a ${PREFIX}/lib/msmpi.lib
+
+# Copy MS-MPI license files to 'license' directory
+cp -R ${MSMPI_LIC_DIR}/* ${PREFIX}/license
+
+# TODO: Remove all of the following once compilation of a newer version of 
+#		PETSc that does not pick up system DLL is sorted out
+#
+
+# Copy MS-MPI library to 'lib' directory
+cp ${MSMPI_LIB} ${PREFIX}/lib
+
+# Create link to shared library so that libtool can find it
+cd ${PREFIX}/lib
+ln -s msmpi.dll libmsmpi.dll
+
Index: /issm/trunk/externalpackages/msmpi/install.sh
===================================================================
--- /issm/trunk/externalpackages/msmpi/install.sh	(revision 28012)
+++ /issm/trunk/externalpackages/msmpi/install.sh	(revision 28013)
@@ -34,17 +34,18 @@
 PREFIX="${ISSM_DIR}/externalpackages/msmpi/install"
 
+MSMPI_BIN_DIR=$(cygpath -u $(cygpath -ms "/c/Program Files/Microsoft MPI/Bin"))
+MSMPI_INC_DIR=$(cygpath -u $(cygpath -ms "/c/Program Files (x86)/Microsoft SDKs/MPI/Include"))
 MSMPI_LIB="/c/Windows/System32/msmpi.dll"
-MSMPI_INC=$(cygpath -u $(cygpath -ms "/c/Program Files (x86)/Microsoft SDKs/MPI/Include"))
 
 # Cleanup
 rm -rf ${PREFIX}
-mkdir -p ${PREFIX} ${PREFIX}/include ${PREFIX}/lib
+mkdir -p ${PREFIX} ${PREFIX}/bin ${PREFIX}/include ${PREFIX}/lib
 
 # Copy MS-MPI header files to 'include' directory
-cp ${MSMPI_INC}/mpi.h ${PREFIX}/include
-cp ${MSMPI_INC}/mpi.f90 ${PREFIX}/include
-cp ${MSMPI_INC}/mpif.h ${PREFIX}/include
-cp ${MSMPI_INC}/mpio.h ${PREFIX}/include
-cp ${MSMPI_INC}/x64/mpifptr.h ${PREFIX}/include
+cp ${MSMPI_INC_DIR}/mpi.h ${PREFIX}/include
+cp ${MSMPI_INC_DIR}/mpi.f90 ${PREFIX}/include
+cp ${MSMPI_INC_DIR}/mpif.h ${PREFIX}/include
+cp ${MSMPI_INC_DIR}/mpio.h ${PREFIX}/include
+cp ${MSMPI_INC_DIR}/x64/mpifptr.h ${PREFIX}/include
 
 # Copy MS-MPI library to 'lib' directory
Index: /issm/trunk/externalpackages/netcdf/install-4.7-parallel-static-with_tests.sh
===================================================================
--- /issm/trunk/externalpackages/netcdf/install-4.7-parallel-static-with_tests.sh	(revision 28012)
+++ /issm/trunk/externalpackages/netcdf/install-4.7-parallel-static-with_tests.sh	(revision 28013)
@@ -17,5 +17,5 @@
 # TODO:
 # - Compile and link curl statically (issue with DAP and system libs on macOS 
-#	with more restirctive Gatekeeper; see also --disable-dap option in 
+#	with more restrictive Gatekeeper; see also --disable-dap option in 
 #	configuration)
 #
@@ -32,5 +32,4 @@
 export CPPFLAGS="-I${ZLIB_ROOT}/include"
 
-export HDF5LIB="${HDF5_ROOT}/lib/libhdf5_hl.a ${HDF5_ROOT}/lib/libhdf5.a"
 export ZLIB="${ZLIB_ROOT}/lib/libz.a"
 
Index: /issm/trunk/externalpackages/netcdf/install-4.7-parallel-static.sh
===================================================================
--- /issm/trunk/externalpackages/netcdf/install-4.7-parallel-static.sh	(revision 28012)
+++ /issm/trunk/externalpackages/netcdf/install-4.7-parallel-static.sh	(revision 28013)
@@ -17,5 +17,5 @@
 # TODO:
 # - Compile and link curl statically (issue with DAP and system libs on macOS 
-#	with more restirctive Gatekeeper; see also --disable-dap option in 
+#	with more restrictive Gatekeeper; see also --disable-dap option in 
 #	configuration)
 #
@@ -32,5 +32,4 @@
 export CPPFLAGS="-I${ZLIB_ROOT}/include"
 
-export HDF5LIB="${HDF5_ROOT}/lib/libhdf5_hl.a ${HDF5_ROOT}/lib/libhdf5.a"
 export ZLIB="${ZLIB_ROOT}/lib/libz.a"
 
Index: /issm/trunk/externalpackages/netcdf/install-4.7-parallel-with_tests.sh
===================================================================
--- /issm/trunk/externalpackages/netcdf/install-4.7-parallel-with_tests.sh	(revision 28012)
+++ /issm/trunk/externalpackages/netcdf/install-4.7-parallel-with_tests.sh	(revision 28013)
@@ -22,6 +22,6 @@
 #
 export CC=mpicc
-export CPPFLAGS="-I${CURL_ROOT}/include -I${HDF5_ROOT}/include -I${ZLIB_ROOT}/include"
-export LDFLAGS="-L${CURL_ROOT}/lib -L${HDF5_ROOT}/lib -L${ZLIB_ROOT}/lib"
+export CPPFLAGS="-I${HDF5_ROOT}/include -I${CURL_ROOT}/include -I${ZLIB_ROOT}/include"
+export LDFLAGS="-L${HDF5_ROOT}/lib -L${CURL_ROOT}/lib -L${ZLIB_ROOT}/lib"
 
 # Download source
Index: /issm/trunk/externalpackages/netcdf/install-4.7-parallel.sh
===================================================================
--- /issm/trunk/externalpackages/netcdf/install-4.7-parallel.sh	(revision 28012)
+++ /issm/trunk/externalpackages/netcdf/install-4.7-parallel.sh	(revision 28013)
@@ -22,6 +22,6 @@
 #
 export CC=mpicc
-export CPPFLAGS="-I${CURL_ROOT}/include -I${HDF5_ROOT}/include -I${ZLIB_ROOT}/include"
-export LDFLAGS="-L${CURL_ROOT}/lib -L${HDF5_ROOT}/lib -L${ZLIB_ROOT}/lib"
+export CPPFLAGS="-I${HDF5_ROOT}/include -I${CURL_ROOT}/include -I${ZLIB_ROOT}/include"
+export LDFLAGS="-L${HDF5_ROOT}/lib -L${CURL_ROOT}/lib -L${ZLIB_ROOT}/lib"
 
 # Download source
Index: /issm/trunk/externalpackages/netcdf/install-4.7-with_tests.sh
===================================================================
--- /issm/trunk/externalpackages/netcdf/install-4.7-with_tests.sh	(revision 28012)
+++ /issm/trunk/externalpackages/netcdf/install-4.7-with_tests.sh	(revision 28013)
@@ -21,6 +21,6 @@
 # Environment
 #
-export CPPFLAGS="-I${CURL_ROOT}/include -I${HDF5_ROOT}/include -I${ZLIB_ROOT}/include"
-export LDFLAGS="-L${CURL_ROOT}/lib -L${HDF5_ROOT}/lib -L${ZLIB_ROOT}/lib"
+export CPPFLAGS="-I${HDF5_ROOT}/include -I${CURL_ROOT}/include -I${ZLIB_ROOT}/include"
+export LDFLAGS="-L${HDF5_ROOT}/lib -L${CURL_ROOT}/lib -L${ZLIB_ROOT}/lib"
 
 # Download source
Index: /issm/trunk/externalpackages/netcdf/install-4.7.sh
===================================================================
--- /issm/trunk/externalpackages/netcdf/install-4.7.sh	(revision 28012)
+++ /issm/trunk/externalpackages/netcdf/install-4.7.sh	(revision 28013)
@@ -21,6 +21,6 @@
 # Environment
 #
-export CPPFLAGS="-I${CURL_ROOT}/include -I${HDF5_ROOT}/include -I${ZLIB_ROOT}/include"
-export LDFLAGS="-L${CURL_ROOT}/lib -L${HDF5_ROOT}/lib -L${ZLIB_ROOT}/lib"
+export CPPFLAGS="-I${HDF5_ROOT}/include -I${CURL_ROOT}/include -I${ZLIB_ROOT}/include"
+export LDFLAGS="-L${HDF5_ROOT}/lib -L${CURL_ROOT}/lib -L${ZLIB_ROOT}/lib"
 
 # Download source
Index: sm/trunk/externalpackages/petsc/install-3.12-linux-static.sh
===================================================================
--- /issm/trunk/externalpackages/petsc/install-3.12-linux-static.sh	(revision 28012)
+++ 	(revision )
@@ -1,65 +1,0 @@
-#!/bin/bash
-set -eu
-
-
-## Constants
-#
-VER="3.12.3"
-
-PETSC_DIR=${ISSM_DIR}/externalpackages/petsc/src # DO NOT CHANGE THIS
-PREFIX="${ISSM_DIR}/externalpackages/petsc/install" # Set to location where external package should be installed
-
-# Download source
-$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/petsc-lite-${VER}.tar.gz" "petsc-${VER}.tar.gz"
-
-# Unpack source
-tar -zxvf petsc-${VER}.tar.gz
-
-# Cleanup
-rm -rf ${PREFIX} ${PETSC_DIR}
-mkdir -p ${PETSC_DIR}
-
-# Move source to $PETSC_DIR
-mv petsc-${VER}/* ${PETSC_DIR}
-rm -rf petsc-${VER}
-
-# Configure
-#
-# NOTE:
-# - Cannot use --with-fpic option when compiling static libs,
-#
-#		Cannot determine compiler PIC flags if shared libraries is turned off
-#		Either run using --with-shared-libraries or --with-pic=0 and supply the
-#		compiler PIC flag via CFLAGS, CXXXFLAGS, and FCFLAGS
-#
-# - Added -fallow-argument-mismatch to FFLAGS in order to clear,
-#
-#		error: The Fortran compiler gfortran will not compile files that call 
-#		the same routine with arguments of different types.
-#
-#	for gfortran 10 or later (may need to remove it for earlier versions).
-#
-cd ${PETSC_DIR}
-./config/configure.py \
-	--prefix="${PREFIX}" \
-	--PETSC_DIR="${PETSC_DIR}" \
-	--with-shared-libraries=0 \
-	--CFLAGS="-fPIC" \
-	--CXXFLAGS="-fPIC" \
-	--FFLAGS="-fPIC" \
-	--with-debugging=0 \
-	--with-valgrind=0 \
-	--with-x=0 \
-	--with-ssl=0 \
-	--download-fblaslapack=1 \
-	--download-mpich=1 \
-	--download-metis=1 \
-	--download-parmetis=1 \
-	--download-scalapack=1 \
-	--download-mumps=1 \
-	--download-zlib=1 \
-	--download-hdf5=1
-
-# Compile and install
-make
-make install
Index: sm/trunk/externalpackages/petsc/install-3.12-linux.sh
===================================================================
--- /issm/trunk/externalpackages/petsc/install-3.12-linux.sh	(revision 28012)
+++ 	(revision )
@@ -1,47 +1,0 @@
-#!/bin/bash
-set -eu
-
-
-## Constants
-#
-VER="3.12.3"
-
-PETSC_DIR="${ISSM_DIR}/externalpackages/petsc/src" # DO NOT CHANGE THIS
-PREFIX="${ISSM_DIR}/externalpackages/petsc/install" # Set to location where external package should be installed
-
-# Download source
-$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/petsc-lite-${VER}.tar.gz" "petsc-${VER}.tar.gz"
-
-# Unpack source
-tar -zxvf petsc-${VER}.tar.gz
-
-# Cleanup
-rm -rf ${PREFIX} ${PETSC_DIR}
-mkdir -p ${PETSC_DIR}
-
-# Move source to $PETSC_DIR
-mv petsc-${VER}/* ${PETSC_DIR}
-rm -rf petsc-${VER}
-
-# Configure
-cd ${PETSC_DIR}
-./config/configure.py \
-	--prefix="${PREFIX}" \
-	--PETSC_DIR="${PETSC_DIR}" \
-	--with-debugging=0 \
-	--with-valgrind=0 \
-	--with-x=0 \
-	--with-ssl=0 \
-	--with-pic=1 \
-	--download-fblaslapack=1 \
-	--download-mpich=1 \
-	--download-metis=1 \
-	--download-parmetis=1 \
-	--download-scalapack=1 \
-	--download-mumps=1 \
-	--download-zlib=1 \
-	--download-hdf5=1
-
-# Compile and install
-make
-make install
Index: sm/trunk/externalpackages/petsc/install-3.12-lonestar.sh
===================================================================
--- /issm/trunk/externalpackages/petsc/install-3.12-lonestar.sh	(revision 28012)
+++ 	(revision )
@@ -1,49 +1,0 @@
-#!/bin/bash
-set -eu
-
-
-## Constants
-#
-VER="3.12.3"
-
-PETSC_DIR="${ISSM_DIR}/externalpackages/petsc/src" # DO NOT CHANGE THIS
-PREFIX="${ISSM_DIR}/externalpackages/petsc/install" # Set to location where external package should be installed
-
-# Download source
-$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/petsc-lite-${VER}.tar.gz" "petsc-${VER}.tar.gz"
-
-# Unpack source
-tar -zxvf petsc-${VER}.tar.gz
-
-# Cleanup
-rm -rf ${PREFIX} ${PETSC_DIR}
-mkdir -p ${PETSC_DIR}
-
-# Move source to $PETSC_DIR
-mv petsc-${VER}/* ${PETSC_DIR}
-rm -rf petsc-${VER}
-
-# Configure
-cd ${PETSC_DIR}
-./config/configure.py \
-	--prefix="${PREFIX}" \
-	--PETSC_DIR="${PETSC_DIR}" \
-	--with-mpi-dir="/opt/cray/pe/mpt/7.7.3/gni/mpich-intel/16.0/" \
-	--with-blas-lapack-dir="$TACC_MKL_LIB" \
-	--with-scalapack-include="$TACC_MKL_INC" \
-	--with-scalapack-lib="$TACC_MKL_LIB/libmkl_scalapack_lp64.so $TACC_MKL_LIB/libmkl_blacs_intelmpi_lp64.so" \
-	--with-shared-libraries=1 \
-	--known-mpi-shared-libraries=1 \
-	--with-debugging=0 \
-	--with-valgrind=0 \
-	--with-x=0 \
-	--with-ssl=0 \
-	--with-batch  \
-	--download-metis=1 \
-	--download-parmetis=1 \
-	--download-mumps=1 \
-	--download-scalapack=1
-
-# Compile and install
-make
-make install
Index: sm/trunk/externalpackages/petsc/install-3.12-mac-static.sh
===================================================================
--- /issm/trunk/externalpackages/petsc/install-3.12-mac-static.sh	(revision 28012)
+++ 	(revision )
@@ -1,69 +1,0 @@
-#!/bin/bash
-set -eu
-
-
-## Constants
-#
-VER="3.12.3"
-
-PETSC_DIR=${ISSM_DIR}/externalpackages/petsc/src # DO NOT CHANGE THIS
-PREFIX="${ISSM_DIR}/externalpackages/petsc/install" # Set to location where external package should be installed
-
-# Download source
-$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/petsc-lite-${VER}.tar.gz" "petsc-${VER}.tar.gz"
-
-# Unpack source
-tar -zxvf petsc-${VER}.tar.gz
-
-# Cleanup
-rm -rf ${PREFIX} ${PETSC_DIR}
-mkdir -p ${PETSC_DIR}
-
-# Move source to $PETSC_DIR
-mv petsc-${VER}/* ${PETSC_DIR}
-rm -rf petsc-${VER}
-
-# Configure
-#
-# NOTE:
-# - Cannot use --with-fpic option when compiling static libs,
-#
-#		Cannot determine compiler PIC flags if shared libraries is turned off
-#		Either run using --with-shared-libraries or --with-pic=0 and supply the
-#		compiler PIC flag via CFLAGS, CXXXFLAGS, and FCFLAGS
-#
-# - Added -Wno-error=implicit-function-declaration to CFLAGS for Clang >= 12. 
-#	(may need to remove it for earlier versions not using the C99 standard).
-# - Added -fallow-argument-mismatch to FFLAGS in order to clear,
-#
-#		error: The Fortran compiler gfortran will not compile files that call 
-#		the same routine with arguments of different types.
-#
-#	for gfortran 10 or later (may need to remove it for earlier versions).
-# - Added -static-libgfortran to all macOS static builds, but this will not 
-#	work out of the box on Linux.
-#
-cd ${PETSC_DIR}
-./config/configure.py \
-	--prefix="${PREFIX}" \
-	--PETSC_DIR="${PETSC_DIR}" \
-	--with-shared-libraries=0 \
-	--CFLAGS="-fPIC -Wno-error=implicit-function-declaration" \
-	--CXXFLAGS="-fPIC" \
-	--FFLAGS="-fPIC -fallow-argument-mismatch -static-libgfortran" \
-	--with-debugging=0 \
-	--with-valgrind=0 \
-	--with-x=0 \
-	--with-ssl=0 \
-	--download-fblaslapack=1 \
-	--download-mpich=1 \
-	--download-metis=1 \
-	--download-parmetis=1 \
-	--download-scalapack=1 \
-	--download-mumps=1 \
-	--download-zlib=1 \
-	--download-hdf5=1
-
-# Compile and install
-make
-make install
Index: sm/trunk/externalpackages/petsc/install-3.12-mac.sh
===================================================================
--- /issm/trunk/externalpackages/petsc/install-3.12-mac.sh	(revision 28012)
+++ 	(revision )
@@ -1,60 +1,0 @@
-#!/bin/bash
-set -eu
-
-
-## Constants
-#
-VER="3.12.3"
-
-PETSC_DIR="${ISSM_DIR}/externalpackages/petsc/src" # DO NOT CHANGE THIS
-PREFIX="${ISSM_DIR}/externalpackages/petsc/install" # Set to location where external package should be installed
-
-# Download source
-$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/petsc-lite-${VER}.tar.gz" "petsc-${VER}.tar.gz"
-
-# Unpack source
-tar -zxvf petsc-${VER}.tar.gz
-
-# Cleanup
-rm -rf ${PREFIX} ${PETSC_DIR}
-mkdir -p ${PETSC_DIR}
-
-# Move source to $PETSC_DIR
-mv petsc-${VER}/* ${PETSC_DIR}
-rm -rf petsc-${VER}
-
-# Configure
-#
-# NOTE:
-# - Added -Wno-error=implicit-function-declaration to CFLAGS for Clang >= 12. 
-#	(may need to remove it for earlier versions not using the C99 standard).
-# - Added -fallow-argument-mismatch to FFLAGS in order to clear,
-#
-#		error: The Fortran compiler gfortran will not compile files that call 
-#		the same routine with arguments of different types.
-#
-#	for gfortran 10 or later (may need to remove it for earlier versions).
-#
-cd ${PETSC_DIR}
-./config/configure.py \
-	--prefix="${PREFIX}" \
-	--PETSC_DIR="${PETSC_DIR}" \
-	--CFLAGS="-Wno-error=implicit-function-declaration" \
-	--FFLAGS="-fallow-argument-mismatch" \
-	--with-debugging=0 \
-	--with-valgrind=0 \
-	--with-x=0 \
-	--with-ssl=0 \
-	--with-pic=1 \
-	--download-fblaslapack=1 \
-	--download-mpich=1 \
-	--download-metis=1 \
-	--download-parmetis=1 \
-	--download-scalapack=1 \
-	--download-mumps=1 \
-	--download-zlib=1 \
-	--download-hdf5=1
-
-# Compile and install
-make
-make install
Index: sm/trunk/externalpackages/petsc/install-3.12-win-msys2-gcc-msmpi.sh
===================================================================
--- /issm/trunk/externalpackages/petsc/install-3.12-win-msys2-gcc-msmpi.sh	(revision 28012)
+++ 	(revision )
@@ -1,65 +1,0 @@
-#!/bin/bash
-set -u # NOTE: Do not set -e as it will cause this script to fail when there are errors in underlying Python scripts
-
-
-## Constants
-#
-VER="3.12.3"
-
-PETSC_DIR="${ISSM_DIR}/externalpackages/petsc/src" # DO NOT CHANGE THIS
-PREFIX="${ISSM_DIR}/externalpackages/petsc/install" # Set to location where external package should be installed
-
-# Download source
-$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/petsc-lite-${VER}.tar.gz" "petsc-${VER}.tar.gz"
-
-# Unpack source
-tar -zxvf petsc-${VER}.tar.gz
-
-# Cleanup
-rm -rf ${PREFIX} ${PETSC_DIR}
-mkdir -p ${PETSC_DIR}
-
-# Move source to $PETSC_DIR
-mv petsc-${VER}/* ${PETSC_DIR}
-rm -rf petsc-${VER}
-
-# Configure
-#
-# - Added -fallow-argument-mismatch option to FFLAGS in order to clear "Error: 
-#	Rank mismatch between actual argument at [...]"
-# - Added -fallow-invalid-boz option to FFLAGS in order to clear "Error: BOZ 
-#	literal constant at [...]"
-#
-cd ${PETSC_DIR}
-./config/configure.py \
-	--prefix="${PREFIX}" \
-	--PETSC_DIR="${PETSC_DIR}" \
-	--FFLAGS="-fallow-argument-mismatch -fallow-invalid-boz" \
-	--with-debugging=0 \
-	--with-valgrind=0 \
-	--with-x=0 \
-	--with-ssl=0 \
-	--with-pic=1 \
-	--with-mpiexec="/c/PROGRA~1/MICROS~1/Bin/mpiexec.exe" \
-	--with-mpi-lib="-L${MSMPI_ROOT}/lib -lmsmpi" \
-	--with-mpi-include="${MSMPI_ROOT}/include" \
-	--with-metis-dir=${METIS_ROOT} \
-	--with-parmetis-dir=${PARMETIS_ROOT} \
-	--with-blas-lib="-L${BLAS_ROOT}/lib -lblas" \
-	--known-64-bit-blas-indices=0 \
-	--with-lapack-lib="-L${LAPACK_ROOT}/lib -llapack" \
-	--with-scalapack-dir=${SCALAPACK_ROOT} \
-	--with-mumps-dir=${MUMPS_ROOT}
-
-# Compile and install
-make
-make install
-
-# NOTE:
-# - Hack to recover from failed installation (appears to happen only on Windows 
-#	when reproducing symbolic links in destination directory) rather than 
-#	trying to patch src/config/install.py
-#
-if [ $? -ne 0 ]; then
-	make install
-fi
Index: sm/trunk/externalpackages/petsc/install-3.13-pleiades.sh
===================================================================
--- /issm/trunk/externalpackages/petsc/install-3.13-pleiades.sh	(revision 28012)
+++ 	(revision )
@@ -1,63 +1,0 @@
-#!/bin/bash
-set -eu
-
-
-## Constants
-#
-VER="3.13.6"
-
-PETSC_DIR="${ISSM_DIR}/externalpackages/petsc/src" # DO NOT CHANGE THIS
-PREFIX="${ISSM_DIR}/externalpackages/petsc/install" # Set to location where external package should be installed
-
-# Download source
-echo "OK1 ($ISSM_DIR)"
-$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/petsc-lite-${VER}.tar.gz" "petsc-${VER}.tar.gz"
-echo OK2
-
-# Unpack source
-tar -zxvf petsc-${VER}.tar.gz
-
-# Cleanup
-rm -rf ${PREFIX} ${PETSC_DIR}
-mkdir -p ${PETSC_DIR}
-
-# Move source to $PETSC_DIR
-mv petsc-${VER}/* ${PETSC_DIR}
-rm -rf petsc-${VER}
-
-
-# Configure
-#
-# NOTE: Based on /nasa/petsc/3.7.5/intel_mpt/lib/petsc/conf/petscvariables; look for CONFIGURE_OPTIONS
-#
-cd ${PETSC_DIR}
-./config/configure.py \
-	--prefix="${PREFIX}" \
-	--PETSC_DIR="${PETSC_DIR}" \
-	--with-cc=icc \
-	--with-cpp=/usr/bin/cpp \
-	--with-cxx=icpc \
-	--with-fc=ifort \
-	-COPTFLAGS="-g -O3 -axCORE-AVX2,AVX -xSSE4.2" \
-	-CXXOPTFLAGS="-g -O3 -axCORE-AVX2,AVX -xSSE4.2" \
-	-FOPTFLAGS="-g -O3 -axCORE-AVX2,AVX -xSSE4.2" \
-	--with-blas-lapack-dir="/nasa/intel/Compiler/2016.2.181/compilers_and_libraries_2016.2.181/linux/mkl/" \
-	--with-scalapack-include=/nasa/intel/Compiler/2016.2.181/mkl/include \
-	--with-scalapack-lib="/nasa/intel/Compiler/2016.2.181/mkl/lib/intel64/libmkl_scalapack_lp64.so /nasa/intel/Compiler/2016.2.181/mkl/lib/intel64/libmkl_blacs_sgimpt_lp64.so" \
-	--known-mpi-shared-libraries=1 \
-	--with-gnu-compilers=0 \
-	--with-vendor-compilers=intel \
-	--with-debugging=0 \
-	--with-valgrind=0 \
-	--with-x=0 \
-	--with-ssl=0 \
-	--with-batch=1 \
-	--with-shared-libraries=1 \
-	--download-metis=1 \
-	--download-parmetis=1 \
-	--download-mumps=1 \
-	--download-zlib=1
-
-# Compile and install
-make
-make install
Index: /issm/trunk/externalpackages/petsc/install-3.14-discover.sh
===================================================================
--- /issm/trunk/externalpackages/petsc/install-3.14-discover.sh	(revision 28012)
+++ /issm/trunk/externalpackages/petsc/install-3.14-discover.sh	(revision 28013)
@@ -35,8 +35,8 @@
 	--with-ssl=0 \
 	--with-pic=1 \
-	--with-blas-lapack-dir="/usr/local/intel/2020/compilers_and_libraries_2020.0.166/linux/mkl/" \
-	--with-cc="/usr/local/sgi/mpi/mpt-2.17/bin/mpicc" \
-	--with-cxx="/usr/local/sgi/mpi/mpt-2.17/bin/mpicxx" \
-	--with-fc="/usr/local/sgi/mpi/mpt-2.17/bin/mpif90" \
+	--with-blas-lapack-dir="/usr/local/intel/oneapi/2021/mkl/2021.4.0/" \
+	--with-cc="/usr/local/intel/oneapi/2021/mpi/2021.4.0/bin/mpicc" \
+	--with-cxx="/usr/local/intel/oneapi/2021/mpi/2021.4.0/bin/mpicxx" \
+	--with-fc="/usr/local/intel/oneapi/2021/mpi/2021.4.0/bin/mpif90" \
 	--known-mpi-shared-libraries=1 \
 	--known-64-bit-blas-indices \
Index: /issm/trunk/externalpackages/petsc/install-3.14-linux-static.sh
===================================================================
--- /issm/trunk/externalpackages/petsc/install-3.14-linux-static.sh	(revision 28013)
+++ /issm/trunk/externalpackages/petsc/install-3.14-linux-static.sh	(revision 28013)
@@ -0,0 +1,58 @@
+#!/bin/bash
+set -eu
+
+
+## Constants
+#
+VER="3.14.6"
+
+PETSC_DIR="${ISSM_DIR}/externalpackages/petsc/src" # DO NOT CHANGE THIS
+PREFIX="${ISSM_DIR}/externalpackages/petsc/install" # Set to location where external package should be installed
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/petsc-lite-${VER}.tar.gz" "petsc-${VER}.tar.gz"
+
+# Unpack source
+tar -zxvf petsc-${VER}.tar.gz
+
+# Cleanup
+rm -rf ${PREFIX} ${PETSC_DIR}
+mkdir -p ${PETSC_DIR}
+
+# Move source to $PETSC_DIR
+mv petsc-${VER}/* ${PETSC_DIR}
+rm -rf petsc-${VER}
+
+# Configure
+#
+# NOTE:
+# - Cannot use --with-fpic option when compiling static libs,
+#
+#		Cannot determine compiler PIC flags if shared libraries is turned off
+#		Either run using --with-shared-libraries or --with-pic=0 and supply the
+#		compiler PIC flag via CFLAGS, CXXXFLAGS, and FCFLAGS
+#
+cd ${PETSC_DIR}
+./configure.py \
+	--prefix="${PREFIX}" \
+	--PETSC_DIR="${PETSC_DIR}" \
+	--with-shared-libraries=0 \
+	--CFLAGS="-fPIC" \
+	--CXXFLAGS="-fPIC" \
+	--FFLAGS="-fPIC" \
+	--with-debugging=0 \
+	--with-valgrind=0 \
+	--with-x=0 \
+	--with-ssl=0 \
+	--download-fblaslapack=1 \
+	--download-mpich=1 \
+	--download-metis=1 \
+	--download-parmetis=1 \
+	--download-scalapack=1 \
+	--download-mumps=1 \
+	--download-zlib=1 \
+	--download-hdf5=1
+
+# Compile and install
+make
+make install
Index: sm/trunk/externalpackages/petsc/install-3.14-mac-nohdf5.sh
===================================================================
--- /issm/trunk/externalpackages/petsc/install-3.14-mac-nohdf5.sh	(revision 28012)
+++ 	(revision )
@@ -1,59 +1,0 @@
-#!/bin/bash
-set -eu
-
-
-## Constants
-#
-VER="3.14.6"
-
-PETSC_DIR="${ISSM_DIR}/externalpackages/petsc/src" # DO NOT CHANGE THIS
-PREFIX="${ISSM_DIR}/externalpackages/petsc/install" # Set to location where external package should be installed
-
-# Download source
-$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/petsc-lite-${VER}.tar.gz" "petsc-${VER}.tar.gz"
-
-# Unpack source
-tar -zxvf petsc-${VER}.tar.gz
-
-# Cleanup
-rm -rf ${PREFIX} ${PETSC_DIR}
-mkdir -p ${PETSC_DIR}
-
-# Move source to $PETSC_DIR
-mv petsc-${VER}/* ${PETSC_DIR}
-rm -rf petsc-${VER}
-
-# Configure
-#
-# NOTE:
-# - Added -Wno-error=implicit-function-declaration to CFLAGS for Clang >= 12. 
-#	(may need to remove it for earlier versions not using the C99 standard).
-# - Added -fallow-argument-mismatch to FFLAGS in order to clear,
-#
-#		error: The Fortran compiler gfortran will not compile files that call 
-#		the same routine with arguments of different types.
-#
-#	for gfortran 10 or later (may need to remove it for earlier versions).
-#
-cd ${PETSC_DIR}
-./config/configure.py \
-	--prefix="${PREFIX}" \
-	--PETSC_DIR="${PETSC_DIR}" \
-	--CFLAGS="-Wno-error=implicit-function-declaration" \
-	--FFLAGS="-fallow-argument-mismatch" \
-	--with-debugging=0 \
-	--with-valgrind=0 \
-	--with-x=0 \
-	--with-ssl=0 \
-	--with-pic=1 \
-	--download-fblaslapack=1 \
-	--download-mpich=1 \
-	--download-metis=1 \
-	--download-parmetis=1 \
-	--download-scalapack=1 \
-	--download-mumps=1 \
-	--download-zlib=1
-
-# Compile and install
-make
-make install
Index: /issm/trunk/externalpackages/petsc/install-3.14-mac-static.sh
===================================================================
--- /issm/trunk/externalpackages/petsc/install-3.14-mac-static.sh	(revision 28013)
+++ /issm/trunk/externalpackages/petsc/install-3.14-mac-static.sh	(revision 28013)
@@ -0,0 +1,68 @@
+#!/bin/bash
+set -eu
+
+
+## Constants
+#
+VER="3.14.6"
+
+PETSC_DIR=${ISSM_DIR}/externalpackages/petsc/src # DO NOT CHANGE THIS
+PREFIX="${ISSM_DIR}/externalpackages/petsc/install" # Set to location where external package should be installed
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/petsc-lite-${VER}.tar.gz" "petsc-${VER}.tar.gz"
+
+# Unpack source
+tar -zxvf petsc-${VER}.tar.gz
+
+# Cleanup
+rm -rf ${PREFIX} ${PETSC_DIR}
+mkdir -p ${PETSC_DIR}
+
+# Move source to $PETSC_DIR
+mv petsc-${VER}/* ${PETSC_DIR}
+rm -rf petsc-${VER}
+
+# Modify source so that Python 3 can be used to compile PETSc
+sed -i'' 's|#!/usr/bin/env python|#!/usr/bin/env python3|g' ${PETSC_DIR}/config/configure.py
+
+# Modify source so that Python >= 3.9 can be used to compile PETSc
+sed -i'' 's|thread.isAlive|thread.is_alive|g' ${PETSC_DIR}/config/BuildSystem/script.py
+
+# Configure
+#
+# NOTE:
+# - Cannot use --with-fpic option when compiling static libs,
+#
+#		Cannot determine compiler PIC flags if shared libraries is turned off
+#		Either run using --with-shared-libraries or --with-pic=0 and supply the
+#		compiler PIC flag via CFLAGS, CXXXFLAGS, and FCFLAGS
+#
+# - Added -Wno-error=implicit-function-declaration to CFLAGS for Clang >= 12. 
+#	(may need to remove it for earlier versions not using the C99 standard).
+# - Added -static-libgfortran to all macOS static builds, but this will not 
+#	work out of the box on Linux.
+#
+cd ${PETSC_DIR}
+./config/configure.py \
+	--prefix="${PREFIX}" \
+	--PETSC_DIR="${PETSC_DIR}" \
+	--with-shared-libraries=0 \
+	--CFLAGS="-fPIC -Wno-error=implicit-function-declaration" \
+	--CXXFLAGS="-fPIC" \
+	--FFLAGS="-fPIC -static-libgfortran" \
+	--with-debugging=0 \
+	--with-valgrind=0 \
+	--with-x=0 \
+	--with-ssl=0 \
+	--download-fblaslapack=1 \
+	--download-mpich=1 \
+	--download-metis=1 \
+	--download-parmetis=1 \
+	--download-scalapack=1 \
+	--download-mumps=1 \
+	--download-zlib=1
+
+# Compile and install
+make
+make install
Index: /issm/trunk/externalpackages/petsc/install-3.14-mac-with-hdf5-static.sh
===================================================================
--- /issm/trunk/externalpackages/petsc/install-3.14-mac-with-hdf5-static.sh	(revision 28013)
+++ /issm/trunk/externalpackages/petsc/install-3.14-mac-with-hdf5-static.sh	(revision 28013)
@@ -0,0 +1,69 @@
+#!/bin/bash
+set -eu
+
+
+## Constants
+#
+VER="3.14.6"
+
+PETSC_DIR=${ISSM_DIR}/externalpackages/petsc/src # DO NOT CHANGE THIS
+PREFIX="${ISSM_DIR}/externalpackages/petsc/install" # Set to location where external package should be installed
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/petsc-lite-${VER}.tar.gz" "petsc-${VER}.tar.gz"
+
+# Unpack source
+tar -zxvf petsc-${VER}.tar.gz
+
+# Cleanup
+rm -rf ${PREFIX} ${PETSC_DIR}
+mkdir -p ${PETSC_DIR}
+
+# Move source to $PETSC_DIR
+mv petsc-${VER}/* ${PETSC_DIR}
+rm -rf petsc-${VER}
+
+# Modify source so that Python 3 can be used to compile PETSc
+sed -i'' 's|#!/usr/bin/env python|#!/usr/bin/env python3|g' ${PETSC_DIR}/config/configure.py
+
+# Modify source so that Python >= 3.9 can be used to compile PETSc
+sed -i'' 's|thread.isAlive|thread.is_alive|g' ${PETSC_DIR}/config/BuildSystem/script.py
+
+# Configure
+#
+# NOTE:
+# - Cannot use --with-fpic option when compiling static libs,
+#
+#		Cannot determine compiler PIC flags if shared libraries is turned off
+#		Either run using --with-shared-libraries or --with-pic=0 and supply the
+#		compiler PIC flag via CFLAGS, CXXXFLAGS, and FCFLAGS
+#
+# - Added -Wno-error=implicit-function-declaration to CFLAGS for Clang >= 12. 
+#	(may need to remove it for earlier versions not using the C99 standard).
+# - Added -static-libgfortran to all macOS static builds, but this will not 
+#	work out of the box on Linux.
+#
+cd ${PETSC_DIR}
+./config/configure.py \
+	--prefix="${PREFIX}" \
+	--PETSC_DIR="${PETSC_DIR}" \
+	--with-shared-libraries=0 \
+	--CFLAGS="-fPIC -Wno-error=implicit-function-declaration" \
+	--CXXFLAGS="-fPIC" \
+	--FFLAGS="-fPIC -static-libgfortran" \
+	--with-debugging=0 \
+	--with-valgrind=0 \
+	--with-x=0 \
+	--with-ssl=0 \
+	--download-fblaslapack=1 \
+	--download-mpich=1 \
+	--download-metis=1 \
+	--download-parmetis=1 \
+	--download-scalapack=1 \
+	--download-mumps=1 \
+	--download-zlib=1 \
+	--download-hdf5=1
+
+# Compile and install
+make
+make install
Index: /issm/trunk/externalpackages/petsc/install-3.14-mac-with-hdf5.sh
===================================================================
--- /issm/trunk/externalpackages/petsc/install-3.14-mac-with-hdf5.sh	(revision 28013)
+++ /issm/trunk/externalpackages/petsc/install-3.14-mac-with-hdf5.sh	(revision 28013)
@@ -0,0 +1,59 @@
+#!/bin/bash
+set -eu
+
+
+## Constants
+#
+VER="3.14.6"
+
+PETSC_DIR="${ISSM_DIR}/externalpackages/petsc/src" # DO NOT CHANGE THIS
+PREFIX="${ISSM_DIR}/externalpackages/petsc/install" # Set to location where external package should be installed
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/petsc-lite-${VER}.tar.gz" "petsc-${VER}.tar.gz"
+
+# Unpack source
+tar -zxvf petsc-${VER}.tar.gz
+
+# Cleanup
+rm -rf ${PREFIX} ${PETSC_DIR}
+mkdir -p ${PETSC_DIR}
+
+# Move source to $PETSC_DIR
+mv petsc-${VER}/* ${PETSC_DIR}
+rm -rf petsc-${VER}
+
+# Modify source so that Python 3 can be used to compile PETSc
+sed -i'' 's|#!/usr/bin/env python|#!/usr/bin/env python3|g' ${PETSC_DIR}/config/configure.py
+
+# Modify source so that Python >= 3.9 can be used to compile PETSc
+sed -i'' 's|thread.isAlive|thread.is_alive|g' ${PETSC_DIR}/config/BuildSystem/script.py
+
+# Configure
+#
+# NOTE:
+# - Added -Wno-error=implicit-function-declaration to CFLAGS for Clang >= 12. 
+#	(may need to remove it for earlier versions not using the C99 standard).
+#
+cd ${PETSC_DIR}
+./configure \
+	--prefix="${PREFIX}" \
+	--PETSC_DIR="${PETSC_DIR}" \
+	--CFLAGS="-Wno-error=implicit-function-declaration" \
+	--with-debugging=0 \
+	--with-valgrind=0 \
+	--with-x=0 \
+	--with-ssl=0 \
+	--with-pic=1 \
+	--download-fblaslapack=1 \
+	--download-mpich=1 \
+	--download-metis=1 \
+	--download-parmetis=1 \
+	--download-scalapack=1 \
+	--download-mumps=1 \
+	--download-zlib=1 \
+	--download-hdf5=1
+
+# Compile and install
+make
+make install
Index: /issm/trunk/externalpackages/petsc/install-3.14-mac.sh
===================================================================
--- /issm/trunk/externalpackages/petsc/install-3.14-mac.sh	(revision 28012)
+++ /issm/trunk/externalpackages/petsc/install-3.14-mac.sh	(revision 28013)
@@ -24,4 +24,10 @@
 rm -rf petsc-${VER}
 
+# Modify source so that Python 3 can be used to compile PETSc
+sed -i'' 's|#!/usr/bin/env python|#!/usr/bin/env python3|g' ${PETSC_DIR}/config/configure.py
+
+# Modify source so that Python >= 3.9 can be used to compile PETSc
+sed -i'' 's|thread.isAlive|thread.is_alive|g' ${PETSC_DIR}/config/BuildSystem/script.py
+
 # Configure
 #
@@ -29,17 +35,10 @@
 # - Added -Wno-error=implicit-function-declaration to CFLAGS for Clang >= 12. 
 #	(may need to remove it for earlier versions not using the C99 standard).
-# - Added -fallow-argument-mismatch to FFLAGS in order to clear,
-#
-#		error: The Fortran compiler gfortran will not compile files that call 
-#		the same routine with arguments of different types.
-#
-#	for gfortran 10 or later (may need to remove it for earlier versions).
 #
 cd ${PETSC_DIR}
-./config/configure.py \
+./configure \
 	--prefix="${PREFIX}" \
 	--PETSC_DIR="${PETSC_DIR}" \
 	--CFLAGS="-Wno-error=implicit-function-declaration" \
-	--FFLAGS="-fallow-argument-mismatch" \
 	--with-debugging=0 \
 	--with-valgrind=0 \
@@ -53,6 +52,5 @@
 	--download-scalapack=1 \
 	--download-mumps=1 \
-	--download-zlib=1 \
-	--download-hdf5=1
+	--download-zlib=1
 
 # Compile and install
Index: /issm/trunk/externalpackages/petsc/install-3.14-win-msys2-mingw-msmpi-static.sh
===================================================================
--- /issm/trunk/externalpackages/petsc/install-3.14-win-msys2-mingw-msmpi-static.sh	(revision 28013)
+++ /issm/trunk/externalpackages/petsc/install-3.14-win-msys2-mingw-msmpi-static.sh	(revision 28013)
@@ -0,0 +1,88 @@
+#!/bin/bash
+set -u # NOTE: Do not set -e as it will cause this script to fail when there are errors in underlying Python scripts
+
+# NOTE:
+# - You must install various needed packages with,
+#
+#		pacman -S mingw-w64-x86_64-toolchain python
+#
+# - You must use MSYS2 MinGW 64-bit version of cmake to be able to install 
+#	external packages correctly,
+#
+#		pacman -R mingw-w64-x86_64-cmake
+#
+# Sources:
+# - https://gitlab.com/petsc/petsc/-/issues/820#note_487483240
+#
+
+## Constants
+#
+VER="3.14.6"
+
+MAKEFILE_GENERATOR='-G "MSYS Makefiles"'
+PETSC_ARCH="arch-mswin-c-opt"
+PETSC_DIR="${ISSM_DIR}/externalpackages/petsc/src" # DO NOT CHANGE THIS
+PREFIX="${ISSM_DIR}/externalpackages/petsc/install" # Set to location where external package should be installed
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/petsc-lite-${VER}.tar.gz" "petsc-${VER}.tar.gz"
+
+# Unpack source
+tar -zxvf petsc-${VER}.tar.gz
+
+# Cleanup
+rm -rf ${PREFIX} ${PETSC_DIR}
+mkdir -p ${PETSC_DIR}
+
+# Move source to $PETSC_DIR
+mv petsc-${VER}/* ${PETSC_DIR}
+rm -rf petsc-${VER}
+
+# Patch source
+sed -i -e 's|#!/usr/bin/env python|#!/usr/bin/python3|' ${PETSC_DIR}/config/configure.py
+sed -i 's|  chkusingwindowspython()|#  chkusingwindowspython()|' ${PETSC_DIR}/config/configure.py
+sed -i 's|(MAKEFLAGS)|(MAKEFLAGS:w=)|' ${PETSC_DIR}/makefile ${PETSC_DIR}/lib/petsc/conf/rules # Fix for issue with GNUMake 4.4.1 (https://gitlab.com/petsc/petsc/-/merge_requests/6140)
+
+# Configure
+# - Cannot use --with-fpic option when compiling static libs,
+#
+#		Cannot determine compiler PIC flags if shared libraries is turned off
+#		Either run using --with-shared-libraries or --with-pic=0 and supply the
+#		compiler PIC flag via CFLAGS, CXXXFLAGS, and FCFLAGS
+#
+# - Added -fallow-argument-mismatch option to FFLAGS in order to clear "Error: 
+#	Rank mismatch between actual argument at [...]"
+# - Added -fallow-invalid-boz option to FFLAGS in order to clear "Error: BOZ 
+#	literal constant at [...]"
+# - Argument to --with-mpi-include must be a list or it gets expanded 
+#	incorrectly
+#
+cd ${PETSC_DIR}
+./config/configure.py \
+	--prefix="${PREFIX}" \
+	--PETSC_DIR="${PETSC_DIR}" \
+	--PETSC_ARCH="${PETSC_ARCH}" \
+	--CFLAGS="-fPIC -Wl,-static -Wno-error=implicit-function-declaration" \
+	--CXXFLAGS="-fPIC -Wl,-static" \
+	--FFLAGS="-fPIC -Wl,-static -fallow-argument-mismatch -fallow-invalid-boz" \
+	--with-shared-libraries=0 \
+	--with-debugging=0 \
+	--with-valgrind=0 \
+	--with-x=0 \
+	--with-ssl=0 \
+	--with-proc-filesystem=0 \
+	--with-mpiexec="${MPIEXEC_DIR}/mpiexec.exe" \
+	--with-mpi-lib="-L${MSMPI_ROOT}/lib -lmsmpi" \
+	--with-mpi-include="${MSMPI_ROOT}/include" \
+	--download-fblaslapack=1 \
+	--download-metis=1 \
+	--download-metis-cmake-arguments="${MAKEFILE_GENERATOR}" \
+	--download-parmetis=1 \
+	--download-parmetis-cmake-arguments="${MAKEFILE_GENERATOR}" \
+	--download-scalapack=1 \
+	--download-scalapack-cmake-arguments="${MAKEFILE_GENERATOR}" \
+	--download-mumps=1
+
+# Compile and install
+make PETSC_DIR="${PETSC_DIR}" PETSC_ARCH="${PETSC_ARCH}" all
+make PETSC_DIR="${PETSC_DIR}" PETSC_ARCH="${PETSC_ARCH}" install
Index: /issm/trunk/externalpackages/petsc/install-3.14-win-msys2-mingw-msmpi.sh
===================================================================
--- /issm/trunk/externalpackages/petsc/install-3.14-win-msys2-mingw-msmpi.sh	(revision 28012)
+++ /issm/trunk/externalpackages/petsc/install-3.14-win-msys2-mingw-msmpi.sh	(revision 28013)
@@ -21,4 +21,5 @@
 
 MAKEFILE_GENERATOR='-G "MSYS Makefiles"'
+PETSC_ARCH="arch-mswin-c-opt"
 PETSC_DIR="${ISSM_DIR}/externalpackages/petsc/src" # DO NOT CHANGE THIS
 PREFIX="${ISSM_DIR}/externalpackages/petsc/install" # Set to location where external package should be installed
@@ -38,6 +39,8 @@
 rm -rf petsc-${VER}
 
-# Copy customized source files to $PETSC_DIR
-cp configs/3.14/win/msys2/mingw64/config/configure.py ${PETSC_DIR}/config
+# Patch source
+sed -i -e 's|#!/usr/bin/env python|#!/usr/bin/python3|' ${PETSC_DIR}/config/configure.py
+sed -i 's|  chkusingwindowspython()|#  chkusingwindowspython()|' ${PETSC_DIR}/config/configure.py
+sed -i 's|(MAKEFLAGS)|(MAKEFLAGS:w=)|' ${PETSC_DIR}/makefile ${PETSC_DIR}/lib/petsc/conf/rules # Fix for issue with GNUMake 4.4.1 (https://gitlab.com/petsc/petsc/-/merge_requests/6140)
 
 # Configure
@@ -59,4 +62,5 @@
 	--prefix="${PREFIX}" \
 	--PETSC_DIR="${PETSC_DIR}" \
+	--PETSC_ARCH="${PETSC_ARCH}" \
 	--CFLAGS="-fPIC -Wno-error=implicit-function-declaration" \
 	--CXXFLAGS="-fPIC" \
@@ -81,4 +85,4 @@
 
 # Compile and install
-make
-make install
+make PETSC_DIR="${PETSC_DIR}" PETSC_ARCH="${PETSC_ARCH}" all
+make PETSC_DIR="${PETSC_DIR}" PETSC_ARCH="${PETSC_ARCH}" install
Index: /issm/trunk/externalpackages/petsc/install-3.15-babylon.sh
===================================================================
--- /issm/trunk/externalpackages/petsc/install-3.15-babylon.sh	(revision 28012)
+++ /issm/trunk/externalpackages/petsc/install-3.15-babylon.sh	(revision 28013)
@@ -41,6 +41,5 @@
 	--download-scalapack=1 \
 	--download-mumps=1 \
-	--download-zlib=1 \
-	--download-hdf5=1
+	--download-zlib=1
 
 # Compile and install
Index: /issm/trunk/externalpackages/petsc/install-3.16-linux.sh
===================================================================
--- /issm/trunk/externalpackages/petsc/install-3.16-linux.sh	(revision 28012)
+++ /issm/trunk/externalpackages/petsc/install-3.16-linux.sh	(revision 28013)
@@ -29,10 +29,4 @@
 # - Added -Wno-error=implicit-function-declaration to CFLAGS for Clang >= 12. 
 #	(may need to remove it for earlier versions not using the C99 standard).
-# - Added -fallow-argument-mismatch to FFLAGS in order to clear,
-#
-#		error: The Fortran compiler gfortran will not compile files that call 
-#		the same routine with arguments of different types.
-#
-#	for gfortran 10 or later (may need to remove it for earlier versions).
 #
 cd ${PETSC_DIR}
@@ -41,5 +35,4 @@
 	--PETSC_DIR="${PETSC_DIR}" \
 	--CFLAGS="-Wno-error=implicit-function-declaration" \
-	--FFLAGS="-fallow-argument-mismatch" \
 	--with-debugging=0 \
 	--with-valgrind=0 \
@@ -53,6 +46,5 @@
 	--download-scalapack=1 \
 	--download-mumps=1 \
-	--download-zlib=1 \
-	--download-hdf5=1
+	--download-zlib=1
 
 # Compile and install
Index: /issm/trunk/externalpackages/petsc/install-3.16-mac.sh
===================================================================
--- /issm/trunk/externalpackages/petsc/install-3.16-mac.sh	(revision 28012)
+++ /issm/trunk/externalpackages/petsc/install-3.16-mac.sh	(revision 28013)
@@ -29,10 +29,4 @@
 # - Added -Wno-error=implicit-function-declaration to CFLAGS for Clang >= 12. 
 #	(may need to remove it for earlier versions not using the C99 standard).
-# - Added -fallow-argument-mismatch to FFLAGS in order to clear,
-#
-#		error: The Fortran compiler gfortran will not compile files that call 
-#		the same routine with arguments of different types.
-#
-#	for gfortran 10 or later (may need to remove it for earlier versions).
 #
 cd ${PETSC_DIR}
@@ -41,5 +35,4 @@
 	--PETSC_DIR="${PETSC_DIR}" \
 	--CFLAGS="-Wno-error=implicit-function-declaration" \
-	--FFLAGS="-fallow-argument-mismatch" \
 	--with-debugging=0 \
 	--with-valgrind=0 \
@@ -53,6 +46,5 @@
 	--download-scalapack=1 \
 	--download-mumps=1 \
-	--download-zlib=1 \
-	--download-hdf5=1
+	--download-zlib=1
 
 # Compile and install
Index: /issm/trunk/externalpackages/petsc/install-3.17-discovery.sh
===================================================================
--- /issm/trunk/externalpackages/petsc/install-3.17-discovery.sh	(revision 28013)
+++ /issm/trunk/externalpackages/petsc/install-3.17-discovery.sh	(revision 28013)
@@ -0,0 +1,50 @@
+#!/bin/bash
+set -eu
+
+## Constants
+#
+VER="3.17.4"
+
+PETSC_DIR="${ISSM_DIR}/externalpackages/petsc/src" # DO NOT CHANGE THIS
+PREFIX="${ISSM_DIR}/externalpackages/petsc/install" # Set to location where external package should be installed
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/petsc-lite-${VER}.tar.gz" "petsc-${VER}.tar.gz"
+
+# Unpack source
+tar -zxvf petsc-${VER}.tar.gz
+
+# Cleanup
+rm -rf ${PREFIX} ${PETSC_DIR}
+mkdir -p ${PETSC_DIR}
+
+# Move source to $PETSC_DIR
+mv petsc-${VER}/* ${PETSC_DIR}
+rm -rf petsc-${VER}
+
+# Configure
+cd ${PETSC_DIR}
+./config/configure.py \
+	COPTFLAGS="-g -O3" CXXOPTFLAGS="-g -O3" FOPTFLAGS="-g -O3" \
+	--prefix="${PREFIX}" \
+	--PETSC_DIR="${PETSC_DIR}" \
+	--with-make-np=20 \
+	--with-blas-lapack-dir=$MKL_ROOT \
+	--with-mpi-dir="/optnfs/el7/mpich/3.3-intel19.3/" \
+	--known-mpi-shared-libraries=1 \
+	--known-mpi-long-double=1 \
+	--known-mpi-int64_t=1 \
+	--known-mpi-c-double-complex=1 \
+	--with-debugging=0 \
+	--with-valgrind=0 \
+	--with-x=0 \
+	--with-ssl=0 \
+	--with-pic=1 \
+	--download-metis=1 \
+	--download-parmetis=1 \
+	--download-scalapack=1 \
+	--download-mumps=1
+
+# Compile and install
+make
+make install
Index: /issm/trunk/externalpackages/petsc/install-3.17-linux-static.sh
===================================================================
--- /issm/trunk/externalpackages/petsc/install-3.17-linux-static.sh	(revision 28013)
+++ /issm/trunk/externalpackages/petsc/install-3.17-linux-static.sh	(revision 28013)
@@ -0,0 +1,57 @@
+#!/bin/bash
+set -eu
+
+
+## Constants
+#
+VER="3.17.4"
+
+PETSC_DIR="${ISSM_DIR}/externalpackages/petsc/src" # DO NOT CHANGE THIS
+PREFIX="${ISSM_DIR}/externalpackages/petsc/install" # Set to location where external package should be installed
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/petsc-lite-${VER}.tar.gz" "petsc-${VER}.tar.gz"
+
+# Unpack source
+tar -zxvf petsc-${VER}.tar.gz
+
+# Cleanup
+rm -rf ${PREFIX} ${PETSC_DIR}
+mkdir -p ${PETSC_DIR}
+
+# Move source to $PETSC_DIR
+mv petsc-${VER}/* ${PETSC_DIR}
+rm -rf petsc-${VER}
+
+# Configure
+#
+# NOTE:
+# - Cannot use --with-fpic option when compiling static libs,
+#
+#		Cannot determine compiler PIC flags if shared libraries is turned off
+#		Either run using --with-shared-libraries or --with-pic=0 and supply the
+#		compiler PIC flag via CFLAGS, CXXXFLAGS, and FCFLAGS
+#
+cd ${PETSC_DIR}
+./configure \
+	--prefix="${PREFIX}" \
+	--PETSC_DIR="${PETSC_DIR}" \
+	--with-shared-libraries=0 \
+	--CFLAGS="-fPIC" \
+	--CXXFLAGS="-fPIC" \
+	--FFLAGS="-fPIC" \
+	--with-debugging=0 \
+	--with-valgrind=0 \
+	--with-x=0 \
+	--with-ssl=0 \
+	--download-fblaslapack=1 \
+	--download-mpich=1 \
+	--download-metis=1 \
+	--download-parmetis=1 \
+	--download-scalapack=1 \
+	--download-mumps=1 \
+	--download-zlib=1
+
+# Compile and install
+make
+make install
Index: /issm/trunk/externalpackages/petsc/install-3.17-linux.sh
===================================================================
--- /issm/trunk/externalpackages/petsc/install-3.17-linux.sh	(revision 28013)
+++ /issm/trunk/externalpackages/petsc/install-3.17-linux.sh	(revision 28013)
@@ -0,0 +1,44 @@
+#!/bin/bash
+set -eu
+
+## Constants
+VER="3.17.4"
+
+PETSC_DIR="${ISSM_DIR}/externalpackages/petsc/src" # DO NOT CHANGE THIS
+PREFIX="${ISSM_DIR}/externalpackages/petsc/install" # Set to location where external package should be installed
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/petsc-lite-${VER}.tar.gz" "petsc-${VER}.tar.gz"
+
+# Unpack source
+tar -zxvf petsc-${VER}.tar.gz
+
+# Cleanup
+rm -rf ${PREFIX} ${PETSC_DIR}
+mkdir -p ${PETSC_DIR}
+
+# Move source to $PETSC_DIR
+mv petsc-${VER}/* ${PETSC_DIR}
+rm -rf petsc-${VER}
+
+# Configure
+cd ${PETSC_DIR}
+./configure \
+	--prefix="${PREFIX}" \
+	--PETSC_DIR="${PETSC_DIR}" \
+	--with-debugging=0 \
+	--with-valgrind=0 \
+	--with-x=0 \
+	--with-ssl=0 \
+	--with-pic=1 \
+	--download-fblaslapack=1 \
+	--download-mpich=1 \
+	--download-metis=1 \
+	--download-parmetis=1 \
+	--download-scalapack=1 \
+	--download-mumps=1 \
+	--download-zlib=1
+
+# Compile and install
+make
+make install
Index: /issm/trunk/externalpackages/petsc/install-3.17-mac-intel.sh
===================================================================
--- /issm/trunk/externalpackages/petsc/install-3.17-mac-intel.sh	(revision 28012)
+++ /issm/trunk/externalpackages/petsc/install-3.17-mac-intel.sh	(revision 28013)
@@ -25,8 +25,14 @@
 
 # Configure
+#
+# NOTE:
+# - Added -Wno-error=implicit-function-declaration to CFLAGS for Clang >= 12. 
+#	(may need to remove it for earlier versions not using the C99 standard).
+#
 cd ${PETSC_DIR}
 ./configure \
 	--prefix="${PREFIX}" \
 	--PETSC_DIR="${PETSC_DIR}" \
+	--CFLAGS="-Wno-error=implicit-function-declaration" \
 	--with-debugging=0 \
 	--with-valgrind=0 \
@@ -40,6 +46,5 @@
 	--download-scalapack=1 \
 	--download-mumps=1 \
-	--download-zlib=1 \
-	--download-hdf5=1
+	--download-zlib=1
 
 # Compile and install
Index: sm/trunk/externalpackages/petsc/install-3.17-mac-m1-static.sh
===================================================================
--- /issm/trunk/externalpackages/petsc/install-3.17-mac-m1-static.sh	(revision 28012)
+++ 	(revision )
@@ -1,64 +1,0 @@
-#!/bin/bash
-set -eu
-
-
-## Constants
-#
-VER="3.17.4"
-
-PETSC_DIR="${ISSM_DIR}/externalpackages/petsc/src" # DO NOT CHANGE THIS
-PREFIX="${ISSM_DIR}/externalpackages/petsc/install" # Set to location where external package should be installed
-
-# Download source
-$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/petsc-lite-${VER}.tar.gz" "petsc-${VER}.tar.gz"
-
-# Unpack source
-tar -zxvf petsc-${VER}.tar.gz
-
-# Cleanup
-rm -rf ${PREFIX} ${PETSC_DIR}
-mkdir -p ${PETSC_DIR}
-
-# Move source to $PETSC_DIR
-mv petsc-${VER}/* ${PETSC_DIR}
-rm -rf petsc-${VER}
-
-# Configure
-#
-# NOTE:
-# - Cannot use --with-fpic option when compiling static libs,
-#
-#		Cannot determine compiler PIC flags if shared libraries is turned off
-#		Either run using --with-shared-libraries or --with-pic=0 and supply the
-#		compiler PIC flag via CFLAGS, CXXXFLAGS, and FCFLAGS
-#
-# - Added -fallow-argument-mismatch to FFLAGS in order to clear,
-#
-#		error: The Fortran compiler gfortran will not compile files that call 
-#		the same routine with arguments of different types.
-#
-#	for gfortran 10 or later (may need to remove it for earlier versions).
-cd ${PETSC_DIR}
-./configure \
-	--prefix="${PREFIX}" \
-	--PETSC_DIR="${PETSC_DIR}" \
-	--LDFLAGS="-Wl,-no_compact_unwind" \
-	--with-shared-libraries=0 \
-	--CFLAGS="-fPIC" \
-	--CXXFLAGS="-fPIC" \
-	--FFLAGS="-fPIC" \
-	--with-debugging=0 \
-	--with-valgrind=0 \
-	--with-x=0 \
-	--with-ssl=0 \
-	--download-fblaslapack=1 \
-	--download-mpich=1 \
-	--download-metis=1 \
-	--download-parmetis=1 \
-	--download-scalapack=1 \
-	--download-mumps=1 \
-	--download-zlib=1 
-
-# Compile and install
-make
-make install
Index: sm/trunk/externalpackages/petsc/install-3.17-mac-m1.sh
===================================================================
--- /issm/trunk/externalpackages/petsc/install-3.17-mac-m1.sh	(revision 28012)
+++ 	(revision )
@@ -1,47 +1,0 @@
-#!/bin/bash
-set -eu
-
-
-## Constants
-#
-VER="3.17.4"
-
-PETSC_DIR="${ISSM_DIR}/externalpackages/petsc/src" # DO NOT CHANGE THIS
-PREFIX="${ISSM_DIR}/externalpackages/petsc/install" # Set to location where external package should be installed
-
-# Download source
-$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/petsc-lite-${VER}.tar.gz" "petsc-${VER}.tar.gz"
-
-# Unpack source
-tar -zxvf petsc-${VER}.tar.gz
-
-# Cleanup
-rm -rf ${PREFIX} ${PETSC_DIR}
-mkdir -p ${PETSC_DIR}
-
-# Move source to $PETSC_DIR
-mv petsc-${VER}/* ${PETSC_DIR}
-rm -rf petsc-${VER}
-
-# Configure
-cd ${PETSC_DIR}
-./configure \
-	--prefix="${PREFIX}" \
-	--PETSC_DIR="${PETSC_DIR}" \
-	--LDFLAGS="-Wl,-no_compact_unwind" \
-	--with-debugging=0 \
-	--with-valgrind=0 \
-	--with-x=0 \
-	--with-ssl=0 \
-	--with-pic=1 \
-	--download-fblaslapack=1 \
-	--download-mpich=1 \
-	--download-metis=1 \
-	--download-parmetis=1 \
-	--download-scalapack=1 \
-	--download-mumps=1 \
-	--download-zlib=1 
-
-# Compile and install
-make
-make install
Index: /issm/trunk/externalpackages/petsc/install-3.17-mac-silicon-static.sh
===================================================================
--- /issm/trunk/externalpackages/petsc/install-3.17-mac-silicon-static.sh	(revision 28013)
+++ /issm/trunk/externalpackages/petsc/install-3.17-mac-silicon-static.sh	(revision 28013)
@@ -0,0 +1,57 @@
+#!/bin/bash
+set -eu
+
+
+## Constants
+#
+VER="3.17.4"
+
+PETSC_DIR="${ISSM_DIR}/externalpackages/petsc/src" # DO NOT CHANGE THIS
+PREFIX="${ISSM_DIR}/externalpackages/petsc/install" # Set to location where external package should be installed
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/petsc-lite-${VER}.tar.gz" "petsc-${VER}.tar.gz"
+
+# Unpack source
+tar -zxvf petsc-${VER}.tar.gz
+
+# Cleanup
+rm -rf ${PREFIX} ${PETSC_DIR}
+mkdir -p ${PETSC_DIR}
+
+# Move source to $PETSC_DIR
+mv petsc-${VER}/* ${PETSC_DIR}
+rm -rf petsc-${VER}
+
+# Configure
+#
+# NOTE:
+# - Cannot use --with-fpic option when compiling static libs,
+#
+#		Cannot determine compiler PIC flags if shared libraries is turned off
+#		Either run using --with-shared-libraries or --with-pic=0 and supply the
+#		compiler PIC flag via CFLAGS, CXXXFLAGS, and FCFLAGS
+#
+cd ${PETSC_DIR}
+./configure \
+	--prefix="${PREFIX}" \
+	--PETSC_DIR="${PETSC_DIR}" \
+	--with-shared-libraries=0 \
+	--CFLAGS="-fPIC" \
+	--CXXFLAGS="-fPIC" \
+	--FFLAGS="-fPIC" \
+	--with-debugging=0 \
+	--with-valgrind=0 \
+	--with-x=0 \
+	--with-ssl=0 \
+	--download-fblaslapack=1 \
+	--download-mpich=1 \
+	--download-metis=1 \
+	--download-parmetis=1 \
+	--download-scalapack=1 \
+	--download-mumps=1 \
+	--download-zlib=1 
+
+# Compile and install
+make
+make install
Index: /issm/trunk/externalpackages/petsc/install-3.17-mac-silicon.sh
===================================================================
--- /issm/trunk/externalpackages/petsc/install-3.17-mac-silicon.sh	(revision 28013)
+++ /issm/trunk/externalpackages/petsc/install-3.17-mac-silicon.sh	(revision 28013)
@@ -0,0 +1,46 @@
+#!/bin/bash
+set -eu
+
+
+## Constants
+#
+VER="3.17.4"
+
+PETSC_DIR="${ISSM_DIR}/externalpackages/petsc/src" # DO NOT CHANGE THIS
+PREFIX="${ISSM_DIR}/externalpackages/petsc/install" # Set to location where external package should be installed
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/petsc-lite-${VER}.tar.gz" "petsc-${VER}.tar.gz"
+
+# Unpack source
+tar -zxvf petsc-${VER}.tar.gz
+
+# Cleanup
+rm -rf ${PREFIX} ${PETSC_DIR}
+mkdir -p ${PETSC_DIR}
+
+# Move source to $PETSC_DIR
+mv petsc-${VER}/* ${PETSC_DIR}
+rm -rf petsc-${VER}
+
+# Configure
+cd ${PETSC_DIR}
+./configure \
+	--prefix="${PREFIX}" \
+	--PETSC_DIR="${PETSC_DIR}" \
+	--with-debugging=0 \
+	--with-valgrind=0 \
+	--with-x=0 \
+	--with-ssl=0 \
+	--with-pic=1 \
+	--download-fblaslapack=1 \
+	--download-mpich=1 \
+	--download-metis=1 \
+	--download-parmetis=1 \
+	--download-scalapack=1 \
+	--download-mumps=1 \
+	--download-zlib=1 
+
+# Compile and install
+make
+make install
Index: /issm/trunk/externalpackages/petsc/install-3.19-discovery.sh
===================================================================
--- /issm/trunk/externalpackages/petsc/install-3.19-discovery.sh	(revision 28013)
+++ /issm/trunk/externalpackages/petsc/install-3.19-discovery.sh	(revision 28013)
@@ -0,0 +1,50 @@
+#!/bin/bash
+set -eu
+
+## Constants
+#
+VER="3.19.1"
+
+PETSC_DIR="${ISSM_DIR}/externalpackages/petsc/src" # DO NOT CHANGE THIS
+PREFIX="${ISSM_DIR}/externalpackages/petsc/install" # Set to location where external package should be installed
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://ftp.mcs.anl.gov/pub/petsc/release-snapshots/petsc-${VER}.tar.gz" "petsc-${VER}.tar.gz"
+
+# Unpack source
+tar -zxvf petsc-${VER}.tar.gz
+
+# Cleanup
+rm -rf ${PREFIX} ${PETSC_DIR}
+mkdir -p ${PETSC_DIR}
+
+# Move source to $PETSC_DIR
+mv petsc-${VER}/* ${PETSC_DIR}
+rm -rf petsc-${VER}
+
+# Configure
+cd ${PETSC_DIR}
+./config/configure.py \
+	COPTFLAGS="-g -O3" CXXOPTFLAGS="-g -O3" FOPTFLAGS="-g -O3" \
+	--prefix="${PREFIX}" \
+	--PETSC_DIR="${PETSC_DIR}" \
+	--with-make-np=20 \
+	--with-blas-lapack-dir=$MKL_ROOT \
+	--with-mpi-dir="/optnfs/el7/mpich/3.3-intel19.3/" \
+	--known-mpi-shared-libraries=1 \
+	--known-mpi-long-double=1 \
+	--known-mpi-int64_t=1 \
+	--known-mpi-c-double-complex=1 \
+	--with-debugging=0 \
+	--with-valgrind=0 \
+	--with-x=0 \
+	--with-ssl=0 \
+	--with-pic=1 \
+	--download-metis=1 \
+	--download-parmetis=1 \
+	--download-scalapack=1 \
+	--download-mumps=1
+
+# Compile and install
+make
+make install
Index: /issm/trunk/externalpackages/petsc/install-3.19-mac-intel.sh
===================================================================
--- /issm/trunk/externalpackages/petsc/install-3.19-mac-intel.sh	(revision 28013)
+++ /issm/trunk/externalpackages/petsc/install-3.19-mac-intel.sh	(revision 28013)
@@ -0,0 +1,54 @@
+#!/bin/bash
+set -eu
+
+
+## Constants
+#
+VER="3.19.5"
+
+PETSC_DIR="${ISSM_DIR}/externalpackages/petsc/src" # DO NOT CHANGE THIS
+PREFIX="${ISSM_DIR}/externalpackages/petsc/install" # Set to location where external package should be installed
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://ftp.mcs.anl.gov/pub/petsc/release-snapshots/petsc-${VER}.tar.gz" "petsc-${VER}.tar.gz"
+
+# Unpack source
+tar -zxvf petsc-${VER}.tar.gz
+
+# Cleanup
+rm -rf ${PREFIX} ${PETSC_DIR}
+mkdir -p ${PETSC_DIR}
+
+# Move source to $PETSC_DIR
+mv petsc-${VER}/* ${PETSC_DIR}
+rm -rf petsc-${VER}
+
+# Configure
+#
+# NOTE:
+# - Added -Wno-error=implicit-function-declaration to CFLAGS for Clang >= 12. 
+#	(may need to remove it for earlier versions not using the C99 standard).
+#
+cd ${PETSC_DIR}
+./configure \
+	--prefix="${PREFIX}" \
+	--PETSC_DIR="${PETSC_DIR}" \
+	--CFLAGS="-Wno-error=implicit-function-declaration" \
+	--with-debugging=0 \
+	--with-valgrind=0 \
+	--with-x=0 \
+	--with-ssl=0 \
+	--with-pic=1 \
+	--download-fblaslapack=1 \
+	--download-mpich=1 \
+	--download-metis=1 \
+	--download-parmetis=1 \
+	--download-scalapack=1 \
+	--download-mumps=1 \
+	--download-cmake=1 \
+	--download-zlib=1
+
+# Compile and install
+make
+make install
+
Index: /issm/trunk/externalpackages/petsc/install-3.20-pleiades.sh
===================================================================
--- /issm/trunk/externalpackages/petsc/install-3.20-pleiades.sh	(revision 28013)
+++ /issm/trunk/externalpackages/petsc/install-3.20-pleiades.sh	(revision 28013)
@@ -0,0 +1,56 @@
+#!/bin/bash
+set -eu
+
+
+## Constants
+#
+VER="3.20.1"
+
+PETSC_DIR="${ISSM_DIR}/externalpackages/petsc/src" # DO NOT CHANGE THIS
+PREFIX="${ISSM_DIR}/externalpackages/petsc/install" # Set to location where external package should be installed
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://web.cels.anl.gov/projects/petsc/download/release-snapshots/petsc-${VER}.tar.gz" "petsc-${VER}.tar.gz"
+
+# Unpack source
+tar -zxvf petsc-${VER}.tar.gz
+
+# Cleanup
+rm -rf ${PREFIX} ${PETSC_DIR}
+mkdir -p ${PETSC_DIR}
+
+# Move source to $PETSC_DIR
+mv petsc-${VER}/* ${PETSC_DIR}
+rm -rf petsc-${VER}
+
+# Configure
+#
+# NOTE:
+# - Options from,
+#
+# 		cat /nasa/petsc/3.14.5_toss3/lib/petsc/conf/petscvariables | grep CONF
+#
+cd ${PETSC_DIR}
+./configure \
+	--prefix="${PREFIX}" \
+	--PETSC_DIR="${PETSC_DIR}" \
+	--CFLAGS="-g -O3" \
+	--CXXFLAGS="-g -O3" \
+	--FFLAGS="-g -O3" \
+	--with-make-np=10 \
+	--with-batch=1 \
+	--with-pic=1 \
+	--with-shared-libraries=1 \
+	--with-debugging=0 \
+	--with-valgrind=0 \
+	--with-x=0 \
+	--with-ssl=0 \
+	--download-fblaslapack=1 \
+	--download-scalapack=1 \
+	--download-metis=1 \
+	--download-parmetis=1 \
+	--download-mumps=1 
+
+# Compile and install
+make
+make install
Index: sm/trunk/externalpackages/petsc/install-3.6-win10.sh
===================================================================
--- /issm/trunk/externalpackages/petsc/install-3.6-win10.sh	(revision 28012)
+++ 	(revision )
@@ -1,41 +1,0 @@
-#!/bin/bash
-set -eu
-
-#Some cleanup
-rm -rf install petsc-3.6.2 src
-mkdir install src
-
-#Download from ISSM server
-$ISSM_DIR/scripts/DownloadExternalPackage.sh 'https://issm.ess.uci.edu/files/externalpackages/petsc-lite-3.6.2.tar.gz' 'petsc-3.6.2.tar.gz'
-
-#Untar and move petsc to install directory
-tar -zxvf  petsc-3.6.2.tar.gz
-mv petsc-3.6.2/* src/
-rm -rf petsc-3.6.2
-
-export PETSC_DIR=`cygpath -u "$ISSM_DIR/externalpackages/petsc/src"`
-export PREFIX_DIR=`cygpath -u "$ISSM_DIR/externalpackages/petsc/install"`
-
-#configure
-cd src
-./config/configure.py  \
-	--with-parallel-no \
-	--prefix=$PREFIX_DIR \
-	--PETSC_ARCH=cygwin-intel \
-	--PETSC_DIR=$PETSC_DIR \
-	--with-mpi=0 \
-	--with-debugging=0 \
-	--with-valgrind=0 \
-	--with-x=0 \
-	--with-ssl=0 \
-	--download-f2cblaslapack=yes \
-	--with-cc='win32fe cl' \
-	--with-fc=0 \
-	--with-cxx='win32fe cl' \
-	--with-clanguage=cxx 
-
-#Compile petsc and install it
-make
-make install
-
-patch ../install/include/petscfix.h ../configs/3.1/win7/petscfix.h.patch
Index: sm/trunk/externalpackages/petsc/install-3.7-discover.sh
===================================================================
--- /issm/trunk/externalpackages/petsc/install-3.7-discover.sh	(revision 28012)
+++ 	(revision )
@@ -1,62 +1,0 @@
-#!/bin/bash
-set -eu
-
-#Some cleanup
-rm -rf install petsc-3.7.6 src
-mkdir install src
-
-#Download from ISSM server
-$ISSM_DIR/scripts/DownloadExternalPackage.sh 'https://issm.ess.uci.edu/files/externalpackages/petsc-lite-3.7.6.tar.gz' 'petsc-3.7.6.tar.gz'
-
-#Untar and move petsc to install directory
-tar -zxvf  petsc-3.7.6.tar.gz
-mv petsc-3.7.6/* src/
-rm -rf petsc-3.7.6
-
-#configure
-cd src
-./config/configure.py \
-	--prefix="$ISSM_DIR/externalpackages/petsc/install" \
-	--PETSC_DIR="$ISSM_DIR/externalpackages/petsc/src" \
-	--with-blas-lapack-dir="/usr/local/intel/2020/compilers_and_libraries_2020.0.166/linux/mkl/" \
-	--with-mpi-lib="/usr/local/sgi/mpi/mpt-2.17/lib/libmpi.so" \
-	--with-mpi-include="/usr/local/sgi/mpi/mpt-2.17/include/" \
-	--known-mpi-shared-libraries=1 \
-	--with-debugging=0 \
-	--with-valgrind=0 \
-	--with-x=0 \
-	--with-ssl=0 \
-	--with-batch=1  \
-	--with-shared-libraries=1 \
-	--download-metis=1 \
-	--download-parmetis=1 \
-	--download-mumps=1 \
-	--download-scalapack=1 
-
-#prepare script to reconfigure petsc
-cat > script.queue << EOF
-#!/bin/bash
-#SBATCH -J petscinstall # Job Name
-#SBATCH -N 1
-#SBATCH -n 1
-#SBATCH -A s1690
-#SBATCH -t 00:01:00 # Run time (hh:mm:ss) - 1.5 hours
-#SBATCH --qos=debug
-#SBATCH -o petscinstall.outlog
-#SBATCH -e petscinstall.errlog
-
-. /usr/share/modules/init/bash
-module purge
-module load comp/intel/20.0.0.166
-module load mpi/sgi-mpt/2.17
-module load cmake/3.17.0
-
-export PATH="$PATH:."
-export MPI_GROUP_MAX=64
-mpiexec -np 1 ./conftest-arch-linux2-c-opt
-EOF
-
-#print instructions
-echo "== Now: cd src/ "
-echo "== sbatch script.queue "
-echo "== Then run reconfigure script generated by PETSc and follow instructions"
Index: sm/trunk/externalpackages/petsc/install-dev-linux64.sh
===================================================================
--- /issm/trunk/externalpackages/petsc/install-dev-linux64.sh	(revision 28012)
+++ 	(revision )
@@ -1,49 +1,0 @@
-#!/bin/bash
-
-STEP=2
-
-if [ $STEP -eq 1 ]; then
-	rm -rf src
-	git clone https://bitbucket.org/petsc/petsc src
-fi
-
-# To update 
-#      cd src
-#      git pull
-
-# configure script
-# Note: 
-# SuperLU: -If download-..-=yes does not work try downloading from
-#    --download-superlu=http://crd.lbl.gov/~xiaoye/SuperLU/superlu_4.3.tar.gz \
-
-
-if [ $STEP -eq 2 ]; then
-	rm -rf install
-	cd src
-	./configure \
-		--prefix="$ISSM_DIR/externalpackages/petsc/install" \
-		--with-mpi-dir="$ISSM_DIR/externalpackages/mpich/install" \
-		--PETSC_DIR="$ISSM_DIR/externalpackages/petsc/src" \
-		--with-debugging=1 \
-		--with-valgrind=0 \
-		--with-x=0 \
-		--with-ssl=0 \
-		--with-shared-libraries=1 \
-		--download-metis=1 \
-		--download-parmetis=1 \
-		--download-mumps=1 \
-		--download-scalapack=1 \
-		--download-fblaslapack=1 \
-		--with-pic=1
-
-#	--download-trilinos=yes \
-#	--download-euclid=yes \
-#	--download-spooles=yes \
-#	--download-spai=yes \
-#	--download-superlu=http://crd.lbl.gov/~xiaoye/SuperLU/superlu_4.3.tar.gz \
-#	--download-hypre=yes \
-
-	#Compile petsc and install it
-	make
-	make install
-fi
Index: sm/trunk/externalpackages/petsc/install-dev-pleiades.sh
===================================================================
--- /issm/trunk/externalpackages/petsc/install-dev-pleiades.sh	(revision 28012)
+++ 	(revision )
@@ -1,78 +1,0 @@
-#!/bin/bash
-#Step 0: download
-#Step 1: install and write script
-STEP=1
-
-if [ $STEP -eq 0 ]; then
-	# Adapted from petsc 3.2. 
-	# Used Mercurial to get code
-	rm -rf src
-	hg clone http://petsc.cs.iit.edu/petsc/petsc-dev src
-	cd src
-	hg clone http://petsc.cs.iit.edu/petsc/BuildSystem config/BuildSystem
-fi
-
-# To update (via Mercurial):
-#      cd petsc-dev
-#      hg pull -u
-#      cd config/BuildSystem
-#      hg pull -u
-
-# configure script
-# Note: using metis from externalpackages did not work...
-# for now downloading new metis
-#   -then rename metis in externalpackages to metis2
-#
-#		--download-prometheus=yes \
-#--download-superlu=http://crd.lbl.gov/~xiaoye/SuperLU/superlu_4.3.tar.gz \
-#		--download-plapack=yes \
-
-if [ $STEP -eq 1 ]; then
-
-	#configure
-	cd src
-	./config/configure.py \
-		--prefix="$ISSM_DIR/externalpackages/petsc/install" \
-		--with-batch=1  \
-		--PETSC_ARCH="$ISSM_ARCH" \
-		--PETSC_DIR="$ISSM_DIR/externalpackages/petsc/src" \
-		--with-debugging=0 \
-		--with-shared-libraries=1 \
-		--with-blas-lapack-dir=/nasa/intel/mkl/10.0.011/ \
-		--known-mpi-shared-libraries=1 \
-		--with-mpi=1 \
-		--download-mumps=yes \
-		--download-scalapack=yes \
-		--download-blacs=yes  \
-		--download-blas=yes \
-		--download-f-blas-lapack=yes \
-		--download-parmetis=yes \
-		--download-metis=yes \
-		--download-trilinos=yes \
-		--download-euclid=yes \
-		--download-spai=yes \
-		--download-superlu=http://crd.lbl.gov/~xiaoye/SuperLU/superlu_4.3.tar.gz \
-		--download-hypre=yes \
-		--with-cxx=/nasa/sgi/mpt/2.06a67/bin/mpicxx \
-		--with-fc=/nasa/sgi/mpt/2.06a67/bin/mpif90 \
-		--COPTFLAGS="-lmpi -O3" \
-		--FOPTFLAGS="-lmpi -O3" \
-		--CXXOPTFLAGS="-lmpi -O3" 
-		cat > script.queue << EOF
-#PBS -S /bin/bash
-#PBS -l select=1:ncpus=1:model=wes 
-#PBS -l walltime=200 
-#PBS -W group_list=s1010 
-#PBS -m e 
-. /usr/share/modules/init/bash 
-module load comp-intel/2012.0.032 
-module load mpi-sgi/mpt.2.06a67  
-module load math/intel_mkl_64_10.0.011 
-export PATH="$PATH:." 
-export MPI_GROUP_MAX=64 
-mpiexec -np 1 ./conftest-linux-gnu-ia64-intel.py
-EOF
-	echo "== Now: cd src/ "
-	echo "== qsub -q devel script.queue "
-	echo "== Then run reconfigure script generated by PETSc and follow instructions"
-fi
Index: sm/trunk/externalpackages/petsc/install-dev-win10-par.sh
===================================================================
--- /issm/trunk/externalpackages/petsc/install-dev-win10-par.sh	(revision 28012)
+++ 	(revision )
@@ -1,35 +1,0 @@
-#!/bin/bash
-
-STEP=1
-
-rm -rf src
-
-if [ $STEP -eq 1 ]; then
-	git clone -b maint https://bitbucket.org/petsc/petsc src
-fi
-
-export PETSC_DIR=`cygpath -u "$ISSM_DIR/externalpackages/petsc/src"`
-export PREFIX_DIR=`cygpath -u "$ISSM_DIR/externalpackages/petsc/install"`
-
-#configure
-cd src
-./config/configure.py  \
-	--prefix=$PREFIX_DIR \
-	--PETSC_ARCH=cygwin-intel \
-	--PETSC_DIR=$PETSC_DIR \
-	--with-mpi-dir="/cygdrive/c/Programs/MPICH2/"\
-	--with-debugging=0 \
-	--with-valgrind=0 \
-	--with-x=0 \
-	--with-ssl=0 \
-	--download-f2cblaslapack=yes \
-	--with-cc='win32fe cl' \
-	--with-fc=0 \
-	--with-cxx='win32fe cl' \
-	--with-clanguage=cxx 
-
-#Compile petsc and install it
-make
-make install
-
-patch ../install/include/petscfix.h ../configs/3.1/win7/petscfix.h.patch
Index: sm/trunk/externalpackages/petsc/install-dev-win10.sh
===================================================================
--- /issm/trunk/externalpackages/petsc/install-dev-win10.sh	(revision 28012)
+++ 	(revision )
@@ -1,34 +1,0 @@
-#!/bin/bash
-
-STEP=1
-
-if [ $STEP -eq 1 ]; then
-	git clone -b maint https://bitbucket.org/petsc/petsc src
-fi
-
-export PETSC_DIR=`cygpath -u "$ISSM_DIR/externalpackages/petsc/src"`
-export PREFIX_DIR=`cygpath -u "$ISSM_DIR/externalpackages/petsc/install"`
-
-#configure
-cd src
-./config/configure.py  \
-	--with-parallel-no \
-	--prefix=$PREFIX_DIR \
-	--PETSC_ARCH=cygwin-intel \
-	--PETSC_DIR=$PETSC_DIR \
-	--with-mpi=0 \
-	--with-debugging=0 \
-	--with-valgrind=0 \
-	--with-x=0 \
-	--with-ssl=0 \
-	--download-f2cblaslapack=yes \
-	--with-cc='win32fe cl' \
-	--with-fc=0 \
-	--with-cxx='win32fe cl' \
-	--with-clanguage=cxx 
-
-#Compile petsc and install it
-make
-make install
-
-patch ../install/include/petscfix.h ../configs/3.1/win7/petscfix.h.patch
Index: /issm/trunk/externalpackages/semic/install.sh
===================================================================
--- /issm/trunk/externalpackages/semic/install.sh	(revision 28012)
+++ /issm/trunk/externalpackages/semic/install.sh	(revision 28013)
@@ -16,10 +16,9 @@
 if which ifort >/dev/null; then
 	FC="ifort"
-	FFLAGS="-traceback -check all" #-O2 is default 
+	FFLAGS="-traceback -check all" # -O2 is default 
 else
 	FC="gfortran"
 	if [ `uname` == "Darwin" ]; then
-		FC="gfortran -arch x86_64"
-		FFLAGS="-fcheck=all"
+		FFLAGS="-fcheck=all -arch $(uname -m)"
 	else
 		FFLAGS=""
Index: /issm/trunk/externalpackages/sqlite/install-static.sh
===================================================================
--- /issm/trunk/externalpackages/sqlite/install-static.sh	(revision 28013)
+++ /issm/trunk/externalpackages/sqlite/install-static.sh	(revision 28013)
@@ -0,0 +1,43 @@
+#!/bin/bash
+set -eu
+
+
+# Constants
+#
+VER="3300100"
+
+PREFIX="${ISSM_DIR}/externalpackages/sqlite/install" # Set to location where external package should be installed
+
+# Environment
+#
+export CFLAGS="-DSQLITE_ENABLE_COLUMN_METADATA=1"
+
+# Cleanup
+rm -rf ${PREFIX} src
+mkdir -p ${PREFIX} src
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/sqlite-autoconf-${VER}.tar.gz" "sqlite-autoconf-${VER}.tar.gz"
+
+# Unpack source
+tar -zxvf sqlite-autoconf-${VER}.tar.gz
+
+# Move source into 'src' directory
+mv sqlite-autoconf-${VER}/* src
+rm -rf sqlite-autoconf-${VER}
+
+# Configure
+cd src
+./configure \
+	--prefix="${PREFIX}" \
+	--enable-fast-install \
+	--enable-shared=no
+
+# Compile and install
+if [ $# -eq 0 ]; then
+	make
+	make install
+else
+	make -j $1
+	make -j $1 install
+fi
Index: /issm/trunk/externalpackages/sqlite/install.sh
===================================================================
--- /issm/trunk/externalpackages/sqlite/install.sh	(revision 28012)
+++ /issm/trunk/externalpackages/sqlite/install.sh	(revision 28013)
@@ -30,5 +30,7 @@
 cd src
 ./configure \
-	--prefix="${PREFIX}"
+	--prefix="${PREFIX}" \
+	--enable-fast-install \
+	--enable-static=no
 
 # Compile and install
Index: /issm/trunk/externalpackages/triangle/install-win-msys2-mingw-static.sh
===================================================================
--- /issm/trunk/externalpackages/triangle/install-win-msys2-mingw-static.sh	(revision 28013)
+++ /issm/trunk/externalpackages/triangle/install-win-msys2-mingw-static.sh	(revision 28013)
@@ -0,0 +1,34 @@
+#!/bin/bash
+set -eu
+
+
+# Constants
+#
+export PREFIX="${ISSM_DIR}/externalpackages/triangle/install" # Set to location where external package should be installed
+
+# Cleanup
+rm -rf ${PREFIX} src
+mkdir -p ${PREFIX} ${PREFIX}/include ${PREFIX}/lib src
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/triangle.zip" "triangle.zip"
+
+# Unpack source
+unzip triangle.zip -d src
+
+# Copy customized source files to 'src' directory
+cp configs/makefile src
+cp configs/triangle.h src
+cp configs/win/msys2/mingw64/configure.make src
+
+# Compile
+cd src
+make static
+
+# Install
+cd ..
+cp src/libtriangle.* ${PREFIX}/lib
+cp src/triangle.h ${PREFIX}/include
+
+# Cleanup
+rm -rf src
Index: /issm/trunk/externalpackages/valgrind/install-linux.sh
===================================================================
--- /issm/trunk/externalpackages/valgrind/install-linux.sh	(revision 28012)
+++ /issm/trunk/externalpackages/valgrind/install-linux.sh	(revision 28013)
@@ -3,15 +3,20 @@
 
 
+## Constants
+#
+PREFIX="${ISSM_DIR}/externalpackages/valgrind/install" # Set to location where external package should be installed
+
 # Clean up
-rm -rf install
+rm -rf ${PREFIX} src
 
-# Download development version
-git clone git://sourceware.org/git/valgrind.git install
+# Download development version (the current release never supports the latest 
+# OS X releases)
+git clone git://sourceware.org/git/valgrind.git src
 
 # Configure
-cd install
+cd src
 ./autogen.sh
 ./configure \
-	--prefix="$ISSM_DIR/externalpackages/valgrind/install" \
+	--prefix="${PREFIX}" \
 	--enable-only64bit
 
Index: /issm/trunk/externalpackages/valgrind/install-mac.sh
===================================================================
--- /issm/trunk/externalpackages/valgrind/install-mac.sh	(revision 28012)
+++ /issm/trunk/externalpackages/valgrind/install-mac.sh	(revision 28013)
@@ -8,12 +8,12 @@
 
 # Clean up
-rm -rf ${PREFIX}
+rm -rf ${PREFIX} src
 
 # Download development version (the current release never supports the latest 
 # OS X releases)
-git clone git://sourceware.org/git/valgrind.git ${PREFIX}
+git clone git://sourceware.org/git/valgrind.git src
 
 # Configure
-cd install
+cd src
 ./autogen.sh
 ./configure \
Index: /issm/trunk/externalpackages/vim/addons/vimrc
===================================================================
--- /issm/trunk/externalpackages/vim/addons/vimrc	(revision 28012)
+++ /issm/trunk/externalpackages/vim/addons/vimrc	(revision 28013)
@@ -208,4 +208,5 @@
 au BufRead,BufNewFile *.html   iabbr <silent> CO <code></code><Left><Left><Left><Left><Left><Left><C-R>=Eatchar('\s')<CR>
 au BufRead,BufNewFile *.html   iabbr <silent> PP <p></p><Left><Left><Left><Left><C-R>=Eatchar('\s')<CR>
+au BufRead,BufNewFile *.jl     iabbr <silent> p1  println("")<Left><Left><C-R>=Eatchar('\s')<CR>
 au BufRead,BufNewFile *.m      iabbr <silent> p1  disp('');<Left><Left><Left><C-R>=Eatchar('\s')<CR>
 au BufRead,BufNewFile *.m      iab <expr> p0  "disp('-------------- file: ".expand('%')." line: ".line(".")."');"
@@ -217,4 +218,5 @@
 au BufRead,BufNewFile *.c*     ab VV VecView(ug,PETSC_VIEWER_STDOUT_WORLD);
 au BufRead,BufNewFile *.c*,*.h ab AS _assert_();<Left><Left><C-R>=Eatchar('\s')<CR>
+au BufRead,BufNewFile *jl      iab <expr> p0  "print(\"-------------- file: ".expand('%')." line: $(@__LINE__)\\n\")"
 au BufRead,BufNewFile *.c*,*.h iab <expr> p0  "printf(\"-------------- file: ".expand('%')." line: %i\\n\",__LINE__);"
 au BufRead,BufNewFile *.c*,*.h iab <expr> pp0 "PetscSynchronizedPrintf(MPI_COMM_WORLD,\"-------------- file: ".expand('%')." line: %i\\n\",__LINE__);\nPetscSynchronizedFlush(MPI_COMM_WORLD);"
Index: /issm/trunk/jenkins/aws-amazon_linux-solid_earth
===================================================================
--- /issm/trunk/jenkins/aws-amazon_linux-solid_earth	(revision 28012)
+++ /issm/trunk/jenkins/aws-amazon_linux-solid_earth	(revision 28013)
@@ -1,7 +1,8 @@
+MATLAB_PATH="/usr/local/MATLAB/R2020a"
+
 #--------------------#
 # ISSM Configuration #
 #--------------------#
 
-MATLAB_PATH="/usr/local/MATLAB/R2020a"
 ISSM_CONFIG='\
 	--prefix=${ISSM_DIR} \
@@ -34,7 +35,7 @@
 	autotools		install-linux.sh
 	cmake			install.sh
-	petsc			install-3.12-linux.sh
+	petsc			install-3.14-linux.sh
 	triangle		install-linux.sh
-	chaco			install.sh
+	chaco			install-linux.sh
 	m1qn3			install.sh
 	semic			install.sh
Index: /issm/trunk/jenkins/eis-daskhub-python-modules
===================================================================
--- /issm/trunk/jenkins/eis-daskhub-python-modules	(revision 28013)
+++ /issm/trunk/jenkins/eis-daskhub-python-modules	(revision 28013)
@@ -0,0 +1,83 @@
+# NOTE: This configuration adds solid earth and Dakota capabilities to the 
+#		basic build.
+
+#--------------------#
+# ISSM Configuration #
+#--------------------#
+
+ISSM_CONFIG='\
+	--prefix="${ISSM_DIR}" \
+	--disable-static \
+	--enable-development \
+	--enable-debugging \
+	--with-numthreads=4 \
+	--with-python-dir="/srv/conda/envs/notebook" \
+	--with-python-version="3.9" \
+	--with-python-numpy-dir="/srv/conda/envs/notebook/lib/python3.9/site-packages/numpy/core/include/numpy" \
+	--with-fortran-lib="-L/usr/lib/x86_64-linux-gnu -lgfortran" \
+	--with-mpi-include="${ISSM_EXT_DIR}/petsc/install/include" \
+	--with-mpi-libflags="-L${ISSM_EXT_DIR}/petsc/install/lib -lmpi -lmpicxx -lmpifort" \
+	--with-blas-lapack-dir="${ISSM_EXT_DIR}/petsc/install" \
+	--with-metis-dir="${ISSM_EXT_DIR}/petsc/install" \
+	--with-parmetis-dir="${ISSM_EXT_DIR}/petsc/install" \
+	--with-scalapack-dir="${ISSM_EXT_DIR}/petsc/install" \
+	--with-mumps-dir="${ISSM_EXT_DIR}/petsc/install" \
+	--with-hdf5-dir="${ISSM_EXT_DIR}/petsc/install" \
+	--with-petsc-dir="${ISSM_EXT_DIR}/petsc/install" \
+	--with-gsl-dir="${ISSM_EXT_DIR}/gsl/install" \
+	--with-boost-dir="${ISSM_EXT_DIR}/boost/install" \
+	--with-dakota-dir="${ISSM_EXT_DIR}/dakota/install" \
+	--with-proj-dir="${ISSM_EXT_DIR}/proj/install" \
+	--with-triangle-dir="${ISSM_EXT_DIR}/triangle/install" \
+	--with-chaco-dir="${ISSM_EXT_DIR}/chaco/install" \
+	--with-m1qn3-dir="${ISSM_EXT_DIR}/m1qn3/install" \
+	--with-semic-dir="${ISSM_EXT_DIR}/semic/install" \
+'
+
+#-------------------#
+# External Packages #
+#-------------------#
+
+EXTERNALPACKAGES="
+	autotools	install-linux.sh
+	cmake		install.sh
+	petsc		install-3.16-linux.sh
+	gsl			install.sh
+	boost		install-1.7-linux.sh
+	dakota		install-6.2-linux.sh
+	curl		install-7-linux.sh
+	netcdf		install-4.7-parallel.sh
+	proj		install-6.sh
+	gdal		install-3-python.sh
+	gshhg		install.sh
+	gmt			install-6-linux.sh
+	gmsh		install-4-linux.sh
+	triangle	install-linux.sh
+	chaco		install-linux.sh
+	m1qn3		install.sh
+	semic		install.sh
+"
+
+#---------#
+# Testing #
+#---------#
+
+# Test suites
+MATLAB_TEST=0
+PYTHON_TEST=0
+JAVASCRIPT_TEST=0
+EXAMPLES_TEST=0
+
+# Number of CPUs used in ISSM compilation
+#
+# NOTE: One is usually safer as some packages are very sensitive to parallel
+# 		compilation
+#
+NUMCPUS_INSTALL=8
+
+# Number of CPUs used in the nightly runs
+NUMCPUS_RUN=1
+
+# Nightly run options
+MATLAB_NROPTIONS=""
+PYTHON_NROPTIONS=""
Index: sm/trunk/jenkins/eis-daskhub-python-nodules
===================================================================
--- /issm/trunk/jenkins/eis-daskhub-python-nodules	(revision 28012)
+++ 	(revision )
@@ -1,83 +1,0 @@
-# NOTE: This configuration adds solid earth and Dakota capabilities to the 
-#		basic build.
-
-#--------------------#
-# ISSM Configuration #
-#--------------------#
-
-ISSM_CONFIG='\
-	--prefix="${ISSM_DIR}" \
-	--disable-static \
-	--enable-development \
-	--enable-debugging \
-	--with-numthreads=4 \
-	--with-python-dir="/srv/conda/envs/notebook" \
-	--with-python-version="3.9" \
-	--with-python-numpy-dir="/srv/conda/envs/notebook/lib/python3.9/site-packages/numpy/core/include/numpy" \
-	--with-fortran-lib="-L/usr/lib/x86_64-linux-gnu -lgfortran" \
-	--with-mpi-include="${ISSM_EXT_DIR}/petsc/install/include" \
-	--with-mpi-libflags="-L${ISSM_EXT_DIR}/petsc/install/lib -lmpi -lmpicxx -lmpifort" \
-	--with-blas-lapack-dir="${ISSM_EXT_DIR}/petsc/install" \
-	--with-metis-dir="${ISSM_EXT_DIR}/petsc/install" \
-	--with-parmetis-dir="${ISSM_EXT_DIR}/petsc/install" \
-	--with-scalapack-dir="${ISSM_EXT_DIR}/petsc/install" \
-	--with-mumps-dir="${ISSM_EXT_DIR}/petsc/install" \
-	--with-hdf5-dir="${ISSM_EXT_DIR}/petsc/install" \
-	--with-petsc-dir="${ISSM_EXT_DIR}/petsc/install" \
-	--with-gsl-dir="${ISSM_EXT_DIR}/gsl/install" \
-	--with-boost-dir="${ISSM_EXT_DIR}/boost/install" \
-	--with-dakota-dir="${ISSM_EXT_DIR}/dakota/install" \
-	--with-proj-dir="${ISSM_EXT_DIR}/proj/install" \
-	--with-triangle-dir="${ISSM_EXT_DIR}/triangle/install" \
-	--with-chaco-dir="${ISSM_EXT_DIR}/chaco/install" \
-	--with-m1qn3-dir="${ISSM_EXT_DIR}/m1qn3/install" \
-	--with-semic-dir="${ISSM_EXT_DIR}/semic/install" \
-'
-
-#-------------------#
-# External Packages #
-#-------------------#
-
-EXTERNALPACKAGES="
-	autotools	install-linux.sh
-	cmake		install.sh
-	petsc		install-3.16-linux.sh
-	gsl			install.sh
-	boost		install-1.7-linux.sh
-	dakota		install-6.2-linux.sh
-	curl		install-7-linux.sh
-	netcdf		install-4.7-parallel.sh
-	proj		install-6.sh
-	gdal		install-3-python.sh
-	gshhg		install.sh
-	gmt			install-6-linux.sh
-	gmsh		install-4-linux.sh
-	triangle	install-linux.sh
-	chaco		install.sh
-	m1qn3		install.sh
-	semic		install.sh
-"
-
-#---------#
-# Testing #
-#---------#
-
-# Test suites
-MATLAB_TEST=0
-PYTHON_TEST=0
-JAVASCRIPT_TEST=0
-EXAMPLES_TEST=0
-
-# Number of CPUs used in ISSM compilation
-#
-# NOTE: One is usually safer as some packages are very sensitive to parallel
-# 		compilation
-#
-NUMCPUS_INSTALL=8
-
-# Number of CPUs used in the nightly runs
-NUMCPUS_RUN=1
-
-# Nightly run options
-MATLAB_NROPTIONS=""
-PYTHON_NROPTIONS=""
Index: /issm/trunk/jenkins/eis-smce-binaries
===================================================================
--- /issm/trunk/jenkins/eis-smce-binaries	(revision 28012)
+++ /issm/trunk/jenkins/eis-smce-binaries	(revision 28013)
@@ -40,5 +40,5 @@
 	autotools	install-linux.sh
 	cmake		install.sh
-	petsc		install-3.12-linux.sh
+	petsc		install-3.14-linux.sh
 	gsl			install.sh
 	boost		install-1.7-linux.sh
@@ -53,5 +53,5 @@
 	gmsh		install-4.sh
 	triangle	install-linux.sh
-	chaco		install.sh
+	chaco		install-linux.sh
 	m1qn3		install.sh
 	semic		install.sh
Index: /issm/trunk/jenkins/examples_tests.sh
===================================================================
--- /issm/trunk/jenkins/examples_tests.sh	(revision 28012)
+++ /issm/trunk/jenkins/examples_tests.sh	(revision 28013)
@@ -6,5 +6,5 @@
 # called from jenkins/jenkins.sh.
 #
-# runme files are modifed as needed to fill in statements that would otherwise 
+# runme files are modified as needed to fill in statements that would otherwise 
 # be added by user.
 #
Index: /issm/trunk/jenkins/jenkins.sh
===================================================================
--- /issm/trunk/jenkins/jenkins.sh	(revision 28012)
+++ /issm/trunk/jenkins/jenkins.sh	(revision 28013)
@@ -28,6 +28,6 @@
 SERVER='https://ross.ics.uci.edu/jenkins'
 
-#Get configuration
-#Source config file{{{
+# Get configuration
+# Source config file {{{
 if [ $# -ne 1 ]; then
 	#no config file specified: exit
@@ -46,12 +46,11 @@
 EXAMPLES_TEST=0
 
-# Initialize resource variables (to avoid "i<=: syntax error: operand expected"
-# in for loops)
+# Initialize resource variables (to avoid "i<=: syntax error: operand expected" in for loops)
 NUMCPUS_INSTALL=1
 NUMCPUS_RUN=1
 
-#source configuration script
+# Source configuration script
 source $1;
-#}}}
+# }}}
 
 if [[ $EXAMPLES_TEST -eq 1 && $MATLAB_TEST+$PYTHON_TEST+$JAVASCRIPT_TEST -ne 0 ]]; then
@@ -60,6 +59,6 @@
 fi
 
-#Install ISSM
-#Determining installation type depending on svn changes{{{
+# Install ISSM
+# Determining installation type depending on svn changes {{{
 echo "======================================================";
 echo "             Determining Installation type            "
@@ -92,7 +91,6 @@
 	echo "   "
 
-	###################################
-	### Determine installation type ###
-	###################################
+	## Determine installation type
+	#
 	echo "Determining installation type"
 
@@ -107,5 +105,5 @@
 	fi
 
-	# If the Makefile or m4 diirectory were changed in any way or if certain
+	# If the Makefile or m4 directory were changed in any way or if certain
 	# binary files from a previous compilation do not exist, reconfigure
 	if [ ! -z "$(cat ${ISSM_DIR}/TEMP | grep -e "Makefile.am" -e "m4" )" ] ||
@@ -145,5 +143,5 @@
 echo "Recording current svn version: $SVN_REVISION_1"
 echo $SVN_REVISION_1 > ${ISSM_DIR}/svn_revision_old
-#}}}
+# }}}
 
 ## External Packages
@@ -153,16 +151,11 @@
 NUMPACKAGES=$(($(echo ${EXTERNALPACKAGES} | wc -w ) / 2))
 
-#Jenkins XML files for individual packages
+# Jenkins XML files for individual packages
 EXTERNAL_TEST_FILE="${ISSM_DIR}/nightlylog/results/external.xml"
 mkdir -p ${ISSM_DIR}/nightlylog/results
 echo "<testsuite tests=\"$NUMPACKAGES\">" > $EXTERNAL_TEST_FILE
 
-# Need a source here for when builds start midway through installation of externalpackages.
+# Need a source here for when builds start midway through installation of externalpackages
 source ${ISSM_DIR}/etc/environment.sh
-
-if [ "${OS}" == CYGWIN* ]; then
-	echo " == WINDOWS ENVIRONMENT DETECTED =="
-	source ${ISSM_DIR}/externalpackages/windows/windows_environment.sh
-fi
 
 EXTERNALPACKAGES_FAILED=0;
@@ -174,5 +167,5 @@
 	PACKAGEINST=$(echo $EXTERNALPACKAGES | cut -d " " -f $NUM2-$NUM2)
 
-	#install if requested or if previous install has not been successful
+	# Install if requested or if previous install has not been successful
 	if [ "${ISSM_EXTERNALPACKAGES}" == "yes" ]; then # NOTE: Removed check on if 'install' directory exist
 		cd ${ISSM_DIR}/externalpackages/$PACKAGENAME
@@ -199,5 +192,6 @@
 				echo '</failure>' >> $EXTERNAL_TEST_FILE
 				echo '</testcase>' >> $EXTERNAL_TEST_FILE
-				EXTERNALPACKAGES_FAILED=1;
+				echo '</testsuite>' >> $EXTERNAL_TEST_FILE
+				exit 1;
 			else
 				echo "<testcase classname=\"externalpackages\" name=\"$PACKAGENAME\"/>" >> $EXTERNAL_TEST_FILE
@@ -224,11 +218,4 @@
 echo '</testsuite>' >> $EXTERNAL_TEST_FILE
 
-if [ $EXTERNALPACKAGES_FAILED -eq 1 ]; then
-	echo "===================================================================================================";
-	echo "    ERROR: One or more of the externalpackages has failed. Skipping everything remaining steps.    ";
-	echo "===================================================================================================";
-	exit 1;
-fi
-
 # Source here to include any newly-installed external packages on the path
 source ${ISSM_DIR}/etc/environment.sh
@@ -246,5 +233,5 @@
 #	independent from running JavaScript tests (one should be able to do the
 #	former without having to do the latter).
-# - Revisit enviroment variables (especially EMCC_CFLAGS) once support for
+# - Revisit environment variables (especially EMCC_CFLAGS) once support for
 #	Fortran has been accomplished.
 #
@@ -261,6 +248,6 @@
 fi
 
-#}}}
-#ISSM compilation yes/no                (ISSM_COMPILATION) {{{
+# }}}
+# ISSM compilation yes/no (ISSM_COMPILATION) {{{
 if [ "${ISSM_COMPILATION}" == "yes" ]; then
 	cd $ISSM_DIR
@@ -285,10 +272,10 @@
 	fi
 
-	#4: compile and install ISSM
+	# Compile and install ISSM
 	echo "======================================================"
 	echo "                    Compiling ISSM                    "
 	echo "======================================================"
 	if [ $NUMCPUS_INSTALL -gt 1 ]; then
-		echo "Making with ${NUMCPUS_INSTALL} cpus"
+		echo "Making with ${NUMCPUS_INSTALL} CPUs"
 
 		# To debug compilation/linking, add 'V=1' option to the call to make
@@ -313,15 +300,14 @@
 	echo "Skipping ISSM compilation"
 else
-	echo "ISSM_COMPILATION supported values are: yes and no. Exiting..." >&2 # Error message to stderr.
+	echo "ISSM_COMPILATION supported values are: yes and no. Exiting..." >&2 # Redirect error messages to stderr
 	exit 1
 fi
-#}}}
-
-#Restore CXX/CC to their previous values
+# }}}
+
+# Restore CC/CXX to their previous values
+export CC=$CC_PREVIOUS
 export CXX=$CXX_PREVIOUS
-export CC=$CC_PREVIOUS
-
-#matlab tests
-# {{{
+
+# MATLAB tests {{{
 if [ $MATLAB_TEST -eq 1 ]; then
 	MINGW=0
@@ -333,7 +319,7 @@
 	fi
 
-	#Launch all tests on different cpus
+	# Launch all tests on different CPUs
 	for (( i=1;i<=$NUMCPUS_RUN;i++ )); do
-		#Launch matlab and the nightly run script
+		# Launch MATLAB and the nightly run script
 		cat > ${ISSM_DIR}/nightlylog/matlab_run$i.m << EOF
 		warning off %necessary to avoid a log of several Go for parallel runs
@@ -358,6 +344,6 @@
 		cd $ISSM_DIR/test/NightlyRun
 
-		# NOTE: We redirect all output to logfile in order to catch certain errors. For some reason, this does not work under Windows: the logifle option must be used and process must be run in background
-		if [[ "${OS}" == CYGWIN* || ${MINGW} -eq 1 ]]; then
+		# NOTE: We redirect all output to logfile in order to catch certain errors. For some reason, this does not work under Windows: the logfile option must be used and process must be run in background
+		if [[ ${MINGW} -eq 1 ]]; then
 			$MATLAB_PATH/bin/matlab -nodesktop -nosplash -nojvm -r "addpath ${ISSM_DIR_WIN}/src/m/dev; devpath; addpath ${ISSM_DIR_WIN}/nightlylog; matlab_run$i" -logfile ${ISSM_DIR_WIN}/nightlylog/matlab_log$i.log &
 		else
@@ -371,18 +357,5 @@
 	# - Replace by adding -wait option to above calls to matlab?
 	#
-	if [[ "${OS}" == CYGWIN* ]]; then
-		sleep 5;
-		echo "Waiting for MATLAB to exit"
-		pid=$(ps aux -W | grep MATLAB | awk '{printf("%s\n","MATLAB");}')
-		echo '-----------------------------'
-		echo "pid: ${pid}"
-		echo '-----------------------------'
-		while [ -n "${pid}" ]; do
-			pid=$(ps aux -W | grep MATLAB | awk '{printf("%s\n","MATLAB");}')
-			sleep 1;
-		done
-		echo "DONE!"
-	elif [[ ${MINGW} -eq 1 ]]; then
-		# NOTE: The PID query used here may work as well on Cygwin. If so, combine the two conditional blocks.
+	if [[ ${MINGW} -eq 1 ]]; then
 		sleep 5;
 		echo "Waiting for MATLAB to exit"
@@ -429,18 +402,18 @@
 	done
 
-	#filter out windows characters:
+	# Filter out Windows characters
 	cat matlab_log.log | tr -cd '\11\12\40-\176' > matlab_log.log2 && mv matlab_log.log2 matlab_log.log
 fi
 # }}}
 
-# Python tests
+# Python tests # {{{
 #
 # TODO: Figure out why "Running Python test for Rank $i" is printed twice for each CPU
-# {{{
+#
 if [ $PYTHON_TEST -eq 1 ]; then
-	#Launch all tests on different cpus
+	# Launch all tests on different CPUs
 	export PYTHONPATH="${ISSM_DIR}/src/m/dev"
 	export PYTHONSTARTUP="${PYTHONPATH}/devpath.py"
-	export PYTHONUNBUFFERED=1 #we don't want python to buffer otherwise issm.exe output is not captured
+	export PYTHONUNBUFFERED=1 # We don't want Python to buffer otherwise issm.exe output is not captured
 	for (( i=1;i<=$NUMCPUS_RUN;i++ ))
 	do
@@ -474,14 +447,18 @@
 rm -f $ISSM_DIR/execution/*/ADOLC-*
 
-# Examples Tests
-# {{{
+# Examples Tests # {{{
 if [ $EXAMPLES_TEST -eq 1 ]; then
 	export MATLAB_PATH
+
+	# Download examples datasets if they are not already present
+	if [[ -z $(ls -A1q $ISSM_DIR/examples/Data) ]]; then
+		$ISSM_DIR/scripts/DownloadExamplesDatasets.sh
+	fi
+
 	$ISSM_DIR/jenkins/examples_tests.sh
 fi
 # }}}
 
-# Process logs to be JUnit compatible
-#{{{
+# Process logs to be JUnit compatible # {{{
 cd $ISSM_DIR/nightlylog
 source $ISSM_EXT_DIR/shell2junit/install/bin/sh2ju.sh
@@ -489,5 +466,5 @@
 
 if [ $MATLAB_TEST -eq 1 ]; then
-	#number tests:
+	# Number tests
 	numtests=`cat matlab_log.log | grep "\-\-\-\-\-\-\-\-starting" | wc -l`
 	testlist=`cat matlab_log.log | grep "\-\-\-\-\-\-\-\-starting" | sed 's/----------------starting://g' | sed 's/-//g'`
@@ -500,5 +477,5 @@
 
 	# Check that MATLAB did not exit in error
-	matlabExitedInError=`grep -E "Activation cannot proceed|Error in matlab_run|Illegal use of reserved keyword" matlab_log.log | wc -l`
+	matlabExitedInError=`grep -E "Activation cannot proceed|Error in|Illegal|Invalid MEX-file|license|Warning: Name is nonexistent or not a directory" matlab_log.log | wc -l`
 
 	if [ $matlabExitedInError -ne 0 ]; then
@@ -515,5 +492,5 @@
 
 if [ $PYTHON_TEST -eq 1 ]; then
-	#number tests:
+	# Number tests
 	numtests=`cat python_log.log | grep "\-\-\-\-\-\-\-\-starting" | wc -l`
 	testlist=`cat python_log.log | grep "\-\-\-\-\-\-\-\-starting" | sed 's/----------------starting://g' | sed 's/-//g'`
@@ -526,5 +503,5 @@
 
 	# Check that Python did not exit in error
-	pythonExitedInError=`grep -E "Error|Standard exception|Traceback|bad interpreter" python_log.log | wc -l`
+	pythonExitedInError=`grep -c -E "Error|No such file or directory|Permission denied|Standard exception|Traceback|bad interpreter|syntax error" python_log.log`
 
 	if [ $pythonExitedInError -ne 0 ]; then
@@ -553,5 +530,5 @@
 
 	# Check that MATLAB did not exit in error
-	matlabExitedInError=`grep -E "Activation cannot proceed|Error in matlab_run|Illegal use of reserved keyword" matlab_log_examples.log | wc -l`
+	matlabExitedInError=`grep -E "Activation cannot proceed|Error in|Illegal|Invalid MEX-file|license|Warning: Name is nonexistent or not a directory" matlab_log_examples.log | wc -l`
 
 	if [ $matlabExitedInError -ne 0 ]; then
@@ -569,3 +546,3 @@
 # Clean up execution directory
 rm -rf $ISSM_DIR/execution/*
-#}}}
+# }}}
Index: /issm/trunk/jenkins/linux64_caladan
===================================================================
--- /issm/trunk/jenkins/linux64_caladan	(revision 28012)
+++ /issm/trunk/jenkins/linux64_caladan	(revision 28013)
@@ -39,5 +39,5 @@
 #-----------------------------------#
 
-#List of external pakages to be installed and their installation scripts
+#List of external packages to be installed and their installation scripts
 EXTERNALPACKAGES="autotools     install.sh
 						cmake        install.sh
@@ -47,5 +47,5 @@
 						boost         install-1.7-linux.sh
 						dakota        install-6.2-linux64.sh
-						chaco         install.sh
+						chaco         install-linux.sh
 						m1qn3         install.sh
 						shell2junit   install.sh"
Index: /issm/trunk/jenkins/linux64_caladan_ad
===================================================================
--- /issm/trunk/jenkins/linux64_caladan_ad	(revision 28012)
+++ /issm/trunk/jenkins/linux64_caladan_ad	(revision 28013)
@@ -29,5 +29,5 @@
 #-----------------------------------#
 
-#List of external pakages to be installed and their installation scripts
+#List of external packages to be installed and their installation scripts
 EXTERNALPACKAGES="
 	autotools	install-linux.sh
Index: /issm/trunk/jenkins/linux64_caladan_ampi
===================================================================
--- /issm/trunk/jenkins/linux64_caladan_ampi	(revision 28012)
+++ /issm/trunk/jenkins/linux64_caladan_ampi	(revision 28013)
@@ -39,5 +39,5 @@
 #-----------------------------------#
 
-#List of external pakages to be installed and their installation scripts
+#List of external packages to be installed and their installation scripts
 EXTERNALPACKAGES="
 	autotools		install-linux.sh
Index: /issm/trunk/jenkins/mac-intel-basic
===================================================================
--- /issm/trunk/jenkins/mac-intel-basic	(revision 28013)
+++ /issm/trunk/jenkins/mac-intel-basic	(revision 28013)
@@ -0,0 +1,70 @@
+MATLAB_PATH="/Applications/MATLAB_R2022b.app"
+
+#--------------------#
+# ISSM Configuration #
+#--------------------#
+
+ISSM_CONFIG='\
+	--prefix=${ISSM_DIR} \
+	--disable-static \
+	--enable-development \
+	--enable-debugging \
+	--with-numthreads=4 \
+	--with-matlab-dir=${MATLAB_PATH} \
+	--with-fortran-lib="-L/usr/local/Cellar/gcc/13.1.0/lib/gcc/13 -lgfortran" \
+	--with-mpi-include=${ISSM_DIR}/externalpackages/petsc/install/include \
+	--with-mpi-libflags="-L${ISSM_DIR}/externalpackages/petsc/install/lib -lmpi -lmpicxx -lmpifort" \
+	--with-blas-lapack-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-metis-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-parmetis-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-scalapack-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-mumps-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-petsc-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-triangle-dir=${ISSM_DIR}/externalpackages/triangle/install \
+	--with-chaco-dir=${ISSM_DIR}/externalpackages/chaco/install \
+	--with-m1qn3-dir=${ISSM_DIR}/externalpackages/m1qn3/install \
+	--with-semic-dir=${ISSM_DIR}/externalpackages/semic/install \
+'
+
+#-------------------#
+# External Packages #
+#-------------------#
+
+# List of external packages to be installed and their installation scripts
+EXTERNALPACKAGES="
+	autotools	install-mac.sh
+	cmake		install.sh
+	petsc		install-3.14-mac.sh
+	triangle	install-mac.sh
+	chaco		install-mac.sh
+	m1qn3		install.sh
+	semic		install.sh
+	shell2junit	install.sh
+"
+
+#---------#
+# Testing #
+#---------#
+
+# Test suites
+MATLAB_TEST=1
+PYTHON_TEST=0
+JAVASCRIPT_TEST=0
+EXAMPLES_TEST=0
+
+# Number of CPUs used in ISSM compilation
+#
+# NOTE: One is usually safer as some packages are very sensitive to parallel
+# 		compilation
+#
+NUMCPUS_INSTALL=4
+
+# Number of CPUs used in the nightly runs
+NUMCPUS_RUN=2
+
+# Nightly run options
+#
+# See documentation in test/NightlyRun/runme.* for more information
+#
+MATLAB_NROPTIONS="'exclude',[IdFromString('Dakota')]"
+PYTHON_NROPTIONS=""
Index: /issm/trunk/jenkins/mac-intel-binaries-matlab
===================================================================
--- /issm/trunk/jenkins/mac-intel-binaries-matlab	(revision 28013)
+++ /issm/trunk/jenkins/mac-intel-binaries-matlab	(revision 28013)
@@ -0,0 +1,93 @@
+MATLAB_PATH="/Applications/MATLAB_R2022b.app"
+
+#--------------------#
+# ISSM Configuration #
+#--------------------#
+
+# NOTE:
+# - We can disable dependency tracking in the Autotools because the binaries
+#	should always be a one-time build.
+#
+# TODO:
+# - Add static copy of libsqlite and make sure it is found during GMT configuration
+#
+
+ISSM_CONFIG='\
+	--prefix=${ISSM_DIR} \
+	--enable-standalone-executables \
+	--enable-standalone-modules \
+	--enable-standalone-libraries \
+	--disable-dependency-tracking \
+	--with-matlab-dir=${MATLAB_PATH} \
+	--with-fortran-lib="/usr/local/Cellar/gcc/13.1.0/lib/gcc/13/libgfortran.a /usr/local/Cellar/gcc/13.1.0/lib/gcc/13/libquadmath.a /usr/local/Cellar/gcc/13.1.0/lib/gcc/13/gcc/x86_64-apple-darwin21/13/libgcc.a" \
+	--with-mpi-include=${ISSM_DIR}/externalpackages/petsc/install/include \
+	--with-mpi-libflags="-L${ISSM_DIR}/externalpackages/petsc/install/lib -lmpi -lmpicxx -lmpifort" \
+	--with-blas-lapack-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-metis-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-parmetis-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-scalapack-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-mumps-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-hdf5-dir=${ISSM_DIR}/externalpackages/hdf5/install \
+	--with-petsc-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-gsl-dir=${ISSM_DIR}/externalpackages/gsl/install \
+	--with-boost-dir=${ISSM_DIR}/externalpackages/boost/install \
+	--with-dakota-dir=${ISSM_DIR}/externalpackages/dakota/install \
+	--with-proj-dir=${ISSM_DIR}/externalpackages/proj/install \
+	--with-triangle-dir=${ISSM_DIR}/externalpackages/triangle/install \
+	--with-chaco-dir=${ISSM_DIR}/externalpackages/chaco/install \
+	--with-m1qn3-dir=${ISSM_DIR}/externalpackages/m1qn3/install \
+	--with-semic-dir=${ISSM_DIR}/externalpackages/semic/install \
+'
+
+#-------------------#
+# External Packages #
+#-------------------#
+
+EXTERNALPACKAGES="
+	autotools	install-mac.sh
+	cmake		install.sh
+	petsc		install-3.14-mac-static.sh
+	gsl			install-static.sh
+	boost		install-1.7-mac-static.sh
+	dakota		install-6.2-mac-static.sh
+	curl		install-7-mac-static.sh
+	hdf5		install-1-parallel-static.sh
+	netcdf		install-4.7-parallel-static.sh
+	proj		install-6-static.sh
+	gdal		install-3-static.sh
+	gshhg		install.sh
+	gmt			install-6-mac-static.sh
+	gmsh		install-4-mac-static.sh
+	triangle	install-mac-static.sh
+	chaco		install-mac.sh
+	m1qn3		install.sh
+	semic		install.sh
+	shell2junit	install.sh
+"
+
+#---------#
+# Testing #
+#---------#
+
+# Test suites
+MATLAB_TEST=0
+PYTHON_TEST=0
+JAVASCRIPT_TEST=0
+EXAMPLES_TEST=0
+
+# Number of CPUs used in ISSM compilation
+#
+# NOTE: One is usually safer as some packages are very sensitive to parallel
+# 		compilation
+#
+NUMCPUS_INSTALL=4
+
+# Number of CPUs used in the nightly runs
+NUMCPUS_RUN=1
+
+# Nightly run options
+#
+# See documentation in test/NightlyRun/runme.* for more information.
+#
+PYTHON_NROPTIONS=""
+MATLAB_NROPTIONS=""
Index: /issm/trunk/jenkins/mac-intel-binaries-python-3
===================================================================
--- /issm/trunk/jenkins/mac-intel-binaries-python-3	(revision 28013)
+++ /issm/trunk/jenkins/mac-intel-binaries-python-3	(revision 28013)
@@ -0,0 +1,93 @@
+#--------------------#
+# ISSM Configuration #
+#--------------------#
+
+# NOTE:
+# - We can disable dependency tracking in the Autotools because the binaries
+#	should always be a one-time build.
+#
+# TODO:
+# - Add static copy of libsqlite and make sure it is found during GMT configuration
+#
+
+ISSM_CONFIG='\
+	--prefix=${ISSM_DIR} \
+	--enable-standalone-executables \
+	--enable-standalone-modules \
+	--enable-standalone-libraries \
+	--disable-dependency-tracking \
+	--with-python-version=3.9 \
+	--with-python-dir=/Library/Developer/CommandLineTools/Library/Frameworks/Python3.framework/Versions/3.9 \
+	--with-python-numpy-dir=/Library/Python/3.9/site-packages/numpy/core/include/numpy \
+	--with-fortran-lib="/usr/local/Cellar/gcc/13.1.0/lib/gcc/13/libgfortran.a /usr/local/Cellar/gcc/13.1.0/lib/gcc/13/libquadmath.a /usr/local/Cellar/gcc/13.1.0/lib/gcc/13/gcc/x86_64-apple-darwin21/13/libgcc.a" \
+	--with-mpi-include=${ISSM_DIR}/externalpackages/petsc/install/include \
+	--with-mpi-libflags="-L${ISSM_DIR}/externalpackages/petsc/install/lib -lmpi -lmpicxx -lmpifort" \
+	--with-blas-lapack-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-metis-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-parmetis-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-scalapack-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-mumps-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-hdf5-dir=${ISSM_DIR}/externalpackages/hdf5/install \
+	--with-petsc-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-gsl-dir=${ISSM_DIR}/externalpackages/gsl/install \
+	--with-boost-dir=${ISSM_DIR}/externalpackages/boost/install \
+	--with-dakota-dir=${ISSM_DIR}/externalpackages/dakota/install \
+	--with-proj-dir=${ISSM_DIR}/externalpackages/proj/install \
+	--with-triangle-dir=${ISSM_DIR}/externalpackages/triangle/install \
+	--with-chaco-dir=${ISSM_DIR}/externalpackages/chaco/install \
+	--with-m1qn3-dir=${ISSM_DIR}/externalpackages/m1qn3/install \
+	--with-semic-dir=${ISSM_DIR}/externalpackages/semic/install \
+'
+
+#-------------------#
+# External Packages #
+#-------------------#
+
+EXTERNALPACKAGES="
+	autotools	install-mac.sh
+	cmake		install.sh
+	petsc		install-3.14-mac-static.sh
+	gsl			install-static.sh
+	boost		install-1.7-mac-static.sh
+	dakota		install-6.2-mac-static.sh
+	curl		install-7-mac-static.sh
+	hdf5		install-1-parallel-static.sh
+	netcdf		install-4.7-parallel-static.sh
+	proj		install-6-static.sh
+	gdal		install-3-python-static.sh
+	gshhg		install.sh
+	gmt			install-6-mac-static.sh
+	gmsh		install-4-mac-static.sh
+	triangle	install-mac-static.sh
+	chaco		install-mac.sh
+	m1qn3		install.sh
+	semic		install.sh
+	shell2junit	install.sh
+"
+
+#---------#
+# Testing #
+#---------#
+
+# Test suites
+MATLAB_TEST=0
+PYTHON_TEST=0
+JAVASCRIPT_TEST=0
+EXAMPLES_TEST=0
+
+# Number of CPUs used in ISSM compilation
+#
+# NOTE: One is usually safer as some packages are very sensitive to parallel
+# 		compilation
+#
+NUMCPUS_INSTALL=8
+
+# Number of CPUs used in the nightly runs
+NUMCPUS_RUN=1
+
+# Nightly run options
+#
+# See documentation in test/NightlyRun/runme.* for more information.
+#
+PYTHON_NROPTIONS=""
+MATLAB_NROPTIONS=""
Index: /issm/trunk/jenkins/mac-intel-dakota
===================================================================
--- /issm/trunk/jenkins/mac-intel-dakota	(revision 28013)
+++ /issm/trunk/jenkins/mac-intel-dakota	(revision 28013)
@@ -0,0 +1,102 @@
+# TODO:
+# - Change PROJ installation script to latest version once Jenkins macOS build 
+#	node has been upgraded.
+#
+
+MATLAB_PATH="/Applications/MATLAB_R2022b.app"
+
+#--------------------#
+# ISSM Configuration #
+#--------------------#
+
+ISSM_CONFIG='\
+	--prefix=${ISSM_DIR} \
+	--disable-static \
+	--enable-development \
+	--enable-debugging \
+	--with-numthreads=4 \
+	--with-matlab-dir=${MATLAB_PATH} \
+	--with-python-version=3.9 \
+	--with-python-dir=/System/Volumes/Data/Library/Developer/CommandLineTools/Library/Frameworks/Python3.framework/Versions/3.9 \
+	--with-python-numpy-dir=/Library/Python/3.9/site-packages/numpy/core/include/numpy \
+	--with-fortran-lib="-L/usr/local/Cellar/gcc/13.1.0/lib/gcc/13 -lgfortran" \
+	--with-mpi-include=${ISSM_DIR}/externalpackages/petsc/install/include \
+	--with-mpi-libflags="-L${ISSM_DIR}/externalpackages/petsc/install/lib -lmpi -lmpicxx -lmpifort" \
+	--with-blas-lapack-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-metis-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-parmetis-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-scalapack-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-mumps-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-hdf5-dir=${ISSM_DIR}/externalpackages/hdf5/install \
+	--with-petsc-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-gsl-dir=${ISSM_DIR}/externalpackages/gsl/install \
+	--with-boost-dir=${ISSM_DIR}/externalpackages/boost/install \
+	--with-dakota-dir=${ISSM_DIR}/externalpackages/dakota/install \
+	--with-proj-dir=${ISSM_DIR}/externalpackages/proj/install \
+	--with-triangle-dir=${ISSM_DIR}/externalpackages/triangle/install \
+	--with-chaco-dir=${ISSM_DIR}/externalpackages/chaco/install \
+	--with-m1qn3-dir=${ISSM_DIR}/externalpackages/m1qn3/install \
+	--with-semic-dir=${ISSM_DIR}/externalpackages/semic/install \
+'
+
+#-------------------#
+# External Packages #
+#-------------------#
+
+#List of external packages to be installed and their installation scripts
+EXTERNALPACKAGES="
+	autotools	install-mac.sh
+	cmake		install.sh
+	petsc		install-3.14-mac.sh
+	gsl			install.sh
+	boost		install-1.7-mac.sh
+	dakota		install-6.2-mac.sh
+	curl		install-7-mac.sh
+	hdf5		install-1-parallel.sh
+	netcdf		install-4.7-parallel.sh
+	proj		install-6.sh
+	gdal		install-3-python.sh
+	gshhg		install.sh
+	gmt			install-6-mac.sh
+	gmsh		install-4-mac.sh
+	triangle	install-mac.sh
+	chaco		install-mac.sh
+	m1qn3		install.sh
+	semic		install.sh
+	shell2junit	install.sh
+"
+
+#---------#
+# Testing #
+#---------#
+
+# Test suites
+MATLAB_TEST=1
+PYTHON_TEST=1
+JAVASCRIPT_TEST=0
+EXAMPLES_TEST=0
+
+# Number of CPUs used in ISSM compilation
+#
+# NOTE: One is usually safer as some packages are very sensitive to parallel
+# 		compilation
+#
+NUMCPUS_INSTALL=4
+
+# Number of CPUs used in the nightly runs
+NUMCPUS_RUN=2
+
+# Nightly run options
+#
+# See documentation in test/NightlyRun/runme.* for more information.
+#
+# NOTE:
+# - Errors are large for tests 234, 235, 418, and 420 under MATLAB
+# - Tests 444 and 445 fail intermittently under MATLAB with "Index exceeds array bounds."
+# - Errors are large for tests 234 418 and 420 under Python
+# - See test417.py for why it is excluded
+# - Test 444 fails intermittently under Python with "IndexError: list index out of range"
+# - Excluding 2006 until it can be debugged (file I/O)
+#
+MATLAB_NROPTIONS="'id',[IdFromString('Dakota')],'exclude',[234 235 418 420 444 445 2006]"
+PYTHON_NROPTIONS="--include_name 'Dakota' --exclude 234 417 418 420 444 445 2006"
Index: /issm/trunk/jenkins/mac-intel-examples
===================================================================
--- /issm/trunk/jenkins/mac-intel-examples	(revision 28013)
+++ /issm/trunk/jenkins/mac-intel-examples	(revision 28013)
@@ -0,0 +1,90 @@
+# NOTE:
+# - Same configuration as mac-intel-full
+#
+# TODO:
+# - Change PROJ installation script to latest version once Jenkins macOS build 
+#	node has been upgraded.
+#
+
+MATLAB_PATH="/Applications/MATLAB_R2022b.app"
+
+#--------------------#
+# ISSM Configuration #
+#--------------------#
+
+ISSM_CONFIG='\
+	--prefix=${ISSM_DIR} \
+	--disable-static \
+	--enable-development \
+	--enable-debugging \
+	--with-numthreads=4 \
+	--with-matlab-dir=${MATLAB_PATH} \
+	--with-fortran-lib="-L/usr/local/Cellar/gcc/13.1.0/lib/gcc/13 -lgfortran" \
+	--with-mpi-include=${ISSM_DIR}/externalpackages/petsc/install/include \
+	--with-mpi-libflags="-L${ISSM_DIR}/externalpackages/petsc/install/lib -lmpi -lmpicxx -lmpifort" \
+	--with-blas-lapack-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-metis-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-parmetis-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-scalapack-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-mumps-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-hdf5-dir=${ISSM_DIR}/externalpackages/hdf5/install \
+	--with-petsc-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-gsl-dir=${ISSM_DIR}/externalpackages/gsl/install \
+	--with-boost-dir=${ISSM_DIR}/externalpackages/boost/install \
+	--with-dakota-dir=${ISSM_DIR}/externalpackages/dakota/install \
+	--with-proj-dir=${ISSM_DIR}/externalpackages/proj/install \
+	--with-triangle-dir=${ISSM_DIR}/externalpackages/triangle/install \
+	--with-chaco-dir=${ISSM_DIR}/externalpackages/chaco/install \
+	--with-m1qn3-dir=${ISSM_DIR}/externalpackages/m1qn3/install \
+	--with-semic-dir=${ISSM_DIR}/externalpackages/semic/install \
+'
+
+#-------------------#
+# External Packages #
+#-------------------#
+
+EXTERNALPACKAGES="
+	autotools	install-mac.sh
+	cmake		install.sh
+	petsc		install-3.14-mac.sh
+	gsl			install.sh
+	boost		install-1.7-mac.sh
+	dakota		install-6.2-mac.sh
+	curl		install-7-mac.sh
+	hdf5		install-1-parallel.sh
+	netcdf		install-4.7-parallel.sh
+	proj		install-6.sh
+	gdal		install-3.sh
+	gshhg		install.sh
+	gmt			install-6-mac.sh
+	gmsh		install-4-mac.sh
+	triangle	install-mac.sh
+	chaco		install-mac.sh
+	m1qn3		install.sh
+	semic		install.sh
+	shell2junit	install.sh
+"
+
+#---------#
+# Testing #
+#---------#
+
+# Test suites
+MATLAB_TEST=0
+PYTHON_TEST=0
+JAVASCRIPT_TEST=0
+EXAMPLES_TEST=1
+
+# Number of CPUs used in ISSM compilation
+#
+# NOTE: One is usually safer as some packages are very sensitive to parallel
+# 		compilation
+#
+NUMCPUS_INSTALL=4
+
+# Number of CPUs used in the nightly runs
+NUMCPUS_RUN=2
+
+# Nightly run options
+MATLAB_NROPTIONS=""
+PYTHON_NROPTIONS=""
Index: /issm/trunk/jenkins/mac-intel-full
===================================================================
--- /issm/trunk/jenkins/mac-intel-full	(revision 28013)
+++ /issm/trunk/jenkins/mac-intel-full	(revision 28013)
@@ -0,0 +1,94 @@
+# NOTE: 
+# - This configuration adds solid earth and Dakota capabilities to the basic 
+#	build.
+#
+# TODO:
+# - Change PROJ installation script to latest version once Jenkins macOS build 
+#	node has been upgraded.
+#
+
+MATLAB_PATH="/Applications/MATLAB_R2022b.app"
+
+#--------------------#
+# ISSM Configuration #
+#--------------------#
+
+ISSM_CONFIG='\
+	--prefix=${ISSM_DIR} \
+	--disable-static \
+	--enable-development \
+	--enable-debugging \
+	--with-numthreads=4 \
+	--with-matlab-dir=${MATLAB_PATH} \
+	--with-python-version=3.9 \
+	--with-python-dir=/System/Volumes/Data/Library/Developer/CommandLineTools/Library/Frameworks/Python3.framework/Versions/3.9 \
+	--with-python-numpy-dir=/Library/Python/3.9/site-packages/numpy/core/include/numpy \
+	--with-fortran-lib="-L/opt/homebrew/Cellar/gcc/13.1.0/lib/gcc/13 -lgfortran" \
+	--with-mpi-include=${ISSM_DIR}/externalpackages/petsc/install/include \
+	--with-mpi-libflags="-L${ISSM_DIR}/externalpackages/petsc/install/lib -lmpi -lmpicxx -lmpifort" \
+	--with-blas-lapack-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-metis-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-parmetis-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-scalapack-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-mumps-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-hdf5-dir=${ISSM_DIR}/externalpackages/hdf5/install \
+	--with-petsc-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-gsl-dir=${ISSM_DIR}/externalpackages/gsl/install \
+	--with-boost-dir=${ISSM_DIR}/externalpackages/boost/install \
+	--with-dakota-dir=${ISSM_DIR}/externalpackages/dakota/install \
+	--with-proj-dir=${ISSM_DIR}/externalpackages/proj/install \
+	--with-triangle-dir=${ISSM_DIR}/externalpackages/triangle/install \
+	--with-chaco-dir=${ISSM_DIR}/externalpackages/chaco/install \
+	--with-m1qn3-dir=${ISSM_DIR}/externalpackages/m1qn3/install \
+	--with-semic-dir=${ISSM_DIR}/externalpackages/semic/install \
+'
+
+#-------------------#
+# External Packages #
+#-------------------#
+
+EXTERNALPACKAGES="
+	autotools	install-mac.sh
+	cmake		install.sh
+	petsc		install-3.14-mac.sh
+	gsl			install.sh
+	boost		install-1.7-mac.sh
+	dakota		install-6.2-mac.sh
+	curl		install-7-mac.sh
+	hdf5		install-1-parallel.sh
+	netcdf		install-4.7-parallel.sh
+	proj		install-6.sh
+	gdal		install-3-python.sh
+	gshhg		install.sh
+	gmt			install-6-mac.sh
+	gmsh		install-4-mac.sh
+	triangle	install-mac.sh
+	chaco		install-mac.sh
+	m1qn3		install.sh
+	semic		install.sh
+	shell2junit	install.sh
+"
+
+#---------#
+# Testing #
+#---------#
+
+# Test suites
+MATLAB_TEST=0
+PYTHON_TEST=0
+JAVASCRIPT_TEST=0
+EXAMPLES_TEST=0
+
+# Number of CPUs used in ISSM compilation
+#
+# NOTE: One is usually safer as some packages are very sensitive to parallel
+# 		compilation
+#
+NUMCPUS_INSTALL=4
+
+# Number of CPUs used in the nightly runs
+NUMCPUS_RUN=1
+
+# Nightly run options
+MATLAB_NROPTIONS=""
+PYTHON_NROPTIONS=""
Index: /issm/trunk/jenkins/mac-intel-solid_earth
===================================================================
--- /issm/trunk/jenkins/mac-intel-solid_earth	(revision 28013)
+++ /issm/trunk/jenkins/mac-intel-solid_earth	(revision 28013)
@@ -0,0 +1,102 @@
+# NOTE:
+# - Same configuration as mac-intel-full
+#
+# TODO:
+# - Change PROJ installation script to latest version once Jenkins macOS build 
+#	node has been upgraded.
+#
+
+MATLAB_PATH="/Applications/MATLAB_R2022b.app"
+
+#--------------------#
+# ISSM Configuration #
+#--------------------#
+
+ISSM_CONFIG='\
+	--prefix=${ISSM_DIR} \
+	--disable-static \
+	--enable-development \
+	--enable-debugging \
+	--with-numthreads=4 \
+	--with-matlab-dir=${MATLAB_PATH} \
+	--with-python-version=3.9 \
+	--with-python-dir=/System/Volumes/Data/Library/Developer/CommandLineTools/Library/Frameworks/Python3.framework/Versions/3.9 \
+	--with-python-numpy-dir=/Library/Python/3.9/site-packages/numpy/core/include/numpy \
+	--with-fortran-lib="-L/usr/local/Cellar/gcc/13.1.0/lib/gcc/13 -lgfortran" \
+	--with-mpi-include=${ISSM_DIR}/externalpackages/petsc/install/include \
+	--with-mpi-libflags="-L${ISSM_DIR}/externalpackages/petsc/install/lib -lmpi -lmpicxx -lmpifort" \
+	--with-blas-lapack-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-metis-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-parmetis-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-scalapack-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-mumps-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-hdf5-dir=${ISSM_DIR}/externalpackages/hdf5/install \
+	--with-petsc-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-gsl-dir=${ISSM_DIR}/externalpackages/gsl/install \
+	--with-boost-dir=${ISSM_DIR}/externalpackages/boost/install \
+	--with-dakota-dir=${ISSM_DIR}/externalpackages/dakota/install \
+	--with-proj-dir=${ISSM_DIR}/externalpackages/proj/install \
+	--with-triangle-dir=${ISSM_DIR}/externalpackages/triangle/install \
+	--with-chaco-dir=${ISSM_DIR}/externalpackages/chaco/install \
+	--with-m1qn3-dir=${ISSM_DIR}/externalpackages/m1qn3/install \
+	--with-semic-dir=${ISSM_DIR}/externalpackages/semic/install \
+'
+
+#-------------------#
+# External Packages #
+#-------------------#
+
+#List of external packages to be installed and their installation scripts
+EXTERNALPACKAGES="
+	autotools	install-mac.sh
+	cmake		install.sh
+	petsc		install-3.14-mac.sh
+	gsl			install.sh
+	boost		install-1.7-mac.sh
+	dakota		install-6.2-mac.sh
+	curl		install-7-mac.sh
+	hdf5		install-1-parallel.sh
+	netcdf		install-4.7-parallel.sh
+	proj		install-6.sh
+	gdal		install-3-python.sh
+	gshhg		install.sh
+	gmt			install-6-mac.sh
+	gmsh		install-4-mac.sh
+	triangle	install-mac.sh
+	chaco		install-mac.sh
+	m1qn3		install.sh
+	semic		install.sh
+	shell2junit	install.sh
+"
+
+#---------#
+# Testing #
+#---------#
+
+# Test suites
+MATLAB_TEST=1
+PYTHON_TEST=1
+JAVASCRIPT_TEST=0
+EXAMPLES_TEST=0
+
+# Number of CPUs used in ISSM compilation
+#
+# NOTE: One is usually safer as some packages are very sensitive to parallel
+# 		compilation
+#
+NUMCPUS_INSTALL=4
+
+# Number of CPUs used in the nightly runs
+NUMCPUS_RUN=2
+
+# Nightly run options
+#
+# See documentation in test/NightlyRun/runme.* for more information.
+#
+# NOTE:
+# - Excluding 2006 until it can be debugged (file I/O)
+# - Excluding 2012 until it can be looked at by Eric ("FindParam error message: Parameter HydrologyModel not set")
+# - Excluding 2091 until it can be debugged (resource starvation)
+#
+MATLAB_NROPTIONS="'benchmark','slc','exclude',[2004 2006 2012 2051 2052 2053 2085 2424 2425]"
+PYTHON_NROPTIONS="--benchmark slc --exclude 2004 2006 2012 2051 2052 2053 2085 2424 2425"
Index: /issm/trunk/jenkins/mac-silicon-basic
===================================================================
--- /issm/trunk/jenkins/mac-silicon-basic	(revision 28013)
+++ /issm/trunk/jenkins/mac-silicon-basic	(revision 28013)
@@ -0,0 +1,88 @@
+# NOTE:
+# - Currently, this configuration does not compile the MATLAB API. After 
+#	compiling, you will need to download the ISSM pre-compiled binaries from 
+#	https://issm.jpl.nasa.gov/download/binaries, copy the contents of lib/ to 
+#	$ISSM_DIR/lib-precompiled, then add $ISSM_DIR/lib-precompiled to the path 
+#	in MATLAB.
+#
+# TODO:
+# - Change PROJ installation script to latest version once Jenkins macOS build 
+#	node has been upgraded.
+#
+
+MATLAB_PATH="/Applications/MATLAB_R2022b.app" # NOTE: Although we do not configure/compile MATLAB wrappers, we need to set this variable for testing
+
+#--------------------#
+# ISSM Configuration #
+#--------------------#
+
+ISSM_CONFIG='\
+	--prefix=${ISSM_DIR} \
+	--disable-static \
+	--enable-development \
+	--enable-debugging \
+	--without-wrappers \
+	--with-numthreads=8 \
+	--with-fortran-lib="-L/opt/homebrew/lib/gcc/current -lgfortran" \
+	--with-mpi-include=${ISSM_DIR}/externalpackages/petsc/install/include \
+	--with-mpi-libflags="-L${ISSM_DIR}/externalpackages/petsc/install/lib -lmpi -lmpicxx -lmpifort" \
+	--with-blas-lapack-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-metis-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-parmetis-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-scalapack-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-mumps-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-petsc-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-triangle-dir=${ISSM_DIR}/externalpackages/triangle/install \
+	--with-chaco-dir=${ISSM_DIR}/externalpackages/chaco/install \
+	--with-m1qn3-dir=${ISSM_DIR}/externalpackages/m1qn3/install \
+	--with-semic-dir=${ISSM_DIR}/externalpackages/semic/install \
+'
+
+#-------------------#
+# External Packages #
+#-------------------#
+
+# List of external packages to be installed and their installation scripts
+EXTERNALPACKAGES="
+	autotools	install-mac.sh
+	cmake		install.sh
+	petsc		install-3.17-mac-silicon.sh
+	triangle	install-mac.sh
+	chaco		install-mac.sh
+	m1qn3		install.sh
+	semic		install.sh
+	shell2junit	install.sh
+"
+
+#---------#
+# Testing #
+#---------#
+
+# Test suites
+MATLAB_TEST=1
+PYTHON_TEST=0
+JAVASCRIPT_TEST=0
+EXAMPLES_TEST=0
+
+# Number of CPUs used in ISSM compilation
+#
+# NOTE: One is usually safer as some packages are very sensitive to parallel
+# 		compilation
+#
+NUMCPUS_INSTALL=4
+
+# Number of CPUs used in the nightly runs
+NUMCPUS_RUN=2
+
+# Nightly run options
+#
+# See documentation in test/NightlyRun/runme.* for more information
+#
+# NOTE:
+# - Tests 430, 441:442, 470:476 excluded because of large errors.
+# - Tests 462-464, 517 excluded because of "Arrays have incompatible sizes for 
+#	this operation."
+# - Test 701:703 excluded because they use full Stokes equations
+#
+MATLAB_NROPTIONS="'exclude',[IdFromString('Dakota'),430,435,441:442,462:464,470:476,517,701:703]"
+PYTHON_NROPTIONS=""
Index: /issm/trunk/jenkins/mac-silicon-dakota
===================================================================
--- /issm/trunk/jenkins/mac-silicon-dakota	(revision 28013)
+++ /issm/trunk/jenkins/mac-silicon-dakota	(revision 28013)
@@ -0,0 +1,107 @@
+# NOTE:
+# - Currently, this configuration does not compile the MATLAB API. After 
+#	compiling, you will need to download the ISSM pre-compiled binaries from 
+#	https://issm.jpl.nasa.gov/download/binaries, copy the contents of lib/ to 
+#	$ISSM_DIR/lib-precompiled, then add $ISSM_DIR/lib-precompiled to the path 
+#	in MATLAB.
+#
+# TODO:
+# - Change PROJ installation script to latest version once Jenkins macOS build 
+#	node has been upgraded.
+#
+
+MATLAB_PATH="/Applications/MATLAB_R2022b.app" # NOTE: Although we do not configure/compile MATLAB wrappers, we need to set this variable for testing
+
+#--------------------#
+# ISSM Configuration #
+#--------------------#
+
+ISSM_CONFIG='\
+	--prefix=${ISSM_DIR} \
+	--disable-static \
+	--enable-development \
+	--enable-debugging \
+	--with-numthreads=8 \
+	--with-python-version=3.9 \
+	--with-python-dir=/System/Volumes/Data/Library/Developer/CommandLineTools/Library/Frameworks/Python3.framework/Versions/3.9 \
+	--with-python-numpy-dir=/Library/Python/3.9/site-packages/numpy/core/include/numpy \
+	--with-fortran-lib="-L/opt/homebrew/lib/gcc/current -lgfortran" \
+	--with-mpi-include=${ISSM_DIR}/externalpackages/petsc/install/include \
+	--with-mpi-libflags="-L${ISSM_DIR}/externalpackages/petsc/install/lib -lmpi -lmpicxx -lmpifort" \
+	--with-blas-lapack-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-metis-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-parmetis-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-scalapack-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-mumps-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-hdf5-dir=${ISSM_DIR}/externalpackages/hdf5/install \
+	--with-petsc-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-gsl-dir=${ISSM_DIR}/externalpackages/gsl/install \
+	--with-boost-dir=${ISSM_DIR}/externalpackages/boost/install \
+	--with-dakota-dir=${ISSM_DIR}/externalpackages/dakota/install \
+	--with-proj-dir=${ISSM_DIR}/externalpackages/proj/install \
+	--with-triangle-dir=${ISSM_DIR}/externalpackages/triangle/install \
+	--with-chaco-dir=${ISSM_DIR}/externalpackages/chaco/install \
+	--with-m1qn3-dir=${ISSM_DIR}/externalpackages/m1qn3/install \
+	--with-semic-dir=${ISSM_DIR}/externalpackages/semic/install \
+'
+
+#-------------------#
+# External Packages #
+#-------------------#
+
+EXTERNALPACKAGES="
+	autotools	install-mac.sh
+	cmake		install.sh
+	petsc		install-3.17-mac-silicon.sh
+	gsl			install.sh
+	boost		install-1.7-mac.sh
+	dakota		install-6.2-mac.sh
+	curl		install-7-mac.sh
+	hdf5		install-1-parallel.sh
+	netcdf		install-4.7-parallel.sh
+	proj		install-6.sh
+	gdal		install-3-python.sh
+	gshhg		install.sh
+	gmt			install-6-mac.sh
+	gmsh		install-4-mac.sh
+	triangle	install-mac.sh
+	chaco		install-mac.sh
+	m1qn3		install.sh
+	semic		install.sh
+	shell2junit	install.sh
+"
+
+#---------#
+# Testing #
+#---------#
+
+# Test suites
+MATLAB_TEST=1
+PYTHON_TEST=1
+JAVASCRIPT_TEST=0
+EXAMPLES_TEST=0
+
+# Number of CPUs used in ISSM compilation
+#
+# NOTE: One is usually safer as some packages are very sensitive to parallel
+# 		compilation
+#
+NUMCPUS_INSTALL=4
+
+# Number of CPUs used in the nightly runs
+NUMCPUS_RUN=2
+
+# Nightly run options
+#
+# See documentation in test/NightlyRun/runme.* for more information.
+#
+# NOTE:
+# - Errors are large for tests 234, 235, 418, and 420 under MATLAB
+# - Tests 444 and 445 fail intermittently under MATLAB with "Index exceeds array bounds."
+# - Errors are large for tests 234 418 and 420 under Python
+# - See test417.py for why it is excluded
+# - Test 444 fails intermittently under Python with "IndexError: list index out of range"
+# - Excluding 2006 until it can be debugged (file I/O)
+#
+MATLAB_NROPTIONS="'id',[IdFromString('Dakota')],'exclude',[234 235 418 420 444 445 2006]"
+PYTHON_NROPTIONS="--include_name 'Dakota' --exclude 234 417 418 420 444 445 2006"
Index: /issm/trunk/jenkins/mac-silicon-examples
===================================================================
--- /issm/trunk/jenkins/mac-silicon-examples	(revision 28013)
+++ /issm/trunk/jenkins/mac-silicon-examples	(revision 28013)
@@ -0,0 +1,94 @@
+# NOTE:
+# - Same configuration as mac-silicon-full
+# - Currently, this configuration does not compile the MATLAB API. After 
+#	compiling, you will need to download the ISSM pre-compiled binaries from 
+#	https://issm.jpl.nasa.gov/download/binaries, copy the contents of lib/ to 
+#	$ISSM_DIR/lib-precompiled, then add $ISSM_DIR/lib-precompiled to the path 
+#	in MATLAB.
+#
+# TODO:
+# - Change PROJ installation script to latest version once Jenkins macOS build 
+#	node has been upgraded.
+#
+
+MATLAB_PATH="/Applications/MATLAB_R2022b.app" # NOTE: Although we do not configure/compile MATLAB wrappers, we need to set this variable for testing
+
+#--------------------#
+# ISSM Configuration #
+#--------------------#
+
+ISSM_CONFIG='\
+	--prefix=${ISSM_DIR} \
+	--disable-static \
+	--enable-development \
+	--enable-debugging \
+	--with-numthreads=8 \
+	--with-fortran-lib="-L/opt/homebrew/lib/gcc/current -lgfortran" \
+	--with-mpi-include=${ISSM_DIR}/externalpackages/petsc/install/include \
+	--with-mpi-libflags="-L${ISSM_DIR}/externalpackages/petsc/install/lib -lmpi -lmpicxx -lmpifort" \
+	--with-blas-lapack-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-metis-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-parmetis-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-scalapack-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-mumps-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-hdf5-dir=${ISSM_DIR}/externalpackages/hdf5/install \
+	--with-petsc-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-gsl-dir=${ISSM_DIR}/externalpackages/gsl/install \
+	--with-boost-dir=${ISSM_DIR}/externalpackages/boost/install \
+	--with-dakota-dir=${ISSM_DIR}/externalpackages/dakota/install \
+	--with-proj-dir=${ISSM_DIR}/externalpackages/proj/install \
+	--with-triangle-dir=${ISSM_DIR}/externalpackages/triangle/install \
+	--with-chaco-dir=${ISSM_DIR}/externalpackages/chaco/install \
+	--with-m1qn3-dir=${ISSM_DIR}/externalpackages/m1qn3/install \
+	--with-semic-dir=${ISSM_DIR}/externalpackages/semic/install \
+'
+
+#-------------------#
+# External Packages #
+#-------------------#
+
+EXTERNALPACKAGES="
+	autotools	install-mac.sh
+	cmake		install.sh
+	petsc		install-3.17-mac-silicon.sh
+	gsl			install.sh
+	boost		install-1.7-mac.sh
+	dakota		install-6.2-mac.sh
+	curl		install-7-mac.sh
+	hdf5		install-1-parallel.sh
+	netcdf		install-4.7-parallel.sh
+	proj		install-6.sh
+	gdal		install-3.sh
+	gshhg		install.sh
+	gmt			install-6-mac.sh
+	gmsh		install-4-mac.sh
+	triangle	install-mac.sh
+	chaco		install-mac.sh
+	m1qn3		install.sh
+	semic		install.sh
+	shell2junit	install.sh
+"
+
+#---------#
+# Testing #
+#---------#
+
+# Test suites
+MATLAB_TEST=0
+PYTHON_TEST=0
+JAVASCRIPT_TEST=0
+EXAMPLES_TEST=1
+
+# Number of CPUs used in ISSM compilation
+#
+# NOTE: One is usually safer as some packages are very sensitive to parallel
+# 		compilation
+#
+NUMCPUS_INSTALL=4
+
+# Number of CPUs used in the nightly runs
+NUMCPUS_RUN=2
+
+# Nightly run options
+MATLAB_NROPTIONS=""
+PYTHON_NROPTIONS=""
Index: /issm/trunk/jenkins/mac-silicon-full
===================================================================
--- /issm/trunk/jenkins/mac-silicon-full	(revision 28013)
+++ /issm/trunk/jenkins/mac-silicon-full	(revision 28013)
@@ -0,0 +1,98 @@
+# NOTE: 
+# - This configuration adds solid earth and Dakota capabilities to the basic 
+#	build.
+# - Currently, this configuration does not compile the MATLAB API. After 
+#	compiling, you will need to download the ISSM pre-compiled binaries from 
+#	https://issm.jpl.nasa.gov/download/binaries, copy the contents of lib/ to 
+#	$ISSM_DIR/lib-precompiled, then add $ISSM_DIR/lib-precompiled to the path 
+#	in MATLAB.
+#
+# TODO:
+# - Change PROJ installation script to latest version once Jenkins macOS build 
+#	node has been upgraded.
+#
+
+MATLAB_PATH="/Applications/MATLAB_R2022b.app" # NOTE: Although we do not configure/compile MATLAB wrappers, we need to set this variable for testing
+
+#--------------------#
+# ISSM Configuration #
+#--------------------#
+
+ISSM_CONFIG='\
+	--prefix=${ISSM_DIR} \
+	--disable-static \
+	--enable-development \
+	--enable-debugging \
+	--with-numthreads=8 \
+	--with-python-version=3.9 \
+	--with-python-dir=/System/Volumes/Data/Library/Developer/CommandLineTools/Library/Frameworks/Python3.framework/Versions/3.9 \
+	--with-python-numpy-dir=/Library/Python/3.9/site-packages/numpy/core/include/numpy \
+	--with-fortran-lib="-L/opt/homebrew/Cellar/gcc/12.2.0/lib/gcc/12 -lgfortran" \
+	--with-mpi-include=${ISSM_DIR}/externalpackages/petsc/install/include \
+	--with-mpi-libflags="-L${ISSM_DIR}/externalpackages/petsc/install/lib -lmpi -lmpicxx -lmpifort" \
+	--with-blas-lapack-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-metis-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-parmetis-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-scalapack-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-mumps-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-hdf5-dir=${ISSM_DIR}/externalpackages/hdf5/install \
+	--with-petsc-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-gsl-dir=${ISSM_DIR}/externalpackages/gsl/install \
+	--with-boost-dir=${ISSM_DIR}/externalpackages/boost/install \
+	--with-dakota-dir=${ISSM_DIR}/externalpackages/dakota/install \
+	--with-proj-dir=${ISSM_DIR}/externalpackages/proj/install \
+	--with-triangle-dir=${ISSM_DIR}/externalpackages/triangle/install \
+	--with-chaco-dir=${ISSM_DIR}/externalpackages/chaco/install \
+	--with-m1qn3-dir=${ISSM_DIR}/externalpackages/m1qn3/install \
+	--with-semic-dir=${ISSM_DIR}/externalpackages/semic/install \
+'
+
+#-------------------#
+# External Packages #
+#-------------------#
+
+EXTERNALPACKAGES="
+	autotools	install-mac.sh
+	cmake		install.sh
+	petsc		install-3.17-mac-silicon.sh
+	gsl			install.sh
+	boost		install-1.7-mac.sh
+	dakota		install-6.2-mac.sh
+	curl		install-7-mac.sh
+	hdf5		install-1-parallel.sh
+	netcdf		install-4.7-parallel.sh
+	proj		install-6.sh
+	gdal		install-3-python.sh
+	gshhg		install.sh
+	gmt			install-6-mac.sh
+	gmsh		install-4-mac.sh
+	triangle	install-mac.sh
+	chaco		install-mac.sh
+	m1qn3		install.sh
+	semic		install.sh
+	shell2junit	install.sh
+"
+
+#---------#
+# Testing #
+#---------#
+
+# Test suites
+MATLAB_TEST=0
+PYTHON_TEST=0
+JAVASCRIPT_TEST=0
+EXAMPLES_TEST=0
+
+# Number of CPUs used in ISSM compilation
+#
+# NOTE: One is usually safer as some packages are very sensitive to parallel
+# 		compilation
+#
+NUMCPUS_INSTALL=4
+
+# Number of CPUs used in the nightly runs
+NUMCPUS_RUN=2
+
+# Nightly run options
+MATLAB_NROPTIONS=""
+PYTHON_NROPTIONS=""
Index: /issm/trunk/jenkins/mac-silicon-solid_earth
===================================================================
--- /issm/trunk/jenkins/mac-silicon-solid_earth	(revision 28013)
+++ /issm/trunk/jenkins/mac-silicon-solid_earth	(revision 28013)
@@ -0,0 +1,105 @@
+# NOTE:
+# - Currently, this configuration does not compile the MATLAB API. After 
+#	compiling, you will need to download the ISSM pre-compiled binaries from 
+#	https://issm.jpl.nasa.gov/download/binaries, copy the contents of lib/ to 
+#	$ISSM_DIR/lib-precompiled, then add $ISSM_DIR/lib-precompiled to the path 
+#	in MATLAB.
+#
+# TODO:
+# - Change PROJ installation script to latest version once Jenkins macOS build 
+#	node has been upgraded.
+#
+
+MATLAB_PATH="/Applications/MATLAB_R2022b.app" # NOTE: Although we do not configure/compile MATLAB wrappers, we need to set this variable for testing
+
+#--------------------#
+# ISSM Configuration #
+#--------------------#
+
+ISSM_CONFIG='\
+	--prefix=${ISSM_DIR} \
+	--disable-static \
+	--enable-development \
+	--enable-debugging \
+	--with-numthreads=4 \
+	--with-python-version=3.9 \
+	--with-python-dir=/System/Volumes/Data/Library/Developer/CommandLineTools/Library/Frameworks/Python3.framework/Versions/3.9 \
+	--with-python-numpy-dir=/Library/Python/3.9/site-packages/numpy/core/include/numpy \
+	--with-fortran-lib="-L/opt/homebrew/lib/gcc/current -lgfortran" \
+	--with-mpi-include=${ISSM_DIR}/externalpackages/petsc/install/include \
+	--with-mpi-libflags="-L${ISSM_DIR}/externalpackages/petsc/install/lib -lmpi -lmpicxx -lmpifort" \
+	--with-blas-lapack-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-metis-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-parmetis-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-scalapack-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-mumps-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-hdf5-dir=${ISSM_DIR}/externalpackages/hdf5/install \
+	--with-petsc-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-gsl-dir=${ISSM_DIR}/externalpackages/gsl/install \
+	--with-boost-dir=${ISSM_DIR}/externalpackages/boost/install \
+	--with-dakota-dir=${ISSM_DIR}/externalpackages/dakota/install \
+	--with-proj-dir=${ISSM_DIR}/externalpackages/proj/install \
+	--with-triangle-dir=${ISSM_DIR}/externalpackages/triangle/install \
+	--with-chaco-dir=${ISSM_DIR}/externalpackages/chaco/install \
+	--with-m1qn3-dir=${ISSM_DIR}/externalpackages/m1qn3/install \
+	--with-semic-dir=${ISSM_DIR}/externalpackages/semic/install \
+'
+
+#-------------------#
+# External Packages #
+#-------------------#
+
+EXTERNALPACKAGES="
+	autotools	install-mac.sh
+	cmake		install.sh
+	petsc		install-3.17-mac-silicon.sh
+	gsl			install.sh
+	boost		install-1.7-mac.sh
+	dakota		install-6.2-mac.sh
+	curl		install-7-mac.sh
+	hdf5		install-1-parallel.sh
+	netcdf		install-4.7-parallel.sh
+	proj		install-6.sh
+	gdal		install-3-python.sh
+	gshhg		install.sh
+	gmt			install-6-mac.sh
+	gmsh		install-4-mac.sh
+	triangle	install-mac.sh
+	chaco		install-mac.sh
+	m1qn3		install.sh
+	semic		install.sh
+	shell2junit	install.sh
+"
+
+#---------#
+# Testing #
+#---------#
+
+# Test suites
+MATLAB_TEST=1
+PYTHON_TEST=1
+JAVASCRIPT_TEST=0
+EXAMPLES_TEST=0
+
+# Number of CPUs used in ISSM compilation
+#
+# NOTE: One is usually safer as some packages are very sensitive to parallel
+# 		compilation
+#
+NUMCPUS_INSTALL=4
+
+# Number of CPUs used in the nightly runs
+NUMCPUS_RUN=2
+
+# Nightly run options
+#
+# See documentation in test/NightlyRun/runme.* for more information.
+#
+# NOTE:
+# - Excluding 2006 until it can be debugged (file I/O)
+# - Excluding 2012 until it can be looked at by Eric ("FindParam error message: Parameter HydrologyModel not set")
+# - Excluding 2091 until it can be debugged (resource starvation)
+# - Excluding 2110:2113 until we can compile MEX modules natively for Silicon and then can debug differences in mesh generation between different platforms
+#
+MATLAB_NROPTIONS="'benchmark','slc','exclude',[2004 2006 2012 2051 2052 2053 2085 2424 2425]"
+PYTHON_NROPTIONS="--benchmark slc --exclude 2004 2006 2012 2051 2052 2053 2085 2110:2113 2424 2425"
Index: /issm/trunk/jenkins/pine_island-mac-basic
===================================================================
--- /issm/trunk/jenkins/pine_island-mac-basic	(revision 28012)
+++ /issm/trunk/jenkins/pine_island-mac-basic	(revision 28013)
@@ -31,5 +31,5 @@
 #-------------------#
 
-# List of external pakages to be installed and their installation scripts
+# List of external packages to be installed and their installation scripts
 EXTERNALPACKAGES="
 	autotools	install-mac.sh
@@ -37,5 +37,5 @@
 	petsc		install-3.12-mac.sh
 	triangle	install-mac.sh
-	chaco		install.sh
+	chaco		install-mac.sh
 	m1qn3		install.sh
 	semic		install.sh
Index: /issm/trunk/jenkins/pine_island-mac-binaries-matlab
===================================================================
--- /issm/trunk/jenkins/pine_island-mac-binaries-matlab	(revision 28012)
+++ /issm/trunk/jenkins/pine_island-mac-binaries-matlab	(revision 28013)
@@ -9,4 +9,7 @@
 # - We can disable dependency tracking in the Autotools because the binaries
 #	should always be a one-time build.
+#
+# TODO:
+# - Add static copy of libsqlite and make sure it is found during GMT configuration
 #
 
@@ -58,5 +61,5 @@
 	gmsh		install-4-mac-static.sh
 	triangle	install-mac-static.sh
-	chaco		install.sh
+	chaco		install-mac.sh
 	m1qn3		install.sh
 	semic		install.sh
Index: /issm/trunk/jenkins/pine_island-mac-binaries-python-2
===================================================================
--- /issm/trunk/jenkins/pine_island-mac-binaries-python-2	(revision 28012)
+++ /issm/trunk/jenkins/pine_island-mac-binaries-python-2	(revision 28013)
@@ -6,4 +6,7 @@
 # - We can disable dependency tracking in the Autotools because the binaries
 #	should always be a one-time build.
+#
+# TODO:
+# - Add static copy of libsqlite and make sure it is found during GMT configuration
 #
 
@@ -56,5 +59,5 @@
 	gmsh		install-4-mac-static.sh
 	triangle	install-mac-static.sh
-	chaco		install.sh
+	chaco		install-mac.sh
 	m1qn3		install.sh
 	semic		install.sh
Index: /issm/trunk/jenkins/pine_island-mac-binaries-python-3
===================================================================
--- /issm/trunk/jenkins/pine_island-mac-binaries-python-3	(revision 28012)
+++ /issm/trunk/jenkins/pine_island-mac-binaries-python-3	(revision 28013)
@@ -6,4 +6,7 @@
 # - We can disable dependency tracking in the Autotools because the binaries
 #	should always be a one-time build.
+#
+# TODO:
+# - Add static copy of libsqlite and make sure it is found during GMT configuration
 #
 
@@ -57,5 +60,5 @@
 	gmsh		install-4-mac-static.sh
 	triangle	install-mac-static.sh
-	chaco		install.sh
+	chaco		install-mac.sh
 	m1qn3		install.sh
 	semic		install.sh
Index: /issm/trunk/jenkins/pine_island-mac-dakota
===================================================================
--- /issm/trunk/jenkins/pine_island-mac-dakota	(revision 28012)
+++ /issm/trunk/jenkins/pine_island-mac-dakota	(revision 28013)
@@ -42,5 +42,5 @@
 #-------------------#
 
-#List of external pakages to be installed and their installation scripts
+#List of external packages to be installed and their installation scripts
 EXTERNALPACKAGES="
 	autotools	install-mac.sh
@@ -58,5 +58,5 @@
 	gmsh		install-4-mac.sh
 	triangle	install-mac.sh
-	chaco		install.sh
+	chaco		install-mac.sh
 	m1qn3		install.sh
 	semic		install.sh
Index: /issm/trunk/jenkins/pine_island-mac-examples
===================================================================
--- /issm/trunk/jenkins/pine_island-mac-examples	(revision 28012)
+++ /issm/trunk/jenkins/pine_island-mac-examples	(revision 28013)
@@ -59,5 +59,5 @@
 	gmsh		install-4-mac.sh
 	triangle	install-mac.sh
-	chaco		install.sh
+	chaco		install-mac.sh
 	m1qn3		install.sh
 	semic		install.sh
Index: /issm/trunk/jenkins/pine_island-mac-full
===================================================================
--- /issm/trunk/jenkins/pine_island-mac-full	(revision 28012)
+++ /issm/trunk/jenkins/pine_island-mac-full	(revision 28013)
@@ -56,5 +56,5 @@
 	gmsh		install-4-mac.sh
 	triangle	install-mac.sh
-	chaco		install.sh
+	chaco		install-mac.sh
 	m1qn3		install.sh
 	semic		install.sh
Index: /issm/trunk/jenkins/pine_island-mac-full-valgrind
===================================================================
--- /issm/trunk/jenkins/pine_island-mac-full-valgrind	(revision 28012)
+++ /issm/trunk/jenkins/pine_island-mac-full-valgrind	(revision 28013)
@@ -46,9 +46,10 @@
 	petsc		install-3.16-mac.sh
 	gsl			install.sh
-	boost		install-1.7-mac.sh
+	boost		install-1.7-mac-valgrind.sh
 	dakota		install-6.2-mac.sh
 	curl		install-7-mac.sh
 	netcdf		install-4.7-parallel.sh
 	proj		install-6.sh
+	sqlite		install.sh
 	gdal		install-3-python.sh
 	gshhg		install.sh
@@ -56,5 +57,5 @@
 	gmsh		install-4-mac.sh
 	triangle	install-mac.sh
-	chaco		install.sh
+	chaco		install-mac.sh
 	m1qn3		install.sh
 	semic		install.sh
Index: /issm/trunk/jenkins/pine_island-mac-solid_earth
===================================================================
--- /issm/trunk/jenkins/pine_island-mac-solid_earth	(revision 28012)
+++ /issm/trunk/jenkins/pine_island-mac-solid_earth	(revision 28013)
@@ -42,5 +42,5 @@
 #-------------------#
 
-#List of external pakages to be installed and their installation scripts
+#List of external packages to be installed and their installation scripts
 EXTERNALPACKAGES="
 	autotools	install-mac.sh
@@ -58,5 +58,5 @@
 	gmsh		install-4-mac.sh
 	triangle	install-mac.sh
-	chaco		install.sh
+	chaco		install-mac.sh
 	m1qn3		install.sh
 	semic		install.sh
Index: /issm/trunk/jenkins/pine_island-mac-solid_earth-lambert
===================================================================
--- /issm/trunk/jenkins/pine_island-mac-solid_earth-lambert	(revision 28012)
+++ /issm/trunk/jenkins/pine_island-mac-solid_earth-lambert	(revision 28013)
@@ -42,5 +42,5 @@
 #-------------------#
 
-#List of external pakages to be installed and their installation scripts
+#List of external packages to be installed and their installation scripts
 EXTERNALPACKAGES="
 	autotools	install-mac.sh
@@ -58,5 +58,5 @@
 	gmsh		install-4-mac.sh
 	triangle	install-mac.sh
-	chaco		install.sh
+	chaco		install-mac.sh
 	m1qn3		install.sh
 	semic		install.sh
Index: /issm/trunk/jenkins/pleiades-solid_earth
===================================================================
--- /issm/trunk/jenkins/pleiades-solid_earth	(revision 28012)
+++ /issm/trunk/jenkins/pleiades-solid_earth	(revision 28013)
@@ -51,5 +51,5 @@
 	gmsh		install-4-linux.sh
 	triangle	install-linux.sh
-	chaco		install.sh
+	chaco		install-linux.sh
 	m1qn3		install.sh
 	semic		install.sh
Index: /issm/trunk/jenkins/ross-debian_linux-adolc-ampioff
===================================================================
--- /issm/trunk/jenkins/ross-debian_linux-adolc-ampioff	(revision 28012)
+++ /issm/trunk/jenkins/ross-debian_linux-adolc-ampioff	(revision 28013)
@@ -1,7 +1,8 @@
+MATLAB_PATH="/usr/local/MATLAB/R2019b"
+
 #--------------------#
 # ISSM Configuration #
 #--------------------#
 
-MATLAB_PATH="/usr/local/MATLAB/R2019b"
 ISSM_CONFIG='\
 	--prefix=${ISSM_DIR} \
@@ -15,6 +16,7 @@
 	--without-Love \
 	--with-matlab-dir=${MATLAB_PATH} \
+	--with-python-version=3.7 \
 	--with-python-dir=/usr \
-	--with-python-numpy-dir=/usr/local/lib/python2.7/dist-packages/numpy \
+	--with-python-numpy-dir=/usr/local/lib/python3.7/dist-packages/numpy \
 	--with-fortran-lib="-L/usr/lib/x86_64-linux-gnu -lgfortran" \
 	--with-mpi-include=${ISSM_DIR}/externalpackages/petsc/install/include  \
@@ -41,5 +43,5 @@
 	autotools	install-linux.sh
 	cmake		install.sh
-	petsc		install-3.12-linux.sh
+	petsc		install-3.17-linux.sh
 	gsl			install.sh
 	triangle	install-linux.sh
@@ -76,4 +78,4 @@
 #	failure on ampioff configuration (really, likely a segmentation fault)
 #
-MATLAB_NROPTIONS="'benchmark','adolc','id',[3001:3019],'exclude',3010"
-PYTHON_NROPTIONS="--benchmark=adolc -i 3001 3002 3003 3004 3005 3006 3007 3008 3009 3010 3011 3.14 3013 3014 3015 3016 3017 3018 3019 --exclude=3010"
+MATLAB_NROPTIONS="'benchmark','adolc','exclude',[3010]"
+PYTHON_NROPTIONS="--benchmark adolc --exclude 3010"
Index: /issm/trunk/jenkins/ross-debian_linux-adolc-ampion
===================================================================
--- /issm/trunk/jenkins/ross-debian_linux-adolc-ampion	(revision 28012)
+++ /issm/trunk/jenkins/ross-debian_linux-adolc-ampion	(revision 28013)
@@ -1,7 +1,8 @@
+MATLAB_PATH="/usr/local/MATLAB/R2019b"
+
 #--------------------#
 # ISSM Configuration #
 #--------------------#
 
-MATLAB_PATH="/usr/local/MATLAB/R2019b"
 ISSM_CONFIG='\
 	--prefix=${ISSM_DIR} \
@@ -12,9 +13,9 @@
 	--without-kriging \
 	--without-kml \
-	--without-Sealevelchange \
 	--without-Love \
 	--with-matlab-dir=${MATLAB_PATH} \
+	--with-python-version=3.7 \
 	--with-python-dir=/usr \
-	--with-python-numpy-dir=/usr/local/lib/python2.7/dist-packages/numpy \
+	--with-python-numpy-dir=/usr/local/lib/python3.7/dist-packages/numpy \
 	--with-fortran-lib="-L/usr/lib/x86_64-linux-gnu -lgfortran" \
 	--with-mpi-include=${ISSM_DIR}/externalpackages/petsc/install/include  \
@@ -42,5 +43,5 @@
 	autotools		install-linux.sh
 	cmake			install.sh
-	petsc			install-3.12-linux.sh
+	petsc			install-3.17-linux.sh
 	gsl				install.sh
 	triangle		install-linux.sh
@@ -78,4 +79,4 @@
 #	failure on ampioff configuration (really, likely a segmentation fault)
 #
-MATLAB_NROPTIONS="'benchmark','adolc','id',[3001, 3002, 3003, 3004, 3005, 3006, 3007, 3008, 3009, 3010, 3101, 3102, 3103, 3104, 3105, 3106, 3107, 3108, 3109, 3119],'exclude',3010"
-PYTHON_NROPTIONS="--benchmark=adolc -i 3001 3002 3003 3004 3005 3006 3007 3008 3009 3010 3101 3102 3103 3104 3105 3106 3107 3108 3109 3119 --exclude=3010"
+MATLAB_NROPTIONS="'benchmark','adolc','exclude',[3010 3019 3020 3021 3110]"
+PYTHON_NROPTIONS="--benchmark adolc --exclude 3010 3019 3020 3021 3110"
Index: /issm/trunk/jenkins/ross-debian_linux-basic
===================================================================
--- /issm/trunk/jenkins/ross-debian_linux-basic	(revision 28012)
+++ /issm/trunk/jenkins/ross-debian_linux-basic	(revision 28013)
@@ -1,7 +1,8 @@
+MATLAB_PATH="/usr/local/MATLAB/R2019b"
+
 #--------------------#
 # ISSM Configuration #
 #--------------------#
 
-MATLAB_PATH="/usr/local/MATLAB/R2019b"
 ISSM_CONFIG='\
 	--prefix=${ISSM_DIR} \
@@ -33,7 +34,7 @@
 	autotools	install-linux.sh
 	cmake		install.sh
-	petsc		install-3.12-linux.sh
+	petsc		install-3.17-linux.sh
 	triangle	install-linux.sh
-	chaco		install.sh
+	chaco		install-linux.sh
 	m1qn3		install.sh
 	semic		install.sh
Index: sm/trunk/jenkins/ross-debian_linux-binaries
===================================================================
--- /issm/trunk/jenkins/ross-debian_linux-binaries	(revision 28012)
+++ 	(revision )
@@ -1,76 +1,0 @@
-#--------------------#
-# ISSM Configuration #
-#--------------------#
-
-MATLAB_PATH="/usr/local/MATLAB/R2019b"
-
-# NOTE:
-# - We can disable dependency tracking in the Autotools because the binaries
-#	should always be a one-time build.
-# - libgfortran will not be available in $ISSM_DIR/lib at compile time: it is
-#	copied by packaging script.
-#
-ISSM_CONFIG='\
-	--prefix=${ISSM_DIR} \
-	--enable-standalone-executables \
-	--enable-standalone-modules \
-	--enable-standalone-libraries \
-	--disable-dependency-tracking \
-	--with-numthreads=4 \
-	--with-pic \
-	--with-matlab-dir=${MATLAB_PATH} \
-	--with-fortran-lib="-L${ISSM_DIR}/lib -lgfortran" \
-	--with-mpi-include=${ISSM_DIR}/externalpackages/petsc/install/include \
-	--with-mpi-libflags="-L${ISSM_DIR}/externalpackages/petsc/install/lib -lmpi -lmpicxx -lmpifort" \
-	--with-blas-lapack-dir=${ISSM_DIR}/externalpackages/petsc/install \
-	--with-metis-dir=${ISSM_DIR}/externalpackages/petsc/install \
-	--with-parmetis-dir=${ISSM_DIR}/externalpackages/petsc/install \
-	--with-scalapack-dir=${ISSM_DIR}/externalpackages/petsc/install \
-	--with-mumps-dir=${ISSM_DIR}/externalpackages/petsc/install \
-	--with-petsc-dir=${ISSM_DIR}/externalpackages/petsc/install \
-	--with-triangle-dir=${ISSM_DIR}/externalpackages/triangle/install \
-	--with-chaco-dir=${ISSM_DIR}/externalpackages/chaco/install \
-	--with-m1qn3-dir=${ISSM_DIR}/externalpackages/m1qn3/install \
-	--with-semic-dir=${ISSM_DIR}/externalpackages/semic/install \
-'
-#-------------------#
-# External Packages #
-#-------------------#
-
-EXTERNALPACKAGES="
-	autotools	install-linux.sh
-	cmake		install.sh
-	petsc		install-3.12-linux-static.sh
-	triangle	install-linux-static.sh
-	chaco		install.sh
-	m1qn3		install.sh
-	semic		install.sh
-	shell2junit	install.sh
-"
-
-#---------#
-# Testing #
-#---------#
-
-# Test suites
-MATLAB_TEST=0
-PYTHON_TEST=0
-JAVASCRIPT_TEST=0
-EXAMPLES_TEST=0
-
-# Number of CPUs used in ISSM compilation
-#
-# NOTE: One is usually safer as some packages are very sensitive to parallel
-# 		compilation.
-#
-NUMCPUS_INSTALL=8
-
-# Number of CPUs used in the nightly runs
-NUMCPUS_RUN=1
-
-# Nightly run options
-#
-# See documentation in test/NightlyRun/runme.* for more information.
-#
-MATLAB_NROPTIONS=""
-PYTHON_NROPTIONS=""
Index: /issm/trunk/jenkins/ross-debian_linux-binaries-matlab
===================================================================
--- /issm/trunk/jenkins/ross-debian_linux-binaries-matlab	(revision 28012)
+++ /issm/trunk/jenkins/ross-debian_linux-binaries-matlab	(revision 28013)
@@ -1,7 +1,7 @@
+MATLAB_PATH="/usr/local/MATLAB/R2019b"
+
 #--------------------#
 # ISSM Configuration #
 #--------------------#
-
-MATLAB_PATH="/usr/local/MATLAB/R2019b"
 
 # NOTE:
@@ -11,4 +11,5 @@
 #	copied by packaging script.
 #
+
 ISSM_CONFIG='\
 	--prefix=${ISSM_DIR} \
@@ -26,5 +27,5 @@
 	--with-scalapack-dir=${ISSM_DIR}/externalpackages/petsc/install \
 	--with-mumps-dir=${ISSM_DIR}/externalpackages/petsc/install \
-	--with-hdf5-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-hdf5-dir=${ISSM_DIR}/externalpackages/hdf5/install \
 	--with-petsc-dir=${ISSM_DIR}/externalpackages/petsc/install \
 	--with-gsl-dir=${ISSM_DIR}/externalpackages/gsl/install \
@@ -44,9 +45,10 @@
 	autotools	install-linux.sh
 	cmake		install.sh
-	petsc		install-3.12-linux-static.sh
+	petsc		install-3.17-linux-static.sh
 	gsl			install-static.sh
 	boost		install-1.7-linux-static.sh
 	dakota		install-6.2-linux-static.sh
 	curl		install-7-linux-static.sh
+	hdf5		install-1-parallel-static.sh
 	netcdf		install-4.7-parallel-static.sh
 	proj		install-6-static.sh
@@ -56,5 +58,5 @@
 	gmsh		install-4-linux-static.sh
 	triangle	install-linux-static.sh
-	chaco		install.sh
+	chaco		install-linux.sh
 	m1qn3		install.sh
 	semic		install.sh
Index: sm/trunk/jenkins/ross-debian_linux-binaries-python-2
===================================================================
--- /issm/trunk/jenkins/ross-debian_linux-binaries-python-2	(revision 28012)
+++ 	(revision )
@@ -1,89 +1,0 @@
-#--------------------#
-# ISSM Configuration #
-#--------------------#
-
-# NOTE:
-# - We can disable dependency tracking in the Autotools because the binaries
-#	should always be a one-time build.
-# - libgfortran will not be available in $ISSM_DIR/lib at compile time: it is
-#	copied by packaging script.
-#
-ISSM_CONFIG='\
-	--prefix=${ISSM_DIR} \
-	--enable-standalone-executables \
-	--enable-standalone-modules \
-	--enable-standalone-libraries \
-	--disable-dependency-tracking \
-	--with-pic \
-	--with-python-dir=/usr \
-	--with-python-numpy-dir=/usr/local/lib/python2.7/dist-packages/numpy \
-	--with-fortran-lib="-L${ISSM_DIR}/lib -lgfortran" \
-	--with-mpi-include=${ISSM_DIR}/externalpackages/petsc/install/include \
-	--with-mpi-libflags="-L${ISSM_DIR}/externalpackages/petsc/install/lib -lmpi -lmpicxx -lmpifort" \
-	--with-blas-lapack-dir=${ISSM_DIR}/externalpackages/petsc/install \
-	--with-metis-dir=${ISSM_DIR}/externalpackages/petsc/install \
-	--with-parmetis-dir=${ISSM_DIR}/externalpackages/petsc/install \
-	--with-scalapack-dir=${ISSM_DIR}/externalpackages/petsc/install \
-	--with-mumps-dir=${ISSM_DIR}/externalpackages/petsc/install \
-	--with-hdf5-dir=${ISSM_DIR}/externalpackages/petsc/install \
-	--with-petsc-dir=${ISSM_DIR}/externalpackages/petsc/install \
-	--with-gsl-dir=${ISSM_DIR}/externalpackages/gsl/install \
-	--with-boost-dir=${ISSM_DIR}/externalpackages/boost/install \
-	--with-dakota-dir=${ISSM_DIR}/externalpackages/dakota/install \
-	--with-proj-dir=${ISSM_DIR}/externalpackages/proj/install \
-	--with-triangle-dir=${ISSM_DIR}/externalpackages/triangle/install \
-	--with-chaco-dir=${ISSM_DIR}/externalpackages/chaco/install \
-	--with-m1qn3-dir=${ISSM_DIR}/externalpackages/m1qn3/install \
-	--with-semic-dir=${ISSM_DIR}/externalpackages/semic/install \
-'
-#-------------------#
-# External Packages #
-#-------------------#
-
-EXTERNALPACKAGES="
-	autotools	install-linux.sh
-	cmake		install.sh
-	petsc		install-3.12-linux-static.sh
-	gsl			install-static.sh
-	boost		install-1.7-linux-static.sh
-	dakota		install-6.2-linux-static.sh
-	curl		install-7-linux-static.sh
-	netcdf		install-4.7-parallel-static.sh
-	proj		install-6-static.sh
-	gdal		install-3-python-static.sh
-	gshhg		install.sh
-	gmt			install-6-linux-static.sh
-	gmsh		install-4-linux-static.sh
-	triangle	install-linux-static.sh
-	chaco		install.sh
-	m1qn3		install.sh
-	semic		install.sh
-	shell2junit	install.sh
-"
-
-#---------#
-# Testing #
-#---------#
-
-# Test suites
-MATLAB_TEST=0
-PYTHON_TEST=0
-JAVASCRIPT_TEST=0
-EXAMPLES_TEST=0
-
-# Number of CPUs used in ISSM compilation
-#
-# NOTE: One is usually safer as some packages are very sensitive to parallel
-# 		compilation.
-#
-NUMCPUS_INSTALL=8
-
-# Number of CPUs used in the nightly runs
-NUMCPUS_RUN=1
-
-# Nightly run options
-#
-# See documentation in test/NightlyRun/runme.* for more information.
-#
-MATLAB_NROPTIONS=""
-PYTHON_NROPTIONS=""
Index: /issm/trunk/jenkins/ross-debian_linux-binaries-python-3
===================================================================
--- /issm/trunk/jenkins/ross-debian_linux-binaries-python-3	(revision 28012)
+++ /issm/trunk/jenkins/ross-debian_linux-binaries-python-3	(revision 28013)
@@ -9,4 +9,5 @@
 #	copied by packaging script.
 #
+
 ISSM_CONFIG='\
 	--prefix=${ISSM_DIR} \
@@ -16,6 +17,6 @@
 	--disable-dependency-tracking \
 	--with-pic \
+	--with-python-version=3.7 \
 	--with-python-dir=/usr \
-	--with-python-version=3.7 \
 	--with-python-numpy-dir=/usr/local/lib/python3.7/dist-packages/numpy \
 	--with-fortran-lib="-L${ISSM_DIR}/lib -lgfortran" \
@@ -27,5 +28,5 @@
 	--with-scalapack-dir=${ISSM_DIR}/externalpackages/petsc/install \
 	--with-mumps-dir=${ISSM_DIR}/externalpackages/petsc/install \
-	--with-hdf5-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-hdf5-dir=${ISSM_DIR}/externalpackages/hdf5/install \
 	--with-petsc-dir=${ISSM_DIR}/externalpackages/petsc/install \
 	--with-gsl-dir=${ISSM_DIR}/externalpackages/gsl/install \
@@ -45,9 +46,10 @@
 	autotools	install-linux.sh
 	cmake		install.sh
-	petsc		install-3.12-linux-static.sh
+	petsc		install-3.17-linux-static.sh
 	gsl			install-static.sh
 	boost		install-1.7-linux-static.sh
 	dakota		install-6.2-linux-static.sh
 	curl		install-7-linux-static.sh
+	hdf5		install-1-parallel-static.sh
 	netcdf		install-4.7-parallel-static.sh
 	proj		install-6-static.sh
@@ -57,5 +59,5 @@
 	gmsh		install-4-linux-static.sh
 	triangle	install-linux-static.sh
-	chaco		install.sh
+	chaco		install-linux.sh
 	m1qn3		install.sh
 	semic		install.sh
Index: /issm/trunk/jenkins/ross-debian_linux-codipack
===================================================================
--- /issm/trunk/jenkins/ross-debian_linux-codipack	(revision 28012)
+++ /issm/trunk/jenkins/ross-debian_linux-codipack	(revision 28013)
@@ -1,7 +1,8 @@
+MATLAB_PATH="/usr/local/MATLAB/R2019b"
+
 #--------------------#
 # ISSM Configuration #
 #--------------------#
 
-MATLAB_PATH="/usr/local/MATLAB/R2019b"
 ISSM_CONFIG='\
 	--prefix=${ISSM_DIR} \
@@ -39,8 +40,8 @@
 	autotools	install-linux.sh
 	cmake		install.sh
-	petsc		install-3.12-linux.sh
+	petsc		install-3.17-linux.sh
 	gsl			install.sh
 	triangle	install-linux.sh
-	chaco		install.sh
+	chaco		install-linux.sh
 	m1qn3		install.sh
 	semic		install.sh
@@ -74,4 +75,4 @@
 # See documentation in test/NightlyRun/runme.* for more information.
 #
-MATLAB_NROPTIONS="'benchmark','all','id',[3015,3119,3480,3201:3205]"
+MATLAB_NROPTIONS="'benchmark','all','id',[3015,3119,3480,3201:3206]"
 PYTHON_NROPTIONS="--benchmark='all' -i 3015 3119 3480"
Index: /issm/trunk/jenkins/ross-debian_linux-dakota
===================================================================
--- /issm/trunk/jenkins/ross-debian_linux-dakota	(revision 28012)
+++ /issm/trunk/jenkins/ross-debian_linux-dakota	(revision 28013)
@@ -1,7 +1,7 @@
+MATLAB_PATH="/usr/local/MATLAB/R2019b"
+
 #--------------------#
 # ISSM Configuration #
 #--------------------#
-
-MATLAB_PATH="/usr/local/MATLAB/R2019b"
 
 ISSM_CONFIG='\
@@ -14,6 +14,7 @@
 	--with-numthreads=4 \
 	--with-matlab-dir=${MATLAB_PATH} \
+	--with-python-version=3.7 \
 	--with-python-dir=/usr \
-	--with-python-numpy-dir=/usr/local/lib/python2.7/dist-packages/numpy \
+	--with-python-numpy-dir=/usr/local/lib/python3.7/dist-packages/numpy \
 	--with-fortran-lib="-L/usr/lib/x86_64-linux-gnu -lgfortran" \
 	--with-mpi-include=${ISSM_DIR}/externalpackages/petsc/install/include \
@@ -24,5 +25,5 @@
 	--with-scalapack-dir=${ISSM_DIR}/externalpackages/petsc/install \
 	--with-mumps-dir=${ISSM_DIR}/externalpackages/petsc/install \
-	--with-hdf5-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-hdf5-dir=${ISSM_DIR}/externalpackages/hdf5/install \
 	--with-petsc-dir=${ISSM_DIR}/externalpackages/petsc/install \
 	--with-gsl-dir=${ISSM_DIR}/externalpackages/gsl/install \
@@ -43,9 +44,10 @@
 	autotools		install-linux.sh
 	cmake			install.sh
-	petsc			install-3.12-linux.sh
+	petsc			install-3.17-linux.sh
 	gsl				install.sh
 	boost			install-1.7-linux.sh
 	dakota			install-6.2-linux.sh
 	curl			install-7-linux.sh
+	hdf5			install-1-parallel.sh
 	netcdf			install-4.7-parallel.sh
 	proj			install-6.sh
@@ -55,5 +57,5 @@
 	gmsh			install-4-linux.sh
 	triangle		install-linux.sh
-	chaco			install.sh
+	chaco			install-linux.sh
 	m1qn3			install.sh
 	semic			install.sh
Index: /issm/trunk/jenkins/ross-debian_linux-full
===================================================================
--- /issm/trunk/jenkins/ross-debian_linux-full	(revision 28012)
+++ /issm/trunk/jenkins/ross-debian_linux-full	(revision 28013)
@@ -1,10 +1,10 @@
 # NOTE: This configuration adds solid earth and Dakota capabilities to the 
 #		basic build.
+
+MATLAB_PATH="/usr/local/MATLAB/R2019b"
 
 #--------------------#
 # ISSM Configuration #
 #--------------------#
-
-MATLAB_PATH="/usr/local/MATLAB/R2019b"
 
 ISSM_CONFIG='\
@@ -15,6 +15,7 @@
 	--with-numthreads=4 \
 	--with-matlab-dir=${MATLAB_PATH} \
+	--with-python-version=3.7 \
 	--with-python-dir=/usr \
-	--with-python-numpy-dir=/usr/local/lib/python2.7/dist-packages/numpy \
+	--with-python-numpy-dir=/usr/local/lib/python3.7/dist-packages/numpy \
 	--with-fortran-lib="-L/usr/lib/x86_64-linux-gnu -lgfortran" \
 	--with-mpi-include="${ISSM_DIR}/externalpackages/petsc/install/include" \
@@ -25,5 +26,5 @@
 	--with-scalapack-dir="${ISSM_DIR}/externalpackages/petsc/install" \
 	--with-mumps-dir="${ISSM_DIR}/externalpackages/petsc/install" \
-	--with-hdf5-dir="${ISSM_DIR}/externalpackages/petsc/install" \
+	--with-hdf5-dir="${ISSM_DIR}/externalpackages/hdf5/install" \
 	--with-petsc-dir="${ISSM_DIR}/externalpackages/petsc/install" \
 	--with-gsl-dir="${ISSM_DIR}/externalpackages/gsl/install" \
@@ -44,9 +45,10 @@
 	autotools	install-linux.sh
 	cmake		install.sh
-	petsc		install-3.12-linux.sh
+	petsc		install-3.17-linux.sh
 	gsl			install.sh
 	boost		install-1.7-linux.sh
 	dakota		install-6.2-linux.sh
 	curl		install-7-linux.sh
+	hdf5		install-1-parallel.sh
 	netcdf		install-4.7-parallel.sh
 	proj		install-6.sh
@@ -56,5 +58,5 @@
 	gmsh		install-4-linux.sh
 	triangle	install-linux.sh
-	chaco		install.sh
+	chaco		install-linux.sh
 	m1qn3		install.sh
 	semic		install.sh
Index: /issm/trunk/jenkins/ross-debian_linux-full-mplapack
===================================================================
--- /issm/trunk/jenkins/ross-debian_linux-full-mplapack	(revision 28012)
+++ /issm/trunk/jenkins/ross-debian_linux-full-mplapack	(revision 28013)
@@ -1,10 +1,7 @@
-# NOTE: This configuration adds solid earth and Dakota capabilities to the 
-#		basic build.
+MATLAB_PATH="/usr/local/MATLAB/R2019b"
 
 #--------------------#
 # ISSM Configuration #
 #--------------------#
-
-MATLAB_PATH="/usr/local/MATLAB/R2019b"
 
 ISSM_CONFIG='\
@@ -16,6 +13,7 @@
 	--with-numthreads=4 \
 	--with-matlab-dir=${MATLAB_PATH} \
+	--with-python-version=3.7 \
 	--with-python-dir=/usr \
-	--with-python-numpy-dir=/usr/local/lib/python2.7/dist-packages/numpy \
+	--with-python-numpy-dir=/usr/local/lib/python3.7/dist-packages/numpy \
 	--with-fortran-lib="-L/usr/lib/x86_64-linux-gnu -lgfortran" \
 	--with-mpi-include="${ISSM_DIR}/externalpackages/petsc/install/include" \
@@ -26,5 +24,5 @@
 	--with-scalapack-dir="${ISSM_DIR}/externalpackages/petsc/install" \
 	--with-mumps-dir="${ISSM_DIR}/externalpackages/petsc/install" \
-	--with-hdf5-dir="${ISSM_DIR}/externalpackages/petsc/install" \
+	--with-hdf5-dir="${ISSM_DIR}/externalpackages/hdf5/install" \
 	--with-petsc-dir="${ISSM_DIR}/externalpackages/petsc/install" \
 	--with-mplapack-dir="${ISSM_DIR}/externalpackages/mplapack/install" \
@@ -46,5 +44,5 @@
 	autotools	install-linux.sh
 	cmake		install.sh
-	petsc		install-3.12-linux.sh
+	petsc		install-3.17-linux.sh
 	mplapack	install-2-linux.sh
 	gsl			install.sh
@@ -52,4 +50,5 @@
 	dakota		install-6.2-linux.sh
 	curl		install-7-linux.sh
+	hdf5		install-1-parallel.sh
 	netcdf		install-4.7-parallel.sh
 	proj		install-6.sh
@@ -59,5 +58,5 @@
 	gmsh		install-4-linux.sh
 	triangle	install-linux.sh
-	chaco		install.sh
+	chaco		install-linux.sh
 	m1qn3		install.sh
 	semic		install.sh
Index: /issm/trunk/jenkins/ross-debian_linux-full-valgrind
===================================================================
--- /issm/trunk/jenkins/ross-debian_linux-full-valgrind	(revision 28012)
+++ /issm/trunk/jenkins/ross-debian_linux-full-valgrind	(revision 28013)
@@ -1,4 +1,6 @@
 # NOTE: This configuration adds solid earth and Dakota capabilities to the 
 #		basic build, as well as the ability to debug with Valgrind.
+
+MATLAB_PATH="/usr/local/MATLAB/R2019b"
 
 #--------------------#
@@ -6,15 +8,13 @@
 #--------------------#
 
-MATLAB_PATH="/usr/local/MATLAB/R2019b"
-
 ISSM_CONFIG='\
 	--prefix=${ISSM_DIR} \
 	--disable-static \
 	--enable-development \
-	--enable-debugging \
-	--with-numthreads=4 \
+	--enable-debugging \	--with-numthreads=4 \
 	--with-matlab-dir=${MATLAB_PATH} \
+	--with-python-version=3.7 \
 	--with-python-dir=/usr \
-	--with-python-numpy-dir=/usr/local/lib/python2.7/dist-packages/numpy \
+	--with-python-numpy-dir=/usr/local/lib/python3.7/dist-packages/numpy \
 	--with-fortran-lib="-L/usr/lib/x86_64-linux-gnu -lgfortran" \
 	--with-mpi-include=${ISSM_DIR}/externalpackages/petsc/install/include \
@@ -25,5 +25,5 @@
 	--with-scalapack-dir=${ISSM_DIR}/externalpackages/petsc/install \
 	--with-mumps-dir=${ISSM_DIR}/externalpackages/petsc/install \
-	--with-hdf5-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-hdf5-dir=${ISSM_DIR}/externalpackages/hdf5/install \
 	--with-petsc-dir=${ISSM_DIR}/externalpackages/petsc/install \
 	--with-gsl-dir=${ISSM_DIR}/externalpackages/gsl/install \
@@ -44,9 +44,10 @@
 	autotools	install-linux.sh
 	cmake		install.sh
-	petsc		install-3.12-linux.sh
+	petsc		install-3.17-linux.sh
 	gsl			install.sh
 	boost		install-1.7-linux-valgrind.sh
 	dakota		install-6.2-linux.sh
 	curl		install-7-linux.sh
+	hdf5		install-1-parallel.sh
 	netcdf		install-4.7-parallel.sh
 	proj		install-6.sh
@@ -56,5 +57,5 @@
 	gmsh		install-4-linux.sh
 	triangle	install-linux.sh
-	chaco		install.sh
+	chaco		install-linux.sh
 	m1qn3		install.sh
 	semic		install.sh
Index: /issm/trunk/jenkins/ross-debian_linux-gia
===================================================================
--- /issm/trunk/jenkins/ross-debian_linux-gia	(revision 28012)
+++ /issm/trunk/jenkins/ross-debian_linux-gia	(revision 28013)
@@ -1,7 +1,8 @@
+MATLAB_PATH="/usr/local/MATLAB/R2019b"
+
 #--------------------#
 # ISSM Configuration #
 #--------------------#
 
-MATLAB_PATH="/usr/local/MATLAB/R2019b"
 ISSM_CONFIG='\
 	--prefix=${ISSM_DIR} \
@@ -12,6 +13,7 @@
 	--with-numthreads=4
 	--with-matlab-dir=${MATLAB_PATH} \
+	--with-python-version=3.7 \
 	--with-python-dir=/usr \
-	--with-python-numpy-dir=/usr/lib/python2.7/dist-packages/numpy \
+	--with-python-numpy-dir=/usr/local/lib/python3.7/dist-packages/numpy \
 	--with-fortran-lib="-L/usr/lib/x86_64-linux-gnu -lgfortran" \
 	--with-mpi-include=${ISSM_DIR}/externalpackages/petsc/install/include \
@@ -34,5 +36,5 @@
 	autotools	install-linux.sh
 	cmake		install.sh
-	petsc		install-3.12-linux.sh
+	petsc		install-3.17-linux.sh
 	triangle	install-linux.sh
 	math77		install.sh
Index: /issm/trunk/jenkins/ross-debian_linux-iceocean
===================================================================
--- /issm/trunk/jenkins/ross-debian_linux-iceocean	(revision 28012)
+++ /issm/trunk/jenkins/ross-debian_linux-iceocean	(revision 28013)
@@ -1,7 +1,8 @@
+MATLAB_PATH="/usr/local/MATLAB/R2019b"
+
 #--------------------#
 # ISSM Configuration #
 #--------------------#
 
-MATLAB_PATH="/usr/local/MATLAB/R2019b"
 ISSM_CONFIG='\
 	--prefix=${ISSM_DIR} \
@@ -34,7 +35,7 @@
 	autotools	install-linux.sh
 	cmake		install.sh
-	petsc		install-3.12-linux.sh
+	petsc		install-3.17-linux.sh
 	triangle	install-linux.sh
-	chaco		install.sh
+	chaco		install-linux.sh
 	m1qn3		install.sh
 	semic		install.sh
@@ -66,7 +67,7 @@
 # See documentation in test/NightlyRun/runme.* for more information.
 #
-# NOTE: Currently not including 4003 while Dimitri and Helenen work on the
+# NOTE: Currently not including 4002:4003 while Dimitri and Helene work on the
 #		coupling
 #
-MATLAB_NROPTIONS="'benchmark','all','id',[4001,4002]"
+MATLAB_NROPTIONS="'benchmark','all','id',[4001]"
 PYTHON_NROPTIONS=""
Index: /issm/trunk/jenkins/ross-debian_linux-python
===================================================================
--- /issm/trunk/jenkins/ross-debian_linux-python	(revision 28012)
+++ /issm/trunk/jenkins/ross-debian_linux-python	(revision 28013)
@@ -9,6 +9,6 @@
 	--enable-debugging \
 	--with-numthreads=4 \
+	--with-python-version=3.7 \
 	--with-python-dir=/usr \
-	--with-python-version=3.7 \
 	--with-python-numpy-dir=/usr/local/lib/python3.7/dist-packages/numpy \
 	--with-fortran-lib="-L/usr/lib/x86_64-linux-gnu -lgfortran" \
@@ -20,5 +20,5 @@
 	--with-scalapack-dir=${ISSM_DIR}/externalpackages/petsc/install \
 	--with-mumps-dir=${ISSM_DIR}/externalpackages/petsc/install \
-	--with-hdf5-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-hdf5-dir=${ISSM_DIR}/externalpackages/hdf5/install \
 	--with-petsc-dir=${ISSM_DIR}/externalpackages/petsc/install \
 	--with-gsl-dir=${ISSM_DIR}/externalpackages/gsl/install \
@@ -39,9 +39,10 @@
 	autotools	install-linux.sh
 	cmake		install.sh
-	petsc		install-3.12-linux.sh
+	petsc		install-3.17-linux.sh
 	gsl			install.sh
 	boost		install-1.7-linux.sh
 	dakota		install-6.2-linux.sh
 	curl		install-7-linux.sh
+	hdf5		install-1-parallel.sh
 	netcdf		install-4.7-parallel.sh
 	proj		install-6.sh
@@ -51,5 +52,5 @@
 	gmsh		install-4-linux.sh
 	triangle	install-linux.sh
-	chaco		install.sh
+	chaco		install-linux.sh
 	m1qn3		install.sh
 	semic		install.sh
@@ -83,8 +84,5 @@
 # NOTE:
 # - Errors are large for tests 234, 418, and 420 under Python
-# - Tests 2002, 2003, 2010, 2101, and 2021 are excluded as Gmsh produces 
-#	different-sized meshes on macOS and Linux for 3d objects (archives are 
-#	generated on macOS).
 #
 MATLAB_NROPTIONS=""
-PYTHON_NROPTIONS="--exclude 234 418 420 2002 2003 2010 2021 2101"
+PYTHON_NROPTIONS="--exclude 234 418 420"
Index: /issm/trunk/jenkins/ross-debian_linux-solid_earth
===================================================================
--- /issm/trunk/jenkins/ross-debian_linux-solid_earth	(revision 28012)
+++ /issm/trunk/jenkins/ross-debian_linux-solid_earth	(revision 28013)
@@ -1,7 +1,7 @@
+MATLAB_PATH="/usr/local/MATLAB/R2019b"
+
 #--------------------#
 # ISSM Configuration #
 #--------------------#
-
-MATLAB_PATH="/usr/local/MATLAB/R2019b"
 
 ISSM_CONFIG='\
@@ -12,6 +12,7 @@
 	--with-numthreads=4 \
 	--with-matlab-dir=${MATLAB_PATH} \
+	--with-python-version=3.7 \
 	--with-python-dir=/usr \
-	--with-python-numpy-dir=/usr/local/lib/python2.7/dist-packages/numpy \
+	--with-python-numpy-dir=/usr/local/lib/python3.7/dist-packages/numpy \
 	--with-fortran-lib="-L/usr/lib/x86_64-linux-gnu -lgfortran" \
 	--with-mpi-include=${ISSM_DIR}/externalpackages/petsc/install/include \
@@ -22,5 +23,5 @@
 	--with-scalapack-dir=${ISSM_DIR}/externalpackages/petsc/install \
 	--with-mumps-dir=${ISSM_DIR}/externalpackages/petsc/install \
-	--with-hdf5-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-hdf5-dir=${ISSM_DIR}/externalpackages/hdf5/install \
 	--with-petsc-dir=${ISSM_DIR}/externalpackages/petsc/install \
 	--with-gsl-dir=${ISSM_DIR}/externalpackages/gsl/install \
@@ -41,9 +42,10 @@
 	autotools		install-linux.sh
 	cmake			install.sh
-	petsc			install-3.12-linux.sh
+	petsc			install-3.17-linux.sh
 	gsl				install.sh
 	boost			install-1.7-linux.sh
 	dakota			install-6.2-linux.sh
 	curl			install-7-linux.sh
+	hdf5			install-1-parallel.sh
 	netcdf			install-4.7-parallel.sh
 	proj			install-6.sh
@@ -53,5 +55,5 @@
 	gmsh			install-4-linux.sh
 	triangle		install-linux.sh
-	chaco			install.sh
+	chaco			install-linux.sh
 	m1qn3			install.sh
 	semic			install.sh
@@ -90,4 +92,4 @@
 # - Excluding 2012 until it can be looked at by Eric ("FindParam error message: Parameter HydrologyModel not set")
 #
-MATLAB_NROPTIONS="'benchmark','slc','exclude',[2004 2006 2012 2021 2051 2052 2053 2085 2090 2101 2424 2425]"
-PYTHON_NROPTIONS="--benchmark slc --exclude 2004 2006 2012 2021 2051 2052 2053 2085 2090 2101 2424 2425"
+MATLAB_NROPTIONS="'benchmark','slc','exclude',[2004 2006 2012 2051 2052 2053 2085 2424 2425]"
+PYTHON_NROPTIONS="--benchmark slc --exclude 2004 2006 2012 2021 2051 2052 2053 2085 2424 2425"
Index: /issm/trunk/jenkins/ross-win-msys2-gcc-msmpi-basic
===================================================================
--- /issm/trunk/jenkins/ross-win-msys2-gcc-msmpi-basic	(revision 28012)
+++ /issm/trunk/jenkins/ross-win-msys2-gcc-msmpi-basic	(revision 28013)
@@ -1,2 +1,4 @@
+MATLAB_PATH=$(cygpath -u $(cygpath -ms "/c/Program Files/MATLAB/R2019b"))
+
 ISSM_CONFIG='\
 	--prefix=${ISSM_DIR} \
@@ -23,5 +25,5 @@
 #-------------------#
 
-#List of external pakages to be installed and their installation scripts
+#List of external packages to be installed and their installation scripts
 EXTERNALPACKAGES="
 	metis		install-5-win-msys2-gcc.sh
@@ -31,5 +33,5 @@
 	scalapack	install-2-win-msys2-gcc-msmpi.sh
 	mumps		install-5-win-msys2-gcc-msmpi.sh
-	petsc		install-3.12-win-msys2-gcc-msmpi.sh
+	petsc		install-3.14-win-msys2-gcc-msmpi.sh
 	triangle	install-win-msys2-gcc.sh
 	shell2junit	install.sh
Index: /issm/trunk/jenkins/ross-win-msys2-mingw-msmpi-basic
===================================================================
--- /issm/trunk/jenkins/ross-win-msys2-mingw-msmpi-basic	(revision 28012)
+++ /issm/trunk/jenkins/ross-win-msys2-mingw-msmpi-basic	(revision 28013)
@@ -9,7 +9,7 @@
 	--with-matlab-dir="${MATLAB_PATH}" \
 	--with-mpi-include="${MSMPI_ROOT}/include" \
-	--with-mpi-libdir="${MSMPI_ROOT}/lib" \
-	--with-mpi-libflags="-lmsmpi" \
-	--with-fortran-lib="-L/c/msys64/mingw64/lib/gcc/x86_64-w64-mingw32/10.2.0/ -lgfortran" \
+	--with-mpi-libdir="-Wl,-L${MSMPI_ROOT}/lib" \
+	--with-mpi-libflags="-Wl,-lmsmpi" \
+	--with-fortran-lib="-Wl,-L/c/msys64/mingw64/lib/gcc/x86_64-w64-mingw32/13.2.0 -Wl,-lgfortran" \
 	--with-metis-dir="${ISSM_DIR}/externalpackages/petsc/install" \
 	--with-parmetis-dir="${ISSM_DIR}/externalpackages/petsc/install" \
@@ -28,5 +28,5 @@
 #-------------------#
 
-#List of external pakages to be installed and their installation scripts
+#List of external packages to be installed and their installation scripts
 EXTERNALPACKAGES="
 	msmpi		install.sh
Index: /issm/trunk/jenkins/ross-win-msys2-mingw-msmpi-binaries-matlab
===================================================================
--- /issm/trunk/jenkins/ross-win-msys2-mingw-msmpi-binaries-matlab	(revision 28013)
+++ /issm/trunk/jenkins/ross-win-msys2-mingw-msmpi-binaries-matlab	(revision 28013)
@@ -0,0 +1,65 @@
+MATLAB_PATH=$(cygpath -u $(cygpath -ms "/c/Program Files/MATLAB/R2019b"))
+
+ISSM_CONFIG='\
+	--prefix=${ISSM_DIR} \
+	--enable-standalone-executables \
+	--enable-standalone-modules \
+	--enable-standalone-libraries \
+	--disable-dependency-tracking \
+	--with-vendor="win-msys2" \
+	--with-matlab-dir="${MATLAB_PATH}" \
+	--with-fortran-lib="-Wl,-L/c/msys64/mingw64/lib/gcc/x86_64-w64-mingw32/13.2.0 -Wl,-lgfortran" \
+	--with-mpi-include="${MSMPI_ROOT}/include" \
+	--with-mpi-libdir="-Wl,-L${MSMPI_ROOT}/lib" \
+	--with-mpi-libflags="-Wl,-lmsmpi" \
+	--with-metis-dir="${ISSM_DIR}/externalpackages/petsc/install" \
+	--with-parmetis-dir="${ISSM_DIR}/externalpackages/petsc/install" \
+	--with-blas-lapack-dir="${ISSM_DIR}/externalpackages/petsc/install" \
+	--with-scalapack-dir="${ISSM_DIR}/externalpackages/petsc/install" \
+	--with-mumps-dir="${ISSM_DIR}/externalpackages/petsc/install" \
+	--with-petsc-dir="${ISSM_DIR}/externalpackages/petsc/install" \
+	--with-triangle-dir="${ISSM_DIR}/externalpackages/triangle/install" \
+	--with-chaco-dir="${ISSM_DIR}/externalpackages/chaco/install" \
+	--with-m1qn3-dir="${ISSM_DIR}/externalpackages/m1qn3/install" \
+	--with-semic-dir="${ISSM_DIR}/externalpackages/semic/install" \
+'
+
+#-------------------#
+# External Packages #
+#-------------------#
+
+#List of external packages to be installed and their installation scripts
+EXTERNALPACKAGES="
+	msmpi		install-static.sh
+	petsc		install-3.14-win-msys2-mingw-msmpi-static.sh
+	triangle	install-win-msys2-mingw-static.sh
+	chaco		install-win-msys2-mingw.sh
+	m1qn3		install.sh
+	semic		install.sh
+	shell2junit	install.sh
+"
+
+#---------#
+# Testing #
+#---------#
+
+# Test suites
+MATLAB_TEST=0
+PYTHON_TEST=0
+JAVASCRIPT_TEST=0
+EXAMPLES_TEST=0
+
+# Number of CPUs used in ISSM compilation
+#
+# NOTE: One is usually safer as some packages are very sensitive to parallel
+# 		compilation
+#
+NUMCPUS_INSTALL=4
+
+# Number of CPUs used in the nightly runs
+NUMCPUS_RUN=2
+
+# Nightly run options
+#
+MATLAB_NROPTIONS=""
+PYTHON_NROPTIONS=""
Index: sm/trunk/jenkins/windows
===================================================================
--- /issm/trunk/jenkins/windows	(revision 28012)
+++ 	(revision )
@@ -1,66 +1,0 @@
-
-#-------------------------------#
-# 1: ISSM general configuration #
-#-------------------------------#
-
-#MATLAB path
-MATLAB_PATH="/cygdrive/c/Programs/MATLAB/R2019a"
-
-#ISSM CONFIGURATION
-ISSM_CONFIG='--prefix=$ISSM_DIR \
-	--with-vendor=MSVC-Win64  \
-	--with-cxxoptflags='-fp:strict' \
-	--disable-static \
-	--enable-standalone-libraries \
-	--with-fortran=no  \
-	--without-Sealevelchange \
-	--without-Love \
-	--without-kriging \
-	--without-kml \
-	--with-matlab-dir=$MATLAB_PATH \
-	--with-triangle-dir="$ISSM_DIR/externalpackages/triangle/install" \
-	--with-petsc-dir="$ISSM_DIR/externalpackages/petsc/install" \
-	--with-metis-dir=$ISSM_DIR/externalpackages/metis/install \
-	--with-blas-lapack-dir=$ISSM_DIR/externalpackages/petsc/install/lib/  \
-	--with-mpi-libdir="$ISSM_DIR/externalpackages/petsc/install/lib" \
-	--with-mpi-libflags="-Wl,libpetsc.lib" \
-	--with-mpi-include="$ISSM_DIR/externalpackages/petsc/install/include/petsc/mpiuni" \
-	--enable-development \
-	--enable-debugging '
-
-#PYTHON and MATLAB testing
-MATLAB_TEST=1
-PYTHON_TEST=0
-
-#-----------------------------------#
-# 3: External packages installation #
-#-----------------------------------#
-
-#List of external pakages to be installed and their installation scripts
-EXTERNALPACKAGES="
-	autotools   install-win.sh
-	petsc       install-3.6-win10.sh
-	metis       install-4.0-win10.sh
-	triangle    install-windows-static.sh
-	shell2junit install.sh
-"
-
-#-----------------#
-# 4: test options #
-#-----------------#
-
-#number of cpus used in ISSM installation and compilation (one is usually
-#safer as some packages are very sensitive to parallel compilation)
-NUMCPUS_INSTALL=8
-
-#number of cpus used in the nightly runs.
-NUMCPUS_RUN=8
-
-#Nightly run options. The matlab routine runme.m will be called
-#as follows: runme($MATLAB_NROPTIONS). The options must be understandable
-#by Matlab and runme.m
-#ex: "'id',[101 102 103]"
-
-#In Windows, we don't yet have MUMPS, can't do full stokes, so exclude all FS runs. Also exclude all runs with Dakota. And exclude higher order runs that have penalties. And 800+ tests because we don't want to have --with-development since we do the binaries with this version
-MATLAB_NROPTIONS="'exclude',[104,119,124,125,126,204,205,211,215,218,220,221,234,235,243,244,250,251,274,280,285,290,291,308,322,331,340,341,404,405,409,412,413,414,417,418,420,421,422,430,435,440,441,442,444,455,460,463,464,465,503,507,510,511,513,514,612,701,702,703,801,802,803,804,805,IdFromString('Dakota')]"
-PYTHON_NROPTIONS="--exclude_name 'Dakota'"
Index: sm/trunk/jenkins/windows-par
===================================================================
--- /issm/trunk/jenkins/windows-par	(revision 28012)
+++ 	(revision )
@@ -1,62 +1,0 @@
-
-#-------------------------------#
-# 1: ISSM general configuration #
-#-------------------------------#
-
-#MATLAB path
-MATLAB_PATH="/cygdrive/c/Programs/MATLAB/R2015a/"
-
-#ISSM CONFIGURATION 
-ISSM_CONFIG='--prefix=$ISSM_DIR \
-	--with-vendor=MSVC-Win64  \
-   --disable-static \
-	--enable-standalone-libraries \
-	--with-fortran=no  \
-	--without-Sealevelchange \
-	--without-kriging \
-	--without-kml \
-	--with-matlab-dir=$MATLAB_PATH \
-	--with-triangle-dir="$ISSM_DIR/externalpackages/triangle/install" \
-	--with-petsc-dir="$ISSM_DIR/externalpackages/petsc/install" \
-	--with-metis-dir=$ISSM_DIR/externalpackages/metis/install \
-	--with-blas-lapack-dir=$ISSM_DIR/externalpackages/petsc/install/lib/  \
-	--with-scalapack-dir=$ISSM_DIR/externalpackages/petsc/install/ \
-	--with-mpi-include="/cygdrive/c/Programs/MPICH2/include" \
-	--with-mpi-libflags="-Wl,mpi.lib -Wl,/LIBPATH:C:/Programs/MPICH2/lib" \
-	--enable-development \
-	--enable-debugging '
-
-#PYTHON and MATLAB testing
-MATLAB_TEST=1
-PYTHON_TEST=0
-
-#-----------------------------------#
-# 3: External packages installation #
-#-----------------------------------#
-
-#List of external pakages to be installed and their installation scripts
-EXTERNALPACKAGES="autotools   install-win.sh
-						petsc       install-dev-win10-par.sh
-						metis       install-4.0-win7.sh
-						triangle    install-win7.sh        
-						shell2junit install.sh"
-
-#-----------------#
-# 4: test options #
-#-----------------#
-
-#number of cpus used in ISSM installation and compilation (one is usually
-#safer as some packages are very sensitive to parallel compilation)
-NUMCPUS_INSTALL=8
-
-#number of cpus used in the nightly runs.
-NUMCPUS_RUN=1
-
-#Nightly run options. The matlab routine runme.m will be called
-#as follows: runme($MATLAB_NROPTIONS). The options must be understandable
-#by Matlab and runme.m
-#ex: "'id',[101 102 103]"
-
-#In Windows, we don't yet have MUMPS, can't do full stokes, so exclude all FS runs. Also exclude all runs with Dakota. And exclude higher order runs that have penalties. And 800+ tests because we don't want to have --with-development since we do the binaries with this version
-MATLAB_NROPTIONS="'exclude',[104, 124, 204, 211, 215, 220, 221, 285, 290, 308, 322, 404, 421, 422, 503, 507, 510, 511, 513, 701, 702, 703, 218, 234, 235, 412, 413, 414, 417, 418, 420, 205, 274, 331, 405, 409, 455, 612, 514, 435, 280,801,802,803,804,805,291,340,341]"
-PYTHON_NROPTIONS=""
Index: sm/trunk/jenkins/windows_static
===================================================================
--- /issm/trunk/jenkins/windows_static	(revision 28012)
+++ 	(revision )
@@ -1,65 +1,0 @@
-
-#-------------------------------#
-# 1: ISSM general configuration #
-#-------------------------------#
-
-#MATLAB path
-MATLAB_PATH="/cygdrive/c/Programs/MATLAB/R2015a/"
-
-#ISSM CONFIGURATION
-ISSM_CONFIG='--prefix=$ISSM_DIR \
-	--disable-static \
-	--enable-standalone-executables \
-	--enable-standalone-libraries \
-	--with-matlab-dir=$MATLAB_PATH \
-	--with-vendor=MSVC-Win64 \
-	--with-fortran=no \
-	--without-Sealevelchange \
-	--without-Love \
-	--without-kriging \
-	--without-kml \
-	--with-petsc-dir=$ISSM_DIR/externalpackages/petsc/install \
-	--with-blas-lapack-dir=$ISSM_DIR/externalpackages/petsc/install/lib  \
-	--with-mpi-libdir=$ISSM_DIR/externalpackages/petsc/install/lib \
-	--with-mpi-libflags="-Wl,libpetsc.lib" \
-	--with-metis-dir=$ISSM_DIR/externalpackages/metis/install \
-	--with-triangle-dir=$ISSM_DIR/externalpackages/triangle/install \
-	--with-math77-dir=$ISSM_DIR/externalpackages/math77/install \
-	--with-mpi-include="$ISSM_DIR/externalpackages/petsc/install/include/petsc/mpiuni" \
-	--with-cxxoptflags="-fp:strict"'
-
-#PYTHON and MATLAB testing
-MATLAB_TEST=0
-PYTHON_TEST=0
-
-#-----------------------------------#
-# 3: External packages installation #
-#-----------------------------------#
-
-#List of external pakages to be installed and their installation scripts
-EXTERNALPACKAGES="autotools   install-win.sh
-					petsc       install-3.6-win10.sh
-					metis       install-4.0-win10.sh
-					triangle    install-win10.sh
-					math77		install.sh
-					gmt			install-win64-precompiled.sh
-					gmsh 		install-win64-precompiled.sh
-					shell2junit install.sh"
-
-#-----------------#
-# 4: test options #
-#-----------------#
-
-#number of cpus used in ISSM installation and compilation (one is usually
-#safer as some packages are very sensitive to parallel compilation)
-NUMCPUS_INSTALL=8
-
-#number of cpus used in the nightly runs.
-NUMCPUS_RUN=8
-
-#Nightly run options. The matlab routine runme.m will be called
-#as follows: runme($MATLAB_NROPTIONS). The options must be understandable
-#by Matlab and runme.m
-#ex: "'id',[101 102 103]"
-MATLAB_NROPTIONS=""
-PYTHON_NROPTIONS=""
Index: sm/trunk/jenkins/windows_test
===================================================================
--- /issm/trunk/jenkins/windows_test	(revision 28012)
+++ 	(revision )
@@ -1,34 +1,0 @@
-#-------------------------------#
-# 1: ISSM general configuration #
-#-------------------------------#
-
-#MATLAB path
-MATLAB_PATH="/cygdrive/c/Program\ Files/MATLAB/R2014b/"
-
-#PYTHON and MATLAB testing
-MATLAB_TEST=1
-PYTHON_TEST=0
-
-#-----------------------------------#
-# 3: External packages installation #
-#-----------------------------------#
-
-#-----------------#
-# 4: test options #
-#-----------------#
-
-#number of cpus used in ISSM installation and compilation (one is usually
-#safer as some packages are very sensitive to parallel compilation)
-NUMCPUS_INSTALL=4
-
-#number of cpus used in the nightly runs.
-NUMCPUS_RUN=1
-
-#Nightly run options. The matlab routine runme.m will be called
-#as follows: runme($MATLAB_NROPTIONS). The options must be understandable
-#by Matlab and runme.m
-#ex: "'id',[101 102 103]"
-
-#In Windows, we don't yet have MUMPS, can't do full stokes, so exclude all FS runs. Also exclude all runs with Dakota. And exclude higher order runs that have penalties. And 800+ tests because we don't want to have --with-development since we do the binaries with this version
-MATLAB_NROPTIONS="'exclude',[104, 124, 204, 211, 215, 220, 221, 285, 290, 308, 322, 404, 421, 422, 503, 507, 510, 511, 513, 701, 702, 703, 218, 234, 235, 412, 413, 414, 417, 418, 420, 205, 274, 331, 405, 409, 455, 612, 514, 435, 280, 801, 802, 803, 804, 805, 291, 340, 341]"
-PYTHON_NROPTIONS=""
Index: /issm/trunk/m4/analyses.m4
===================================================================
--- /issm/trunk/m4/analyses.m4	(revision 28012)
+++ /issm/trunk/m4/analyses.m4	(revision 28013)
@@ -374,4 +374,18 @@
 AC_MSG_RESULT($HAVE_HYDROLOGYSHREVE)
 dnl }}}
+dnl with-HydrologyArmapw{{{
+AC_ARG_WITH([HydrologyArmapw],
+	AS_HELP_STRING([--with-HydrologyArmapw = YES], [compile with HydrologyArmapw capabilities (default is yes)]),
+	[HYDROLOGYARMAPW=$withval],[HYDROLOGYARMAPW=yes])
+AC_MSG_CHECKING(for HydrologyArmapw capability compilation)
+
+HAVE_HYDROLOGYARMAPW=no 
+if test "x$HYDROLOGYARMAPW" = "xyes"; then
+	HAVE_HYDROLOGYARMAPW=yes
+	AC_DEFINE([_HAVE_HYDROLOGYARMAPW_],[1],[with HydrologyArmapw capability])
+fi
+AM_CONDITIONAL([HYDROLOGYARMAPW], [test x$HAVE_HYDROLOGYARMAPW = xyes])
+AC_MSG_RESULT($HAVE_HYDROLOGYARMAPW)
+dnl }}}
 dnl with-L2ProjectionBase{{{
 AC_ARG_WITH([L2ProjectionBase],
Index: /issm/trunk/m4/issm_options.m4
===================================================================
--- /issm/trunk/m4/issm_options.m4	(revision 28012)
+++ /issm/trunk/m4/issm_options.m4	(revision 28013)
@@ -200,4 +200,14 @@
 			export CFLAGS="-Wno-deprecated-register -Wno-return-type"
 			export CXXFLAGS="-Wno-deprecated-register -Wno-return-type"
+			dnl NOTE: Commenting out the following, for now, as ISSM seems to 
+			dnl 	  compile and run fine, but certain errors (e.g. file not 
+			dnl 	  found) were not bubbling up, and instead causing MATLAB 
+			dnl 	  to crash.
+			dnl
+# 			if test "${LDFLAGS}" == ""; then
+# 				export LDFLAGS="-Wl,-no_compact_unwind"
+# 			else
+# 				export LDFLAGS="${LDFLAGS} -Wl,-no_compact_unwind"
+# 			fi
 		;;
 		*)
@@ -327,5 +337,5 @@
 			export CXXFLAGS="-D_MSYS2_"
 			export LDFLAGS="${LDFLAGS} -no-undefined"
-			export OSLIBS="-Wl,-L/mingw64/lib/gcc/x86_64-w64-mingw32/10.2.0 -Wl,-L/mingw64/x86_64-w64-mingw32/lib -Wl,-lstdc++ -Wl,-lgfortran -Wl,-lmingw32 -Wl,-lgcc_s -Wl,-lmoldname -Wl,-lmingwex -Wl,-lmsvcrt -Wl,-lm -Wl,-lpthread -Wl,-lshell32 -Wl,-luser32 -Wl,-lgdi32 -Wl,-luser32 -Wl,-ladvapi32 -Wl,-lkernel32 -Wl,-lquadmath -Wl,-lstdc++ -Wl,-lgcc"
+			export OSLIBS="-Wl,-L/c/msys64/mingw64/lib -Wl,-lstdc++ -Wl,-lmingw32 -Wl,-lgcc_s -Wl,-lmoldname -Wl,-lmingwex -Wl,-lmsvcrt -Wl,-lm -Wl,-lpthread -Wl,-lshell32 -Wl,-luser32 -Wl,-lgdi32 -Wl,-luser32 -Wl,-ladvapi32 -Wl,-lkernel32 -Wl,-lgcc"
 			IS_MSYS2=yes
 		else
@@ -384,21 +394,4 @@
 		dnl
 		case "${host_os}" in
-			*cygwin*)
-				if test "${VENDOR}" == "intel-win7-32"; then
-					MEXLIB="-Wl,libmx.lib -Wl,libmex.lib -Wl,libmat.lib ${OSLIBS} -Wl,libf2cblas.lib -Wl,libf2clapack.lib"
-					MEXLINKFLAGS="-Wl,/LIBPATH:`cygpath -m ${MATLAB_ROOT}/extern/lib/win32/microsoft` -Wl,/link -Wl,/EXPORT:mexFunction -Wl,/DLL"
-					MEXEXT=`${MATLAB_ROOT}/bin/mexext.bat`
-					MEXEXT=".${MEXEXT}"
-				elif test "${VENDOR}" == "intel-win7-64"; then
-					MEXLIB="-Wl,libmx.lib -Wl,libmex.lib -Wl,libmat.lib ${OSLIBS} -Wl,libf2cblas.lib -Wl,libf2clapack.lib"
-					MEXLINKFLAGS="-Wl,/LIBPATH:`cygpath -m ${MATLAB_ROOT}/extern/lib/win64/microsoft` -Wl,/link -Wl,/EXPORT:mexFunction -Wl,/DLL"
-					MEXEXT=".mexw64"
-				elif test "${VENDOR}" == "MSVC-Win64" || test "${VENDOR}" == "MSVC-Win64-par"; then
-					MEXLIB="-Wl,libmx.lib -Wl,libmex.lib -Wl,libmat.lib ${OSLIBS} -Wl,libf2cblas.lib -Wl,libf2clapack.lib"
-					MEXLINKFLAGS="-Wl,/link -Wl,/LIBPATH:`cygpath -m ${MATLAB_ROOT}/extern/lib/win64/microsoft` -Wl,/link -Wl,/EXPORT:mexFunction -Wl,/DLL"
-					MATLABINCL="-I`cygpath -m ${MATLAB_ROOT}/extern/include`"
-					MEXEXT=".mexw64"
-				fi
-			;;
 			*mingw*)
 				if test "${IS_MSYS2}" == "yes"; then
@@ -413,15 +406,4 @@
 					MEXLIB="-L${MEXLIB_DIR} -lmx -lmex -lmat -lm -lmwlapack -lmwblas"
 				fi
-			;;
-			*msys*)
-				dnl Value to set MEXEXT to can be found on Windows by running $MATLAB_ROOT/bin/mexext.bat
-				MEXEXT=".mexw64"
-				MATLABINCL="-I${MATLAB_ROOT}/extern/include"
-				MEXOPTFLAGS="-O2 -fwrapv -DNDEBUG -g"
-				MEXCFLAGS="-fexceptions -fno-omit-frame-pointer -m64 -DMATLAB_MEX_FILE"
-				MEXCXXFLAGS="-fexceptions -fno-omit-frame-pointer -std=c++11 -m64 -DMATLAB_MEX_FILE"
-				MEXLINKFLAGS="-m64 -Wl,--no-undefined -shared -static -Wl,${MATLAB_ROOT}/extern/lib/win64/mingw64/mexFunction.def"
-				MEXLIB_DIR="${MATLAB_ROOT}/extern/lib/win64/mingw64"
-				MEXLIB="-L${MEXLIB_DIR} -lmx -lmex -lmat -lm -lmwlapack -lmwblas"
 			;;
 			*)
@@ -508,8 +490,4 @@
 		TRIANGLEINCL=-I${TRIANGLE_ROOT}/include
 		case "${host_os}" in
-			*cygwin*)
-				TRIANGLEINCL="/I`cygpath -m ${TRIANGLE_ROOT}/include`"
-				TRIANGLELIB="-Wl,`cygpath -m ${TRIANGLE_ROOT}/lib/libtriangle.lib`"
-			;;
 			*darwin*)
 				if test "x${HAVE_JAVASCRIPT}" == "xyes"; then
@@ -533,13 +511,5 @@
 					TRIANGLELIB=${TRIANGLE_ROOT}/share/triangle.o
 				else
-					TRIANGLELIB="-L${TRIANGLE_ROOT}/lib -ltriangle"
-				fi
-			;;
-			*msys*)
-				if test "x${HAVE_JAVASCRIPT}" == "xyes"; then
-					dnl Link to the object file, not the library
-					TRIANGLELIB=${TRIANGLE_ROOT}/share/triangle.o
-				else
-					TRIANGLELIB="-L${TRIANGLE_ROOT}/lib -ltriangle"
+					TRIANGLELIB="-Wl,-L${TRIANGLE_ROOT}/lib -Wl,-ltriangle"
 				fi
 			;;
@@ -651,16 +621,4 @@
 		dnl
 		case "${host_os}" in
-			*cygwin*)
-				if test "${DAKOTA_VERSION}" == "5.1" || test "${DAKOTA_VERSION}" == "5.2"; then
-					DAKOTALIB="-L${DAKOTA_ROOT}/lib -L${BOOST_ROOT}/lib -ldakota -lteuchos -lpecos -llhs -lsparsegrid -lsurfpack -lconmin -lddace -lfsudace -ljega -lcport -loptpp -lpsuade -lncsuopt -lcolin -linterfaces -lmomh -lscolib -lpebbl -ltinyxml -lutilib -l3po -lhopspack -lnidr -lamplsolver -lboost_signals -lboost_regex -lboost_filesystem"
-				elif test "${DAKOTA_VERSION}" == "6.1" || test "${DAKOTA_VERSION}" == "6.2"; then
-					DAKOTAFLAGS="-DDISABLE_DAKOTA_CONFIG_H -DBOOST_MULTI_INDEX_DISABLE_SERIALIZATION -DDAKOTA_PLUGIN -DBOOST_DISABLE_ASSERTS -DDAKOTA_HAVE_BOOST_FS -DHAVE_UNISTD_H -DHAVE_SYSTEM -DHAVE_WORKING_FORK -DHAVE_WORKING_VFORK -DHAVE_SYS_WAIT_H -DHAVE_USLEEP -DDAKOTA_F90 -DDAKOTA_HAVE_MPI -DHAVE_PECOS -DHAVE_SURFPACK -DDAKOTA_COLINY -DDAKOTA_UTILIB -DHAVE_ADAPTIVE_SAMPLING -DHAVE_CONMIN -DDAKOTA_DDACE -DHAVE_FSUDACE -DDAKOTA_HOPS -DHAVE_JEGA -DHAVE_NCSU -DHAVE_NL2SOL -DHAVE_OPTPP -DDAKOTA_OPTPP -DHAVE_PSUADE -DHAVE_AMPL"
-					DAKOTALIB="-L${DAKOTA_ROOT}/lib -L${BOOST_ROOT}/lib -ldakota_src -ldream -lfsudace -lddace -lnomad -lpecos_src -lscolib -ljega_fe -llhs -lpebbl -lcolin -linterfaces -llhs_mods -lmoga -loptpp -lsoga -lsurfpack -lutilib -lconmin -ldakota_src_fortran -llhs_mod -lncsuopt -lsurfpack_fortran -lteuchos -l3po -lamplsolver -lcport -ldfftpack -leutils -lfsudace -lhopspack -ljega -lnidr -lpecos -lpsuade -ltinyxml -lutilities -lsparsegrid -lboost_serialization -lboost_signals -lboost_regex -lboost_filesystem -lboost_system"
-					AC_DEFINE([DISABLE_DAKOTA_CONFIG_H], [1], [enabling DAKOTA_CONFIG_H])
-					AC_DEFINE([DAKOTA_HAVE_MPI], [1], [enabling parallel MPI])
-				else
-					AC_MSG_ERROR([Dakota version not found or version (${DAKOTA_VERSION}) not supported!]);
-				fi
-			;;
 			*darwin*)
 				if test "${DAKOTA_VERSION}" == "5.1" || test "${DAKOTA_VERSION}" == "5.2"; then
@@ -842,9 +800,6 @@
 		AC_MSG_RESULT([found])
 
-		PYTHONEXT=.so
+		PYWRAPPEREXT=.so
 		case "${host_os}" in
-			*cygwin*)
-				PYTHONLINK="-shared"
-			;;
 			*darwin*)
 				PYTHONLINK="-dynamiclib"
@@ -856,12 +811,9 @@
 				PYTHONLINK="-shared"
 			;;
-			*msys*)
-				PYTHONLINK="-shared"
-			;;
 		esac
 		AC_DEFINE([_HAVE_PYTHON_], [1], [with Python in ISSM src])
 		AC_SUBST([PYTHONINCL])
 		AC_SUBST([PYTHONLIB])
-		PYTHONWRAPPEREXT=${PYTHONEXT}
+		PYTHONWRAPPEREXT=${PYWRAPPEREXT}
 		AC_SUBST([PYTHONWRAPPEREXT])
 		AC_SUBST([PYTHONLINK])
@@ -1110,7 +1062,4 @@
 	if test "x${HAVE_ATLAS}" == "xyes"; then
 		case "${host_os}" in
-			*cygwin*)
-				ATLASLIB="-L`cygpath -m ${ATLAS_ROOT}` -Wl,libatlas.lib  -Wl,libcblas.lib"
-			;;
 			*darwin*)
 				ATLASLIB="-L${ATLAS_ROOT}/lib -lcblas -latlas -lm"
@@ -1122,7 +1071,4 @@
 				ATLASLIB="-L${ATLAS_ROOT}/lib -lcblas -latlas -lm"
 			;;
-			*msys*)
-				ATLASLIB="-L${ATLAS_ROOT}/lib -lcblas -latlas -lm"
-			;;
 		esac
 		AC_DEFINE([_HAVE_ATLAS_], [1], [with ATLAS in ISSM src])
@@ -1289,7 +1235,4 @@
 	if test "x${HAVE_HDF5}" == "xyes"; then
 		case "${host_os}" in
-			*cygwin*)
-				HDF5LIB="-L`cygpath -m ${HDF5_ROOT}` -Wl,libhdf5.lib  -Wl,libhdf5_hl.lib"
-			;;
 			*darwin*)
 				HDF5LIB="-L${HDF5_ROOT}/lib -lhdf5 -lhdf5_hl"
@@ -1299,7 +1242,4 @@
 			;;
 			*mingw*)
-				HDF5LIB="-L${HDF5_ROOT}/lib -lhdf5 -lhdf5_hl"
-			;;
-			*msys*)
 				HDF5LIB="-L${HDF5_ROOT}/lib -lhdf5 -lhdf5_hl"
 			;;
@@ -1373,12 +1313,4 @@
 
 		case "${host_os}" in
-			*cygwin*)
-				if test ${PETSC_MAJOR} -lt 3; then
-					PETSCLIB=-Wl,/LIBPATH:`cygpath -w ${PETSC_ROOT}/lib` -Wl,libpetscksp.lib  -Wl,libpetscdm.lib -Wl,libpetscmat.lib -Wl,libpetscvec.lib -Wl,libpetscsnes.lib  -Wl,libpetscts.lib -Wl,libmpiuni.lib -Wl,libpetsc.lib
-				else
-					PETSCLIB="/link -Wl,/LIBPATH:`cygpath -m ${PETSC_ROOT}/lib` -Wl,libpetsc.lib"
-					PETSCINCL="/I`cygpath -m ${PETSC_ROOT}/include`"
-				fi
-			;;
 			*darwin*)
 				if test ${PETSC_MAJOR} -lt 3; then
@@ -1408,8 +1340,5 @@
 			;;
 			*mingw*)
-				PETSCLIB="-Wl,${PETSC_ROOT}/lib/libpetsc.a"
-			;;
-			*msys*)
-				PETSCLIB="${PETSC_ROOT}/lib -lpetsc"
+				PETSCLIB="-Wl,-L${PETSC_ROOT}/lib -Wl,-lpetsc"
 			;;
 		esac
@@ -1464,5 +1393,5 @@
 			MPILIB="${MPI_LIBFLAGS}"
 		else
-			MPILIB="-L${MPI_LIBDIR} ${MPI_LIBFLAGS}"
+			MPILIB="${MPI_LIBDIR} ${MPI_LIBFLAGS}"
 		fi
 
@@ -1547,8 +1476,4 @@
 			METISINCL="-I${METIS_ROOT}/Lib"
 			case "${host_os}" in
-				*cygwin*)
-					METISINCL="/I`cygpath -m ${METIS_ROOT}/Lib`"
-					METISLIB="-Wl,/link -Wl,/LIBPATH:`cygpath -m ${METIS_ROOT}` -Wl,libmetis.lib"
-				;;
 				*darwin*)
 					METISLIB="-L${METIS_ROOT} -lmetis"
@@ -1558,8 +1483,5 @@
 				;;
 				*mingw*)
-					METISLIB="-Wl,${METIS_ROOT}/lib/libmetis.a"
-				;;
-				*msys*)
-					METISLIB="-L${METIS_ROOT} -lmetis"
+					METISLIB="-Wl,-L${METIS_ROOT}/lib -Wl,-lmetis"
 				;;
 			esac
@@ -1567,7 +1489,4 @@
 			METISINCL="-I${METIS_ROOT}/include"
 			case "${host_os}" in
-				*cygwin*)
-					METISLIB="-L${METIS_ROOT} libmetis.lib"
-				;;
 				*darwin*)
 					METISLIB="-L${METIS_ROOT}/lib -lmetis"
@@ -1577,8 +1496,5 @@
 				;;
 				*mingw*)
-					METISLIB="-Wl,${METIS_ROOT}/lib/libmetis.a"
-				;;
-				*msys*)
-					METISLIB="-L${METIS_ROOT}/lib -lmetis"
+					METISLIB="-Wl,-L${METIS_ROOT}/lib -Wl,-lmetis"
 				;;
 			esac
@@ -1624,7 +1540,4 @@
 			PARMETISINCL="-I${PARMETIS_ROOT}/include"
 			case "${host_os}" in
-				*cygwin*)
-					PARMETISLIB="-L${PARMETIS_ROOT}/lib -lparmetis"
-				;;
 				*darwin*)
 					PARMETISLIB="-L${PARMETIS_ROOT}/lib -lparmetis"
@@ -1634,8 +1547,5 @@
 				;;
 				*mingw*)
-					PARMETISLIB="-Wl,${PARMETIS_ROOT}/lib/libparmetis.a"
-				;;
-				*msys*)
-					PARMETISLIB="-L${PARMETIS_ROOT}/lib -lparmetis"
+					PARMETISLIB="-Wl,-L${PARMETIS_ROOT}/lib -Wl,-lparmetis"
 				;;
 			esac
@@ -1706,5 +1616,5 @@
 	if test "x${HAVE_M1QN3}" == "xyes"; then
 		if test "${IS_MSYS2}" == "yes"; then
-			M1QN3LIB="-Wl,${M1QN3_ROOT}/libm1qn3.a -Wl,${M1QN3_ROOT}/libddot.a"
+			M1QN3LIB="-Wl,-L${M1QN3_ROOT} -Wl,-lm1qn3 -Wl,-lddot"
 		else
 			M1QN3LIB="${M1QN3_ROOT}/libm1qn3.a ${M1QN3_ROOT}/libddot.a"
@@ -1826,5 +1736,5 @@
 		HAVE_SCALAPACK=yes
 		if test "${VENDOR}" == "win-msys2"; then
-			SCALAPACKLIB="-Wl,${SCALAPACK_ROOT}/lib/libscalapack.a"
+			SCALAPACKLIB="-Wl,-L${SCALAPACK_ROOT}/lib -Wl,-lscalapack"
 		else
 			SCALAPACKLIB="-L${SCALAPACK_ROOT}/lib -lscalapack"
@@ -1879,7 +1789,4 @@
 	if test "x${HAVE_BLASLAPACK}" == "xyes"; then
 		case "${host_os}" in
-			*cygwin*)
-				BLASLAPACKLIB="-L`cygpath -m ${BLASLAPACK_ROOT}` -Wl,libf2cblas.lib  -Wl,libf2clapack.lib"
-			;;
 			*darwin*)
 				BLASLAPACKLIB="-L${BLASLAPACK_ROOT}/lib"
@@ -1908,5 +1815,5 @@
 			*mingw*)
 				if test -d "${BLASLAPACK_ROOT}"; then
-					BLASLAPACKLIB="-L${BLASLAPACK_ROOT}/lib"
+					BLASLAPACKLIB="-Wl,-L${BLASLAPACK_ROOT}/lib"
 					if ls ${BLASLAPACK_ROOT}/lib/libopenblas.* 1> /dev/null 2>&1; then
 						BLASLAPACKLIB+=" -lopenblas"
@@ -1914,25 +1821,10 @@
 						BLASLAPACKLIB+=" -lf2clapack -lf2cblas"
 					elif ls ${BLASLAPACK_ROOT}/lib/libflapack.* 1> /dev/null 2>&1; then
-						BLASLAPACKLIB="-Wl,${BLASLAPACK_ROOT}/lib/libflapack.a -Wl,${BLASLAPACK_ROOT}/lib/libfblas.a"
+						BLASLAPACKLIB="-Wl,-L${BLASLAPACK_ROOT}/lib -Wl,-lflapack -Wl,-lfblas"
 					else
-						BLASLAPACKLIB+=" -llapack -lblas"
+						BLASLAPACKLIB+=" -Wl,-llapack -Wl,-lblas"
 					fi
 				else
-					BLASLAPACKLIB="-Wl,${LAPACK_ROOT}/lib/liblapack.a -Wl,${BLAS_ROOT}/lib/libblas.a"
-				fi
-			;;
-			*msys*)
-				if test -d "${BLASLAPACK_ROOT}"; then
-					if ls ${BLASLAPACK_ROOT}/lib/libopenblas.* 1> /dev/null 2>&1; then
-						BLASLAPACKLIB=" -lopenblas"
-					elif ls ${BLASLAPACK_ROOT}/lib/libf2clapack.* 1> /dev/null 2>&1; then
-						BLASLAPACKLIB=" -lf2clapack -lf2cblas"
-					elif ls ${BLASLAPACK_ROOT}/lib/libflapack.* 1> /dev/null 2>&1; then
-						BLASLAPACKLIB+=" -lflapack -lfblas"
-					else
-						BLASLAPACKLIB=" -llapack -lblas"
-					fi
-				else
-					BLASLAPACKLIB="-L${LAPACK_ROOT}/lib -llapack -L${BLAS_ROOT}/lib -lblas"
+					BLASLAPACKLIB="${LAPACK_ROOT}/lib/liblapack.a ${BLAS_ROOT}/lib/libblas.a"
 				fi
 			;;
@@ -2046,5 +1938,5 @@
 		if test "x${MUMPS_ROOT}" == "x${PETSC_ROOT}"; then
 			if test "${VENDOR}" == "win-msys2"; then
-				MUMPSLIB="-Wl,${MUMPS_ROOT}/lib/libcmumps.a -Wl,${MUMPS_ROOT}/lib/libdmumps.a -Wl,${MUMPS_ROOT}/lib/libsmumps.a -Wl,${MUMPS_ROOT}/lib/libzmumps.a -Wl,${MUMPS_ROOT}/lib/libmumps_common.a -Wl,${MUMPS_ROOT}/lib/libpord.a"
+				MUMPSLIB="-Wl,-L${MUMPS_ROOT}/lib -Wl,-lcmumps -Wl,-ldmumps -Wl,-lsmumps -Wl,-lzmumps -Wl,-lmumps_common -Wl,-lpord"
 			else
 				MUMPSLIB="-L${MUMPS_ROOT}/lib -ldmumps -lcmumps -lmumps_common -lpord -lparmetis -lzmumps -lmetis"
@@ -2428,4 +2320,5 @@
 	AC_MSG_RESULT([${FORTRAN}])
 
+	IS_FORTRANDIR_A_DIR=no
 	if test "x${FORTRAN}" == "xyes"; then
 		dnl Fortran library
@@ -2438,8 +2331,13 @@
 		)
 		if test -n "${FORTRAN_LIB}"; then
-			FORTRAN_DIR=$(echo ${FORTRAN_LIB} | sed -e "s/-L//g" | awk '{print $[1]}')
+			FORTRAN_DIR=$(echo ${FORTRAN_LIB} | sed -e "s/-Wl,//g" | sed -e "s/-L//g" | awk '{print $[1]}')
 			if test -d "${FORTRAN_DIR}" || test -f "${FORTRAN_DIR}"; then
+				FORTRANDIR="${FORTRAN_DIR}"
+				if test -n "${FORTRAN_DIR}"; then
+					IS_FORTRANDIR_A_DIR=yes
+				fi
 				FORTRANLIB="${FORTRAN_LIB}"
 				AC_DEFINE([_HAVE_FORTRAN_], [1], [with Fortran library in ISSM src])
+				AC_SUBST([FORTRANDIR])
 				AC_SUBST([FORTRANLIB])
 			else
@@ -2455,4 +2353,5 @@
 		AC_MSG_RESULT([done])
 	fi
+	AM_CONDITIONAL([HAVE_FORTRANDIR], [test "x${IS_FORTRANDIR_A_DIR}" == "xyes"])
 	dnl }}}
 	dnl Xlib (graphics library){{{
@@ -2779,7 +2678,4 @@
 		MULTITHREADINGLIB="-lpthread -lrt"
 		case "${host_os}" in
-			*cygwin*)
-				MULTITHREADINGLIB="-lpthread -lrt"
-			;;
 			*darwin*)
 				MULTITHREADINGLIB="-lpthread"
@@ -2791,7 +2687,4 @@
 				MULTITHREADINGLIB=""
 			;;
-			*msys*)
-				MULTITHREADINGLIB=""
-			;;
 		esac
 		AC_DEFINE([_MULTITHREADING_], [1], [with multithreading enabled])
Index: sm/trunk/packagers/linux/complete-issm-linux-binaries-python-2.sh
===================================================================
--- /issm/trunk/packagers/linux/complete-issm-linux-binaries-python-2.sh	(revision 28012)
+++ 	(revision )
@@ -1,94 +1,0 @@
-#!/bin/bash
-
-################################################################################
-# Wrapper script to build, package, and transfer to ISSM Web site ISSM 
-# distributable package for Linux with Python 2 API.
-#
-# Normally, we would put this directly into the project configuration under 
-# 'Build' -> 'Execute shell', but because it is a bit more involved, it is a 
-# good idea to version it.
-#
-# When no failures/errors occur, performs the following:
-# - Builds ISSM according to configuration.
-# - Packages executables and libraries.
-# - Runs test suite against package.
-# - Transmits package to ISSM Web site for distribution.
-#
-# Options:
-# -b/--skipbuild		Skip ISSM compilation.
-# -s/--skiptests		Skip ISSM compilation and testing during packaging 
-#						step. Use if packaging fails for some reason but build 
-#						is valid.
-# -t/--transferonly		Transfer package to ISSM Web site only. Use if transfer 
-#						fails for some reason to skip building, packaging, and 
-#						signing.
-#
-# NOTE:
-# - Use only *one* of the above options at a time, and make sure it is removed 
-#	again after a single run.
-# - Builds will fail when any of the above options are used on a clean 
-#	workspace. For example, if 'Source Code Management' -> 'Check-out Strategy' 
-#	select menu is set to "Always check out a fresh copy".
-################################################################################
-
-## Constants
-#
-PKG="ISSM-Linux-Python-2" # Name of directory to copy distributable files to
-
-COMPRESSED_PKG="${PKG}.tar.gz"
-
-## Environment
-#
-export COMPRESSED_PKG
-export PKG
-
-## Parse options
-#
-if [ $# -gt 1 ]; then
-	echo "Can use only one option at a time"
-	exit 1
-fi
-
-# NOTE: We could do this with binary switching (i.e. 0011 to sign and transfer, 
-#		but the following is self-documenting).
-#
-build=1
-package=1
-transfer=1
-
-if [ $# -eq 1 ]; then
-	case $1 in
-		-b|--skipbuild)		build=0;				shift	;;
-		-s|--skiptests)		build=0;						;;
-		-t|--transferonly)	build=0;	package=0;			;;
-		*) echo "Unknown parameter passed: $1"; exit 1 		;;
-	esac
-fi
-
-# Build
-if [ ${build} -eq 1 ]; then
-	./jenkins/jenkins.sh ./jenkins/ross-debian_linux-binaries-python-2
-
-	if [ $? -ne 0 ]; then 
-		exit 1
-	fi
-fi
-
-# Package
-if [ ${package} -eq 1 ]; then
-	./packagers/linux/package-issm-linux-binaries-python-2.sh $1
-
-	if [ $? -ne 0 ]; then 
-		exit 1
-	fi
-fi
-
-# Transfer distributable package to ISSM Web site
-if [ ${transfer} -eq 1 ]; then
-	./packagers/linux/transfer-issm-linux-binaries.sh
-
-	if [ $? -ne 0 ]; then 
-		exit 1
-	fi
-fi
-
Index: /issm/trunk/packagers/linux/package-issm-linux-binaries-matlab.sh
===================================================================
--- /issm/trunk/packagers/linux/package-issm-linux-binaries-matlab.sh	(revision 28012)
+++ /issm/trunk/packagers/linux/package-issm-linux-binaries-matlab.sh	(revision 28013)
@@ -30,5 +30,5 @@
 LIBGFORTRAN="/usr/lib/x86_64-linux-gnu/libgfortran.so.5.0.0" # Important that this is the library itself
 LIBGFORTRAN_DIST="${ISSM_DIR}/lib/libgfortran.so.5" # Important the file name matches the SONAME entry in the binaries and other shared libraries which link to it
-MATLAB_NROPTIONS="'benchmark','all','exclude',[125,126,234,235,418,420,435,444,445,701,702,703,1101,1102,1103,1104,1105,1106,1107,1108,1109,1110,1201,1202,1203,1204,1205,1206,1207,1208,1301,1302,1303,1304,1401,1402,1601,1602,2002,2003,2004,2006,2007,2008,2010,2011,2021,2051,2052,2053,2084,2085,2090,2101,2424,2425,3001:3200,3201,3202,3300,3480,3481,4001,4002,4003]" # NOTE: Combination of test suites from basic, Dakota, and Solid Earth builds, with tests that require a restart and those that require the JVM excluded
+MATLAB_NROPTIONS="'benchmark','all','exclude',[125,126,129,234,235,418,420,435,444,445,701,702,703,1101,1102,1103,1104,1105,1106,1107,1108,1109,1110,1201,1202,1203,1204,1205,1206,1207,1208,1301,1302,1303,1304,1401,1402,1601,1602,2002,2003,2004,2006,2007,2008,2010,2011,2012,2013,2020,2021,2051,2052,2053,2084,2085,2090,2091,2092,2101,2424,2425,3001:3300,3480,3481,4001:4100]" # NOTE: Combination of test suites from basic, Dakota, and Solid Earth builds, with tests that require a restart and those that require the JVM excluded
 MATLAB_PATH="/usr/local/MATLAB/R2019b"
 
@@ -140,5 +140,5 @@
 
 	# Check that MATLAB did not exit in error
-	matlabExitedInError=`grep -c -E "Activation cannot proceed|Error in matlab_run|Illegal use of reserved keyword|Invalid MEX-file" matlab.log`
+	matlabExitedInError=`grep -c -E "Activation cannot proceed|Error in|Illegal|Invalid MEX-file|license|Warning: Name is nonexistent or not a directory" matlab.log`
 
 	if [ ${matlabExitedInError} -ne 0 ]; then
Index: sm/trunk/packagers/linux/package-issm-linux-binaries-python-2.sh
===================================================================
--- /issm/trunk/packagers/linux/package-issm-linux-binaries-python-2.sh	(revision 28012)
+++ 	(revision )
@@ -1,185 +1,0 @@
-#!/bin/bash
-
-################################################################################
-# Packages and tests ISSM distributable package for Linux with Python 2 API.
-#
-# Options:
-# -s/--skiptests		Skip testing during packaging Use if packaging fails 
-#						for some reason but build is valid.
-#
-# NOTE:
-# - Assumes that the following constants are defined,
-#
-#		COMPRESSED_PKG
-#		ISSM_DIR
-#		PKG
-#
-# See also:
-# - packagers/linux/complete-issm-linux-binaries-python-2.sh
-################################################################################
-
-# Expand aliases within the context of this script
-shopt -s expand_aliases
-
-## Override certain aliases
-#
-alias grep=$(which grep)
-
-## Constants
-#
-LIBGFORTRAN="/usr/lib/x86_64-linux-gnu/libgfortran.so.5.0.0" # Important that this is the library itself
-LIBGFORTRAN_DIST="${ISSM_DIR}/lib/libgfortran.so.5" # Important the file name matches the SONAME entry in the binaries and other shared libraries which link to it
-PYTHON_NROPTIONS="--benchmark all --exclude 125 126 234 235 418 420 435 444 445 701 702 703 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1201 1202 1203 1204 1205 1206 1207 1208 1301 1302 1303 1304 1401 1402 1601 1602 2002 2003 2004 2005 2006 2007 2008 2010 2011 2021 2051 2052 2053 2084 2085 2090 2101 2424 2425 3001:3200 3201 3202 3300 3480 3481 4001 4002 4003" # NOTE: Combination of test suites from basic, Dakota, and Solid Earth builds, with tests that require a restart and those that require the JVM excluded
-
-## Environment
-#
-export PATH="${ISSM_DIR}/bin:$(getconf PATH)" # Ensure that we pick up binaries from 'bin' directory rather than 'externalpackages'
-
-## Parse options
-#
-if [ $# -gt 1 ]; then
-	echo "Can use only one option at a time"
-	exit 1
-fi
-
-skip_tests=0
-
-if [ $# -eq 1 ]; then
-	case $1 in
-		-s|--skiptests) skip_tests=1;					;;
-		*) echo "Unknown parameter passed: $1"; exit 1	;;
-	esac
-fi
-
-# Clean up from previous packaging
-echo "Cleaning up existing assets"
-cd ${ISSM_DIR}
-rm -rf ${PKG} ${COMPRESSED_PKG}
-mkdir ${PKG}
-
-# Add required binaries and libraries to package and modify them where needed
-cd ${ISSM_DIR}/bin
-
-echo "Modify generic"
-cat generic_static.py | sed -e "s/generic_static/generic/g" > generic.py
-
-echo "Moving MPICH binaries to bin/"
-if [ -f ${ISSM_DIR}/externalpackages/petsc/install/bin/mpiexec ]; then
-	cp ${ISSM_DIR}/externalpackages/petsc/install/bin/mpiexec .
-	cp ${ISSM_DIR}/externalpackages/petsc/install/bin/hydra_pmi_proxy .
-elif [ -f ${ISSM_DIR}/externalpackages/mpich/install/bin/mpiexec ]; then
-	cp ${ISSM_DIR}/externalpackages/mpich/install/bin/mpiexec .
-	cp ${ISSM_DIR}/externalpackages/mpich/install/bin/hydra_pmi_proxy .
-else
-	echo "MPICH not found"
-	exit 1
-fi
-
-echo "Moving GDAL binaries to bin/"
-if [ -f ${ISSM_DIR}/externalpackages/gdal/install/bin/gdal-config ]; then
-	cp ${ISSM_DIR}/externalpackages/gdal/install/bin/gdalsrsinfo .
-	cp ${ISSM_DIR}/externalpackages/gdal/install/bin/gdaltransform .
-else
-	echo "GDAL not found"
-	exit 1
-fi
-
-echo "Moving GMT binaries to bin/"
-if [ -f ${ISSM_DIR}/externalpackages/gmt/install/bin/gmt-config ]; then
-	cp ${ISSM_DIR}/externalpackages/gmt/install/bin/gmt .
-	cp ${ISSM_DIR}/externalpackages/gmt/install/bin/gmtselect .
-else
-	echo "GMT not found"
-	exit 1
-fi
-
-echo "Moving Gmsh binaries to bin/"
-if [ -f ${ISSM_DIR}/externalpackages/gmsh/install/bin/gmsh ]; then
-	cp ${ISSM_DIR}/externalpackages/gmsh/install/bin/gmsh .
-else
-	echo "Gmsh not found"
-	exit 1
-fi
-
-echo "Moving libgfortran to lib/"
-cp ${LIBGFORTRAN} ${LIBGFORTRAN_DIST} 2> /dev/null
-
-echo "Moving GSHHG assets to share/"
-if [ -d ${ISSM_DIR}/externalpackages/gmt/install/share/coast ]; then
-	mkdir ${ISSM_DIR}/share 2> /dev/null
-	cp -R ${ISSM_DIR}/externalpackages/gmt/install/share/coast ${ISSM_DIR}/share
-else
-	echo "GSHHG not found"
-	exit 1
-fi
-
-echo "Moving PROJ assets to share/"
-if [ -d ${ISSM_DIR}/externalpackages/proj/install/share/proj ]; then
-	mkdir ${ISSM_DIR}/share 2> /dev/null
-	cp -R ${ISSM_DIR}/externalpackages/proj/install/share/proj ${ISSM_DIR}/share
-else
-	echo "PROJ not found"
-	exit 1
-fi
-
-# Run tests
-if [ ${skip_tests} -eq 0 ]; then
-	echo "Running tests"
-	cd ${ISSM_DIR}/test/NightlyRun
-	rm python.log 2> /dev/null
-
-	# Set Python environment
-	export PYTHONPATH="${ISSM_DIR}/src/m/dev"
-	export PYTHONSTARTUP="${PYTHONPATH}/devpath.py"
-	export PYTHONUNBUFFERED=1 # We don't want Python to buffer output, otherwise issm.exe output is not captured
-
-	# Run tests, redirecting output to logfile and suppressing output to console
-	./runme.py ${PYTHON_NROPTIONS} &> python.log 2>&1
-
-	# Check that Python did not exit in error
-	pythonExitCode=`echo $?`
-	pythonExitedInError=`grep -c -E "Error|Standard exception|Traceback|bad interpreter" python.log`
-
-	if [[ ${pythonExitCode} -ne 0 || ${pythonExitedInError} -ne 0 ]]; then
-		echo "----------Python exited in error!----------"
-		cat python.log
-		echo "-----------End of python.log-----------"
-
-		# Clean up execution directory
-		rm -rf ${ISSM_DIR}/execution/*
-
-		exit 1
-	fi
-
-	# Check that all tests passed
-	sed -i "/FAILED TO establish the default connection to the WindowServer/d" python.log # First, need to remove WindowServer error message
-	numTestsFailed=`grep -c -E "FAILED|ERROR" python.log`
-
-	if [[ ${numTestsFailed} -ne 0 ]]; then
-		echo "One or more tests FAILED"
-		cat python.log
-		exit 1
-	else
-		echo "All tests PASSED"
-	fi
-else
-	echo "Skipping tests"
-fi
-
-# Create package
-cd ${ISSM_DIR}
-svn cleanup --remove-ignored --remove-unversioned test # Clean up test directory (before copying to package)
-echo "Copying assets to package: ${PKG}"
-cp -rf bin examples lib scripts share test ${PKG}
-mkdir ${PKG}/execution
-${ISSM_DIR}/scripts/py_to_pyc.sh ${PKG}/bin # Compile Python source files
-echo "Cleaning up unneeded/unwanted files"
-rm -f ${PKG}/bin/*.py # Remove all Python scripts
-rm -f ${PKG}/bin/generic_static.* # Remove static versions of generic cluster classes
-rm -f ${PKG}/lib/*.a # Remove static libraries from package
-rm -f ${PKG}/lib/*.la # Remove libtool libraries from package
-rm -rf ${PKG}/test/SandBox # Remove testing sandbox from package
-
-# Compress package
-echo "Compressing package"
-tar -czf ${COMPRESSED_PKG} ${PKG}
Index: /issm/trunk/packagers/linux/package-issm-linux-binaries-python-3.sh
===================================================================
--- /issm/trunk/packagers/linux/package-issm-linux-binaries-python-3.sh	(revision 28012)
+++ /issm/trunk/packagers/linux/package-issm-linux-binaries-python-3.sh	(revision 28013)
@@ -30,5 +30,5 @@
 LIBGFORTRAN="/usr/lib/x86_64-linux-gnu/libgfortran.so.5.0.0" # Important that this is the library itself
 LIBGFORTRAN_DIST="${ISSM_DIR}/lib/libgfortran.so.5" # Important the file name matches the SONAME entry in the binaries and other shared libraries which link to it
-PYTHON_NROPTIONS="--benchmark all --exclude 125 126 234 235 418 420 435 444 445 701 702 703 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1201 1202 1203 1204 1205 1206 1207 1208 1301 1302 1303 1304 1401 1402 1601 1602 2002 2003 2004 2005 2006 2007 2008 2010 2011 2021 2051 2052 2053 2084 2085 2090 2101 2424 2425 3001:3200 3201 3202 3300 3480 3481 4001 4002 4003" # NOTE: Combination of test suites from basic, Dakota, and Solid Earth builds, with tests that require a restart and those that require the JVM excluded
+PYTHON_NROPTIONS="--benchmark all --exclude 125 126 129 234 235 418 420 435 444 445 701 702 703 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1201 1202 1203 1204 1205 1206 1207 1208 1301 1302 1303 1304 1401 1402 1601 1602 2002 2003 2004 2005 2006 2007 2008 2010 2011 2012 2013 2020 2021 2051 2052 2053 2084 2085 2090 2091 2092 2101 2424 2425 3001:3300 3480 3481 4001:4100" # NOTE: Combination of test suites from basic, Dakota, and Solid Earth builds, with tests that require a restart and those that require the JVM excluded
 
 ## Environment
@@ -134,7 +134,4 @@
 	export PYTHONUNBUFFERED=1 # We don't want Python to buffer output, otherwise issm.exe output is not captured
 
-	# Ensure that runme.py uses Python 3 interpreter
-	sed -i "s|/usr/bin/env python|/usr/bin/python3|g" ./runme.py
-
 	# Run tests, redirecting output to logfile and suppressing output to console
 	./runme.py ${PYTHON_NROPTIONS} &> python.log 2>&1
@@ -142,5 +139,5 @@
 	# Check that Python did not exit in error
 	pythonExitCode=`echo $?`
-	pythonExitedInError=`grep -c -E "Error|Standard exception|Traceback|bad interpreter" python.log`
+	pythonExitedInError=`grep -c -E "Error|No such file or directory|Permission denied|Standard exception|Traceback|bad interpreter|syntax error" python.log`
 
 	if [[ ${pythonExitCode} -ne 0 || ${pythonExitedInError} -ne 0 ]]; then
@@ -176,7 +173,7 @@
 cp -rf bin examples lib scripts share test ${PKG}
 mkdir ${PKG}/execution
-${ISSM_DIR}/scripts/py_to_pyc.sh ${PKG}/bin # Compile Python source files
+# ${ISSM_DIR}/scripts/py_to_pyc.sh ${PKG}/bin # Compile Python source files
 echo "Cleaning up unneeded/unwanted files"
-rm -f ${PKG}/bin/*.py # Remove all Python scripts
+# rm -f ${PKG}/bin/*.py # Remove all Python scripts
 rm -f ${PKG}/bin/generic_static.* # Remove static versions of generic cluster classes
 rm -f ${PKG}/lib/*.a # Remove static libraries from package
Index: /issm/trunk/packagers/mac/commit_for_signing-issm-mac-binaries-matlab.sh
===================================================================
--- /issm/trunk/packagers/mac/commit_for_signing-issm-mac-binaries-matlab.sh	(revision 28012)
+++ /issm/trunk/packagers/mac/commit_for_signing-issm-mac-binaries-matlab.sh	(revision 28013)
@@ -35,20 +35,13 @@
 shopt -s expand_aliases
 
-# From https://developer.apple.com/documentation/macos-release-notes/macos-catalina-10_15-release-notes,
-#
-#	Command line tool support for Subversion — including svn, git-svn, and 
-#	related commands — is no longer provided by Xcode. (50266910)
-#
-# which results in,
-#
-#	svn: error: The subversion command line tools are no longer provided by 
-#	Xcode.
-#
-# when calling svn, even when subversion is installed via Homebrew and its path 
-# is available in PATH.
-#
-# NOTE: May be able to remove this after updating macOS.
-#
-#alias svn='/usr/local/bin/svn'
+# NOTE: For some reason, calling svn from within the context of this script 
+#		gives,
+#
+#			svn: command not found
+#
+#		even though it is installed via Homebrew and available at the following 
+#		path.
+#
+alias svn='/usr/local/bin/svn'
 
 ## Override certain other aliases
@@ -234,6 +227,7 @@
 
 		# Check status
-		STATUS=$(grep 'Status:' ${SIGNED_REPO_COPY}/${NOTARIZATION_LOGFILE} | sed -e 's/[[:space:]]*Status: //')
-		if [[ "${STATUS}" == "success" ]]; then
+		STATUS=$(grep '"status": "Accepted"' ${SIGNED_REPO_COPY}/${NOTARIZATION_LOGFILE} | wc -l)
+
+		if [[ ${STATUS} -gt 0 ]]; then
 			echo "Notarization successful!"
 			break
Index: sm/trunk/packagers/mac/commit_for_signing-issm-mac-binaries-python-2.sh
===================================================================
--- /issm/trunk/packagers/mac/commit_for_signing-issm-mac-binaries-python-2.sh	(revision 28012)
+++ 	(revision )
@@ -1,256 +1,0 @@
-#!/bin/bash
-
-################################################################################
-# Commits ISSM distributable package for macOS with Python 2 API to repository 
-# for signing. This repository is polled by a project running on a JPL 
-# Cybersecurity Jenkins server and performs the actual signing and 
-# notarization.
-#
-# Options:
-# -r/--resign			Skip ISSM compilation and packaging. Use to retrigger 
-#						signing/notarization if it fails but build and package 
-#						are valid.
-# -u/--unlock			Remove lock file from signed package repository. Use if 
-#						build is aborted to allow for subsequent fresh build.
-#
-# NOTE:
-# - Assumes that the following constants are defined,
-#
-#		COMPRESSED_PKG
-#		ISSM_BINARIES_REPO_PASS
-#		ISSM_BINARIES_REPO_USER
-#		SIGNED_REPO_COPY
-#		SIGNED_REPO_URL
-#
-# See also:
-# - packagers/mac/complete-issm-mac-binaries-python-2.sh
-# - packagers/mac/sign-issm-mac-binaries-python-2.sh
-#
-# TODO:
-# - Generalize checkout_*_repo_copy and validate_*_repo_copy functions (e.g. 
-#	pass 'signed' or 'unsigned' as argument)
-################################################################################
-
-# Expand aliases within the context of this script
-shopt -s expand_aliases
-
-# From https://developer.apple.com/documentation/macos-release-notes/macos-catalina-10_15-release-notes,
-#
-#	Command line tool support for Subversion — including svn, git-svn, and 
-#	related commands — is no longer provided by Xcode. (50266910)
-#
-# which results in,
-#
-#	svn: error: The subversion command line tools are no longer provided by 
-#	Xcode.
-#
-# when calling svn, even when subversion is installed via Homebrew and its path 
-# is available in PATH.
-#
-# NOTE: May be able to remove this after updating macOS.
-#
-#alias svn='/usr/local/bin/svn'
-
-## Override certain other aliases
-#
-alias cp=$(which cp)
-alias grep=$(which grep)
-
-## Constants
-#
-MAX_SIGNING_CHECK_ATTEMPTS=30
-NOTARIZATION_LOGFILE="notarization.log"
-RETRIGGER_SIGNING_FILE="retrigger.txt"
-SIGNING_CHECK_PERIOD=60 # in seconds
-SIGNING_LOCK_FILE="signing.lock"
-UNSIGNED_REPO_COPY="./unsigned"
-UNSIGNED_REPO_URL="https://issm.ess.uci.edu/svn/issm-binaries/mac/python/2/unsigned"
-
-## Functions
-#
-checkout_signed_repo_copy(){
-	echo "Checking out copy of repository for signed packages"
-
-	# NOTE: Get empty copy because we do not want to have to check out package 
-	#		from previous signing.
-	#
-	svn checkout \
-		--trust-server-cert \
-		--non-interactive \
-		--depth empty \
-		--username ${ISSM_BINARIES_REPO_USER} \
-		--password ${ISSM_BINARIES_REPO_PASS} \
-		${SIGNED_REPO_URL} \
-		${SIGNED_REPO_COPY} > /dev/null 2>&1
-}
-checkout_unsigned_repo_copy(){
-	echo "Checking out copy of repository for unsigned packages"
-	svn checkout \
-		--trust-server-cert \
-		--non-interactive \
-		--username ${ISSM_BINARIES_REPO_USER} \
-		--password ${ISSM_BINARIES_REPO_PASS} \
-		${UNSIGNED_REPO_URL} \
-		${UNSIGNED_REPO_COPY} > /dev/null 2>&1
-}
-validate_signed_repo_copy(){
-	# Validate copy of repository for signed binaries (e.g. 
-	# 'Check-out Strategy' was set to 'Use 'svn update' as much as possible'; 
-	# initial checkout failed)
-	if [[ ! -d ${SIGNED_REPO_COPY} || ! -d ${SIGNED_REPO_COPY}/.svn ]]; then
-		rm -rf ${SIGNED_REPO_COPY}
-		checkout_signed_repo_copy
-	fi
-}
-validate_unsigned_repo_copy(){
-	# Validate copy of repository for unsigned binaries (e.g. 
-	# 'Check-out Strategy' was set to 'Use 'svn update' as much as possible'; 
-	# initial checkout failed)
-	if [[ ! -d ${UNSIGNED_REPO_COPY} || ! -d ${UNSIGNED_REPO_COPY}/.svn ]]; then
-		rm -rf ${UNSIGNED_REPO_COPY}
-		checkout_unsigned_repo_copy
-	fi
-}
-
-## Parse options
-#
-if [ $# -gt 1 ]; then
-	echo "Can use only one option at a time"
-	exit 1
-fi
-
-retrigger_signing=0
-unlock=0
-
-if [ $# -eq 1 ]; then
-	case $1 in
-		-r|--resign)	retrigger_signing=1;	;;
-		-u|--unlock)	unlock=1;				;;
-		*) echo "Unknown parameter passed: $1"; exit 1	;;
-	esac
-fi
-
-validate_signed_repo_copy
-
-if [ ${unlock} -eq 1 ]; then
-	# Remove signing lock file from signed package repository so that a new 
-	# build can run
-	echo "Removing lock file from repository for signed packages"
-	svn update \
-		--trust-server-cert \
-		--non-interactive \
-		--username ${ISSM_BINARIES_REPO_USER} \
-		--password ${ISSM_BINARIES_REPO_PASS} \
-		${SIGNED_REPO_COPY}/${SIGNING_LOCK_FILE} > /dev/null 2>&1
-	svn delete ${SIGNED_REPO_COPY}/${SIGNING_LOCK_FILE} > /dev/null 2>&1
-	svn commit \
-		--trust-server-cert \
-		--non-interactive \
-		--username ${ISSM_BINARIES_REPO_USER} \
-		--password ${ISSM_BINARIES_REPO_PASS} \
-		--message "DEL: Removing lock file after failed build" ${SIGNED_REPO_COPY} > /dev/null 2>&1
-	svn cleanup ${SIGNED_REPO_COPY} > /dev/null 2>&1
-
-	echo "Remove -u/--unlock option from configuration and run again"
-	exit 1
-fi
-
-# If lock file exists, a signing build is still in process by JPL Cybersecurity
-svn update \
-	--trust-server-cert \
-	--non-interactive \
-	--username ${ISSM_BINARIES_REPO_USER} \
-	--password ${ISSM_BINARIES_REPO_PASS} \
-	${SIGNED_REPO_COPY}/${SIGNING_LOCK_FILE} > /dev/null 2>&1
-
-if [ -f ${SIGNED_REPO_COPY}/${SIGNING_LOCK_FILE} ]; then
-	echo "Previous signing job still in process by JPL Cybersecurity. Please try again later."
-	exit 1
-fi
-
-# Commit lock file to repository for signed packages
-echo "Committing lock file to repository for signed packages"
-touch ${SIGNED_REPO_COPY}/${SIGNING_LOCK_FILE}
-svn add ${SIGNED_REPO_COPY}/${SIGNING_LOCK_FILE} > /dev/null 2>&1
-svn commit \
-	--trust-server-cert \
-	--non-interactive \
-	--username ${ISSM_BINARIES_REPO_USER} \
-	--password ${ISSM_BINARIES_REPO_PASS} \
-	--message "ADD: New lock file" ${SIGNED_REPO_COPY} > /dev/null 2>&1
-
-# Check out copy of repository for unsigned packages
-validate_unsigned_repo_copy
-
-if [ ${retrigger_signing} -eq 0 ]; then
-	# Commit new compressed package to repository for unsigned binaries
-	echo "Committing package to repository for unsigned packages"
-	cp ${COMPRESSED_PKG} ${UNSIGNED_REPO_COPY}
-	svn add ${UNSIGNED_REPO_COPY}/${COMPRESSED_PKG} > /dev/null 2>&1
-	svn commit \
-		--trust-server-cert \
-		--non-interactive \
-		--username ${ISSM_BINARIES_REPO_USER} \
-		--password ${ISSM_BINARIES_REPO_PASS} \
-		--message "CHG: New unsigned package" ${UNSIGNED_REPO_COPY} > /dev/null 2>&1
-else
-	# NOTE: If notarize_only == 1, we commit a dummy file as we do not want to 
-	#		have to commit the entire compressed package again simply to 
-	#		retrigger the signing build on the remote JPL Cybersecurity Jenkins 
-	#		server.
-	#
-	echo "Attempting to sign existing package again"
-	echo $(date +'%Y-%m-%d-%H-%M-%S') > ${UNSIGNED_REPO_COPY}/${RETRIGGER_SIGNING_FILE} # Write datetime stamp to file to ensure modification is made
-	svn add ${UNSIGNED_REPO_COPY}/${RETRIGGER_SIGNING_FILE} > /dev/null 2>&1
-	svn commit \
-		--trust-server-cert \
-		--non-interactive \
-		--username ${ISSM_BINARIES_REPO_USER} \
-		--password ${ISSM_BINARIES_REPO_PASS} \
-		--message "ADD: Retriggering signing with same package (previous attempt failed)" ${UNSIGNED_REPO_COPY} > /dev/null 2>&1
-fi
-
-# Check status of signing
-echo "Checking progress of signing..."
-SIGNING_CHECK_ATTEMPT=0
-while [ ${SIGNING_CHECK_ATTEMPT} -lt ${MAX_SIGNING_CHECK_ATTEMPTS} ]; do
-	echo "...in progress still; checking again in ${SIGNING_CHECK_PERIOD} seconds"
-	sleep ${SIGNING_CHECK_PERIOD}
-	svn update \
-		--trust-server-cert \
-		--non-interactive \
-		--username ${ISSM_BINARIES_REPO_USER} \
-		--password ${ISSM_BINARIES_REPO_PASS} \
-		${SIGNED_REPO_COPY} > /dev/null 2>&1
-
-	if [ ! -f ${SIGNED_REPO_COPY}/${SIGNING_LOCK_FILE} ]; then
-		# Retrieve notarization lock file
-		svn update \
-			--trust-server-cert \
-			--non-interactive \
-			--username ${ISSM_BINARIES_REPO_USER} \
-			--password ${ISSM_BINARIES_REPO_PASS} \
-			${SIGNED_REPO_COPY}/${NOTARIZATION_LOGFILE}
-
-		# Check status
-		STATUS=$(grep 'Status:' ${SIGNED_REPO_COPY}/${NOTARIZATION_LOGFILE} | sed -e 's/[[:space:]]*Status: //')
-		if [[ "${STATUS}" == "success" ]]; then
-			echo "Notarization successful!"
-			break
-		else
-			echo "Notarization failed!"
-			echo "----------------------- Contents of notarization logfile -----------------------"
-			cat ${SIGNED_REPO_COPY}/${NOTARIZATION_LOGFILE}
-			echo "--------------------------------------------------------------------------------"
-
-			exit 1
-		fi
-	else
-		((++SIGNING_CHECK_ATTEMPT))
-	fi
-done
-
-if [ ! -f ${SIGNED_REPO_COPY}/${NOTARIZATION_LOGFILE} ]; then
-	echo "Signing timed out!"
-	exit 1
-fi
Index: /issm/trunk/packagers/mac/commit_for_signing-issm-mac-binaries-python-3.sh
===================================================================
--- /issm/trunk/packagers/mac/commit_for_signing-issm-mac-binaries-python-3.sh	(revision 28012)
+++ /issm/trunk/packagers/mac/commit_for_signing-issm-mac-binaries-python-3.sh	(revision 28013)
@@ -35,20 +35,13 @@
 shopt -s expand_aliases
 
-# From https://developer.apple.com/documentation/macos-release-notes/macos-catalina-10_15-release-notes,
-#
-#	Command line tool support for Subversion — including svn, git-svn, and 
-#	related commands — is no longer provided by Xcode. (50266910)
-#
-# which results in,
-#
-#	svn: error: The subversion command line tools are no longer provided by 
-#	Xcode.
-#
-# when calling svn, even when subversion is installed via Homebrew and its path 
-# is available in PATH.
-#
-# NOTE: May be able to remove this after updating macOS.
-#
-#alias svn='/usr/local/bin/svn'
+# NOTE: For some reason, calling svn from within the context of this script 
+#		gives,
+#
+#			svn: command not found
+#
+#		even though it is installed via Homebrew and available at the following 
+#		path.
+#
+alias svn='/usr/local/bin/svn'
 
 ## Override certain other aliases
@@ -234,6 +227,7 @@
 
 		# Check status
-		STATUS=$(grep 'Status:' ${SIGNED_REPO_COPY}/${NOTARIZATION_LOGFILE} | sed -e 's/[[:space:]]*Status: //')
-		if [[ "${STATUS}" == "success" ]]; then
+		STATUS=$(grep '"status": "Accepted"' ${SIGNED_REPO_COPY}/${NOTARIZATION_LOGFILE} | wc -l)
+
+		if [[ ${STATUS} -gt 0 ]]; then
 			echo "Notarization successful!"
 			break
Index: /issm/trunk/packagers/mac/complete-issm-mac-binaries-matlab.sh
===================================================================
--- /issm/trunk/packagers/mac/complete-issm-mac-binaries-matlab.sh	(revision 28012)
+++ /issm/trunk/packagers/mac/complete-issm-mac-binaries-matlab.sh	(revision 28013)
@@ -98,5 +98,5 @@
 # Build
 if [ ${build} -eq 1 ]; then
-	./jenkins/jenkins.sh ./jenkins/pine_island-mac-binaries-matlab
+	./jenkins/jenkins.sh ./jenkins/mac-intel-binaries-matlab
 
 	if [ $? -ne 0 ]; then
@@ -128,12 +128,21 @@
 fi
 
-# Transfer distributable package to ISSM Web site
-if [ ${transfer} -eq 1 ]; then
-	./packagers/mac/transfer-issm-mac-binaries.sh
+# NOTE: Because Mac build nodes are no longer directly connected to UCI 
+#		network and because remote access requires a VPN connection, we can 
+#		no longer transfer signed distributables via SSH. For now, there is 
+#		a cron job running every five minutes under user jenkins on 
+#		ross.ics.uci.edu that runs 
+#		/home/jenkins/bin/update-issm-mac-binaries.sh, which checks for 
+#		updated, signed distributables in the ISSM Binaries SVN repository
+#		and if they are available, copies them to the public directory.
+#
 
-	if [ $? -ne 0 ]; then
-		echo "Failure while transferring package to ISSM server"
-		exit 1
-	fi
-fi
+# # Transfer distributable package to ISSM Web site
+# if [ ${transfer} -eq 1 ]; then
+# 	./packagers/mac/transfer-issm-mac-binaries.sh
 
+# 	if [ $? -ne 0 ]; then 
+# 		exit 1
+# 	fi
+# fi
+
Index: sm/trunk/packagers/mac/complete-issm-mac-binaries-python-2.sh
===================================================================
--- /issm/trunk/packagers/mac/complete-issm-mac-binaries-python-2.sh	(revision 28012)
+++ 	(revision )
@@ -1,135 +1,0 @@
-#!/bin/bash
-
-################################################################################
-# Wrapper script to build, package, send for signing, and transfer to ISSM Web 
-# site ISSM distributable package for macOS with Python 2 API.
-#
-# Normally, we would put this directly into the project configuration under 
-# 'Build' -> 'Execute shell', but because it is a bit more involved, it is a 
-# good idea to version it.
-#
-# When no failures/errors occur, performs the following:
-# - Builds ISSM according to configuration.
-# - Packages executables and libraries.
-# - Runs test suite against package.
-# - Commits compressed package to repository to be signed by JPL Cybersecurity.
-# - Retrieves signed package and transmits it to ISSM Web site for 
-#	distribution.
-#
-# Options:
-# -b/--skipbuild		Skip ISSM compilation.
-# -r/--resign			Skip ISSM compilation and packaging. Use to retrigger 
-#						signing/notarization if it fails but build and package 
-#						are valid.
-# -s/--skiptests		Skip ISSM compilation and testing during packaging 
-#						step. Use if packaging fails for some reason but build 
-#						is valid.
-# -t/--transferonly		Transfer package to ISSM Web site only. Use if transfer 
-#						fails for some reason to skip building, packaging, and 
-#						signing.
-# -u/--unlock			Remove lock file from signed package repository. Use if 
-#						build is aborted to allow for subsequent fresh build.
-#
-# Debugging:
-# - Relies on a very tight handshake with project on remote JPL Cybersecurity 
-#	Jenkins server. Debugging may be performed locally by running,
-#
-#		packagers/mac/sign-issm-mac-binaries-python-2.sh
-#
-#	with "AD_IDENTITY", "AD_USERNAME", and "ASC_PROVIDER" hardcoded to Apple 
-#	Developer credentials (make sure to also set keychain password in 
-#	"ALTOOL_PASSWORD").
-# - Removing stdout/stderr redirections to null device (> /dev/null 2>&1) can 
-#	help debug potential SVN issues.
-#
-# NOTE:
-# - Use only *one* of the above options at a time, and make sure it is removed 
-#	again after a single run.
-# - Builds will fail when any of the above options are used on a clean 
-#	workspace. For example, if 'Source Code Management' -> 'Check-out Strategy' 
-#	select menu is set to "Always check out a fresh copy".
-# - Assumes that "ISSM_BINARIES_USER" and "ISSM_BINARIES_PASS" are set up in 
-#	the 'Bindings' section under a 'Username and password (separated)' binding 
-#	(requires 'Credentials Binding Plugin') with 'Credentials' select menu set 
-#	to "jenkins/****** (SVN repository for ISSM binaries)".
-################################################################################
-
-## Constants
-#
-PKG="ISSM-macOS-Python-2" # Name of directory to copy distributable files to
-SIGNED_REPO_COPY="./signed"
-SIGNED_REPO_URL="https://issm.ess.uci.edu/svn/issm-binaries/mac/python/2/signed"
-
-COMPRESSED_PKG="${PKG}.zip"
-
-## Environment
-#
-export COMPRESSED_PKG
-export PKG
-export SIGNED_REPO_COPY
-export SIGNED_REPO_URL
-
-## Parse options
-#
-if [ $# -gt 1 ]; then
-	echo "Can use only one option at a time"
-	exit 1
-fi
-
-# NOTE: We could do this with binary switching (i.e. 0011 to sign and transfer, 
-#		but the following is self-documenting).
-#
-build=1
-package=1
-sign=1
-transfer=1
-
-if [ $# -eq 1 ]; then
-	case $1 in
-		-b|--skipbuild)		build=0;							shift	;;
-		-r|--resign)		build=0;	package=0;						;;
-		-s|--skiptests)		build=0;									;;
-		-t|--transferonly)	build=0;	package=0;	sign=0;				;;
-		-u|--unlock)		build=0;	package=0;	transfer=0;			;;
-		*) echo "Unknown parameter passed: $1"; exit 1 					;;
-	esac
-fi
-
-# Build
-if [ ${build} -eq 1 ]; then
-	./jenkins/jenkins.sh ./jenkins/pine_island-mac-binaries-python-2
-
-	if [ $? -ne 0 ]; then 
-		exit 1
-	fi
-fi
-
-# Package
-if [ ${package} -eq 1 ]; then
-	./packagers/mac/package-issm-mac-binaries-python-2.sh $1
-
-	if [ $? -ne 0 ]; then 
-		exit 1
-	fi
-
-	shift # Clear $1 so that it is not passed to commit_for_signing script
-fi
-
-# Commit for signing
-if [ ${sign} -eq 1 ]; then
-	./packagers/mac/commit_for_signing-issm-mac-binaries-python-2.sh $1
-
-	if [ $? -ne 0 ]; then 
-		exit 1
-	fi
-fi
-
-# Transfer distributable package to ISSM Web site
-if [ ${transfer} -eq 1 ]; then
-	./packagers/mac/transfer-issm-mac-binaries.sh
-
-	if [ $? -ne 0 ]; then 
-		exit 1
-	fi
-fi
-
Index: /issm/trunk/packagers/mac/complete-issm-mac-binaries-python-3.sh
===================================================================
--- /issm/trunk/packagers/mac/complete-issm-mac-binaries-python-3.sh	(revision 28012)
+++ /issm/trunk/packagers/mac/complete-issm-mac-binaries-python-3.sh	(revision 28013)
@@ -98,5 +98,5 @@
 # Build
 if [ ${build} -eq 1 ]; then
-	./jenkins/jenkins.sh ./jenkins/pine_island-mac-binaries-python-3
+	./jenkins/jenkins.sh ./jenkins/mac-intel-binaries-python-3
 
 	if [ $? -ne 0 ]; then 
@@ -125,11 +125,21 @@
 fi
 
-# Transfer distributable package to ISSM Web site
-if [ ${transfer} -eq 1 ]; then
-	./packagers/mac/transfer-issm-mac-binaries.sh
+# NOTE: Because Mac build nodes are no longer directly connected to UCI 
+#		network and because remote access requires a VPN connection, we can 
+#		no longer transfer signed distributables via SSH. For now, there is 
+#		a cron job running every five minutes under user jenkins on 
+#		ross.ics.uci.edu that runs 
+#		/home/jenkins/bin/update-issm-mac-binaries.sh, which checks for 
+#		updated, signed distributables in the ISSM Binaries SVN repository
+#		and if they are available, copies them to the public directory.
+#
 
-	if [ $? -ne 0 ]; then 
-		exit 1
-	fi
-fi
+# # Transfer distributable package to ISSM Web site
+# if [ ${transfer} -eq 1 ]; then
+# 	./packagers/mac/transfer-issm-mac-binaries.sh
 
+# 	if [ $? -ne 0 ]; then 
+# 		exit 1
+# 	fi
+# fi
+
Index: /issm/trunk/packagers/mac/package-issm-mac-binaries-matlab.sh
===================================================================
--- /issm/trunk/packagers/mac/package-issm-mac-binaries-matlab.sh	(revision 28012)
+++ /issm/trunk/packagers/mac/package-issm-mac-binaries-matlab.sh	(revision 28013)
@@ -23,20 +23,13 @@
 shopt -s expand_aliases
 
-# From https://developer.apple.com/documentation/macos-release-notes/macos-catalina-10_15-release-notes,
+# NOTE: For some reason, calling svn from within the context of this script 
+#		gives,
 #
-#	Command line tool support for Subversion — including svn, git-svn, and 
-#	related commands — is no longer provided by Xcode. (50266910)
+#			svn: command not found
 #
-# which results in,
+#		even though it is installed via Homebrew and available at the following 
+#		path.
 #
-#	svn: error: The subversion command line tools are no longer provided by 
-#	Xcode.
-#
-# when calling svn, even when subversion is installed via Homebrew and its path 
-# is available in PATH.
-#
-# NOTE: May be able to remove this after updating macOS.
-#
-#alias svn='/usr/local/bin/svn'
+alias svn='/usr/local/bin/svn'
 
 ## Override certain other aliases
@@ -47,6 +40,6 @@
 ## Constants
 #
-MATLAB_NROPTIONS="'benchmark','all','exclude',[125,126,234,235,418,420,435,444,445,701,702,703,1101,1102,1103,1104,1105,1106,1107,1108,1109,1110,1201,1202,1203,1204,1205,1206,1207,1208,1301,1302,1303,1304,1401,1402,1601,1602,2002,2003,2004,2006,2007,2008,2010,2011,2021,2051,2052,2053,2084,2085,2090,2101,2424,2425,3001:3200,3201,3202,3300,3480,3481,4001,4002,4003]" # NOTE: Combination of test suites from basic, Dakota, and Solid Earth builds, with tests that require a restart and those that require the JVM excluded
-MATLAB_PATH="/Applications/MATLAB_R2018a.app"
+MATLAB_NROPTIONS="'benchmark','all','exclude',[125,126,129,234,235,418,420,435,444,445,701,702,703,1101,1102,1103,1104,1105,1106,1107,1108,1109,1110,1201,1202,1203,1204,1205,1206,1207,1208,1301,1302,1303,1304,1401,1402,1601,1602,2002,2003,2004,2006,2007,2008,2010,2011,2012,2013,2020,2021,2051,2052,2053,2084,2085,2090,2091,2092,2101,2424,2425,3001:3300,3480,3481,4001:4100]" # NOTE: Combination of test suites from basic, Dakota, and Solid Earth builds, with tests that require a restart and those that require the JVM excluded
+MATLAB_PATH="/Applications/MATLAB_R2022b.app"
 
 ## Environment
@@ -154,5 +147,5 @@
 
 	# Check that MATLAB did not exit in error
-	matlabExitedInError=`grep -c -E "Activation cannot proceed|Error in matlab_run|Illegal use of reserved keyword" matlab.log`
+	matlabExitedInError=`grep -c -E "Activation cannot proceed|Error in|Illegal|Invalid MEX-file|license|Warning: Name is nonexistent or not a directory" matlab.log`
 
 	if [ ${matlabExitedInError} -ne 0 ]; then
Index: sm/trunk/packagers/mac/package-issm-mac-binaries-python-2.sh
===================================================================
--- /issm/trunk/packagers/mac/package-issm-mac-binaries-python-2.sh	(revision 28012)
+++ 	(revision )
@@ -1,199 +1,0 @@
-#!/bin/bash
-
-################################################################################
-# Packages and tests ISSM distributable package for macOS with Python 2 API.
-#
-# Options:
-# -s/--skiptests		Skip testing during packaging Use if packaging fails 
-#						for some reason but build is valid.
-#
-# NOTE:
-# - Assumes that the following constants are defined,
-#
-#		COMPRESSED_PKG
-#		ISSM_DIR
-#		PKG
-#
-# See also:
-# - packagers/mac/complete-issm-mac-binaries-python-2.sh
-# - packagers/mac/sign-issm-mac-binaries-python-2.sh
-################################################################################
-
-# Expand aliases within the context of this script
-shopt -s expand_aliases
-
-# From https://developer.apple.com/documentation/macos-release-notes/macos-catalina-10_15-release-notes,
-#
-#	Command line tool support for Subversion — including svn, git-svn, and 
-#	related commands — is no longer provided by Xcode. (50266910)
-#
-# which results in,
-#
-#	svn: error: The subversion command line tools are no longer provided by 
-#	Xcode.
-#
-# when calling svn, even when subversion is installed via Homebrew and its path 
-# is available in PATH.
-#
-# NOTE: May be able to remove this after updating macOS.
-#
-#alias svn='/usr/local/bin/svn'
-
-## Override certain other aliases
-#
-alias grep=$(which grep)
-
-## Constants
-#
-PYTHON_NROPTIONS="--benchmark all --exclude 125 126 234 235 418 420 435 444 445 701 702 703 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1201 1202 1203 1204 1205 1206 1207 1208 1301 1302 1303 1304 1401 1402 1601 1602 2002 2003 2004 2005 2006 2007 2008 2010 2011 2021 2051 2052 2053 2084 2085 2090 2101 2424 2425 3001:3200 3201 3202 3300 3480 3481 4001 4002 4003" # NOTE: Combination of test suites from basic, Dakota, and Solid Earth builds, with tests that require a restart and those that require the JVM excluded
-
-## Environment
-#
-export PATH="${ISSM_DIR}/bin:$(getconf PATH)" # Ensure that we pick up binaries from 'bin' directory rather than 'externalpackages'
-
-## Parse options
-#
-if [ $# -gt 1 ]; then
-	echo "Can use only one option at a time"
-	exit 1
-fi
-
-skip_tests=0
-
-if [ $# -eq 1 ]; then
-	case $1 in
-		-s|--skiptests)	skip_tests=1;					;;
-		*) echo "Unknown parameter passed: $1"; exit 1	;;
-	esac
-fi
-
-# Clean up from previous packaging
-echo "Cleaning up existing assets"
-cd ${ISSM_DIR}
-rm -rf ${PKG} ${COMPRESSED_PKG}
-mkdir ${PKG}
-
-# Add required binaries and libraries to package and modify them where needed
-cd ${ISSM_DIR}/bin
-
-echo "Modify generic"
-cat generic_static.py | sed -e "s/generic_static/generic/g" > generic.py
-
-echo "Moving MPICH binaries to bin/"
-if [ -f ${ISSM_DIR}/externalpackages/petsc/install/bin/mpiexec ]; then
-	cp ${ISSM_DIR}/externalpackages/petsc/install/bin/mpiexec .
-	cp ${ISSM_DIR}/externalpackages/petsc/install/bin/hydra_pmi_proxy .
-elif [ -f ${ISSM_DIR}/externalpackages/mpich/install/bin/mpiexec ]; then
-	cp ${ISSM_DIR}/externalpackages/mpich/install/bin/mpiexec .
-	cp ${ISSM_DIR}/externalpackages/mpich/install/bin/hydra_pmi_proxy .
-else
-	echo "MPICH not found"
-	exit 1
-fi
-
-echo "Moving GDAL binaries to bin/"
-if [ -f ${ISSM_DIR}/externalpackages/gdal/install/bin/gdal-config ]; then
-	cp ${ISSM_DIR}/externalpackages/gdal/install/bin/gdalsrsinfo .
-	cp ${ISSM_DIR}/externalpackages/gdal/install/bin/gdaltransform .
-else
-	echo "GDAL not found"
-	exit 1
-fi
-
-echo "Moving Gmsh binaries to bin/"
-if [ -f ${ISSM_DIR}/externalpackages/gmsh/install/bin/gmsh ]; then
-	cp ${ISSM_DIR}/externalpackages/gmsh/install/bin/gmsh .
-else
-	echo "Gmsh not found"
-	exit 1
-fi
-
-echo "Moving GMT binaries to bin/"
-if [ -f ${ISSM_DIR}/externalpackages/gmt/install/bin/gmt-config ]; then
-	cp ${ISSM_DIR}/externalpackages/gmt/install/bin/gmt .
-	cp ${ISSM_DIR}/externalpackages/gmt/install/bin/gmtselect .
-else
-	echo "GMT not found"
-	exit 1
-fi
-
-echo "Moving GSHHG assets to share/"
-if [ -d ${ISSM_DIR}/externalpackages/gmt/install/share/coast ]; then
-	mkdir ${ISSM_DIR}/share 2> /dev/null
-	cp -R ${ISSM_DIR}/externalpackages/gmt/install/share/coast ${ISSM_DIR}/share
-else
-	echo "GSHHG not found"
-	exit 1
-fi
-
-echo "Moving PROJ assets to share/"
-if [ -d ${ISSM_DIR}/externalpackages/proj/install/share/proj ]; then
-	mkdir ${ISSM_DIR}/share 2> /dev/null
-	cp -R ${ISSM_DIR}/externalpackages/proj/install/share/proj ${ISSM_DIR}/share
-else
-	echo "PROJ not found"
-	exit 1
-fi
-
-# Run tests
-if [ ${skip_tests} -eq 0 ]; then
-	echo "Running tests"
-	cd ${ISSM_DIR}/test/NightlyRun
-	rm python.log 2> /dev/null
-
-	# Set Python environment
-	export PYTHONPATH="${ISSM_DIR}/src/m/dev"
-	export PYTHONSTARTUP="${PYTHONPATH}/devpath.py"
-	export PYTHONUNBUFFERED=1 # We don't want Python to buffer output, otherwise issm.exe output is not captured
-
-	# Run tests, redirecting output to logfile and suppressing output to console
-	./runme.py ${PYTHON_NROPTIONS} &> python.log 2>&1
-
-	# Check that Python did not exit in error
-	pythonExitCode=`echo $?`
-	pythonExitedInError=`grep -c -E "Error|Standard exception|Traceback|bad interpreter" python.log`
-
-	if [[ ${pythonExitCode} -ne 0 || ${pythonExitedInError} -ne 0 ]]; then
-		echo "----------Python exited in error!----------"
-		cat python.log
-		echo "-----------End of python.log-----------"
-
-		# Clean up execution directory
-		rm -rf ${ISSM_DIR}/execution/*
-
-		exit 1
-	fi
-
-	# Check that all tests passed
-	sed -i '' "/FAILED TO establish the default connection to the WindowServer/d" python.log # First, need to remove WindowServer error message
-	numTestsFailed=`grep -c -E "FAILED|ERROR" python.log`
-
-	if [ ${numTestsFailed} -ne 0 ]; then
-		echo "One or more tests FAILED"
-		cat python.log
-		exit 1
-	else
-		echo "All tests PASSED"
-	fi
-else
-	echo "Skipping tests"
-fi
-
-# Create package
-cd ${ISSM_DIR}
-svn cleanup --remove-ignored --remove-unversioned test # Clean up test directory (before copying to package)
-echo "Copying assets to package: ${PKG}"
-cp -rf bin examples lib scripts share test ${PKG}
-mkdir ${PKG}/execution
-cp packagers/mac/issm-executable_entitlements.plist ${PKG}/bin/entitlements.plist
-${ISSM_DIR}/scripts/py_to_pyc.sh ${PKG}/bin # Compile Python source files
-echo "Cleaning up unneeded/unwanted files"
-rm -f ${PKG}/bin/*.py # Remove all Python scripts
-rm -f ${PKG}/bin/generic_static.* # Remove static versions of generic cluster classes
-rm -f ${PKG}/lib/*.a # Remove static libraries from package
-rm -f ${PKG}/lib/*.la # Remove libtool libraries from package
-rm -rf ${PKG}/test/SandBox # Remove testing sandbox from package
-
-# Compress package
-echo "Compressing package"
-ditto -ck --sequesterRsrc --keepParent ${PKG} ${COMPRESSED_PKG}
Index: /issm/trunk/packagers/mac/package-issm-mac-binaries-python-3.sh
===================================================================
--- /issm/trunk/packagers/mac/package-issm-mac-binaries-python-3.sh	(revision 28012)
+++ /issm/trunk/packagers/mac/package-issm-mac-binaries-python-3.sh	(revision 28013)
@@ -23,20 +23,13 @@
 shopt -s expand_aliases
 
-# From https://developer.apple.com/documentation/macos-release-notes/macos-catalina-10_15-release-notes,
+# NOTE: For some reason, calling svn from within the context of this script 
+#		gives,
 #
-#	Command line tool support for Subversion — including svn, git-svn, and 
-#	related commands — is no longer provided by Xcode. (50266910)
+#			svn: command not found
 #
-# which results in,
+#		even though it is installed via Homebrew and available at the following 
+#		path.
 #
-#	svn: error: The subversion command line tools are no longer provided by 
-#	Xcode.
-#
-# when calling svn, even when subversion is installed via Homebrew and its path 
-# is available in PATH.
-#
-# NOTE: May be able to remove this after updating macOS.
-#
-#alias svn='/usr/local/bin/svn'
+alias svn='/usr/local/bin/svn'
 
 ## Override certain other aliases
@@ -46,5 +39,5 @@
 ## Constants
 #
-PYTHON_NROPTIONS="--benchmark all --exclude 125 126 234 235 418 420 435 444 445 701 702 703 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1201 1202 1203 1204 1205 1206 1207 1208 1301 1302 1303 1304 1401 1402 1601 1602 2002 2003 2004 2005 2006 2007 2008 2010 2011 2021 2051 2052 2053 2084 2085 2090 2101 2424 2425 3001:3200 3201 3202 3300 3480 3481 4001 4002 4003" # NOTE: Combination of test suites from basic, Dakota, and Solid Earth builds, with tests that require a restart and those that require the JVM excluded
+PYTHON_NROPTIONS="--benchmark all --exclude 125 126 129 234 235 418 420 435 444 445 701 702 703 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1201 1202 1203 1204 1205 1206 1207 1208 1301 1302 1303 1304 1401 1402 1601 1602 2002 2003 2004 2005 2006 2007 2008 2010 2011 2012 2013 2020 2021 2051 2052 2053 2084 2085 2090 2091 2092 2101 2424 2425 3001:3300 3480 3481 4001:4100" # NOTE: Combination of test suites from basic, Dakota, and Solid Earth builds, with tests that require a restart and those that require the JVM excluded
 
 ## Environment
@@ -147,7 +140,4 @@
 	export PYTHONUNBUFFERED=1 # We don't want Python to buffer output, otherwise issm.exe output is not captured
 
-	# Ensure that runme.py uses Python 3 interpreter
-	sed -i '' "s|/usr/bin/env python|/usr/local/bin/python3|g" ./runme.py
-
 	# Run tests, redirecting output to logfile and suppressing output to console
 	./runme.py ${PYTHON_NROPTIONS} &> python.log 2>&1
@@ -155,5 +145,5 @@
 	# Check that Python did not exit in error
 	pythonExitCode=`echo $?`
-	pythonExitedInError=`grep -c -E "Error|Standard exception|Traceback|bad interpreter" python.log`
+	pythonExitedInError=`grep -c -E "Error|No such file or directory|Permission denied|Standard exception|Traceback|bad interpreter|syntax error" python.log`
 
 	if [[ ${pythonExitCode} -ne 0 || ${pythonExitedInError} -ne 0 ]]; then
@@ -190,7 +180,7 @@
 mkdir ${PKG}/execution
 cp packagers/mac/issm-executable_entitlements.plist ${PKG}/bin/entitlements.plist
-${ISSM_DIR}/scripts/py_to_pyc.sh ${PKG}/bin # Compile Python source files
+# ${ISSM_DIR}/scripts/py_to_pyc.sh ${PKG}/bin # Compile Python source files
 echo "Cleaning up unneeded/unwanted files"
-rm -f ${PKG}/bin/*.py # Remove all Python scripts
+# rm -f ${PKG}/bin/*.py # Remove all Python scripts
 rm -f ${PKG}/bin/generic_static.* # Remove static versions of generic cluster classes
 rm -f ${PKG}/lib/*.a # Remove static libraries from package
Index: /issm/trunk/packagers/mac/sign-issm-mac-binaries-matlab.sh
===================================================================
--- /issm/trunk/packagers/mac/sign-issm-mac-binaries-matlab.sh	(revision 28012)
+++ /issm/trunk/packagers/mac/sign-issm-mac-binaries-matlab.sh	(revision 28013)
@@ -8,12 +8,12 @@
 # builds.
 #
-# In order to replicate the requried Jenkins project configuration:
+# In order to replicate the required Jenkins project configuration:
 # - First, navigate to 'Manage Jenkins' -> 'Manage Plugins' and install the 
 #	'Credentials Bindings Plugin' if it is not already installed.
-# - Contact one of the members of the ISSM development team for crendentials 
-#	for the ISSM binaries repository (mention that the credentials are stored 
-#	in ISSM-Infrastructure.pdf).
+# - Contact one of the members of the ISSM development team for credentials for 
+#	the ISSM binaries repository (mention that the credentials are stored in 
+#	ISSM-Infrastructure.pdf).
 # - Navigate to 'Manage Jenkins' -> 'Manage Credentials' -> <domain> -> 
-#	'Add Credentials' and enter the crendentials from above.
+#	'Add Credentials' and enter the credentials from above.
 # - From the 'Dashboard', select 'New Item' -> 'Freestyle project'.
 # - Under 'Source Code Management', select 'Subversion'.
@@ -24,5 +24,5 @@
 #		- The 'Local module directory' text field should be set to the same 
 #		value as the constant UNSIGNED_REPO_COPY (set below to './unsigned').
-# - Under 'Build Trigggers', check the box for 'Poll SCM' and set the 
+# - Under 'Build Triggers', check the box for 'Poll SCM' and set the 
 #	'Schedule' text area to "H/5 * * * *".
 # - Under 'Build Environment', check the box for 'Use secret text(s) or 
@@ -52,20 +52,13 @@
 shopt -s expand_aliases
 
-# From https://developer.apple.com/documentation/macos-release-notes/macos-catalina-10_15-release-notes,
-#
-#	Command line tool support for Subversion — including svn, git-svn, and 
-#	related commands — is no longer provided by Xcode. (50266910)
-#
-# which results in,
-#
-#	svn: error: The subversion command line tools are no longer provided by 
-#	Xcode.
-#
-# when calling svn, even when subversion is installed via Homebrew and its path 
-# is available in PATH.
-#
-# NOTE: May be able to remove this after updating macOS.
-#
-#alias svn='/usr/local/bin/svn'
+# NOTE: For some reason, calling svn from within the context of this script 
+#		gives,
+#
+#			svn: command not found
+#
+#		even though it is installed via Homebrew and available at the following 
+#		path.
+#
+alias svn='/usr/local/bin/svn'
 
 ## Override certain other aliases
@@ -87,5 +80,4 @@
 PASSWORD=${ISSM_BINARIES_PASS}
 PKG="ISSM-macOS-MATLAB"
-PRIMARY_BUNDLE_ID="gov.nasa.jpl.issm.matlab"
 SIGNED_REPO_COPY="./signed"
 SIGNED_REPO_URL="https://issm.ess.uci.edu/svn/issm-binaries/mac/matlab/signed"
@@ -131,4 +123,5 @@
 	find ${PKG}/bin -type f -name *.exe; \
 	find ${PKG}/lib -type f -name *.mexmaci64; \
+	find ${PKG}/test -type f -name *.pkg; \
 )
 
@@ -157,16 +150,10 @@
 # Submit compressed package for notarization
 echo "Submitting signed package to Apple for notarization"
-xcrun altool --notarize-app --primary-bundle-id ${PRIMARY_BUNDLE_ID} --username ${AD_USERNAME} --password ${ALTOOL_PASSWORD} --asc-provider ${ASC_PROVIDER} --file ${COMPRESSED_PKG} &> ${NOTARIZATION_LOGFILE_PATH}/${NOTARIZATION_LOGFILE}
-
-# Sleep until notarization request response is received
-echo "Waiting for notarization request response"
-while [[ ! -f ${NOTARIZATION_LOGFILE_PATH}/${NOTARIZATION_LOGFILE} || ! -z $(find ${NOTARIZATION_LOGFILE_PATH} -empty -name ${NOTARIZATION_LOGFILE}) ]]; do
-	sleep 30
-done
+xcrun notarytool submit ${COMPRESSED_PKG} --apple-id "$AD_USERNAME" --team-id "$TEAM_ID" --password "$NOTARY_PASSWORD" --wait &> ${NOTARIZATION_LOGFILE_PATH}/${NOTARIZATION_LOGFILE}
 
 echo "Notarization request response received"
 
 # Check if UUID exists in response
-HAS_UUID=$(grep 'RequestUUID = ' ${NOTARIZATION_LOGFILE_PATH}/${NOTARIZATION_LOGFILE}) # NOTE: Checking for "RequestUUID = " because "RequestUUID" shows up in some error messages
+HAS_UUID=$(grep 'id: ' ${NOTARIZATION_LOGFILE_PATH}/${NOTARIZATION_LOGFILE})
 if [ -z "${HAS_UUID}" ]; then
 	echo "Notarization failed!"
@@ -182,5 +169,5 @@
 
 # Get UUID from notarization request response
-UUID=$(echo ${HAS_UUID} | sed 's/[[:space:]]*RequestUUID = //')
+UUID=$(echo ${HAS_UUID} | sed 's/[[:space:]]*id: //')
 echo "UUID: ${UUID}" 
 
@@ -194,51 +181,21 @@
 echo "Checking notarization status"
 SUCCESS=0
-for ATTEMPT in $(seq 1 ${NOTARIZATION_CHECK_ATTEMPTS}); do
-	echo "    Attempt #${ATTEMPT}..."
-	xcrun altool --notarization-info ${UUID} --username ${AD_USERNAME} --password ${ALTOOL_PASSWORD} &> ${NOTARIZATION_LOGFILE_PATH}/${NOTARIZATION_LOGFILE}
-	if [[ -f ${NOTARIZATION_LOGFILE_PATH}/${NOTARIZATION_LOGFILE} && -z $(find ${NOTARIZATION_LOGFILE_PATH} -empty -name ${NOTARIZATION_LOGFILE}) ]]; then
-
-		# First, check if there is an error
-		ERROR_CHECK=$(grep 'Error' ${NOTARIZATION_LOGFILE_PATH}/${NOTARIZATION_LOGFILE})
-		if [ ! -z "${ERROR_CHECK}" ]; then
-			break
-		fi
-
-		# No error, so check status
-		STATUS=$(grep 'Status:' ${NOTARIZATION_LOGFILE_PATH}/${NOTARIZATION_LOGFILE} | sed -e 's/[[:space:]]*Status: //')
-		if [[ "${STATUS}" == "success" ]]; then
-			# Staple notarization to all elements of package that were previously signed
-			#xcrun stapler staple ${EXECUTABLES} # NOTE: Fails with "Stapler is incapable of working with MATLAB Mex files."
-
-			# Validate stapling of notarization
-			#xcrun stapler validation ${EXECUTABLES} # NOTE: Skipping notarization stapling validation because this is not a true package nor app
-
-			# Compress signed and notarized package
-			ditto -ck --sequesterRsrc --keepParent ${PKG} ${COMPRESSED_PKG}
-
-			# Set flag indicating notarization was successful
-			SUCCESS=1
-
-			break
-		elif [[ "${STATUS}" == "in progress" ]]; then
-			echo "    ...in progress still; checking again in ${NOTARIZATION_CHECK_PERIOD} seconds."
-			sleep ${NOTARIZATION_CHECK_PERIOD}
-		elif [[ "${STATUS}" == "invalid" ]]; then
-			break
-		fi
-	else
-		if [ ${ATTEMPT} -lt ${NOTARIZATION_CHECK_ATTEMPTS} ]; then
-			echo "    ...not ready yet; checking again in ${NOTARIZATION_CHECK_PERIOD} seconds."
-			sleep ${NOTARIZATION_CHECK_PERIOD}
-		else
-			echo "    ...maximum attempts reached, but no response, or something else went wrong."
-			echo "    If contents of notarization status check logfile appear to be valid, increase NOTARIZATION_CHECK_ATTEMPTS and run again."
-			break
-		fi
-	fi
-done
-
-if [ ${SUCCESS} -eq 1 ]; then
+xcrun notarytool log ${UUID} --apple-id "$AD_USERNAME" --team-id "$TEAM_ID" --password "$NOTARY_PASSWORD" &> ${NOTARIZATION_LOGFILE_PATH}/${NOTARIZATION_LOGFILE}
+STATUS=$(grep 'status: Accepted' ${NOTARIZATION_LOGFILE_PATH}/${NOTARIZATION_LOGFILE} | wc -l)
+
+if [[ ${STATUS} -gt 0 ]]; then
+	# Staple notarization to all elements of package that were previously signed
+	#xcrun stapler staple ${THIRD_PARTY_BINS} # NOTE: Fails with "Stapler is incapable of working with MATLAB Mex files."
+
+	# Validate stapling of notarization
+	#xcrun stapler validation ${THIRD_PARTY_BINS} # NOTE: Skipping notarization stapling validation because this is not a true package nor app
+
+	# Compress signed and notarized package
+	ditto -ck --sequesterRsrc --keepParent ${PKG} ${COMPRESSED_PKG}
+
 	echo "Notarization successful!"
+
+	# Set flag indicating notarization was successful
+	SUCCESS=1
 else
 	echo "Notarization failed!"
Index: sm/trunk/packagers/mac/sign-issm-mac-binaries-python-2.sh
===================================================================
--- /issm/trunk/packagers/mac/sign-issm-mac-binaries-python-2.sh	(revision 28012)
+++ 	(revision )
@@ -1,348 +1,0 @@
-#!/bin/bash
-
-################################################################################
-# Intended to be run in the context of a Jenkins project on a JPL 
-# Cybersecurity server for signing macOS applications. Polls SCM of the 
-# Subversion repository hosted at 
-# https://issm.ess.uci.edu/svn/issm-binaries/mac/python/2/unsigned to trigger 
-# new builds.
-#
-# In order to replicate the requried Jenkins project configuration:
-# - First, navigate to 'Manage Jenkins' -> 'Manage Plugins' and install the 
-#	'Credentials Bindings Plugin' if it is not already installed.
-# - Contact one of the members of the ISSM development team for crendentials 
-#	for the ISSM binaries repository (mention that the credentials are stored 
-#	in ISSM-Infrastructure.pdf).
-# - Navigate to 'Manage Jenkins' -> 'Manage Credentials' -> <domain> -> 
-#	'Add Credentials' and enter the crendentials from above.
-# - From the 'Dashboard', select 'New Item' -> 'Freestyle project'.
-# - Under 'Source Code Management', select 'Subversion'.
-#		- The 'Repository URL' text field should be set to 
-#		"https://issm.ess.uci.edu/svn/issm-binaries/mac/matlab/unsigned".
-#		- The 'Credentials' select menu should be set to the new credentials 
-#		created previously.
-#		- The 'Local module directory' text field should be set to the same 
-#		value as the constant UNSIGNED_REPO_COPY (set below to './unsigned').
-# - Under 'Build Trigggers', check the box for 'Poll SCM' and set the 
-#	'Schedule' text area to "H/5 * * * *".
-# - Under 'Build Environment', check the box for 'Use secret text(s) or 
-#	file(s)', then under 'Bindings' click the 'Add...' button and select 
-#	'Username and password (separated)'.
-#		- Set 'Username Variable' to "ISSM_BINARIES_USER".
-#		- Set 'Password Variable' to "ISSM_BINARIES_PASS".
-# - Under 'Credentials', select the same, new credentials that created 
-#	previously.
-# - The contents of this script can be copied/pasted directly into the ‘Build' 
-#	-> 'Execute Shell' -> ‘Command' textarea of the project configuration (or 
-#	you can simply store the script on disk and call it from there).
-# - Make sure to click the 'Save' button.
-#
-# Current point of contact at JPL Cybersecurity:
-#	Alex Coward, alexander.g.coward@jpl.nasa.gov
-#
-# NOTE:
-# - Assumes that "ISSM_BINARIES_USER" and "ISSM_BINARIES_PASS" are set up in 
-#	the 'Bindings' section under a 'Username and password (separated)' binding 
-#	(requires 'Credentials Binding Plugin').
-# - For local debugging, the aforementioned credentials can be hardcoded into 
-#	the 'USERNAME' and 'PASSWORD' constants below.
-################################################################################
-
-# Expand aliases within the context of this script
-shopt -s expand_aliases
-
-# From https://developer.apple.com/documentation/macos-release-notes/macos-catalina-10_15-release-notes,
-#
-#	Command line tool support for Subversion — including svn, git-svn, and 
-#	related commands — is no longer provided by Xcode. (50266910)
-#
-# which results in,
-#
-#	svn: error: The subversion command line tools are no longer provided by 
-#	Xcode.
-#
-# when calling svn, even when subversion is installed via Homebrew and its path 
-# is available in PATH.
-#
-# NOTE: May be able to remove this after updating macOS.
-#
-#alias svn='/usr/local/bin/svn'
-
-## Override certain other aliases
-#
-alias cp=$(which cp)
-alias grep=$(which grep)
-
-## Constants
-#
-AD_IDENTITY="**********" # Apple Developer identity
-AD_USERNAME="**********" # Apple Developer username
-ALTOOL_PASSWORD="@keychain:**********" # altool password (assumed to be stored in keychain)
-ASC_PROVIDER="**********"
-MAX_SVN_ATTEMPTS=10
-NOTARIZATION_CHECK_ATTEMPTS=20
-NOTARIZATION_CHECK_PERIOD=60
-NOTARIZATION_LOGFILE="notarization.log"
-NOTARIZATION_LOGFILE_PATH="."
-PASSWORD=${ISSM_BINARIES_PASS}
-PKG="ISSM-macOS-Python-2"
-PRIMARY_BUNDLE_ID="gov.nasa.jpl.issm.python"
-SIGNED_REPO_COPY="./signed"
-SIGNED_REPO_URL="https://issm.ess.uci.edu/svn/issm-binaries/mac/python/2/signed"
-SIGNING_LOCK_FILE="signing.lock"
-SUCCESS_LOGFILE="${SIGNED_REPO_COPY}/success.log"
-UNSIGNED_REPO_COPY="./unsigned"
-UNSIGNED_REPO_URL="https://issm.ess.uci.edu/svn/issm-binaries/mac/python/2/unsigned"
-USERNAME=${ISSM_BINARIES_USER}
-
-COMPRESSED_PKG="${PKG}.zip"
-EXE_ENTITLEMENTS_PLIST="${PKG}/bin/entitlements.plist"
-
-# NOTE: Uncomment the following for local testing (Jenkins checks out copy of 
-#		repository for unsigned packages to working directory)
-#
-
-# # Clean up from previous packaging (not necessary for single builds on Jenkins, 
-# # but useful when testing packaging locally)
-# echo "Cleaning up existing assets"
-# rm -rf ${COMPRESSED_PKG} ${NOTARIZATION_LOGFILE_PATH}/${NOTARIZATION_LOGFILE} ${UNSIGNED_REPO_COPY}
-
-# # Check out copy of repository for unsigned packages
-# echo "Checking out copy of repository for unsigned packages"
-# svn checkout \
-# 	--trust-server-cert \
-# 	--non-interactive \
-# 	--username ${USERNAME} \
-# 	--password ${PASSWORD} \
-# 	${UNSIGNED_REPO_URL} \
-# 	${UNSIGNED_REPO_COPY}
-
-rm -rf ${PKG} ${SIGNED_REPO_COPY}
-
-
-# Extract package contents
-echo "Extracting package contents"
-ditto -xk ${UNSIGNED_REPO_COPY}/${COMPRESSED_PKG} .
-
-# Clear extended attributes on all files
-xattr -cr ${PKG}
-
-# Build list of ISSM executables
-ISSM_BINS=$(\
-	find ${PKG}/bin -type f -name *.exe; \
-	find ${PKG}/bin -type f -name *.pyc; \
-)
-
-# Build list of third party executables
-THIRD_PARTY_BINS=$(\
-	echo ${PKG}/bin/mpiexec; \
-	echo ${PKG}/bin/hydra_pmi_proxy; \
-	echo ${PKG}/bin/gdalsrsinfo; \
-	echo ${PKG}/bin/gdaltransform; \
-	echo ${PKG}/bin/gmt; \
-	echo ${PKG}/bin/gmtselect; \
-	echo ${PKG}/bin/gmsh; \
-)
-
-# Sign all executables in package
-echo "Signing all executables in package"
-codesign -s ${AD_IDENTITY} --timestamp --options=runtime --entitlements ${EXE_ENTITLEMENTS_PLIST} ${ISSM_BINS}
-codesign -s ${AD_IDENTITY} --timestamp --options=runtime ${THIRD_PARTY_BINS}
-
-# Build list of ISSM libraries
-ISSM_LIBS=$(\
-	find ${PKG}/lib -type f -name *.so; \
-)
-
-# Sign all libraries in package
-echo "Signing all libraries in package"
-codesign -s ${AD_IDENTITY} --timestamp --options=runtime ${ISSM_LIBS}
-
-# NOTE: Skipping signature validation because this is not a true package nor app
-
-# Compress signed package
-echo "Compressing signed package"
-ditto -ck --sequesterRsrc --keepParent ${PKG} ${COMPRESSED_PKG}
-
-# Submit compressed package for notarization
-echo "Submitting signed package to Apple for notarization"
-xcrun altool --notarize-app --primary-bundle-id ${PRIMARY_BUNDLE_ID} --username ${AD_USERNAME} --password ${ALTOOL_PASSWORD} --asc-provider ${ASC_PROVIDER} --file ${COMPRESSED_PKG} &> ${NOTARIZATION_LOGFILE_PATH}/${NOTARIZATION_LOGFILE}
-
-# Sleep until notarization request response is received
-echo "Waiting for notarization request response"
-while [[ ! -f ${NOTARIZATION_LOGFILE_PATH}/${NOTARIZATION_LOGFILE} || ! -z $(find ${NOTARIZATION_LOGFILE_PATH} -empty -name ${NOTARIZATION_LOGFILE}) ]]; do
-	sleep 30
-done
-
-echo "Notarization request response received"
-
-# Check if UUID exists in response
-HAS_UUID=$(grep 'RequestUUID = ' ${NOTARIZATION_LOGFILE_PATH}/${NOTARIZATION_LOGFILE}) # NOTE: Checking for "RequestUUID = " because "RequestUUID" shows up in some error messages
-if [ -z "${HAS_UUID}" ]; then
-	echo "Notarization failed!"
-	echo "----------------------- Contents of notarization logfile -----------------------"
-	cat ${NOTARIZATION_LOGFILE_PATH}/${NOTARIZATION_LOGFILE}
-	echo "--------------------------------------------------------------------------------"
-
-	# Clean up
-	rm -rf ${PKG} ${COMPRESSED_PKG}
-
-	exit 1
-fi
-
-# Get UUID from notarization request response
-UUID=$(echo ${HAS_UUID} | sed 's/[[:space:]]*RequestUUID = //')
-echo "UUID: ${UUID}" 
-
-# Check notarization status
-#
-# NOTE: Currently, this checks if notarization was successful, but we are not 
-#		able to staple notarization as this is not a true package nor app and, 
-#		at the very least, MATLAB Mex files cannot be stapled. As such, clients 
-#		will not be able to clear Gatekeeper if they are offline.
-#
-echo "Checking notarization status"
-SUCCESS=0
-for ATTEMPT in $(seq 1 ${NOTARIZATION_CHECK_ATTEMPTS}); do
-	echo "    Attempt #${ATTEMPT}..."
-	xcrun altool --notarization-info ${UUID} --username ${AD_USERNAME} --password ${ALTOOL_PASSWORD} &> ${NOTARIZATION_LOGFILE_PATH}/${NOTARIZATION_LOGFILE}
-	if [[ -f ${NOTARIZATION_LOGFILE_PATH}/${NOTARIZATION_LOGFILE} && -z $(find ${NOTARIZATION_LOGFILE_PATH} -empty -name ${NOTARIZATION_LOGFILE}) ]]; then
-
-		# First, check if there is an error
-		ERROR_CHECK=$(grep 'Error' ${NOTARIZATION_LOGFILE_PATH}/${NOTARIZATION_LOGFILE})
-		if [ ! -z "${ERROR_CHECK}" ]; then
-			break
-		fi
-
-		# No error, so check status
-		STATUS=$(grep 'Status:' ${NOTARIZATION_LOGFILE_PATH}/${NOTARIZATION_LOGFILE} | sed -e 's/[[:space:]]*Status: //')
-		if [[ "${STATUS}" == "success" ]]; then
-			# Staple notarization to all elements of package that were previously signed
-			#xcrun stapler staple ${THIRD_PARTY_BINS} # NOTE: Fails with "Stapler is incapable of working with MATLAB Mex files."
-
-			# Validate stapling of notarization
-			#xcrun stapler validation ${THIRD_PARTY_BINS} # NOTE: Skipping notarization stapling validation because this is not a true package nor app
-
-			# Compress signed and notarized package
-			ditto -ck --sequesterRsrc --keepParent ${PKG} ${COMPRESSED_PKG}
-
-			# Set flag indicating notarization was successful
-			SUCCESS=1
-
-			break
-		elif [[ "${STATUS}" == "in progress" ]]; then
-			echo "    ...in progress still; checking again in ${NOTARIZATION_CHECK_PERIOD} seconds."
-			sleep ${NOTARIZATION_CHECK_PERIOD}
-		elif [[ "${STATUS}" == "invalid" ]]; then
-			break
-		fi
-	else
-		if [ ${ATTEMPT} -lt ${NOTARIZATION_CHECK_ATTEMPTS} ]; then
-			echo "    ...not ready yet; checking again in ${NOTARIZATION_CHECK_PERIOD} seconds"
-			sleep ${NOTARIZATION_CHECK_PERIOD}
-		else
-			echo "    ...maximum attempts reached, but no response, or something else went wrong"
-			echo "    If contents of notarization status check logfile appear to be valid, increase NOTARIZATION_CHECK_ATTEMPTS and run again"
-			break
-		fi
-	fi
-done
-
-if [ ${SUCCESS} -eq 1 ]; then
-	echo "Notarization successful!"
-else
-	echo "Notarization failed!"
-	echo "----------------------- Contents of notarization logfile -----------------------"
-	cat ${NOTARIZATION_LOGFILE_PATH}/${NOTARIZATION_LOGFILE}
-	echo "--------------------------------------------------------------------------------"
-fi
-
-# Check out copy of repository for signed packages
-echo "Checking out copy of repository for signed packages"
-SVN_ATTEMPT=0
-SVN_SUCCESS=0
-while [[ ${SVN_ATTEMPT} -lt ${MAX_SVN_ATTEMPTS} && ${SVN_SUCCESS} -eq 0 ]]; do
-	rm -rf ${SIGNED_REPO_COPY}
-	svn checkout \
-		--trust-server-cert \
-		--non-interactive \
-		--username ${USERNAME} \
-		--password ${PASSWORD} \
-		${SIGNED_REPO_URL} \
-		${SIGNED_REPO_COPY} > /dev/null 2>&1
-	if [ $? -eq 0 ]; then
-		SVN_SUCCESS=1
-		break
-	else
-		((++SVN_ATTEMPT))
-		sleep 5
-	fi
-done
-
-if [ ${SVN_SUCCESS} -eq 0 ]; then
-	echo "Checkout of repository for signed packages failed"
-	exit 1
-fi
-
-# Copy notarization file to repository for signed packages
-cp ${NOTARIZATION_LOGFILE_PATH}/${NOTARIZATION_LOGFILE} ${SIGNED_REPO_COPY}
-svn add ${SIGNED_REPO_COPY}/${NOTARIZATION_LOGFILE} > /dev/null 2>&1
-
-# Remove lock file from repository for signed packages
-svn delete ${SIGNED_REPO_COPY}/${SIGNING_LOCK_FILE}
-
-SVN_ATTEMPT=0
-SVN_SUCCESS=0
-if [ ${SUCCESS} -eq 1 ]; then
-	# Copy signed package to repository for signed packages
-	cp ${COMPRESSED_PKG} ${SIGNED_REPO_COPY}
-	svn add ${SIGNED_REPO_COPY}/${COMPRESSED_PKG} > /dev/null 2>&1
-
-	# Commit changes
-	echo "Committing changes to repository for signed packages"
-	while [[ ${SVN_ATTEMPT} -lt ${MAX_SVN_ATTEMPTS} && ${SVN_SUCCESS} -eq 0 ]]; do
-		svn commit \
-			--trust-server-cert \
-			--non-interactive \
-			--username ${USERNAME} \
-			--password ${PASSWORD} \
-			--message "CHG: New signed package (success)" ${SIGNED_REPO_COPY} > /dev/null 2>&1
-		if [ $? -eq 0 ]; then
-			SVN_SUCCESS=1
-			break
-		else
-			((++SVN_ATTEMPT))
-			sleep 5
-		fi
-	done
-
-	if [ ${SVN_SUCCESS} -eq 0 ]; then
-		echo "Commit to repository for signed packages failed"
-		exit 1
-	fi
-else
-	# Commit changes
-	echo "Committing changes to repository for signed packages"
-	while [[ ${SVN_ATTEMPT} -lt ${MAX_SVN_ATTEMPTS} && ${SVN_SUCCESS} -eq 0 ]]; do
-		svn commit \
-			--trust-server-cert \
-			--non-interactive \
-			--username ${USERNAME} \
-			--password ${PASSWORD} \
-			--message "CHG: New signed package (failure)" ${SIGNED_REPO_COPY} > /dev/null 2>&1
-		if [ $? -eq 0 ]; then
-			SVN_SUCCESS=1
-			break
-		else
-			((++SVN_ATTEMPT))
-			sleep 5
-		fi
-	done
-
-	if [ ${SVN_SUCCESS} -eq 0 ]; then
-		echo "Commit to repository for signed packages failed"
-		exit 1
-	fi
-
-	exit 1
-fi
Index: /issm/trunk/packagers/mac/sign-issm-mac-binaries-python-3.sh
===================================================================
--- /issm/trunk/packagers/mac/sign-issm-mac-binaries-python-3.sh	(revision 28012)
+++ /issm/trunk/packagers/mac/sign-issm-mac-binaries-python-3.sh	(revision 28013)
@@ -8,12 +8,12 @@
 # new builds.
 #
-# In order to replicate the requried Jenkins project configuration:
+# In order to replicate the required Jenkins project configuration:
 # - First, navigate to 'Manage Jenkins' -> 'Manage Plugins' and install the 
 #	'Credentials Bindings Plugin' if it is not already installed.
-# - Contact one of the members of the ISSM development team for crendentials 
-#	for the ISSM binaries repository (mention that the credentials are stored 
-#	in ISSM-Infrastructure.pdf).
+# - Contact one of the members of the ISSM development team for credentials for 
+#	the ISSM binaries repository (mention that the credentials are stored in 
+#	ISSM-Infrastructure.pdf).
 # - Navigate to 'Manage Jenkins' -> 'Manage Credentials' -> <domain> -> 
-#	'Add Credentials' and enter the crendentials from above.
+#	'Add Credentials' and enter the credentials from above.
 # - From the 'Dashboard', select 'New Item' -> 'Freestyle project'.
 # - Under 'Source Code Management', select 'Subversion'.
@@ -24,5 +24,5 @@
 #		- The 'Local module directory' text field should be set to the same 
 #		value as the constant UNSIGNED_REPO_COPY (set below to './unsigned').
-# - Under 'Build Trigggers', check the box for 'Poll SCM' and set the 
+# - Under 'Build Triggers', check the box for 'Poll SCM' and set the 
 #	'Schedule' text area to "H/5 * * * *".
 # - Under 'Build Environment', check the box for 'Use secret text(s) or 
@@ -52,20 +52,13 @@
 shopt -s expand_aliases
 
-# From https://developer.apple.com/documentation/macos-release-notes/macos-catalina-10_15-release-notes,
-#
-#	Command line tool support for Subversion — including svn, git-svn, and 
-#	related commands — is no longer provided by Xcode. (50266910)
-#
-# which results in,
-#
-#	svn: error: The subversion command line tools are no longer provided by 
-#	Xcode.
-#
-# when calling svn, even when subversion is installed via Homebrew and its path 
-# is available in PATH.
-#
-# NOTE: May be able to remove this after updating macOS.
-#
-#alias svn='/usr/local/bin/svn'
+# NOTE: For some reason, calling svn from within the context of this script 
+#		gives,
+#
+#			svn: command not found
+#
+#		even though it is installed via Homebrew and available at the following 
+#		path.
+#
+alias svn='/usr/local/bin/svn'
 
 ## Override certain other aliases
@@ -87,5 +80,4 @@
 PASSWORD=${ISSM_BINARIES_PASS}
 PKG="ISSM-macOS-Python-3"
-PRIMARY_BUNDLE_ID="gov.nasa.jpl.issm.python"
 SIGNED_REPO_COPY="./signed"
 SIGNED_REPO_URL="https://issm.ess.uci.edu/svn/issm-binaries/mac/python/3/signed"
@@ -120,5 +112,4 @@
 rm -rf ${PKG} ${SIGNED_REPO_COPY}
 
-
 # Extract package contents
 echo "Extracting package contents"
@@ -132,4 +123,5 @@
 	find ${PKG}/bin -type f -name *.exe; \
 	find ${PKG}/bin -type f -name *.pyc; \
+	find ${PKG}/test -type f -name *.pkg; \
 )
 
@@ -167,16 +159,10 @@
 # Submit compressed package for notarization
 echo "Submitting signed package to Apple for notarization"
-xcrun altool --notarize-app --primary-bundle-id ${PRIMARY_BUNDLE_ID} --username ${AD_USERNAME} --password ${ALTOOL_PASSWORD} --asc-provider ${ASC_PROVIDER} --file ${COMPRESSED_PKG} &> ${NOTARIZATION_LOGFILE_PATH}/${NOTARIZATION_LOGFILE}
-
-# Sleep until notarization request response is received
-echo "Waiting for notarization request response"
-while [[ ! -f ${NOTARIZATION_LOGFILE_PATH}/${NOTARIZATION_LOGFILE} || ! -z $(find ${NOTARIZATION_LOGFILE_PATH} -empty -name ${NOTARIZATION_LOGFILE}) ]]; do
-	sleep 30
-done
+xcrun notarytool submit ${COMPRESSED_PKG} --apple-id "$AD_USERNAME" --team-id "$TEAM_ID" --password "$NOTARY_PASSWORD" --wait &> ${NOTARIZATION_LOGFILE_PATH}/${NOTARIZATION_LOGFILE}
 
 echo "Notarization request response received"
 
 # Check if UUID exists in response
-HAS_UUID=$(grep 'RequestUUID = ' ${NOTARIZATION_LOGFILE_PATH}/${NOTARIZATION_LOGFILE}) # NOTE: Checking for "RequestUUID = " because "RequestUUID" shows up in some error messages
+HAS_UUID=$(grep 'id: = ' ${NOTARIZATION_LOGFILE_PATH}/${NOTARIZATION_LOGFILE})
 if [ -z "${HAS_UUID}" ]; then
 	echo "Notarization failed!"
@@ -192,5 +178,5 @@
 
 # Get UUID from notarization request response
-UUID=$(echo ${HAS_UUID} | sed 's/[[:space:]]*RequestUUID = //')
+UUID=$(echo ${HAS_UUID} | sed 's/[[:space:]]*id: //')
 echo "UUID: ${UUID}" 
 
@@ -204,51 +190,21 @@
 echo "Checking notarization status"
 SUCCESS=0
-for ATTEMPT in $(seq 1 ${NOTARIZATION_CHECK_ATTEMPTS}); do
-	echo "    Attempt #${ATTEMPT}..."
-	xcrun altool --notarization-info ${UUID} --username ${AD_USERNAME} --password ${ALTOOL_PASSWORD} &> ${NOTARIZATION_LOGFILE_PATH}/${NOTARIZATION_LOGFILE}
-	if [[ -f ${NOTARIZATION_LOGFILE_PATH}/${NOTARIZATION_LOGFILE} && -z $(find ${NOTARIZATION_LOGFILE_PATH} -empty -name ${NOTARIZATION_LOGFILE}) ]]; then
-
-		# First, check if there is an error
-		ERROR_CHECK=$(grep 'Error' ${NOTARIZATION_LOGFILE_PATH}/${NOTARIZATION_LOGFILE})
-		if [ ! -z "${ERROR_CHECK}" ]; then
-			break
-		fi
-
-		# No error, so check status
-		STATUS=$(grep 'Status:' ${NOTARIZATION_LOGFILE_PATH}/${NOTARIZATION_LOGFILE} | sed -e 's/[[:space:]]*Status: //')
-		if [[ "${STATUS}" == "success" ]]; then
-			# Staple notarization to all elements of package that were previously signed
-			#xcrun stapler staple ${THIRD_PARTY_BINS} # NOTE: Fails with "Stapler is incapable of working with MATLAB Mex files."
-
-			# Validate stapling of notarization
-			#xcrun stapler validation ${THIRD_PARTY_BINS} # NOTE: Skipping notarization stapling validation because this is not a true package nor app
-
-			# Compress signed and notarized package
-			ditto -ck --sequesterRsrc --keepParent ${PKG} ${COMPRESSED_PKG}
-
-			# Set flag indicating notarization was successful
-			SUCCESS=1
-
-			break
-		elif [[ "${STATUS}" == "in progress" ]]; then
-			echo "    ...in progress still; checking again in ${NOTARIZATION_CHECK_PERIOD} seconds."
-			sleep ${NOTARIZATION_CHECK_PERIOD}
-		elif [[ "${STATUS}" == "invalid" ]]; then
-			break
-		fi
-	else
-		if [ ${ATTEMPT} -lt ${NOTARIZATION_CHECK_ATTEMPTS} ]; then
-			echo "    ...not ready yet; checking again in ${NOTARIZATION_CHECK_PERIOD} seconds"
-			sleep ${NOTARIZATION_CHECK_PERIOD}
-		else
-			echo "    ...maximum attempts reached, but no response, or something else went wrong"
-			echo "    If contents of notarization status check logfile appear to be valid, increase NOTARIZATION_CHECK_ATTEMPTS and run again"
-			break
-		fi
-	fi
-done
-
-if [ ${SUCCESS} -eq 1 ]; then
+xcrun notarytool log ${UUID} --apple-id "$AD_USERNAME" --team-id "$TEAM_ID" --password "$NOTARY_PASSWORD" &> ${NOTARIZATION_LOGFILE_PATH}/${NOTARIZATION_LOGFILE}
+STATUS=$(grep 'status: Accepted' ${NOTARIZATION_LOGFILE_PATH}/${NOTARIZATION_LOGFILE} | wc -l)
+
+if [[ ${STATUS} -gt 0 ]]; then
+	# Staple notarization to all elements of package that were previously signed
+	#xcrun stapler staple ${THIRD_PARTY_BINS} # NOTE: Fails with "Stapler is incapable of working with MATLAB Mex files."
+
+	# Validate stapling of notarization
+	#xcrun stapler validation ${THIRD_PARTY_BINS} # NOTE: Skipping notarization stapling validation because this is not a true package nor app
+
+	# Compress signed and notarized package
+	ditto -ck --sequesterRsrc --keepParent ${PKG} ${COMPRESSED_PKG}
+
 	echo "Notarization successful!"
+
+	# Set flag indicating notarization was successful
+	SUCCESS=1
 else
 	echo "Notarization failed!"
Index: /issm/trunk/packagers/mac/test-issm-mac-binaries-matlab.sh
===================================================================
--- /issm/trunk/packagers/mac/test-issm-mac-binaries-matlab.sh	(revision 28012)
+++ /issm/trunk/packagers/mac/test-issm-mac-binaries-matlab.sh	(revision 28013)
@@ -10,7 +10,7 @@
 ## Constants
 #
-INSTALL_DIR=.
-MATLAB_NROPTIONS="'benchmark','all','exclude',[125,126,234,235,418,420,435,444,445,701,702,703,1101,1102,1103,1104,1105,1106,1107,1108,1109,1110,1201,1202,1203,1204,1205,1206,1207,1208,1301,1302,1303,1304,1401,1402,1601,1602,2002,2003,2004,2006,2007,2008,2010,2011,2021,2051,2052,2053,2084,2085,2090,2101,2424,2425,3001:3200,3201,3202,3300,3480,3481,4001,4002,4003]" # NOTE: Combination of test suites from basic, Dakota, and Solid Earth builds, with tests that require a restart and those that require the JVM excluded
-MATLAB_PATH="/Applications/MATLAB_R2018a.app"
+INSTALL_DIR=${PWD}
+MATLAB_NROPTIONS="'benchmark','all','exclude',[125,126,129,234,235,418,420,435,444,445,701,702,703,1101,1102,1103,1104,1105,1106,1107,1108,1109,1110,1201,1202,1203,1204,1205,1206,1207,1208,1301,1302,1303,1304,1401,1402,1601,1602,2002,2003,2004,2006,2007,2008,2010,2011,2012,2013,2020,2021,2051,2052,2053,2084,2085,2090,2091,2092,2101,2424,2425,3001:3300,3480,3481,4001:4100]" # NOTE: Combination of test suites from basic, Dakota, and Solid Earth builds, with tests that require a restart and those that require the JVM excluded
+MATLAB_PATH="/Applications/MATLAB_R2022b.app"
 PKG="ISSM-macOS-MATLAB"
 
@@ -34,5 +34,5 @@
 # Check that MATLAB did not exit in error
 matlabExitCode=`echo $?`
-matlabExitedInError=`grep -c -E "Activation cannot proceed|license|Error|Warning: Name is nonexistent or not a directory" matlab.log`
+matlabExitedInError=`grep -c -E "Activation cannot proceed|Error in|Illegal|Invalid MEX-file|license|Warning: Name is nonexistent or not a directory" matlab.log`
 
 if [[ ${matlabExitCode} -ne 0 || ${matlabExitedInError} -ne 0 ]]; then
Index: sm/trunk/packagers/mac/test-issm-mac-binaries-python-2.sh
===================================================================
--- /issm/trunk/packagers/mac/test-issm-mac-binaries-python-2.sh	(revision 28012)
+++ 	(revision )
@@ -1,54 +1,0 @@
-#!/bin/bash
-
-################################################################################
-# This script is intended to test ISSM macOS Python 2 binaries on an end-user 
-# machine after successful packaging and signing.
-#
-# NOTE: Tarball must already exist in INSTALL_DIR
-################################################################################
-
-## Constants
-#
-INSTALL_DIR=.
-PKG="ISSM-macOS-Python-2"
-PYTHON_NROPTIONS="--benchmark all --exclude 125 126 234 235 418 420 435 444 445 701 702 703 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1201 1202 1203 1204 1205 1206 1207 1208 1301 1302 1303 1304 1401 1402 1601 1602 2002 2003 2004 2005 2006 2007 2008 2010 2011 2021 2051 2052 2053 2084 2085 2090 2101 2424 2425 3001:3200 3201 3202 3300 3480 3481 4001 4002 4003" # NOTE: Combination of test suites from basic, Dakota, and Solid Earth builds, with tests that require a restart and those that require the JVM excluded
-
-COMPRESSED_PKG="${PKG}.zip"
-
-export ISSM_DIR="${INSTALL_DIR}/${PKG}"
-export PATH="${PATH}:${ISSM_DIR}/bin:${ISSM_DIR}/scripts"
-export PYTHONPATH="${ISSM_DIR}/scripts"
-export PYTHONSTARTUP="${PYTHONPATH}/devpath.py"
-export PYTHONUNBUFFERED=1 # We don't want Python to buffer output, otherwise issm.exe output is not captured
-
-cd ${INSTALL_DIR}
-rm -rf ${PKG}
-ditto -xk ${COMPRESSED_PKG} .
-cd ${PKG}/test/NightlyRun
-
-# Run tests, redirecting output to logfile and suppressing output to console
-echo "Running tests"
-rm python.log 2> /dev/null
-./runme.py ${PYTHON_NROPTIONS} &> python.log 2>&1
-
-# Check that Python did not exit in error
-pythonExitCode=`echo $?`
-pythonExitedInError=`grep -c -E "runme.py: error" python.log`
-
-if [[ ${pythonExitCode} -ne 0 || ${pythonExitedInError} -ne 0 ]]; then
-	echo "----------Python exited in error!----------"
-	cat python.log
-	echo "-----------End of python.log-----------"
-	exit 1
-fi
-
-# Check that all tests passed
-numTestsFailed=`grep -c -E "FAILED|ERROR" python.log`
-
-if [[ ${numTestsFailed} -ne 0 ]]; then
-	echo "One or more tests FAILED"
-	cat python.log
-	exit 1
-else
-	echo "All tests PASSED"
-fi
Index: /issm/trunk/packagers/mac/test-issm-mac-binaries-python-3.sh
===================================================================
--- /issm/trunk/packagers/mac/test-issm-mac-binaries-python-3.sh	(revision 28012)
+++ /issm/trunk/packagers/mac/test-issm-mac-binaries-python-3.sh	(revision 28013)
@@ -10,7 +10,7 @@
 ## Constants
 #
-INSTALL_DIR=.
+INSTALL_DIR=${PWD}
 PKG="ISSM-macOS-Python-3"
-PYTHON_NROPTIONS="--benchmark all --exclude 125 126 234 235 418 420 435 444 445 701 702 703 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1201 1202 1203 1204 1205 1206 1207 1208 1301 1302 1303 1304 1401 1402 1601 1602 2002 2003 2004 2005 2006 2007 2008 2010 2011 2021 2051 2052 2053 2084 2085 2090 2101 2424 2425 3001:3200 3201 3202 3300 3480 3481 4001 4002 4003" # NOTE: Combination of test suites from basic, Dakota, and Solid Earth builds, with tests that require a restart and those that require the JVM excluded
+PYTHON_NROPTIONS="--benchmark all --exclude 125 126 129 234 235 418 420 435 444 445 701 702 703 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1201 1202 1203 1204 1205 1206 1207 1208 1301 1302 1303 1304 1401 1402 1601 1602 2002 2003 2004 2005 2006 2007 2008 2010 2011 2012 2013 2020 2021 2051 2052 2053 2084 2085 2090 2091 2092 2101 2424 2425 3001:3300 3480 3481 4001:4100" # NOTE: Combination of test suites from basic, Dakota, and Solid Earth builds, with tests that require a restart and those that require the JVM excluded
 
 COMPRESSED_PKG="${PKG}.zip"
@@ -34,5 +34,5 @@
 # Check that Python did not exit in error
 pythonExitCode=`echo $?`
-pythonExitedInError=`grep -c -E "runme.py: error" python.log`
+pythonExitedInError=`grep -c -E "Error|No such file or directory|Permission denied|Standard exception|Traceback|bad interpreter|syntax error" python.log`
 
 if [[ ${pythonExitCode} -ne 0 || ${pythonExitedInError} -ne 0 ]]; then
Index: /issm/trunk/packagers/win/complete-issm-win-binaries-matlab.sh
===================================================================
--- /issm/trunk/packagers/win/complete-issm-win-binaries-matlab.sh	(revision 28013)
+++ /issm/trunk/packagers/win/complete-issm-win-binaries-matlab.sh	(revision 28013)
@@ -0,0 +1,94 @@
+#!/bin/bash
+
+################################################################################
+# Wrapper script to build, package, and transfer to ISSM Web site ISSM 
+# distributable package for Windows with MATLAB API.
+#
+# Normally, we would put this directly into the project configuration under 
+# 'Build' -> 'Execute shell', but because it is a bit more involved, it is a 
+# good idea to version it.
+#
+# When no failures/errors occur, performs the following:
+# - Builds ISSM according to configuration.
+# - Packages executables and libraries.
+# - Runs test suite against package.
+# - Transmits package to ISSM Web site for distribution.
+#
+# Options:
+# -b/--skipbuild		Skip ISSM compilation.
+# -s/--skiptests		Skip ISSM compilation and testing during packaging 
+#						step. Use if packaging fails for some reason but build 
+#						is valid.
+# -t/--transferonly		Transfer package to ISSM Web site only. Use if transfer 
+#						fails for some reason to skip building, packaging, and 
+#						signing.
+#
+# NOTE:
+# - Use only *one* of the above options at a time, and make sure it is removed 
+#	again after a single run.
+# - Builds will fail when any of the above options are used on a clean 
+#	workspace. For example, if 'Source Code Management' -> 'Check-out Strategy' 
+#	select menu is set to "Always check out a fresh copy".
+################################################################################
+
+## Constants
+#
+PKG="ISSM-Windows-MATLAB" # Name of directory to copy distributable files to
+
+COMPRESSED_PKG="${PKG}.tar.gz"
+
+## Environment
+#
+export COMPRESSED_PKG
+export PKG
+
+## Parse options
+#
+if [ $# -gt 1 ]; then
+	echo "Can use only one option at a time"
+	exit 1
+fi
+
+# NOTE: We could do this with binary switching (i.e. 0011 to sign and transfer, 
+#		but the following is self-documenting).
+#
+build=1
+package=1
+transfer=1
+
+if [ $# -eq 1 ]; then
+	case $1 in
+		-b|--skipbuild)		build=0;				shift	;;
+		-s|--skiptests)		build=0;						;;
+		-t|--transferonly)	build=0;	package=0;			;;
+		*) echo "Unknown parameter passed: $1"; exit 1 		;;
+	esac
+fi
+
+# Build
+if [ ${build} -eq 1 ]; then
+	./jenkins/jenkins.sh ./jenkins/ross-win-msys2-mingw-msmpi-binaries-matlab
+
+	if [ $? -ne 0 ]; then 
+		exit 1
+	fi
+fi
+
+# Package
+if [ ${package} -eq 1 ]; then
+	./packagers/win/package-issm-win-binaries-matlab.sh $1
+
+	if [ $? -ne 0 ]; then 
+		exit 1
+	fi
+fi
+
+# Transfer distributable package to ISSM Web site
+if [ ${transfer} -eq 1 ]; then
+	./packagers/win/transfer-issm-win-binaries.sh
+
+	if [ $? -ne 0 ]; then 
+		exit 1
+	fi
+fi
+
Index: /issm/trunk/packagers/win/package-issm-win-binaries-matlab.sh
===================================================================
--- /issm/trunk/packagers/win/package-issm-win-binaries-matlab.sh	(revision 28013)
+++ /issm/trunk/packagers/win/package-issm-win-binaries-matlab.sh	(revision 28013)
@@ -0,0 +1,242 @@
+#!/bin/bash
+
+################################################################################
+# Packages and tests ISSM distributable package for Windows with MATLAB API.
+#
+# Options:
+# -s/--skiptests		Skip testing during packaging Use if packaging fails 
+#						for some reason but build is valid.
+#
+# NOTE:
+# - Assumes that the following constants are defined,
+#
+#		COMPRESSED_PKG
+#		ISSM_DIR
+#		PKG
+#
+# See also:
+# - packagers/win/complete-issm-win-binaries-matlab.sh
+################################################################################
+
+# Expand aliases within the context of this script
+shopt -s expand_aliases
+
+## Override certain aliases
+#
+alias grep=$(which grep)
+
+## Constants
+#
+#LIBGFORTRAN="/usr/lib/x86_64-win-gnu/libgfortran.so.5.0.0" # Important that this is the library itself
+#LIBGFORTRAN_DIST="${ISSM_DIR}/lib/libgfortran.so.5" # Important the file name matches the SONAME entry in the binaries and other shared libraries which link to it
+MATLAB_NROPTIONS="'exclude',[IdFromString('Dakota'),125,126,129,435,701,702,703]"
+#MATLAB_NROPTIONS="'benchmark','all','exclude',[125,126,129,234,235,418,420,435,444,445,701,702,703,1101,1102,1103,1104,1105,1106,1107,1108,1109,1110,1201,1202,1203,1204,1205,1206,1207,1208,1301,1302,1303,1304,1401,1402,1601,1602,2002,2003,2004,2006,2007,2008,2010,2011,2012,2013,2020,2021,2051,2052,2053,2084,2085,2090,2091,2092,2101,2424,2425,3001:3300,3480,3481,4001:4100]" # NOTE: Combination of test suites from basic, Dakota, and Solid Earth builds, with tests that require a restart and those that require the JVM excluded
+MATLAB_PATH=$(cygpath -u $(cygpath -ms "/c/Program Files/MATLAB/R2019b"))
+MSMPI_ROOT="${ISSM_DIR}/externalpackages/msmpi/install"
+
+## Environment
+#
+export PATH="${ISSM_DIR}/bin:$(getconf PATH)" # Ensure that we pick up binaries from 'bin' directory rather than 'externalpackages'
+
+## Parse options
+#
+if [ $# -gt 1 ]; then
+	echo "Can use only one option at a time"
+	exit 1
+fi
+
+skip_tests=0
+
+if [ $# -eq 1 ]; then
+	case $1 in
+		-s|--skiptests) skip_tests=1;					;;
+		*) echo "Unknown parameter passed: $1"; exit 1	;;
+	esac
+fi
+
+# Check if MATLAB exists
+if ! [ -d ${MATLAB_PATH} ]; then
+	echo "${MATLAB_PATH} does not point to a MATLAB installation! Please modify MATLAB_PATH variable in $(basename $0) and try again."
+	exit 1
+fi
+
+# Clean up from previous packaging
+echo "Cleaning up existing assets"
+cd ${ISSM_DIR}
+rm -rf ${PKG} ${COMPRESSED_PKG}
+mkdir ${PKG}
+
+# Add required binaries and libraries to package and modify them where needed
+cd ${ISSM_DIR}/bin
+
+echo "Modify generic"
+cat generic_static.m | sed -e "s/generic_static/generic/g" > generic.m
+
+echo "Moving system and MinGW libraries to bin/"
+cp /c/msys64/mingw64/bin/libgcc_s_seh-1.dll .
+cp /c/msys64/mingw64/bin/libstdc++-6.dll .
+cp /c/msys64/mingw64/bin/libwinpthread-1.dll .
+cp /c/msys64/mingw64/bin/libgfortran-5.dll .
+cp /c/msys64/mingw64/bin/libquadmath-0.dll .
+
+echo "Moving MSMPI binaries and libraries to bin/"
+if [ -f ${MSMPI_ROOT}/bin/mpiexec.exe ]; then
+	cp ${MSMPI_ROOT}/bin/* .
+	cp ${MSMPI_ROOT}/lib/libmsmpi.dll .
+	cp ${MSMPI_ROOT}/lib/msmpi.dll .
+else
+	echo "MSMPI not found"
+	exit 1
+fi
+
+echo "Moving MEX-files to bin/"
+mv ${ISSM_DIR}/lib/*.mexw64 .
+
+# echo "Moving MPICH binaries to bin/"
+# if [ -f ${ISSM_DIR}/externalpackages/petsc/install/bin/mpiexec ]; then
+# 	cp ${ISSM_DIR}/externalpackages/petsc/install/bin/mpiexec .
+# 	cp ${ISSM_DIR}/externalpackages/petsc/install/bin/hydra_pmi_proxy .
+# elif [ -f ${ISSM_DIR}/externalpackages/mpich/install/bin/mpiexec ]; then
+# 	cp ${ISSM_DIR}/externalpackages/mpich/install/bin/mpiexec .
+# 	cp ${ISSM_DIR}/externalpackages/mpich/install/bin/hydra_pmi_proxy .
+# else
+# 	echo "MPICH not found"
+# 	exit 1
+# fi
+
+# echo "Moving GDAL binaries to bin/"
+# if [ -f ${ISSM_DIR}/externalpackages/gdal/install/bin/gdal-config ]; then
+# 	cp ${ISSM_DIR}/externalpackages/gdal/install/bin/gdalsrsinfo .
+# 	cp ${ISSM_DIR}/externalpackages/gdal/install/bin/gdaltransform .
+# else
+# 	echo "GDAL not found"
+# 	exit 1
+# fi
+
+# echo "Moving GMT binaries to bin/"
+# if [ -f ${ISSM_DIR}/externalpackages/gmt/install/bin/gmt-config ]; then
+# 	cp ${ISSM_DIR}/externalpackages/gmt/install/bin/gmt .
+# 	cp ${ISSM_DIR}/externalpackages/gmt/install/bin/gmtselect .
+# else
+# 	echo "GMT not found"
+# 	exit 1
+# fi
+
+# echo "Moving Gmsh binaries to bin/"
+# if [ -f ${ISSM_DIR}/externalpackages/gmsh/install/bin/gmsh ]; then
+# 	cp ${ISSM_DIR}/externalpackages/gmsh/install/bin/gmsh .
+# else
+# 	echo "Gmsh not found"
+# 	exit 1
+# fi
+
+cd ${ISSM_DIR}/lib
+
+# echo "Moving libgfortran to lib/"
+# cp ${LIBGFORTRAN} ${LIBGFORTRAN_DIST} 2> /dev/null
+
+# echo "Moving GSHHG assets to share/"
+# if [ -d ${ISSM_DIR}/externalpackages/gmt/install/share/coast ]; then
+# 	mkdir ${ISSM_DIR}/share 2> /dev/null
+# 	cp -R ${ISSM_DIR}/externalpackages/gmt/install/share/coast ${ISSM_DIR}/share
+# else
+# 	echo "GSHHG not found"
+# 	exit 1
+# fi
+
+# echo "Moving PROJ assets to share/"
+# if [ -d ${ISSM_DIR}/externalpackages/proj/install/share/proj ]; then
+# 	mkdir ${ISSM_DIR}/share 2> /dev/null
+# 	cp -R ${ISSM_DIR}/externalpackages/proj/install/share/proj ${ISSM_DIR}/share
+# else
+# 	echo "PROJ not found"
+# 	exit 1
+# fi
+
+# Run tests
+if [ ${skip_tests} -eq 0 ]; then
+	echo "Running tests"
+	cd ${ISSM_DIR}/test/NightlyRun
+	rm matlab.log 2> /dev/null
+
+	# Run tests, redirecting output to logfile and suppressing output to console
+	export ISSM_DIR_WIN=$(cygpath -w "${ISSM_DIR}")
+	${MATLAB_PATH}/bin/matlab -nodesktop -nosplash -nojvm -r "try, addpath ${ISSM_DIR_WIN}/bin ${ISSM_DIR_WIN}/lib; runme(${MATLAB_NROPTIONS}); exit; catch me,fprintf('%s',getReport(me)); exit; end" -logfile matlab.log &
+
+	# Wait for MATLAB to exit
+	#
+	# TODO:
+	# - Replace by adding -wait option to above calls to matlab?
+	#
+	sleep 5;
+	echo "Waiting for MATLAB to exit"
+	pid=$(ps -W | grep MATLAB | awk '{print $1}')
+	echo '-----------------------------'
+	echo "pid: ${pid}"
+	echo '-----------------------------'
+
+	# Time out after $max_time seconds because sometimes multiple MATLAB processes get locked in race condition
+	timer=0
+	max_time=7200
+	while [[ $timer -lt $max_time && -n "${pid}" ]]; do
+		pid=$(ps -W | grep MATLAB | awk '{print $1}')
+		timer=$((timer + 1))
+		sleep 1;
+	done
+
+	# Check if timer hit $max_time
+	if [ $timer -eq $max_time ]; then
+		echo "Testing timed out at ${timer} seconds"
+		# Kill MATLAB processes
+		pid=$(ps -W | grep MATLAB | awk '{print $1}')
+		echo "${pid}" | xargs /bin/kill -f
+		exit 1
+	fi
+
+	# Filter out Windows characters
+	cat matlab.log | tr -cd '\11\12\40-\176' > matlab.log2 && mv matlab.log2 matlab.log
+
+	# Check that MATLAB did not exit in error
+	matlabExitedInError=`grep -c -E "Activation cannot proceed|Error in|Illegal|Invalid MEX-file|license|Warning: Name is nonexistent or not a directory" matlab.log`
+
+	if [ ${matlabExitedInError} -ne 0 ]; then
+		echo "----------MATLAB exited in error!----------"
+		cat matlab.log
+		echo "-----------End of matlab.log-----------"
+
+		# Clean up execution directory
+		rm -rf ${ISSM_DIR}/execution/*
+
+		exit 1
+	fi
+
+	# Check that all tests passed
+	sed -i "/FAILED TO establish the default connection to the WindowServer/d" matlab.log # First, need to remove WindowServer error message
+	numTestsFailed=`grep -c -E "FAILED|ERROR" matlab.log`
+
+	if [ ${numTestsFailed} -ne 0 ]; then
+		echo "One or more tests FAILED"
+		cat matlab.log
+		exit 1
+	else
+		echo "All tests PASSED"
+	fi
+else
+	echo "Skipping tests"
+fi
+
+# Create package
+cd ${ISSM_DIR}
+svn cleanup --remove-ignored --remove-unversioned test # Clean up test directory (before copying to package)
+echo "Copying assets to package: ${PKG}"
+# NOTE: We do not copy lib directory to package as MATLAB seems to perform differently under Windows and so we package all DDL's and MEX-files in bin directory
+cp -rf bin examples scripts share test ${PKG}
+mkdir ${PKG}/execution
+
+echo "Cleaning up unneeded/unwanted files"
+rm -f ${PKG}/bin/generic_static.* # Remove static versions of generic cluster classes
+rm -rf ${PKG}/test/SandBox # Remove testing sandbox from package
+
+# Compress package
+echo "Compressing package"
+tar -czf ${COMPRESSED_PKG} ${PKG}
Index: /issm/trunk/packagers/win/transfer-issm-win-binaries.sh
===================================================================
--- /issm/trunk/packagers/win/transfer-issm-win-binaries.sh	(revision 28013)
+++ /issm/trunk/packagers/win/transfer-issm-win-binaries.sh	(revision 28013)
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+################################################################################
+# Transfers ISSM distributable package for Windows to ISSM website.
+#
+# NOTE:
+# - Assumes that the following constants are defined,
+#
+#		COMPRESSED_PKG
+#
+# See also:
+# - packagers/win/complete-issm-win-binaries-matlab.sh
+# - packagers/win/complete-issm-win-binaries-python-2.sh
+# - packagers/win/complete-issm-win-binaries-python-3.sh
+################################################################################
+
+# Transfer package to ISSM Web site
+echo "Transferring package to ISSM Web site"
+scp -i ~/.ssh/windows_10-vm_to_ross ${COMPRESSED_PKG} jenkins@ross.ics.uci.edu:/var/www/html/${COMPRESSED_PKG}
+
+if [ $? -ne 0 ]; then
+	echo "Transfer failed! Verify connection then build this project again (with -t/--transferonly option to skip building and packaging)."
+	exit 1
+fi
Index: /issm/trunk/scripts/BinRead.py
===================================================================
--- /issm/trunk/scripts/BinRead.py	(revision 28012)
+++ /issm/trunk/scripts/BinRead.py	(revision 28013)
@@ -1,3 +1,3 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 import numpy as np
 from os import environ, path
@@ -6,113 +6,113 @@
 from argparse import ArgumentParser
 
-def BinRead(filin, filout='', verbose=0):  #{{{
+def BinRead(infile, outfile='', verbose=0):  #{{{
 
-    print("reading binary file.")
-    f = open(filin, 'rb')
+    print('reading binary file')
+    f = open(infile, 'rb')
 
-    if filout:
-        sys.stdout = open(filout, 'w')
+    if outfile:
+        sys.stdout = open(outfile, 'w')
 
     while True:
         try:
-            #Step 1: read size of record name
+            # Step 1: Read size of record name
             recordnamesize = struct.unpack('i', f.read(struct.calcsize('i')))[0]
         except struct.error as e:
-            print("probable EOF: {}".format(e))
+            print('probable EOF: {}'.format(e))
             break
 
-        print("============================================================================ ")
+        print('============================================================================')
         if verbose > 2:
-            print("\n recordnamesize = {}".format(recordnamesize))
-        recordname = struct.unpack('{}s'.format(recordnamesize), f.read(recordnamesize))[0]
-        print("field: {}".format(recordname))
+            print('\n recordnamesize = {}'.format(recordnamesize))
+        recordname = struct.unpack('{}s'.format(recordnamesize), f.read(recordnamesize))[0].decode('ASCII')
+        print('field: {}'.format(recordname))
 
-        #Step 2: read the data itself.
-        #first read length of record
+        # Step 2: Read the data itself
+        # First read length of record
         #reclen = struct.unpack('i', f.read(struct.calcsize('i')))[0]
         reclen = struct.unpack('q', f.read(struct.calcsize('q')))[0]
         if verbose > 1:
-            print("reclen = {}".format(reclen))
+            print('reclen = {}'.format(reclen))
 
-        #read data code:
+        # Read data code
         code = struct.unpack('i', f.read(struct.calcsize('i')))[0]
-        print("Format = {} (code {})".format(CodeToFormat(code), code))
+        print('Format = {} (code {})'.format(CodeToFormat(code), code))
 
         if code == FormatToCode('Boolean'):
             bval = struct.unpack('i', f.read(reclen - struct.calcsize('i')))[0]
-            print("value = {}".format(bval))
+            print('value = {}'.format(bval))
 
         elif code == FormatToCode('Integer'):
             ival = struct.unpack('i', f.read(reclen - struct.calcsize('i')))[0]
-            print("value = {}".format(ival))
+            print('value = {}'.format(ival))
 
         elif code == FormatToCode('Double'):
             dval = struct.unpack('d', f.read(reclen - struct.calcsize('i')))[0]
-            print("value = {}".format(dval))
+            print('value = {}'.format(dval))
 
         elif code == FormatToCode('String'):
             strlen = struct.unpack('i', f.read(struct.calcsize('i')))[0]
             if verbose > 1:
-                print("strlen = {}".format(strlen))
+                print('strlen = {}'.format(strlen))
             sval = struct.unpack('{}s'.format(strlen), f.read(strlen))[0]
-            print("value = '{}'".format(sval))
+            print('value = {}'.format(sval))
 
         elif code == FormatToCode('BooleanMat'):
-            #read matrix type:
+            # Read matrix type
             mattype = struct.unpack('i', f.read(struct.calcsize('i')))[0]
-            print("mattype = {}".format(mattype))
+            print('mattype = {}'.format(mattype))
 
-            #now read matrix
+            # Read matrix
             s = [0, 0]
             s[0] = struct.unpack('i', f.read(struct.calcsize('i')))[0]
             s[1] = struct.unpack('i', f.read(struct.calcsize('i')))[0]
-            print("size = [{}x{}]".format(s[0], s[1]))
+            print('size = [{}x{}]'.format(s[0], s[1]))
             data = np.zeros((s[0], s[1]))
             for i in range(s[0]):
                 for j in range(s[1]):
-                    data[i][j] = struct.unpack('d', f.read(struct.calcsize('d')))[0]    #get to the "c" convention, hence the transpose
+                    data[i][j] = struct.unpack('d', f.read(struct.calcsize('d')))[0]
                     if verbose > 2:
-                        print("data[{}, {}] = {}".format(i, j, data[i][j]))
+                        print('data[{}, {}] = {}'.format(i, j, data[i][j]))
 
         elif code == FormatToCode('IntMat'):
-            #read matrix type:
+            # Read matrix type
             mattype = struct.unpack('i', f.read(struct.calcsize('i')))[0]
-            print("mattype = {}".format(mattype))
+            print('mattype = {}'.format(mattype))
 
-            #now read matrix
+            # Read matrix
             s = [0, 0]
             s[0] = struct.unpack('i', f.read(struct.calcsize('i')))[0]
             s[1] = struct.unpack('i', f.read(struct.calcsize('i')))[0]
-            print("size = [{}x{}]".format(s[0], s[1]))
+            print('size = [{}x{}]'.format(s[0], s[1]))
             data = np.zeros((s[0], s[1]))
             for i in range(s[0]):
                 for j in range(s[1]):
-                    data[i][j] = struct.unpack('d', f.read(struct.calcsize('d')))[0]    #get to the "c" convention, hence the transpose
+                    data[i][j] = struct.unpack('d', f.read(struct.calcsize('d')))[0]
                     if verbose > 2:
-                        print("data[{}, {}] = {}".format(i, j, data[i][j]))
+                        print('data[{}, {}] = {}'.format(i, j, data[i][j]))
 
         elif code == FormatToCode('DoubleMat'):
-            #read matrix type:
+            # Read matrix type
             mattype = struct.unpack('i', f.read(struct.calcsize('i')))[0]
-            print("mattype = {}".format(mattype))
+            print('mattype = {}'.format(mattype))
 
-            #now read matrix
+            # Read matrix
             s = [0, 0]
             s[0] = struct.unpack('i', f.read(struct.calcsize('i')))[0]
             s[1] = struct.unpack('i', f.read(struct.calcsize('i')))[0]
-            print("size = [{}x{}]".format(s[0], s[1]))
+            print('size = [{}x{}]'.format(s[0], s[1]))
             data = np.zeros((s[0], s[1]))
             for i in range(s[0]):
                 for j in range(s[1]):
-                    data[i][j] = struct.unpack('d', f.read(struct.calcsize('d')))[0]    #get to the "c" convention, hence the transpose
+                    data[i][j] = struct.unpack('d', f.read(struct.calcsize('d')))[0]
                     if verbose > 2:
-                        print("data[{}, {}] = {}".format(i, j, data[i][j]))
+                        print('data[{}, {}] = {}'.format(i, j, data[i][j]))
 
         elif code == FormatToCode('MatArray'):
             f.seek(reclen - 4, 1)
-            print("skipping {} bytes for code {}.".format(code, reclen - 4))
+            print('skipping {} bytes for code {}'.format(code, reclen - 4))
         elif code == FormatToCode('StringArray'):
             f.seek(reclen - 4, 1)
-            print("skipping {} bytes for code {}.".format(code, reclen - 4))
+            print('skipping {} bytes for code {}'.format(code, reclen - 4))
 
         else:
@@ -123,8 +123,7 @@
 
 def FormatToCode(format):  # {{{
-    """
-    This routine takes the format string, and hardcodes it into an integer, which
-    is passed along the record, in order to identify the nature of the dataset being
-    sent.
+    """This routine takes the format string and converts it into an integer, 
+    which is passed along with the record in order to identify the nature of 
+    the data being sent.
     """
 
@@ -154,8 +153,6 @@
 
 def CodeToFormat(code):  # {{{
-    """
-    This routine takes the format string, and hardcodes it into an integer, which
-    is passed along the record, in order to identify the nature of the dataset being
-    sent.
+    """This routine takes a datatype code and converts it to the corresponding 
+    string in order to identify the nature of the data retrieved.
     """
 
@@ -186,9 +183,9 @@
 if __name__ == '__main__':  #{{{
     parser = ArgumentParser(description='BinRead - function to read binary input file.')
-    parser.add_argument('-f', '--filin', help='name of binary input file', default='')
-    parser.add_argument('-o', '--filout', help='optional name of text output file', default='')
+    parser.add_argument('-f', '--infile', help='name of binary input file', default='')
+    parser.add_argument('-o', '--outfile', help='optional name of text output file', default='')
     parser.add_argument('-v', '--verbose', help='optional level of output', default=0)
     args = parser.parse_args()
 
-    BinRead(args.filin, args.filout, args.verbose)
+    BinRead(args.infile, args.outfile, args.verbose)
 #}}}
Index: /issm/trunk/scripts/DownloadExamplesDatasets.sh
===================================================================
--- /issm/trunk/scripts/DownloadExamplesDatasets.sh	(revision 28012)
+++ /issm/trunk/scripts/DownloadExamplesDatasets.sh	(revision 28013)
@@ -25,7 +25,13 @@
 fi
 
-# Get content of page that hosts datasets, reduce to just datasets list, then 
+# Get content of page that hosts datasets, reduce to just datasets list, then
 # parse out dataset links
+#
+# NOTE: Clear DYLD_LIBRARY_PATH in case we have installed our own copy of cURL
+#		and $ISSM_DIR/etc/environment.sh has been sourced as there may be a
+#		conflict between versions of cURL executable and libcurl
+#
 dataset_urls=$(\
+	DYLD_LIBRARY_PATH=""; \
 	curl -Ls ${DATASETS_URL} |\
 	sed '/<!--DATASETS LIST START-->/,/<!--DATASETS LIST END-->/ !d' |\
@@ -34,5 +40,7 @@
 
 # Get datasets
-wget --no-clobber --directory-prefix="${DIRECTORY_PREFIX}" -i - <<< "${dataset_urls}"
+#
+echo "Downloading examples datasets..."
+wget --quiet --no-clobber --directory-prefix="${DIRECTORY_PREFIX}" -i - <<< "${dataset_urls}"
 
 # Expand zip files
Index: /issm/trunk/scripts/ol.m
===================================================================
--- /issm/trunk/scripts/ol.m	(revision 28012)
+++ /issm/trunk/scripts/ol.m	(revision 28013)
@@ -3,5 +3,10 @@
 	options=pairoptions(varargin{:});
 
-	range=getfieldvalue(options,'<',Inf);
+	%recover steps in calling workspace:  will be used to highlight current step.
+	steps= evalin('base', 'steps');
+	
+	mmin=getfieldvalue(options,'>',1);
+	mmax=getfieldvalue(options,'<',Inf);
+
 	file=getfieldvalue(options,'file','runme.m');
 
@@ -20,6 +25,14 @@
 				return;
 			end
-			disp(sprintf('%2i: %s',count,tline(16:lastchar)));
-			if count>range,
+			if ismember(count,steps),
+				disp(sprintf('%2i: *%s',count,tline(17:lastchar-1)));
+			else
+				if count>=mmin & count <= mmax,
+					disp(sprintf('%2i:  %s',count,tline(17:lastchar-1)));
+				else
+					%do nothing. 
+				end
+			end
+			if count>mmax,
 				break;
 			end
Index: /issm/trunk/scripts/ola.m
===================================================================
--- /issm/trunk/scripts/ola.m	(revision 28013)
+++ /issm/trunk/scripts/ola.m	(revision 28013)
@@ -0,0 +1,40 @@
+function ola(varargin)
+
+	options=pairoptions(varargin{:});
+
+	%recover steps in calling workspace:  will be used to highlight current step.
+	steps= evalin('base', 'steps');
+	
+	r=getfieldvalue(options,'r',10);
+	range=(min(steps)-r):(max(steps)+r);
+
+	file=getfieldvalue(options,'file','runme.m');
+
+	%Open runme.m file and read line by line
+	fid=fopen(file,'r');
+
+	tline = fgets(fid);
+	count=1;
+	while ischar(tline)
+		tline = fgets(fid);
+		if strncmpi(tline,'if perform(org,',14),
+			lastchar = strfind(tline,')');
+			lastchar = lastchar(end)-1;
+			string=tline(17:lastchar-1);
+			if strcmpi(string,'End'),
+				return;
+			end
+			if ismember(count,range),
+				if ismember(count,steps),
+					disp(sprintf('%2i: *%s',count,tline(17:lastchar-1)));
+				else
+					disp(sprintf('%2i:  %s',count,tline(17:lastchar-1)));
+				end
+			end
+			if count>range(end),
+				break;
+			end
+			count=count+1;
+		end
+	end
+	fclose(fid);
Index: /issm/trunk/scripts/py_to_pyc.sh
===================================================================
--- /issm/trunk/scripts/py_to_pyc.sh	(revision 28012)
+++ /issm/trunk/scripts/py_to_pyc.sh	(revision 28013)
@@ -14,8 +14,8 @@
 
 echo "Compiling Python source files"
-python -m compileall -q ${TARGET} > ${COMPILE_LOG}
+python3 -m compileall -f -q -b ${TARGET}
 
 if [ -s ${COMPILE_LOG} ]; then
-	echo "Error(s) occured while compiling Python scripts!"
+	echo "Error(s) occurred while compiling Python scripts!"
 	echo "--------------- start: ${COMPILE_LOG} ---------------"
 	cat ${COMPILE_LOG}
Index: /issm/trunk/scripts/svn_repo_authors.sh
===================================================================
--- /issm/trunk/scripts/svn_repo_authors.sh	(revision 28012)
+++ /issm/trunk/scripts/svn_repo_authors.sh	(revision 28013)
@@ -38,5 +38,5 @@
 
 function display_help_mac {
-	echo "	-h	diplay help"
+	echo "	-h	display help"
 	echo "	-a	sort output by author name"
 	echo "	-c	sort output by number of commits"
@@ -45,5 +45,5 @@
 
 function display_help_linux {
-	echo "	-h | --help		diplay help"
+	echo "	-h | --help		display help"
 	echo "	-a | --authors	sort output by author name"
 	echo "	-c | --commits	sort output by number of commits"
Index: /issm/trunk/src/c/Makefile.am
===================================================================
--- /issm/trunk/src/c/Makefile.am	(revision 28012)
+++ /issm/trunk/src/c/Makefile.am	(revision 28013)
@@ -7,5 +7,4 @@
 
 # Library declaration {{{
-if !WINDOWS
 lib_LTLIBRARIES = libISSMCore.la
 if !MSYS2
@@ -14,10 +13,4 @@
 if WRAPPERS
 lib_LTLIBRARIES += libISSMModules.la
-endif
-else
-noinst_LTLIBRARIES = libISSMCore.la libISSMOverload.la
-if WRAPPERS
-noinst_LTLIBRARIES += libISSMModules.la
-endif
 endif
 #}}}
@@ -89,5 +82,9 @@
 	./classes/Misfit.cpp \
 	./classes/Cfsurfacesquare.cpp \
+	./classes/Cfsurfacesquaretransient.cpp \
 	./classes/Cfdragcoeffabsgrad.cpp \
+	./classes/Cfdragcoeffabsgradtransient.cpp \
+	./classes/Cfrheologybbarabsgrad.cpp \
+	./classes/Cfrheologybbarabsgradtransient.cpp \
 	./classes/Cfsurfacelogvel.cpp \
 	./classes/Cflevelsetmisfit.cpp \
@@ -129,4 +126,5 @@
 	./classes/Params/Parameters.cpp \
 	./classes/Params/BoolParam.cpp \
+	./classes/Params/ControlParam.cpp \
 	./classes/Params/IntParam.cpp \
 	./classes/Params/IntVecParam.cpp \
@@ -487,4 +485,7 @@
 issm_sources += ./analyses/HydrologyDCEfficientAnalysis.cpp
 endif
+if HYDROLOGYARMAPW
+issm_sources += ./analyses/HydrologyArmapwAnalysis.cpp
+endif
 if L2PROJECTIONEPL
 issm_sources += ./analyses/L2ProjectionEPLAnalysis.cpp
@@ -598,4 +599,5 @@
 if FORTRAN
 issm_sources += ./modules/SurfaceMassBalancex/run_semic.f90
+issm_sources += ./modules/SurfaceMassBalancex/run_semic_transient.f90
 endif
 endif
@@ -715,10 +717,15 @@
 libISSMCore_la_FFLAGS = $(AM_FFLAGS)
 
-if !WINDOWS
+libISSMCore_LIB_ADD = $(CHACOLIB) $(DAKOTALIB) $(PETSCLIB) $(MUMPSLIB) $(SCALAPACKLIB) $(BLASLAPACKLIB) $(PARMETISLIB) $(METISLIB) $(HDF5LIB) $(TAOLIB) $(M1QN3LIB) $(SEMICLIB) $(PLAPACKLIB) $(MPLAPACKLIB) $(SUPERLULIB) $(SPOOLESLIB) $(BLACSLIB) $(HYPRELIB) $(SPAILIB) $(PROMETHEUSLIB) $(PASTIXLIB) $(MLLIB) $(SCOTCHLIB) $(MKLLIB) $(MPILIB) $(MATHLIB) $(GRAPHICSLIB) $(MULTITHREADINGLIB) $(GSLLIB) $(ADOLCLIB) $(AMPILIB) $(ADJOINTMPILIB) $(METEOIOLIB) $(SNOWPACKLIB) $(PROJLIB) 
+if FORTRAN
+libISSMCore_LIB_ADD += $(FLIBS) $(FORTRANLIB)
+endif
+libISSMCore_LIB_ADD += $(OSLIBS)
+
+if MSYS2
+libISSMCore_la_LIBADD = ${libISSMCore_LIB_ADD}
+else
 if !STANDALONE_LIBRARIES
-libISSMCore_la_LIBADD = $(CHACOLIB) $(DAKOTALIB) $(PETSCLIB) $(MUMPSLIB) $(SCALAPACKLIB) $(BLASLAPACKLIB) $(PARMETISLIB) $(METISLIB) $(HDF5LIB) $(TAOLIB) $(M1QN3LIB) $(SEMICLIB) $(PLAPACKLIB) $(MPLAPACKLIB) $(SUPERLULIB) $(SPOOLESLIB) $(BLACSLIB) $(HYPRELIB) $(SPAILIB) $(PROMETHEUSLIB) $(PASTIXLIB) $(MLLIB) $(SCOTCHLIB) $(MKLLIB) $(MPILIB) $(MATHLIB) $(GRAPHICSLIB) $(MULTITHREADINGLIB) $(GSLLIB) $(ADOLCLIB) $(AMPILIB) $(ADJOINTMPILIB) $(METEOIOLIB) $(SNOWPACKLIB) $(PROJLIB) $(OSLIBS)
-if FORTRAN
-libISSMCore_la_LIBADD += $(FLIBS) $(FORTRANLIB)
-endif
+libISSMCore_la_LIBADD = ${libISSMCore_LIB_ADD}
 endif
 endif
@@ -739,13 +746,16 @@
 
 libISSMModules_la_CXXFLAGS = $(ALL_CXXFLAGS)
-if !WINDOWS
-if STANDALONE_LIBRARIES
+
+libISSMModules_LIB_ADD = $(TRIANGLELIB) $(CHACOLIB) $(BLASLAPACKLIB) $(OSLIBS)
 libISSMModules_la_LIBADD = ./libISSMCore.la
-else
-libISSMModules_la_LIBADD = ./libISSMCore.la $(TRIANGLELIB) $(CHACOLIB) $(BLASLAPACKLIB) $(OSLIBS)
-endif
-endif
-endif
-
+
+if MSYS2
+libISSMModules_la_LIBADD += ${libISSMModules_LIB_ADD}
+else
+if !STANDALONE_LIBRARIES
+libISSMModules_la_LIBADD += ${libISSMModules_LIB_ADD}
+endif
+endif
+endif
 
 AM_LDFLAGS =
@@ -760,5 +770,5 @@
 #	will be linked to, whether we like it or not, if no static version is
 #	available.
-# - On macOC, static linking of binaries is not supported.
+# - On macOS, static linking of binaries is not supported.
 #
 if STANDALONE_EXECUTABLES
@@ -766,5 +776,15 @@
 AM_LDFLAGS += -Wl,-rpath,'@loader_path/../lib'
 else
-AM_LDFLAGS += -static -Wl,--disable-new-dtags -Wl,-rpath,'$$ORIGIN/../lib'
+if MSYS2
+AM_LDFLAGS += -Wl,-static
+else
+AM_LDFLAGS += -Wl,--disable-new-dtags -Wl,-rpath,'$$ORIGIN/../lib'
+endif
+endif
+else
+if MAC
+if HAVE_FORTRANDIR
+AM_LDFLAGS += -Wl,-rpath,$(FORTRANDIR)
+endif
 endif
 endif
@@ -783,7 +803,9 @@
 
 if STANDALONE_LIBRARIES
+if !MSYS2
 libISSMCore_la_LDFLAGS += -static
 if WRAPPERS
 libISSMModules_la_LDFLAGS += -static
+endif
 endif
 endif
@@ -802,5 +824,7 @@
 
 if STANDALONE_LIBRARIES
+if !MSYS2
 libISSMOverload_la_LDFLAGS += -static
+endif
 endif
 endif
@@ -833,9 +857,9 @@
 
 # External packages
-LDADD += $(SEMICLIB) $(M1QN3LIB) $(CHACOLIB) $(PETSCLIB) $(MUMPSLIB) $(SCALAPACKLIB) $(BLASLAPACKLIB) $(PARMETISLIB) $(METISLIB) $(NEOPZLIB) $(TAOLIB) $(PLAPACKLIB) $(MPLAPACKLIB) $(SUPERLULIB) $(SPOOLESLIB) $(BLACSLIB) $(HDF5LIB) $(HYPRELIB) $(SPAILIB) $(PROMETHEUSLIB) $(PASTIXLIB) $(MLLIB) $(SCOTCHLIB) $(MKLLIB) $(MATHLIB) $(GRAPHICSLIB) $(MULTITHREADINGLIB) $(GSLLIB) $(AMPILIB) $(ADJOINTMPILIB) $(ADOLCLIB) $(MPILIB) $(METEOIOLIB) $(SNOWPACKLIB) $(PROJLIB) $(ESMFLIB) $(OSLIBS)
-
+LDADD += $(DAKOTALIB) $(SEMICLIB) $(M1QN3LIB) $(CHACOLIB) $(PETSCLIB) $(MUMPSLIB) $(SCALAPACKLIB) $(BLASLAPACKLIB) $(PARMETISLIB) $(METISLIB) $(NEOPZLIB) $(TAOLIB) $(PLAPACKLIB) $(MPLAPACKLIB) $(SUPERLULIB) $(SPOOLESLIB) $(BLACSLIB) $(HDF5LIB) $(HYPRELIB) $(SPAILIB) $(PROMETHEUSLIB) $(PASTIXLIB) $(MLLIB) $(SCOTCHLIB) $(MKLLIB) $(MATHLIB) $(GRAPHICSLIB) $(MULTITHREADINGLIB) $(GSLLIB) $(AMPILIB) $(ADJOINTMPILIB) $(ADOLCLIB) $(MPILIB) $(METEOIOLIB) $(SNOWPACKLIB) $(PROJLIB) $(ESMFLIB)
 if FORTRAN
 LDADD += $(FLIBS) $(FORTRANLIB)
 endif
+LDADD += $(OSLIBS)
 
 issm_SOURCES = main/issm.cpp
@@ -844,5 +868,4 @@
 issm_slc_SOURCES = main/issm_slc.cpp
 issm_slc_CXXFLAGS= $(CXXFLAGS) $(CXXOPTFLAGS)
-issm_slc_LDADD = $(DAKOTALIB) $(LDADD)
 
 if OCEAN
@@ -862,5 +885,4 @@
 issm_dakota_SOURCES = main/issm_dakota.cpp
 issm_dakota_CXXFLAGS= $(CXXFLAGS) $(CXXOPTFLAGS) $(COPTFLAGS)
-issm_dakota_LDADD = $(DAKOTALIB) $(LDADD)
 bin_PROGRAMS += issm_post
 issm_post_SOURCES = main/issm_post.cpp
Index: /issm/trunk/src/c/analyses/AdjointHorizAnalysis.cpp
===================================================================
--- /issm/trunk/src/c/analyses/AdjointHorizAnalysis.cpp	(revision 28012)
+++ /issm/trunk/src/c/analyses/AdjointHorizAnalysis.cpp	(revision 28013)
@@ -2391,4 +2391,6 @@
 		case 2:
 		case 11:
+		case 13:
+		case 14:
 			dragcoefficient_input = basalelement->GetInput(FrictionCEnum); _assert_(dragcoefficient_input);
 			break;
@@ -2484,4 +2486,5 @@
 		case 2:
 		case 11:
+		case 14:
 			dragcoeff_input = element->GetInput(FrictionCEnum); _assert_(dragcoeff_input);
 			break;
@@ -2593,4 +2596,5 @@
 		case 2:
 		case 11:
+		case 14:
 			dragcoeff_input = element->GetInput(FrictionCEnum); _assert_(dragcoeff_input);
 			break;
@@ -2714,4 +2718,5 @@
 		case 2:
 		case 11:
+		case 14:
 			dragcoeff_input = basalelement->GetInput(FrictionCEnum); _assert_(dragcoeff_input);
 			break;
@@ -2832,4 +2837,6 @@
 		case 2:
 		case 11:
+		case 13:
+		case 14:
 			dragcoeff_input = basalelement->GetInput(FrictionCEnum); _assert_(dragcoeff_input);
 			break;
Index: /issm/trunk/src/c/analyses/AgeAnalysis.cpp
===================================================================
--- /issm/trunk/src/c/analyses/AgeAnalysis.cpp	(revision 28012)
+++ /issm/trunk/src/c/analyses/AgeAnalysis.cpp	(revision 28013)
@@ -92,68 +92,4 @@
 }/*}}}*/
 ElementMatrix* AgeAnalysis::CreateKMatrix(Element* element){/*{{{*/
-
-	_error_("STOP");
-	/* Check if ice in element */
-	if(!element->IsIceInElement()) return NULL;
-
-	/*compute all stiffness matrices for this element*/
-	ElementMatrix* Ke1=CreateKMatrixVolume(element);
-	ElementMatrix* Ke2=CreateKMatrixShelf(element);
-	ElementMatrix* Ke =new ElementMatrix(Ke1,Ke2);
-
-	/*clean-up and return*/
-	delete Ke1;
-	delete Ke2;
-	return Ke;
-}/*}}}*/
-ElementMatrix* AgeAnalysis::CreateKMatrixShelf(Element* element){/*{{{*/
-
-	/* Check if ice in element */
-	if(!element->IsIceInElement()) return NULL;
-
-	/*Initialize Element matrix and return if necessary*/
-	if(!element->IsOnBase() || !element->IsAllFloating()) return NULL;
-
-	IssmDouble  dt,Jdet,D;
-	IssmDouble *xyz_list_base = NULL;
-
-	/*Get basal element*/
-	if(!element->IsOnBase() || !element->IsAllFloating()) return NULL;
-
-	/*Fetch number of nodes for this finite element*/
-	int numnodes = element->GetNumberOfNodes();
-
-	/*Initialize vectors*/
-	ElementMatrix* Ke    = element->NewElementMatrix();
-	IssmDouble*    basis = xNew<IssmDouble>(numnodes);
-
-	/*Retrieve all inputs and parameters*/
-	element->GetVerticesCoordinatesBase(&xyz_list_base);
-	element->FindParam(&dt,TimesteppingTimeStepEnum);
-	IssmDouble gravity             = element->FindParam(ConstantsGEnum);
-	IssmDouble rho_water           = element->FindParam(MaterialsRhoSeawaterEnum);
-	IssmDouble rho_ice             = element->FindParam(MaterialsRhoIceEnum);
-	IssmDouble heatcapacity        = element->FindParam(MaterialsHeatcapacityEnum);
-	IssmDouble mixed_layer_capacity= element->FindParam(MaterialsMixedLayerCapacityEnum);
-
-	/* Start  looping on the number of gaussian points: */
-	Gauss* gauss=element->NewGaussBase(4);
-	while(gauss->next()){
-
-		element->JacobianDeterminantBase(&Jdet,xyz_list_base,gauss);
-		element->NodalFunctions(basis,gauss);
-
-		D=gauss->weight*Jdet*rho_water*mixed_layer_capacity/(heatcapacity*rho_ice);
-		if(reCast<bool,IssmDouble>(dt)) D=dt*D;
-		for(int i=0;i<numnodes;i++) for(int j=0;j<numnodes;j++) Ke->values[i*numnodes+j] += D*basis[i]*basis[j];
-	}
-
-	/*Clean up and return*/
-	delete gauss;
-	xDelete<IssmDouble>(basis);
-	xDelete<IssmDouble>(xyz_list_base);
-	return Ke;
-}/*}}}*/
-ElementMatrix* AgeAnalysis::CreateKMatrixVolume(Element* element){/*{{{*/
 
 	/* Check if ice in element */
@@ -303,135 +239,7 @@
 	if(!element->IsIceInElement()) return NULL;
 
-	/*compute all load vectors for this element*/
-	ElementVector* pe1=CreatePVectorVolume(element);
-	ElementVector* pe2=CreatePVectorSheet(element);
-	ElementVector* pe3=CreatePVectorShelf(element);
-	ElementVector* pe =new ElementVector(pe1,pe2,pe3);
-
-	/*clean-up and return*/
-	delete pe1;
-	delete pe2;
-	delete pe3;
-	return pe;
-}/*}}}*/
-ElementVector* AgeAnalysis::CreatePVectorSheet(Element* element){/*{{{*/
-
-	/* Check if ice in element */
-	if(!element->IsIceInElement()) return NULL;
-
-	/* Geothermal flux on ice sheet base and basal friction */
-	if(!element->IsOnBase() || element->IsAllFloating()) return NULL;
-
-	IssmDouble  dt,Jdet,geothermalflux,vx,vy,vz;
-	IssmDouble  alpha2,scalar,basalfriction,heatflux;
-	IssmDouble *xyz_list_base = NULL;
-
-	/*Fetch number of nodes for this finite element*/
-	int numnodes = element->GetNumberOfNodes();
-
-	/*Initialize vectors*/
-	ElementVector* pe    = element->NewElementVector();
-	IssmDouble*    basis = xNew<IssmDouble>(numnodes);
-
-	/*Retrieve all inputs and parameters*/
-	element->GetVerticesCoordinatesBase(&xyz_list_base);
-	element->FindParam(&dt,TimesteppingTimeStepEnum);
-	Input* vx_input             = element->GetInput(VxEnum);                          _assert_(vx_input);
-	Input* vy_input             = element->GetInput(VyEnum);                          _assert_(vy_input);
-	Input* vz_input             = element->GetInput(VzEnum);                          _assert_(vz_input);
-	Input* geothermalflux_input = element->GetInput(BasalforcingsGeothermalfluxEnum); _assert_(geothermalflux_input);
-	IssmDouble  rho_ice             = element->FindParam(MaterialsRhoIceEnum);
-	IssmDouble  heatcapacity        = element->FindParam(MaterialsHeatcapacityEnum);
-
-	/*Build friction element, needed later: */
-	Friction* friction=new Friction(element,3);
-
-	/* Start  looping on the number of gaussian points: */
-	Gauss* gauss   = element->NewGaussBase(4);
-	while(gauss->next()){
-
-		element->JacobianDeterminantBase(&Jdet,xyz_list_base,gauss);
-		element->NodalFunctions(basis,gauss);
-
-		geothermalflux_input->GetInputValue(&geothermalflux,gauss);
-		friction->GetAlpha2(&alpha2,gauss);
-		vx_input->GetInputValue(&vx,gauss);
-		vy_input->GetInputValue(&vy,gauss);
-		vz_input->GetInputValue(&vz,gauss);
-		vz = 0.;//FIXME
-		basalfriction = alpha2*(vx*vx + vy*vy + vz*vz);
-		heatflux      = (basalfriction+geothermalflux)/(rho_ice*heatcapacity);
-
-		scalar = gauss->weight*Jdet*heatflux;
-		if(dt!=0.) scalar=dt*scalar;
-
-		for(int i=0;i<numnodes;i++) pe->values[i]+=scalar*basis[i];
-	}
-
-	/*Clean up and return*/
-	delete gauss;
-	delete friction;
-	xDelete<IssmDouble>(basis);
-	xDelete<IssmDouble>(xyz_list_base);
-	return pe;
-}/*}}}*/
-ElementVector* AgeAnalysis::CreatePVectorShelf(Element* element){/*{{{*/
-
-	/* Check if ice in element */
-	if(!element->IsIceInElement()) return NULL;
-
-	IssmDouble  t_pmp,dt,Jdet,scalar_ocean,pressure;
-	IssmDouble *xyz_list_base = NULL;
-
-	/*Get basal element*/
-	if(!element->IsOnBase() || !element->IsAllFloating()) return NULL;
-
-	/*Fetch number of nodes for this finite element*/
-	int numnodes = element->GetNumberOfNodes();
-
-	/*Initialize vectors*/
-	ElementVector* pe    = element->NewElementVector();
-	IssmDouble*    basis = xNew<IssmDouble>(numnodes);
-
-	/*Retrieve all inputs and parameters*/
-	element->GetVerticesCoordinatesBase(&xyz_list_base);
-	element->FindParam(&dt,TimesteppingTimeStepEnum);
-	Input*      pressure_input=element->GetInput(PressureEnum); _assert_(pressure_input);
-	IssmDouble  gravity             = element->FindParam(ConstantsGEnum);
-	IssmDouble  rho_water           = element->FindParam(MaterialsRhoSeawaterEnum);
-	IssmDouble  rho_ice             = element->FindParam(MaterialsRhoIceEnum);
-	IssmDouble  heatcapacity        = element->FindParam(MaterialsHeatcapacityEnum);
-	IssmDouble  mixed_layer_capacity= element->FindParam(MaterialsMixedLayerCapacityEnum);
-
-	/* Start  looping on the number of gaussian points: */
-	Gauss* gauss=element->NewGaussBase(4);
-	while(gauss->next()){
-
-		element->JacobianDeterminantBase(&Jdet,xyz_list_base,gauss);
-		element->NodalFunctions(basis,gauss);
-
-		pressure_input->GetInputValue(&pressure,gauss);
-		t_pmp=element->TMeltingPoint(pressure);
-
-		scalar_ocean=gauss->weight*Jdet*rho_water*mixed_layer_capacity*(t_pmp)/(heatcapacity*rho_ice);
-		if(reCast<bool,IssmDouble>(dt)) scalar_ocean=dt*scalar_ocean;
-
-		for(int i=0;i<numnodes;i++) pe->values[i]+=scalar_ocean*basis[i];
-	}
-
-	/*Clean up and return*/
-	delete gauss;
-	xDelete<IssmDouble>(basis);
-	xDelete<IssmDouble>(xyz_list_base);
-	return pe;
-}/*}}}*/
-ElementVector* AgeAnalysis::CreatePVectorVolume(Element* element){/*{{{*/
-
-	/* Check if ice in element */
-	if(!element->IsIceInElement()) return NULL;
-
 	/*Intermediaries*/
 	int         stabilization;
-	IssmDouble  Jdet,phi,dt;
+	IssmDouble  Jdet,dt;
 	IssmDouble  temperature;
 	IssmDouble  tau_parameter,diameter,hx,hy,hz;
@@ -451,8 +259,4 @@
 	/*Retrieve all inputs and parameters*/
 	element->GetVerticesCoordinates(&xyz_list);
-	IssmDouble  rho_ice             = element->FindParam(MaterialsRhoIceEnum);
-	IssmDouble  heatcapacity        = element->FindParam(MaterialsHeatcapacityEnum);
-	IssmDouble  thermalconductivity = 1.;
-	IssmDouble  kappa = thermalconductivity/(rho_ice*heatcapacity);
 	element->FindParam(&dt,TimesteppingTimeStepEnum);
 	element->FindParam(&stabilization,AgeStabilizationEnum);
@@ -469,7 +273,6 @@
 		element->JacobianDeterminant(&Jdet,xyz_list,gauss);
 		element->NodalFunctions(basis,gauss);
-		element->ViscousHeating(&phi,xyz_list,gauss,vx_input,vy_input,vz_input);
-
-		scalar_def=phi/(rho_ice*heatcapacity)*Jdet*gauss->weight;
+
+		scalar_def=1.*Jdet*gauss->weight;
 		if(reCast<bool,IssmDouble>(dt)) scalar_def=scalar_def*dt;
 
@@ -490,5 +293,5 @@
 			vz_input->GetInputValue(&w,gauss);
 
-			tau_parameter=element->StabilizationParameter(u,v,w,diameter,kappa);
+			tau_parameter=element->StabilizationParameter(u,v,w,diameter,1.e-15); //assume very small conductivity to get tau
 
 			for(int i=0;i<numnodes;i++) pe->values[i]+=tau_parameter*scalar_def*(u*dbasis[0*numnodes+i]+v*dbasis[1*numnodes+i]+w*dbasis[2*numnodes+i]);
@@ -504,5 +307,5 @@
 			vy_input->GetInputValue(&v,gauss);
 			vz_input->GetInputValue(&w,gauss);
-			element->StabilizationParameterAnisotropic(&tau_parameter_anisotropic[0],u,v,w,hx,hy,hz,kappa);
+			element->StabilizationParameterAnisotropic(&tau_parameter_anisotropic[0],u,v,w,hx,hy,hz,1.e-15); //assume very small conductivity to get tau
 			tau_parameter_hor=tau_parameter_anisotropic[0];
 			tau_parameter_ver=tau_parameter_anisotropic[1];
@@ -518,5 +321,4 @@
 	delete gauss;
 	return pe;
-
 }/*}}}*/
 void           AgeAnalysis::GetSolutionFromInputs(Vector<IssmDouble>* solution,Element* element){/*{{{*/
@@ -533,5 +335,39 @@
 void           AgeAnalysis::UpdateConstraints(FemModel* femmodel){/*{{{*/
 	SetActiveNodesLSMx(femmodel);
-
 	_error_("Should also automatically constrain surface/basal nodes where we have inflow");
-}/*}}}*/
+
+	/*Constrain all nodes that are grounded and unconstrain the ones that float*/
+	for(Object* & object : femmodel->elements->objects){
+		Element    *element  = xDynamicCast<Element*>(object);
+
+		if(element->IsOnSurface()){
+			element = element->SpawnTopElement();
+			int         numnodes  = element->GetNumberOfNodes();
+			IssmDouble *mask      = xNew<IssmDouble>(numnodes);
+			IssmDouble *bed       = xNew<IssmDouble>(numnodes);
+			IssmDouble *ls_active = xNew<IssmDouble>(numnodes);
+
+			element->GetInputListOnNodes(&mask[0],MaskOceanLevelsetEnum);
+			element->GetInputListOnNodes(&bed[0],BaseEnum);
+			element->GetInputListOnNodes(&ls_active[0],IceMaskNodeActivationEnum);
+
+			for(int in=0;in<numnodes;in++){
+				Node* node=element->GetNode(in);
+				if(mask[in]<0. && ls_active[in]==1.){
+					node->Activate();
+				}
+				else{
+					node->Deactivate();
+					node->ApplyConstraint(0,bed[in]);
+				}
+			}
+			xDelete<IssmDouble>(mask);
+			xDelete<IssmDouble>(bed);
+			xDelete<IssmDouble>(ls_active);
+		}
+		else if(element->IsOnBase()){
+			element = element->SpawnBasalElement();
+			_error_("not supported");
+		}
+	}
+}/*}}}*/
Index: /issm/trunk/src/c/analyses/AgeAnalysis.h
===================================================================
--- /issm/trunk/src/c/analyses/AgeAnalysis.h	(revision 28012)
+++ /issm/trunk/src/c/analyses/AgeAnalysis.h	(revision 28013)
@@ -26,10 +26,5 @@
 		ElementMatrix* CreateJacobianMatrix(Element* element);
 		ElementMatrix* CreateKMatrix(Element* element);
-		ElementMatrix* CreateKMatrixShelf(Element* element);
-		ElementMatrix* CreateKMatrixVolume(Element* element);
 		ElementVector* CreatePVector(Element* element);
-		ElementVector* CreatePVectorSheet(Element* element);
-		ElementVector* CreatePVectorShelf(Element* element);
-		ElementVector* CreatePVectorVolume(Element* element);
 		void           GetSolutionFromInputs(Vector<IssmDouble>* solution,Element* element);
 		void           GradientJ(Vector<IssmDouble>* gradient,Element*  element,int control_type,int control_interp,int control_index);
Index: /issm/trunk/src/c/analyses/DebrisAnalysis.cpp
===================================================================
--- /issm/trunk/src/c/analyses/DebrisAnalysis.cpp	(revision 28012)
+++ /issm/trunk/src/c/analyses/DebrisAnalysis.cpp	(revision 28013)
@@ -10,4 +10,5 @@
 
 #define FINITEELEMENT P1Enum
+#define EPS 1e-14
 
 /*Model processing*/
@@ -52,5 +53,5 @@
 	}
 
-	/*free ressources: */
+	/*free resources: */
 	iomodel->DeleteData(vertex_pairing,"md.debris.vertex_pairing");
 	iomodel->DeleteData(nodeonsurface,"md.mesh.vertexonsurface");
@@ -59,5 +60,5 @@
 
 	if(iomodel->domaintype!=Domain2DhorizontalEnum) iomodel->FetchData(2,"md.mesh.vertexonbase","md.mesh.vertexonsurface");
-	::CreateNodes(nodes,iomodel,DebrisAnalysisEnum,FINITEELEMENT);
+	::CreateNodes(nodes,iomodel,DebrisAnalysisEnum,FINITEELEMENT,isamr);
 	iomodel->DeleteData(2,"md.mesh.vertexonbase","md.mesh.vertexonsurface");
 
@@ -127,85 +128,9 @@
 void           DebrisAnalysis::Core(FemModel* femmodel){/*{{{*/
 
-
-	Element* element= NULL;
-	for(Object* & object : femmodel->elements->objects){
-		element=xDynamicCast<Element*>(object);
-
-
-		int numvertices = element->GetNumberOfNodes();
-
-		IssmDouble* vx = xNew<IssmDouble>(numvertices);
-		IssmDouble* debristhickness = xNew<IssmDouble>(numvertices);
-		IssmDouble* slopex         = xNew<IssmDouble>(numvertices);
-		IssmDouble* onsurface      = xNew<IssmDouble>(numvertices);
-		IssmDouble* icethickness      = xNew<IssmDouble>(numvertices);
-
-		element->GetInputListOnVertices(&debristhickness[0],DebrisThicknessEnum);
-		element->GetInputListOnVertices(&vx[0],VxEnum);
-		element->GetInputListOnVertices(&slopex[0],SurfaceSlopeXEnum);
-		element->GetInputListOnVertices(&onsurface[0],MeshVertexonsurfaceEnum);
-		element->GetInputListOnVertices(&icethickness[0],ThicknessEnum);
-
-		IssmDouble slope,rad2deg=180./M_PI; //=57.2958
-		IssmDouble vslipx,rhod=1900.;
-		IssmDouble gravity=element->FindParam(ConstantsGEnum);
-		IssmDouble slope_threshold=element->FindParam(DebrisRemovalSlopeThresholdEnum);
-		IssmDouble iceminthickness=element->FindParam(MasstransportMinThicknessEnum);
-
-		int step;
-		IssmDouble dt, maxv;
-		IssmDouble yts=31536000.;
-		femmodel->parameters->FindParam(&step,StepEnum);
-		femmodel->parameters->FindParam(&dt,TimesteppingTimeStepEnum);
-
-		bool isminthicknessinelement=false;
-		for(int i=0;i<numvertices;i++){
-			if(icethickness[i]<=(iceminthickness+0.01)) isminthicknessinelement=true;
-		}
-		if(isminthicknessinelement){
-			//do nothing
-			for(int i=0;i<numvertices;i++){
-				if(icethickness[i]<=(iceminthickness+0.01)) vx[i]=0.;                         
-			}
-		}else{
-			for(int i=0;i<numvertices;i++){
-				//if(onsurface[i]>.5){
-				slope=fabs(slopex[i]);
-				if((atan(slope)*rad2deg)>25.){
-					//if(debristhickness[i]>0.01){
-					vslipx=slope_threshold/yts;
-					//maxv=10.0/2./dt;
-					//vslipx=-slope_threshold*rhod*gravity*debristhickness[i]*slopex[i]/yts;
-					vx[i]=vx[i]+vslipx;
-					//debristhickness[i]=debristhickness[i];
-					//if(vx[i]>maxv) vx[i]=maxv;
-					//}
-				} 
-				//}
-			}
-		}
-		//if(step%100==0)   
-		element->AddInput(VxDebrisEnum,vx,P1Enum);
-		//element->AddInput(DebrisThicknessEnum,debristhickness,P1Enum);
-
-		/* Free resources */
-		xDelete<IssmDouble>(debristhickness);
-		xDelete<IssmDouble>(icethickness);
-		xDelete<IssmDouble>(vx);
-		xDelete<IssmDouble>(slopex);
-		xDelete<IssmDouble>(onsurface);
-	}
-
-	//if(step%7==0) 
 	//PreProcessing(femmodel);
 	//femmodel->parameters->SetParam(VxDebrisEnum,InputToExtrudeEnum);
 	//extrudefromtop_core(femmodel);
-
 	femmodel->SetCurrentConfiguration(DebrisAnalysisEnum);        
 	solutionsequence_linear(femmodel);
-
-	int step;
-	femmodel->parameters->FindParam(&step,StepEnum);
-	//if(step%7==0) PreProcessing(femmodel);
 	PostProcessing(femmodel);
 
@@ -230,5 +155,6 @@
 	IssmDouble Jdet,D_scalar,dt,h;
 	IssmDouble vel,vx,vy,dvxdx,dvydy;
-	IssmDouble xi,tau;
+	IssmDouble yts=31536000.;
+	IssmDouble tau;
 	IssmDouble dvx[2],dvy[2];
 	Element*    topelement = NULL;
@@ -271,5 +197,5 @@
 	Input* vy_input=NULL;
 	if(dim>1){vy_input = topelement->GetInput(VyDebrisEnum); _assert_(vy_input);}
-	h = topelement->CharacteristicLength();
+	h=topelement->CharacteristicLength();
 
 	/* Start  looping on the number of gaussian points: */
@@ -302,6 +228,5 @@
 				}
 			}
-		}
-		else{
+		}else{
 			dvxdx=dvx[0];
 			for(int i=0;i<numnodes;i++){
@@ -313,101 +238,51 @@
 		}
 
-		/*******************************************************************/
-		/* Diffusion */
-		bool isdisplacement=false;
-		int step;
-		topelement->FindParam(&step,StepEnum);
-		IssmDouble slope_threshold;
-		topelement->FindParam(&slope_threshold,DebrisRemovalSlopeThresholdEnum);
-		IssmDouble kappa,f,smb,debristhickness,slopex;
-		IssmDouble Diff,fraction,M=1,C;
-		IssmDouble rad2deg=180./M_PI;
-		Diff=3.2/3e7;
-		Input* slopex_input=topelement->GetInput(SurfaceSlopeXEnum); _assert_(slopex_input);
-		Input* smb_input=topelement->GetInput(SmbMassBalanceEnum); _assert_(smb_input);
-		Input* debristhickness_input=topelement->GetInput(DebrisThicknessEnum); _assert_(debristhickness_input);
-
-		if(isdisplacement){
-
-			slopex_input->GetInputValue(&slopex, gauss);
-			smb_input->GetInputValue(&smb, gauss);
-			debristhickness_input->GetInputValue(&debristhickness, gauss);
-			if((atan(fabs(slopex))*rad2deg)>30.){
-				f=1.;
-			}else{
-				f=0.;
-			}
-			//f=1;
-			//kappa=-5.6e16*smb*debristhickness*f;
-			//kappa=debristhickness/h*4e9*f;
-			//kappa=14.2809e8*f; // 25°
-			kappa=slope_threshold*1e8*f;
-			if(dim==2){
-				for(int i=0;i<numnodes;i++){
-					for(int j=0;j<numnodes;j++){
-						Ke->values[i*numnodes+j] +=  D_scalar*kappa*(
-								dbasis[0*numnodes+j]*dbasis[0*numnodes+i] + dbasis[1*numnodes+j]*dbasis[1*numnodes+i] + dbasis[2*numnodes+j]*dbasis[2*numnodes+i]
-								);
-					}
-				}
-			}else{
-				for(int i=0;i<numnodes;i++){
-					for(int j=0;j<numnodes;j++){
-						Ke->values[i*numnodes+j] += D_scalar*kappa*(dbasis[0*numnodes+j]*dbasis[0*numnodes+i]);
-					}
-				}
-			}                
-		}
-
-		/*******************************************************************/                
-
 		IssmDouble rho;
 		if(FINITEELEMENT==P1Enum){
-			rho=2;
+			rho=2.;
 		}else if(FINITEELEMENT==P2Enum){
 			rho=4.;
 		}
-		if(stabilization==2){
+
+		for(int i=0;i<(dim*dim);i++) D[i]=0.;
+		if(stabilization==1){
+			/*SSA*/
+			if(dim==1){
+				vx_input->GetInputValue(&vx,gauss);
+				D[0]=h/rho*fabs(vx);
+			}else{
+				vx_input->GetInputValue(&vx,gauss);
+				vy_input->GetInputValue(&vy,gauss);
+				vel=sqrt(vx*vx+vy*vy);
+				D[0*dim+0]=h/rho*fabs(vx);
+				D[1*dim+1]=h/rho*fabs(vy);
+			}
+		}else if(stabilization==2){  
 			/*Streamline upwinding*/
 			if(dim==1){
 				vx_input->GetInputValue(&vx,gauss);
-				vel=fabs(vx)+1.e-10;
+				vel=fabs(vx)+EPS;
 				D[0] = h/(rho*vel)*vx*vx;
-			}
-			else{
-				vx_input->GetInputAverage(&vx);
-				vy_input->GetInputAverage(&vy);
-				vel=sqrt(vx*vx+vy*vy)+1.e-10;
+			}else{
+				vx_input->GetInputValue(&vx,gauss);
+				vy_input->GetInputValue(&vy,gauss);
+				vel=sqrt(vx*vx+vy*vy)+EPS;
 				D[0*dim+0]=h/(rho*vel)*vx*vx;
 				D[1*dim+0]=h/(rho*vel)*vy*vx;
 				D[0*dim+1]=h/(rho*vel)*vx*vy;
 				D[1*dim+1]=h/(rho*vel)*vy*vy;
-			}
-		}
-		else if(stabilization==1){  
-			/*SSA*/
-			if(dim==1){
-				vx_input->GetInputAverage(&vx);
-				D[0]=h/rho*fabs(vx);
-			}
-			else{
-				vx_input->GetInputAverage(&vx);
-				vy_input->GetInputAverage(&vy);
-				D[0*dim+0]=h/rho*fabs(vx);
-				D[1*dim+1]=h/rho*fabs(vy);
-			}
-		}
-		else if(stabilization==3){  
+			}		
+		}else if(stabilization==3){  
 			/*SUPG*/
 			if(dim==1){
-				vx_input->GetInputAverage(&vx);
-				tau=h/(rho*fabs(vx)+1e-10);
-			}
-			else{
-				vx_input->GetInputAverage(&vx);
-				vy_input->GetInputAverage(&vy);
-				tau=1*h/(rho*sqrt(vx*vx+vy*vy)+1e-10);
-			}
-		}
+				vx_input->GetInputValue(&vx,gauss);
+				tau=h/(rho*max(fabs(vx),EPS));
+			}else{
+				vx_input->GetInputValue(&vx,gauss);
+				vy_input->GetInputValue(&vy,gauss);
+				tau=h/(rho*sqrt(vx*vx+vy*vy)+EPS);
+			}
+		}
+
 		if(stabilization==1 || stabilization==2){
 			for(int i=0;i<dim*dim;i++) D[i]=D_scalar*D[i];
@@ -417,15 +292,13 @@
 						Ke->values[i*numnodes+j] += (
 								dbasis[0*numnodes+i] *(D[0*dim+0]*dbasis[0*numnodes+j] + D[0*dim+1]*dbasis[1*numnodes+j]) +
-								dbasis[1*numnodes+i] *(D[1*dim+0]*dbasis[0*numnodes+j] + D[1*dim+1]*dbasis[1*numnodes+j]) 
-								);
+								dbasis[1*numnodes+i] *(D[1*dim+0]*dbasis[0*numnodes+j] + D[1*dim+1]*dbasis[1*numnodes+j]));
 					}   
 				}
-			}
-			else{
+			}else{
 				for(int i=0;i<numnodes;i++) for(int j=0;j<numnodes;j++) Ke->values[i*numnodes+j] += dbasis[0*numnodes+i]*D[0]*dbasis[0*numnodes+j];
 			}
 		}else if(stabilization==3){ 
-			/*Mass matrix - part 2*/
 			if(dim==1){
+				/*Mass matrix - part 2*/
 				for(int i=0;i<numnodes;i++){
 					for(int j=0;j<numnodes;j++){
@@ -454,4 +327,5 @@
 				}
 
+
 				/*Advection matrix - part 2, B*/
 				for(int i=0;i<numnodes;i++){
@@ -464,5 +338,44 @@
 				for(int i=0;i<numnodes;i++){
 					for(int j=0;j<numnodes;j++){
-						Ke->values[i*numnodes+j]+=dt*gauss->weight*Jdet*tau*(basis[j]*dvxdx+basis[j]*dvydy)*(basis[i]*dvxdx);
+						Ke->values[i*numnodes+j]+=dt*gauss->weight*Jdet*tau*(basis[j]*dvxdx)*(basis[i]*dvxdx);;
+					}
+				}
+			}else if(dim==2){
+				/*Mass matrix - part 2*/
+				for(int i=0;i<numnodes;i++){
+					for(int j=0;j<numnodes;j++){
+						Ke->values[i*numnodes+j]+=gauss->weight*Jdet*tau*basis[j]*(vx*dbasis[0*numnodes+i]+vy*dbasis[1*numnodes+i]);
+					}
+				}
+				/*Mass matrix - part 3*/
+				for(int i=0;i<numnodes;i++){
+					for(int j=0;j<numnodes;j++){
+						Ke->values[i*numnodes+j]+=gauss->weight*Jdet*tau*basis[j]*(basis[i]*dvxdx+basis[i]*dvydy);
+					}
+				}
+
+				/*Advection matrix - part 2, A*/
+				for(int i=0;i<numnodes;i++){
+					for(int j=0;j<numnodes;j++){
+						Ke->values[i*numnodes+j]+=dt*gauss->weight*Jdet*tau*(vx*dbasis[0*numnodes+j]+vy*dbasis[1*numnodes+j])*(vx*dbasis[0*numnodes+i]+vy*dbasis[1*numnodes+i]);
+					}
+				}
+				/*Advection matrix - part 3, A*/
+				for(int i=0;i<numnodes;i++){
+					for(int j=0;j<numnodes;j++){
+						Ke->values[i*numnodes+j]+=dt*gauss->weight*Jdet*tau*(vx*dbasis[0*numnodes+j]+vy*dbasis[1*numnodes+j])*(basis[i]*dvxdx+basis[i]*dvydy);
+					}
+				}
+
+				/*Advection matrix - part 2, B*/
+				for(int i=0;i<numnodes;i++){
+					for(int j=0;j<numnodes;j++){
+						Ke->values[i*numnodes+j]+=dt*gauss->weight*Jdet*tau*(basis[j]*dvxdx+basis[j]*dvydy)*(vx*dbasis[0*numnodes+i]+vy*dbasis[1*numnodes+i]);
+					}
+				}
+				/*Advection matrix - part 3, B*/
+				for(int i=0;i<numnodes;i++){
+					for(int j=0;j<numnodes;j++){
+						Ke->values[i*numnodes+j]+=dt*gauss->weight*Jdet*tau*(basis[j]*dvxdx+basis[j]*dvydy)*(basis[i]*dvxdx+basis[i]*dvydy);
 					}
 				}
@@ -487,6 +400,7 @@
 	int	stabilization,dim,domaintype;
 	IssmDouble  Jdet,dt;
-	IssmDouble  smb,thickness;
-	IssmDouble  vx,vy,vel,dvxdx,dvydy,xi,h,tau,pf;
+	IssmDouble  smb,thickness,psi;
+	IssmDouble  vx,vy,vel,dvxdx,dvydy,h,tau,pf;
+	IssmDouble yts=31536000.;
 	IssmDouble  dvx[2],dvy[2];
 	IssmDouble* xyz_list = NULL;
@@ -533,4 +447,5 @@
 		vy_input=topelement->GetInput(VyDebrisEnum); _assert_(vy_input);
 	}
+	h=topelement->CharacteristicLength();
 
 	IssmDouble rho;
@@ -550,35 +465,46 @@
 		smb_input->GetInputValue(&smb,gauss);
 		thickness_input->GetInputValue(&thickness,gauss);
-
 		if(smb>0.){
-			for(int i=0;i<numnodes;i++) pe->values[i]+=Jdet*gauss->weight*(thickness-0.*dt*smb*pf)*basis[i];
-		} else {
-			for(int i=0;i<numnodes;i++) pe->values[i]+=Jdet*gauss->weight*(thickness-dt*smb*pf)*basis[i]; // take the negative of melt, because it is a debris production term here
-		}
+			psi=thickness-0.*dt*smb*pf;
+		}else{
+			psi=thickness-dt*smb*pf;
+		}
+
+		for(int i=0;i<numnodes;i++) pe->values[i]+=Jdet*gauss->weight*psi*basis[i]; 
 
 		if(stabilization==3){
 			/*SUPG*/
 			topelement->NodalFunctionsDerivatives(dbasis,xyz_list,gauss);
+			vx_input->GetInputDerivativeValue(&dvx[0],xyz_list,gauss);
+			dvxdx=dvx[0];
 			if(dim==1){
-
 				vx_input->GetInputValue(&vx,gauss);
-				vx_input->GetInputDerivativeValue(&dvx[0],xyz_list,gauss);
-				dvxdx=dvx[0];
-				tau=h/(rho*fabs(vx)+1e-10);
-				IssmDouble psi;
-				if(smb>0.){
-					psi=thickness;
-				} else {
-					psi=thickness-dt*smb*pf;
-				}
+				tau=h/(rho*max(fabs(vx),EPS));
+
 				/*Force vector - part 2*/
 				for(int i=0;i<numnodes;i++){
-					pe->values[i]+=Jdet*gauss->weight*psi*tau*(vx*dbasis[0*numnodes+i]);
+					pe->values[i]+=Jdet*gauss->weight*psi*tau*vx*dbasis[0*numnodes+i];
 				}
 				/*Force vector - part 3*/
 				for(int i=0;i<numnodes;i++){
-					pe->values[i]+=Jdet*gauss->weight*psi*tau*(basis[i]*dvxdx);
-				}
-
+					pe->values[i]+=Jdet*gauss->weight*psi*tau*basis[i]*dvxdx;
+				}
+
+			}else if(dim==2){
+				vx_input->GetInputValue(&vx,gauss);
+				vy_input->GetInputValue(&vy,gauss);
+				vy_input->GetInputDerivativeValue(&dvy[0],xyz_list,gauss);
+				vel=sqrt(vx*vx+vy*vy);
+				dvydy=dvy[1];
+				tau=h/(rho*vel+EPS);
+
+				/*Force vector - part 2*/
+				for(int i=0;i<numnodes;i++){
+					pe->values[i]+=Jdet*gauss->weight*psi*tau*(vx*dbasis[0*numnodes+i]+vy*dbasis[1*numnodes+i]);
+				}
+				/*Force vector - part 3*/
+				for(int i=0;i<numnodes;i++){
+					pe->values[i]+=Jdet*gauss->weight*psi*tau*(basis[i]*dvxdx+basis[i]*dvydy);
+				}
 			}
 		}
@@ -600,22 +526,12 @@
 }/*}}}*/
 void           DebrisAnalysis::InputUpdateFromSolution(IssmDouble* solution,Element* element){/*{{{*/
-	//element->InputUpdateFromSolutionOneDof(solution,DebrisThicknessEnum);
-	int*         ddoflist=NULL;
+
+	int *ddoflist=NULL;
 
 	int numnodes = element->GetNumberOfNodes();
-	IssmDouble* thickness     = xNew<IssmDouble>(numnodes);
-	IssmDouble* thicknessold  = xNew<IssmDouble>(numnodes);
 	IssmDouble* newthickness  = xNew<IssmDouble>(numnodes);
-	IssmDouble* icethickness  = xNew<IssmDouble>(numnodes);
-	IssmDouble* bedslopex     = xNew<IssmDouble>(numnodes);
-	IssmDouble* surfaceslopex = xNew<IssmDouble>(numnodes);
 
 	/*Use the dof list to index into the solution vector: */
 	IssmDouble minthickness = element->FindParam(DebrisMinThicknessEnum);
-	IssmDouble iceminthickness = element->FindParam(MasstransportMinThicknessEnum);
-	element->GetInputListOnVertices(&thickness[0],DebrisThicknessEnum);   
-	element->GetInputListOnVertices(&icethickness[0],ThicknessEnum);
-	element->GetInputListOnVertices(&bedslopex[0],BedSlopeXEnum);
-	element->GetInputListOnVertices(&surfaceslopex[0],SurfaceSlopeXEnum);
 	element->GetDofListLocal(&ddoflist,NoneApproximationEnum,GsetEnum);
 
@@ -625,26 +541,50 @@
 		if(xIsInf<IssmDouble>(newthickness[i])) _error_("Inf found in solution vector");
 
-		/* check for thickness<minthickness */
-		if(thickness[i]<minthickness) newthickness[i]=minthickness;
-
-		/* Carlos model sets all values below Hmin to zero */
-		if(icethickness[i]<=(iceminthickness+0.0001) & fabs((fabs(surfaceslopex[i])-fabs(bedslopex[i])))<1e-3 ) newthickness[i]=0;
-		//if(icethickness[i]<=(iceminthickness+0.01)) newthickness[i]=0;
-	}
-
-	/* update inputs */
+		// check for thickness<minthickness
+		if(newthickness[i]<minthickness) newthickness[i]=minthickness;
+	}
+
+	// update inputs
 	element->AddInput(DebrisThicknessEnum,newthickness,P1Enum);
 
-	/* Free resources */
+	// Free resources
 	xDelete<IssmDouble>(newthickness);
-	xDelete<IssmDouble>(thickness);
-	xDelete<IssmDouble>(icethickness);
-	xDelete<IssmDouble>(bedslopex);
-	xDelete<IssmDouble>(surfaceslopex);
 	xDelete<int>(ddoflist);
+	//*/
 }/*}}}*/
 void           DebrisAnalysis::UpdateConstraints(FemModel* femmodel){/*{{{*/
-	//        return;
-	SetActiveNodesLSMx(femmodel);
+	//SetActiveNodesLSMx(femmodel);
+
+	// Update active elements based on ice levelset and ocean levelset*/
+	GetMaskOfIceVerticesLSMx(femmodel,false,true); //FIXME?
+	SetActiveNodesLSMx(femmodel,false,true); //FIXME?
+
+	/*Constrain all nodes that are grounded and unconstrain the ones that float*/
+	for(Object* & object : femmodel->elements->objects){
+		Element    *element  = xDynamicCast<Element*>(object);
+		int         numnodes  = element->GetNumberOfNodes();
+		IssmDouble *mask      = xNew<IssmDouble>(numnodes);
+		IssmDouble *ls_active = xNew<IssmDouble>(numnodes);
+
+		element->GetInputListOnNodes(&mask[0],MaskOceanLevelsetEnum);
+		element->GetInputListOnNodes(&ls_active[0],DebrisMaskNodeActivationEnum);
+
+		for(int in=0;in<numnodes;in++){
+			Node* node=element->GetNode(in);
+			if(mask[in]>0. && ls_active[in]==1.){
+				// Do nothing
+				node->Activate(); //Not sure if we need this!
+			}
+			else{
+				IssmDouble phi=0;
+				node->Deactivate();// Not sure if we need this
+				node->ApplyConstraint(0,phi);
+			}
+		}
+		xDelete<IssmDouble>(mask);
+		xDelete<IssmDouble>(ls_active);
+	}
+	//*/
+	return;	
 }/*}}}*/
 void           DebrisAnalysis::PostProcessing(FemModel* femmodel){/*{{{*/
@@ -676,7 +616,10 @@
 			IssmDouble* slopey	   = xNew<IssmDouble>(numnodes); 
 			IssmDouble* onsurface	   = xNew<IssmDouble>(numnodes); 
+			IssmDouble* ls_active      = xNew<IssmDouble>(numnodes); 
 			element->GetInputListOnNodes(debristhickness,DebrisThicknessEnum);
 			element->GetInputListOnNodes(icethickness,ThicknessEnum);
 			element->GetInputListOnNodes(onsurface,MeshVertexonsurfaceEnum);
+			element->GetInputListOnNodes(ls_active,DebrisMaskNodeActivationEnum);
+
 			dim=1;
 			element->GetInputListOnNodes(slopex,SurfaceSlopeXEnum);
@@ -688,4 +631,5 @@
 			bool isminthicknessinelement=false;
 			bool remove_debris=false;
+			bool isactive=false;
 
 			IssmDouble iceminthickness=element->FindParam(MasstransportMinThicknessEnum);                        
@@ -693,72 +637,71 @@
 			switch(removalmodel){
 				case 1:{
-					IssmDouble slope_threshold=element->FindParam(DebrisRemovalSlopeThresholdEnum);
-
-					for(k=0; k<numnodes;k++){
-						if(icethickness[k]<=(iceminthickness+0.01)) isminthicknessinelement=true;
-					}
-					isminthicknessinelement=true;
-					if(isminthicknessinelement){
-						for(k=0; k<numnodes;k++){
-							if(onsurface[k]>0.5){
-								slope=fabs(slopex[k]);
-								if(dim==2) slope=pow(pow(slopex[k],2)+pow(slopey[k],2),0.5);
-								if((atan(slope)*rad2deg)>slope_threshold) debristhickness[k]=remove_debris=true;
-							}
-						}
-						if(remove_debris){
-							for(k=0; k<numnodes;k++){
-								if(icethickness[k]<=(iceminthickness+0.01)) debristhickness[k]=0.;
-							}
-						}
-					}
-					//int finite_element = element->GetElementType(); 
-					//element->AddInput(DebrisThicknessEnum,debristhickness,FINITEELEMENT);
-					element->AddInput(DebrisThicknessEnum,debristhickness,P1Enum);
-
-					xDelete<IssmDouble>(debristhickness);
-					xDelete<IssmDouble>(icethickness);
-					xDelete<IssmDouble>(slopex);
-					xDelete<IssmDouble>(slopey);
-					break;
-						 }
+					       IssmDouble slope_threshold=element->FindParam(DebrisRemovalSlopeThresholdEnum);
+					       int kk=0;
+					       for(k=0; k<numnodes;k++){
+						       if(icethickness[k]<=(iceminthickness+0.00001)) isminthicknessinelement=true;
+						       if(icethickness[k]<=(iceminthickness+0.00001)) kk++;
+					       }
+					       isminthicknessinelement=true;
+					       if(kk<numnodes && isminthicknessinelement){
+						       for(k=0; k<numnodes;k++){
+							       slope=fabs(slopex[k]);
+							       if(dim==2) slope=pow(pow(slopex[k],2)+pow(slopey[k],2),0.5);
+							       //slope_mean=slope_mean+slope;
+							       if((atan(slope)*rad2deg)>slope_threshold) remove_debris=true;
+							       //if((atan(slope)*rad2deg)>slope_threshold) debristhickness[k]=0.;
+						       }
+						       //if((atan(slope_mean)*rad2deg)>slope_threshold) remove_debris=true;
+						       if(remove_debris){
+							       for(k=0; k<numnodes;k++){
+								       debristhickness[k]=0.;
+							       }
+						       }
+					       }
+					       element->AddInput(DebrisThicknessEnum,debristhickness,P1Enum);
+
+					       xDelete<IssmDouble>(debristhickness);
+					       xDelete<IssmDouble>(icethickness);
+					       xDelete<IssmDouble>(slopex);
+					       xDelete<IssmDouble>(slopey);
+					       break;
+				       }
 				case 2:{
-					IssmDouble stress_threshold = element->FindParam(DebrisRemovalStressThresholdEnum);
-					IssmDouble gravity = element->FindParam(ConstantsGEnum);
-					IssmDouble stress,rhod=1900.;
-
-					for(k=0; k<numnodes;k++){
-						if(icethickness[k]<=(iceminthickness+0.01)) isminthicknessinelement=true;
-					}
-					isminthicknessinelement=true;
-					if(isminthicknessinelement){
-						//stress=0;
-						int kk=0;
-						for(k=0; k<numnodes;k++){
-							if(onsurface[k]>0.5){
-								slope=fabs(slopex[k]);
-								if(dim==2) slope=pow(pow(slopex[k],2)+pow(slopey[k],2),0.5);
-								stress=rhod*gravity*debristhickness[k]*slope;//pow(slope*slope/(slope*slope+1),0.5);//sin(slope/rad2deg);
-								if(stress>stress_threshold) debristhickness[k]=0.;
-								//kk++;
-							}
-						}
-						/*if((stress/double(kk))>stress_threshold) remove_debris=true;
-						  if(remove_debris){
-						  for(k=0; k<numnodes;k++){
-						  debristhickness[k]=0.;
-						  }
-						  }*/
-					}
-					//int finite_element = element->GetElementType(); 
-					//element->AddInput(DebrisThicknessEnum,debristhickness,FINITEELEMENT);
-					element->AddInput(DebrisThicknessEnum,debristhickness,P1Enum);
-
-					xDelete<IssmDouble>(debristhickness);
-					xDelete<IssmDouble>(icethickness);
-					xDelete<IssmDouble>(slopex);
-					xDelete<IssmDouble>(slopey);
-					break;
-						 }
+					       IssmDouble stress_threshold = element->FindParam(DebrisRemovalStressThresholdEnum);
+					       IssmDouble gravity = element->FindParam(ConstantsGEnum);
+					       IssmDouble stress,rhod=1900.;
+					       int kk=0;
+					       for(k=0; k<numnodes;k++){
+						       if(icethickness[k]<=(iceminthickness+0.00001)) isminthicknessinelement=true;
+						       if(icethickness[k]<=(iceminthickness+0.00001)) kk++;
+					       }
+					       isminthicknessinelement=true;
+					       if(kk<numnodes && isminthicknessinelement){
+						       //stress=0;
+						       IssmDouble stress_sum=0.;
+						       for(k=0; k<numnodes;k++){
+							       slope=fabs(slopex[k]);
+							       if(dim==2) slope=pow(pow(slopex[k],2)+pow(slopey[k],2),0.5);
+							       stress=rhod*gravity*debristhickness[k]*slope;//pow(slope*slope/(slope*slope+1),0.5);//sin(slope/rad2deg);
+							       //stress_sum=stress_sum+stress;
+							       if(stress>stress_threshold) remove_debris=true;
+							       //if(stress>stress_threshold) debristhickness[k]=0.;
+						       }
+						       //if((stress_sum/double(kk))>stress_threshold) remove_debris=true;
+						       if(remove_debris){
+							       for(k=0; k<numnodes;k++){
+								       debristhickness[k]=0.;
+							       }
+						       }
+					       }
+					       element->AddInput(DebrisThicknessEnum,debristhickness,P1Enum);
+
+					       xDelete<IssmDouble>(debristhickness);
+					       xDelete<IssmDouble>(icethickness);
+					       xDelete<IssmDouble>(slopex);
+					       xDelete<IssmDouble>(slopey);
+					       xDelete<IssmDouble>(ls_active);
+					       break;
+				       }
 				default: _error_("removalmodel "<<EnumToStringx(removalmodel)<<" not implemented yet");
 			}
@@ -767,138 +710,74 @@
 
 }/*}}}*/
-void           DebrisAnalysis::PreProcessing(FemModel* femmodel){/*{{{*/
+void DebrisAnalysis::PreProcessing(FemModel* femmodel){/*{{{*/
 
 	if(VerboseSolution()) _printf0_("   Debris preprocessing\n");
 
-	/*Intermediaries*/
-	bool isdebrisdisplacement=true;
-	int displacementmodel=1;
-	int k,numnodes;
-	int domaintype,dim;
-	femmodel->parameters->FindParam(&domaintype,DomainTypeEnum);
-	//femmodel->parameters->FindParam(&displacementmodel,DebrisDisplacementmodelEnum);
 	Element* element= NULL;
-	IssmDouble *xyz_list = NULL;
-	//IssmDouble top_normal[2];
-
-	if(isdebrisdisplacement){
-
-		//if(displacementmodel==0){
-		// no displacement, do nothing
-		//}else{
-		// deterministic or random displacement
-
-		for(Object* & object : femmodel->elements->objects){
-			element=xDynamicCast<Element*>(object);
-
-			numnodes=element->GetNumberOfNodes();
-			IssmDouble* debristhickness= xNew<IssmDouble>(numnodes);
-			IssmDouble* icethickness= xNew<IssmDouble>(numnodes);
-			IssmDouble* slopex	   = xNew<IssmDouble>(numnodes);
-			IssmDouble* slopey	   = xNew<IssmDouble>(numnodes); 
-			IssmDouble* vx 		   = xNew<IssmDouble>(numnodes);
-			IssmDouble* vy 		   = xNew<IssmDouble>(numnodes);
-			IssmDouble* surface        = xNew<IssmDouble>(numnodes);
-			IssmDouble* onsurface  	   = xNew<IssmDouble>(numnodes);
-			element->GetInputListOnNodes(vx,VxDebrisEnum);
-			element->GetInputListOnNodes(debristhickness,DebrisThicknessEnum);
-			element->GetInputListOnNodes(icethickness,ThicknessEnum);
-			element->GetInputListOnNodes(surface,SurfaceEnum);
-			element->GetInputListOnNodes(onsurface,MeshVertexonsurfaceEnum);
-			element->GetVerticesCoordinates(&xyz_list);
-
-			dim=1;
-			element->GetInputListOnNodes(slopex,SurfaceSlopeXEnum);
-			if(domaintype!=Domain2DverticalEnum){
-				element->GetInputListOnNodes(slopey,SurfaceSlopeYEnum);
-				element->GetInputListOnNodes(vy,VyDebrisEnum);
-				dim=2;
-			}
-			IssmDouble slope,rad2deg=180./M_PI; //=57.2958
-			IssmDouble h=10.,f;
-			IssmDouble debrissum;
-			IssmDouble dt = element->FindParam(TimesteppingTimeStepEnum);
-			bool displacedebris;
-
-			switch(displacementmodel){
-				case 1:{
-					IssmDouble slope_threshold = element->FindParam(DebrisRemovalSlopeThresholdEnum);
-					IssmDouble iceminthickness=element->FindParam(MasstransportMinThicknessEnum);
-
-					bool isminthicknessinelement=false;
-					for(k=0; k<numnodes;k++){
-						if(icethickness[k]<=(iceminthickness+0.01)) isminthicknessinelement=true;
-					}
-					if(isminthicknessinelement){
-						//do nothing
-					}else{
-						debrissum=0.;
-						int test;
-						test=0;
-						for(k=0; k<numnodes;k++){
-							if(onsurface[k]>.5){
-								slope=pow(slopex[k]*slopex[k],0.5);
-								if(dim==2) slope=pow(pow(slopex[k],2)+pow(slopey[k],2),0.5);
-								if((atan(slope)*rad2deg)>30.){
-									f=0.5;
-									test=test+0;
-									debrissum=debrissum+debristhickness[k]*f;
-									//displacedebris=true;
-									//if(debristhickness[k]>1.e-6) vx[k]=vx[k]+10./31536000.;//*vx[k]/pow(pow(vx[k],2),0.5);
-									//debristhickness[k]=debristhickness[k]*(1.-f);
-								}
-							}
-						}
-						if(test>1){
-							test=test;
-						}else{
-							test=1;
-						}
-						//if(displacedebris){
-						int index=-1;
-						IssmDouble min=1e14;
-						for(k=0; k<numnodes;k++){
-							if(onsurface[k]>.5){
-								if(surface[k]<min){
-									index=k;
-									min=surface[k];
-								} 
-							}
-						}
-						for(k=0; k<numnodes;k++){
-							if(onsurface[k]>.5){
-								if(k==index){
-									debristhickness[k]=debristhickness[k]+debrissum;
-								}else{
-									debristhickness[k]=debristhickness[k]-debrissum;
-									if(debristhickness[k]<=0) debristhickness[k]=0;
-								}
-								//if(debristhickness[k]>10.) debristhickness[k]=10.;
-							}
-						}
-						//}
-					}
-
-					int finite_element = element->GetElementType(); 
-					//element->AddInput(DebrisThicknessEnum,debristhickness,finite_element);
-					//element->AddInput(VxDebrisEnum,vx,P1Enum);
-					element->AddInput(DebrisThicknessEnum,debristhickness,P1Enum);
-
-					xDelete<IssmDouble>(debristhickness);
-					xDelete<IssmDouble>(icethickness);
-					xDelete<IssmDouble>(vx);
-					xDelete<IssmDouble>(vy); 
-					xDelete<IssmDouble>(slopex);
-					xDelete<IssmDouble>(slopey);
-					xDelete<IssmDouble>(surface);
-					break;
-						 }
-				case 2:
-					// Do nothing
-
-				default: _error_("Debris displacement model "<<EnumToStringx(displacementmodel)<<" not implemented yet");
-			}
-		}
-	}
-	//}
-}/*}}}*/        
+	for(Object* & object : femmodel->elements->objects){
+		element=xDynamicCast<Element*>(object);
+
+		int numvertices = element->GetNumberOfVertices();
+
+		IssmDouble* vx = xNew<IssmDouble>(numvertices);
+		IssmDouble* debristhickness = xNew<IssmDouble>(numvertices);
+		IssmDouble* slopex         = xNew<IssmDouble>(numvertices);
+		IssmDouble* onsurface      = xNew<IssmDouble>(numvertices);
+		IssmDouble* icethickness      = xNew<IssmDouble>(numvertices);
+
+		element->GetInputListOnVertices(&debristhickness[0],DebrisThicknessEnum);
+		element->GetInputListOnVertices(&vx[0],VxDebrisEnum);
+		element->GetInputListOnVertices(&slopex[0],SurfaceSlopeXEnum);
+		element->GetInputListOnVertices(&onsurface[0],MeshVertexonsurfaceEnum);
+		element->GetInputListOnVertices(&icethickness[0],ThicknessEnum);
+
+		IssmDouble slope,rad2deg=180./M_PI; //=57.2958
+		IssmDouble vslipx,rhod=1900.;
+		IssmDouble gravity=element->FindParam(ConstantsGEnum);
+		IssmDouble slope_threshold=element->FindParam(DebrisRemovalSlopeThresholdEnum);
+		IssmDouble iceminthickness=element->FindParam(MasstransportMinThicknessEnum);
+
+		int step;
+		IssmDouble dt, maxv;
+		IssmDouble yts=31536000.;
+		femmodel->parameters->FindParam(&step,StepEnum);
+		femmodel->parameters->FindParam(&dt,TimesteppingTimeStepEnum);
+
+		bool isminthicknessinelement=false;
+		for(int i=0;i<numvertices;i++){
+			if(icethickness[i]<=(iceminthickness+0.01)) isminthicknessinelement=true;
+		}
+		if(isminthicknessinelement){
+			//do nothing
+			for(int i=0;i<numvertices;i++){
+				if(icethickness[i]<=(iceminthickness+0.01)) vx[i]=0.;
+			}
+		}else{
+			for(int i=0;i<numvertices;i++){
+				//if(onsurface[i]>.5){
+				slope=fabs(slopex[i]);
+				//if((atan(slope)*rad2deg)>25.){
+				//if(debristhickness[i]>0.01){
+				vslipx=1.0/yts;
+				//maxv=10.0/2./dt;
+				//vslipx=-slope_threshold*rhod*gravity*debristhickness[i]*slopex[i]/yts;
+				vx[i]=vx[i]+vslipx;
+				//debristhickness[i]=debristhickness[i];
+				//if(vx[i]>maxv) vx[i]=maxv;
+				//}
+				//} 
+				//}
+			}
+		}
+		//if(step%100==0)   
+		element->AddInput(VxDebrisEnum,vx,P1Enum);
+
+		/* Free resources */
+		xDelete<IssmDouble>(debristhickness);
+		xDelete<IssmDouble>(icethickness);
+		xDelete<IssmDouble>(vx);
+		xDelete<IssmDouble>(slopex);
+		xDelete<IssmDouble>(onsurface);
+	}
+
+}/*}}}*/
Index: /issm/trunk/src/c/analyses/EnthalpyAnalysis.cpp
===================================================================
--- /issm/trunk/src/c/analyses/EnthalpyAnalysis.cpp	(revision 28012)
+++ /issm/trunk/src/c/analyses/EnthalpyAnalysis.cpp	(revision 28013)
@@ -102,6 +102,5 @@
 
 	bool dakota_analysis,ismovingfront,isenthalpy;
-	int frictionlaw,basalforcing_model,materialstype;
-	int FrictionCoupling;
+	int  basalforcing_model,materialstype;
 
 	/*Now, is the model 3d? otherwise, do nothing: */
@@ -131,5 +130,4 @@
 	iomodel->FindConstant(&dakota_analysis,"md.qmu.isdakota");
 	iomodel->FindConstant(&ismovingfront,"md.transient.ismovingfront");
-	iomodel->FindConstant(&frictionlaw,"md.friction.law");
 	iomodel->FindConstant(&materialstype,"md.materials.type");
 
@@ -191,73 +189,6 @@
 	}
 
-	/*Friction law variables*/
-	switch(frictionlaw){
-		case 1:
-			iomodel->FindConstant(&FrictionCoupling,"md.friction.coupling");
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.coefficient",FrictionCoefficientEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.p",FrictionPEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.q",FrictionQEnum);
-			if (FrictionCoupling==3){
-				iomodel->FetchDataToInput(inputs,elements,"md.friction.effective_pressure",FrictionEffectivePressureEnum);}
-			else if(FrictionCoupling==4){
-				iomodel->FetchDataToInput(inputs,elements,"md.friction.effective_pressure",EffectivePressureEnum);
-			}
-			break;
-		case 2:
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.C",FrictionCEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.m",FrictionMEnum);
-			break;
-		case 3:
-			iomodel->FindConstant(&FrictionCoupling,"md.friction.coupling");
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.C",FrictionCEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.As",FrictionAsEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.q",FrictionQEnum);
-			if (FrictionCoupling==3){
-				iomodel->FetchDataToInput(inputs,elements,"md.friction.effective_pressure",FrictionEffectivePressureEnum);}
-			else if(FrictionCoupling==4){
-				iomodel->FetchDataToInput(inputs,elements,"md.friction.effective_pressure",EffectivePressureEnum);
-			}
-			break;
-		case 4:
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.coefficient",FrictionCoefficientEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.p",FrictionPEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.q",FrictionQEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.initialization.pressure",PressureEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.initialization.temperature",TemperatureEnum);
-			iomodel->FindConstant(&FrictionCoupling,"md.friction.coupling");
-			break;
-		case 5:
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.coefficient",FrictionCoefficientEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.p",FrictionPEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.q",FrictionQEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.water_layer",FrictionWaterLayerEnum);
-			break;
-		case 6:
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.C",FrictionCEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.m",FrictionMEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.initialization.pressure",PressureEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.initialization.temperature",TemperatureEnum);
-			break;
-		case 7:
-			iomodel->FindConstant(&FrictionCoupling,"md.friction.coupling");
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.coefficient",FrictionCoefficientEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.coefficientcoulomb",FrictionCoefficientcoulombEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.p",FrictionPEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.q",FrictionQEnum);
-			if (FrictionCoupling==3){
-				iomodel->FetchDataToInput(inputs,elements,"md.friction.effective_pressure",FrictionEffectivePressureEnum);}
-			else if(FrictionCoupling==4){
-				iomodel->FetchDataToInput(inputs,elements,"md.friction.effective_pressure",EffectivePressureEnum);
-			}
-			break;
-		case 9:
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.coefficient",FrictionCoefficientEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.pressure_adjusted_temperature",FrictionPressureAdjustedTemperatureEnum);
-			InputUpdateFromConstantx(inputs,elements,1.,FrictionPEnum);
-			InputUpdateFromConstantx(inputs,elements,1.,FrictionQEnum);
-			break;
-		default:
-			_error_("friction law not supported");
-	}
+	/*Friction*/
+	FrictionUpdateInputs(elements, inputs, iomodel);
 
 	/*Free data: */
@@ -277,5 +208,4 @@
 	parameters->AddObject(iomodel->CopyConstantObject("md.thermal.isdrainicecolumn",ThermalIsdrainicecolumnEnum));
 	parameters->AddObject(iomodel->CopyConstantObject("md.thermal.watercolumn_upperlimit",ThermalWatercolumnUpperlimitEnum));
-	parameters->AddObject(iomodel->CopyConstantObject("md.friction.law",FrictionLawEnum));
 
 	iomodel->FindConstant(&requestedoutputs,&numoutputs,"md.thermal.requested_outputs");
@@ -284,24 +214,6 @@
 	iomodel->DeleteData(&requestedoutputs,numoutputs,"md.thermal.requested_outputs");
 
-	/*Deal with friction parameters*/
-	int frictionlaw;
-	iomodel->FindConstant(&frictionlaw,"md.friction.law");
-	if(frictionlaw==6){
-		parameters->AddObject(iomodel->CopyConstantObject("md.friction.gamma",FrictionGammaEnum));
-	}
-	if(frictionlaw==4){
-		parameters->AddObject(iomodel->CopyConstantObject("md.friction.gamma",FrictionGammaEnum));
-		parameters->AddObject(iomodel->CopyConstantObject("md.friction.coupling",FrictionCouplingEnum));
-		parameters->AddObject(iomodel->CopyConstantObject("md.friction.effective_pressure_limit",FrictionEffectivePressureLimitEnum));
-	}
-	if(frictionlaw==1 || frictionlaw==3 || frictionlaw==7){
-		parameters->AddObject(iomodel->CopyConstantObject("md.friction.coupling",FrictionCouplingEnum));
-		parameters->AddObject(iomodel->CopyConstantObject("md.friction.effective_pressure_limit",FrictionEffectivePressureLimitEnum));
-	}
-	if(frictionlaw==9){
-		parameters->AddObject(iomodel->CopyConstantObject("md.friction.gamma",FrictionGammaEnum));
-		parameters->AddObject(iomodel->CopyConstantObject("md.friction.effective_pressure_limit",FrictionEffectivePressureLimitEnum));
-		parameters->AddObject(new IntParam(FrictionCouplingEnum,0));
-	}
+	/*Friction*/
+	FrictionUpdateParameters(parameters, iomodel);
 }/*}}}*/
 
Index: /issm/trunk/src/c/analyses/EnumToAnalysis.cpp
===================================================================
--- /issm/trunk/src/c/analyses/EnumToAnalysis.cpp	(revision 28012)
+++ /issm/trunk/src/c/analyses/EnumToAnalysis.cpp	(revision 28013)
@@ -74,4 +74,7 @@
 		case GLheightadvectionAnalysisEnum : return new GLheightadvectionAnalysis();
 		#endif
+		#ifdef _HAVE_HYDROLOGYARMAPW_
+		case HydrologyArmapwAnalysisEnum : return new HydrologyArmapwAnalysis();
+		#endif
 		#ifdef _HAVE_HYDROLOGYDCEFFICIENT_
 		case HydrologyDCEfficientAnalysisEnum : return new HydrologyDCEfficientAnalysis();
Index: /issm/trunk/src/c/analyses/HydrologyArmapwAnalysis.cpp
===================================================================
--- /issm/trunk/src/c/analyses/HydrologyArmapwAnalysis.cpp	(revision 28013)
+++ /issm/trunk/src/c/analyses/HydrologyArmapwAnalysis.cpp	(revision 28013)
@@ -0,0 +1,170 @@
+#include "./HydrologyArmapwAnalysis.h"
+#include "../toolkits/toolkits.h"
+#include "../classes/classes.h"
+#include "../shared/shared.h"
+#include "../modules/modules.h"
+
+/*Model processing*/
+void HydrologyArmapwAnalysis::CreateConstraints(Constraints* constraints,IoModel* iomodel){/*{{{*/
+
+	return;
+
+}/*}}}*/
+void HydrologyArmapwAnalysis::CreateLoads(Loads* loads, IoModel* iomodel){/*{{{*/
+	/*No loads*/
+}/*}}}*/
+void HydrologyArmapwAnalysis::CreateNodes(Nodes* nodes,IoModel* iomodel,bool isamr){/*{{{*/
+
+	return;
+
+}/*}}}*/
+int  HydrologyArmapwAnalysis::DofsPerNode(int** doflist,int domaintype,int approximation){/*{{{*/
+	return 0;
+}/*}}}*/
+void HydrologyArmapwAnalysis::UpdateElements(Elements* elements,Inputs* inputs,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
+
+	/*Fetch data needed: */
+   int    hydrology_model,frictionlaw;
+   iomodel->FindConstant(&hydrology_model,"md.hydrology.model");
+
+   /*Now, do we really want armapw?*/
+   if(hydrology_model!=HydrologyarmapwEnum) return;
+
+   /*Add input to elements*/
+   iomodel->FetchDataToInput(inputs,elements,"md.mask.ice_levelset",MaskIceLevelsetEnum);
+   iomodel->FetchDataToInput(inputs,elements,"md.mask.ocean_levelset",MaskOceanLevelsetEnum);
+   iomodel->FetchDataToInput(inputs,elements,"md.basalforcings.groundedice_melting_rate",BasalforcingsGroundediceMeltingRateEnum);
+	iomodel->FetchDataToInput(inputs,elements,"md.hydrology.basin_id",HydrologyBasinsIdEnum);
+   iomodel->FetchDataToInput(inputs,elements,"md.initialization.watercolumn",WatercolumnEnum,0.);
+
+}/*}}}*/
+void HydrologyArmapwAnalysis::UpdateParameters(Parameters* parameters,IoModel* iomodel,int solution_enum,int analysis_enum){/*{{{*/
+
+	/*retrieve some parameters: */
+	int    hydrology_model;
+	int    numoutputs;
+	char** requestedoutputs = NULL;
+	iomodel->FindConstant(&hydrology_model,"md.hydrology.model");
+
+	/*Now, do we really want Armapw?*/
+	if(hydrology_model!=HydrologyarmapwEnum) return;
+
+	parameters->AddObject(new IntParam(HydrologyModelEnum,hydrology_model));
+  /*Requested outputs*/
+  iomodel->FindConstant(&requestedoutputs,&numoutputs,"md.hydrology.requested_outputs");
+  parameters->AddObject(new IntParam(HydrologyNumRequestedOutputsEnum,numoutputs));
+  if(numoutputs)parameters->AddObject(new StringArrayParam(HydrologyRequestedOutputsEnum,requestedoutputs,numoutputs));
+  iomodel->DeleteData(&requestedoutputs,numoutputs,"md.hydrology.requested_outputs");
+
+}/*}}}*/
+
+/*Finite Element Analysis*/
+void           HydrologyArmapwAnalysis::Core(FemModel* femmodel){/*{{{*/
+	_error_("not implemented");
+}/*}}}*/
+void           HydrologyArmapwAnalysis::PreCore(FemModel* femmodel){/*{{{*/
+	_error_("not implemented");
+}/*}}}*/
+ElementVector* HydrologyArmapwAnalysis::CreateDVector(Element* element){/*{{{*/
+	/*Default, return NULL*/
+	return NULL;
+}/*}}}*/
+ElementMatrix* HydrologyArmapwAnalysis::CreateJacobianMatrix(Element* element){/*{{{*/
+	_error_("Not implemented");
+}/*}}}*/
+ElementMatrix* HydrologyArmapwAnalysis::CreateKMatrix(Element* element){/*{{{*/
+	_error_("Not implemented");
+}/*}}}*/
+ElementVector* HydrologyArmapwAnalysis::CreatePVector(Element* element){/*{{{*/
+	_error_("Not implemented");
+}/*}}}*/
+void           HydrologyArmapwAnalysis::GetSolutionFromInputs(Vector<IssmDouble>* solution,Element* element){/*{{{*/
+	_error_("Not implemented");
+}/*}}}*/
+void           HydrologyArmapwAnalysis::GradientJ(Vector<IssmDouble>* gradient,Element*  element,int control_type,int control_interp,int control_index){/*{{{*/
+	_error_("Not implemented");
+}/*}}}*/
+void           HydrologyArmapwAnalysis::InputUpdateFromSolution(IssmDouble* solution,Element* element){/*{{{*/
+	_error_("Not implemented");
+}/*}}}*/
+void           HydrologyArmapwAnalysis::UpdateConstraints(FemModel* femmodel){/*{{{*/
+	_error_("Not implemented");
+}/*}}}*/
+
+/*Additional methods*/
+void HydrologyArmapwAnalysis::UpdateSubglacialWaterPressure(FemModel* femmodel){/*{{{*/
+
+	/*Get time parameters*/
+   IssmDouble time,dt,starttime,tstep_arma;
+   femmodel->parameters->FindParam(&time,TimeEnum);
+   femmodel->parameters->FindParam(&dt,TimesteppingTimeStepEnum);
+   femmodel->parameters->FindParam(&starttime,TimesteppingStartTimeEnum);
+   femmodel->parameters->FindParam(&tstep_arma,HydrologyarmaTimestepEnum);
+
+	/*Determine if this is a time step for the ARMA model*/
+   bool isstepforarma = false;
+
+   #ifndef _HAVE_AD_
+   if((fmod(time,tstep_arma)<fmod((time-dt),tstep_arma)) || (time<=starttime+dt) || tstep_arma==dt) isstepforarma = true;
+   #else
+   _error_("not implemented yet");
+   #endif
+
+   /*Load parameters*/
+   bool isstochastic;
+   bool ispwstochastic = false;
+   int M,N,arorder,maorder,numbasins,numparams,numbreaks,my_rank;
+   femmodel->parameters->FindParam(&numbasins,HydrologyNumBasinsEnum);
+   femmodel->parameters->FindParam(&numparams,HydrologyarmaNumParamsEnum);
+   femmodel->parameters->FindParam(&numbreaks,HydrologyarmaNumBreaksEnum);
+   femmodel->parameters->FindParam(&arorder,HydrologyarmaarOrderEnum);
+   femmodel->parameters->FindParam(&maorder,HydrologyarmamaOrderEnum);
+   IssmDouble* datebreaks        = NULL;
+   IssmDouble* arlagcoefs        = NULL;
+   IssmDouble* malagcoefs        = NULL;
+	IssmDouble* monthlyfactors    = NULL;
+   IssmDouble* polyparams        = NULL;	
+
+	femmodel->parameters->FindParam(&datebreaks,&M,&N,HydrologyarmadatebreaksEnum);            _assert_(M==numbasins); _assert_(N==max(numbreaks,1));
+   femmodel->parameters->FindParam(&polyparams,&M,&N,HydrologyarmapolyparamsEnum);            _assert_(M==numbasins); _assert_(N==(numbreaks+1)*numparams);
+   femmodel->parameters->FindParam(&arlagcoefs,&M,&N,HydrologyarmaarlagcoefsEnum);            _assert_(M==numbasins); _assert_(N==arorder);
+   femmodel->parameters->FindParam(&malagcoefs,&M,&N,HydrologyarmamalagcoefsEnum);            _assert_(M==numbasins); _assert_(N==maorder);
+	femmodel->parameters->FindParam(&monthlyfactors,&M,&N,HydrologyarmaMonthlyFactorsEnum);    _assert_(M==numbasins); _assert_(N==12);
+
+	femmodel->parameters->FindParam(&isstochastic,StochasticForcingIsStochasticForcingEnum);
+   if(isstochastic){
+      int  numstochasticfields;
+      int* stochasticfields;
+      femmodel->parameters->FindParam(&numstochasticfields,StochasticForcingNumFieldsEnum);
+      femmodel->parameters->FindParam(&stochasticfields,&N,StochasticForcingFieldsEnum); _assert_(N==numstochasticfields);
+      for(int i=0;i<numstochasticfields;i++){
+         if(stochasticfields[i]==FrictionWaterPressureEnum) ispwstochastic = true;
+      }
+      xDelete<int>(stochasticfields);
+   }
+
+	/*Check if seasonality is imposed*/
+	bool isseasonality = false;
+	for(int i=0;i<numbasins*12;i++){
+		if(monthlyfactors[i]!=1) isseasonality = true;
+	}
+
+	/*Loop over each element to compute Subglacial Water Pressure at vertices*/
+   for(Object* &object:femmodel->elements->objects){
+      Element* element = xDynamicCast<Element*>(object);
+      /*Compute ARMA perturbation values*/
+      element->ArmaProcess(isstepforarma,arorder,maorder,numparams,numbreaks,tstep_arma,polyparams,arlagcoefs,malagcoefs,datebreaks,ispwstochastic,HydrologyarmapwEnum);
+      /*Compute subglacial water pressure with the ARMA perturbation*/
+		element->SubglacialWaterPressure(FrictionWaterPressureEnum);
+		/*Scale with monthly factors*/
+		if(isseasonality) element->MonthlyFactorBasin(monthlyfactors,HydrologyarmapwEnum);
+   }
+
+	/*Cleanup*/
+   xDelete<IssmDouble>(arlagcoefs);
+   xDelete<IssmDouble>(malagcoefs);
+   xDelete<IssmDouble>(polyparams);
+   xDelete<IssmDouble>(datebreaks);
+   xDelete<IssmDouble>(monthlyfactors);
+}/*}}}*/
+
Index: /issm/trunk/src/c/analyses/HydrologyArmapwAnalysis.h
===================================================================
--- /issm/trunk/src/c/analyses/HydrologyArmapwAnalysis.h	(revision 28013)
+++ /issm/trunk/src/c/analyses/HydrologyArmapwAnalysis.h	(revision 28013)
@@ -0,0 +1,37 @@
+/*! \file HydrologyArmapwAnalysis.h 
+ *  \brief: header file for generic external result object
+ */
+
+#ifndef _HydrologyArmapwAnalysis_
+#define _HydrologyArmapwAnalysis_
+
+/*Headers*/
+#include "./Analysis.h"
+
+class HydrologyArmapwAnalysis: public Analysis{
+
+	public:
+		/*Model processing*/
+		void CreateConstraints(Constraints* constraints,IoModel* iomodel);
+		void CreateLoads(Loads* loads, IoModel* iomodel);
+		void CreateNodes(Nodes* nodes,IoModel* iomodel,bool isamr=false);
+		int  DofsPerNode(int** doflist,int domaintype,int approximation);
+		void UpdateElements(Elements* elements,Inputs* inputs,IoModel* iomodel,int analysis_counter,int analysis_type);
+		void UpdateParameters(Parameters* parameters,IoModel* iomodel,int solution_enum,int analysis_enum);
+
+		/*Finite element Analysis*/
+		void           Core(FemModel* femmodel);
+		void           PreCore(FemModel* femmodel);
+		ElementVector* CreateDVector(Element* element);
+		ElementMatrix* CreateJacobianMatrix(Element* element);
+		ElementMatrix* CreateKMatrix(Element* element);
+		ElementVector* CreatePVector(Element* element);
+		void           GetSolutionFromInputs(Vector<IssmDouble>* solution,Element* element);
+		void           GradientJ(Vector<IssmDouble>* gradient,Element*  element,int control_type,int control_interp,int control_index);
+		void           InputUpdateFromSolution(IssmDouble* solution,Element* element);
+		void           UpdateConstraints(FemModel* femmodel);
+
+		/*Intermediaries*/
+		void UpdateSubglacialWaterPressure(FemModel* femmodel);
+};
+#endif
Index: /issm/trunk/src/c/analyses/HydrologyDCEfficientAnalysis.cpp
===================================================================
--- /issm/trunk/src/c/analyses/HydrologyDCEfficientAnalysis.cpp	(revision 28012)
+++ /issm/trunk/src/c/analyses/HydrologyDCEfficientAnalysis.cpp	(revision 28013)
@@ -297,7 +297,8 @@
 	IssmDouble dt,scalar,water_head;
 	IssmDouble water_load,transfer,runoff_value;
-	IssmDouble epl_storing,epl_transmitivity;
+	IssmDouble epl_storing;  //,epl_transmitivity;
 	IssmDouble Jdet,time;
 	IssmDouble residual,connectivity;
+	IssmDouble active_node;
 
 	IssmDouble *xyz_list             = NULL;
@@ -313,4 +314,5 @@
 	ElementVector* pe    = basalelement->NewElementVector();
 	IssmDouble*    basis = xNew<IssmDouble>(numnodes);
+
 
 	/*Retrieve all inputs and parameters*/
@@ -353,6 +355,6 @@
 		basalelement ->JacobianDeterminant(&Jdet,xyz_list,gauss);
 		basalelement ->NodalFunctions(basis,gauss);
+		//epl_transmitivity = EplTransmitivity(basalelement,gauss,epl_thick_input);
 		epl_storing	= EplStoring(basalelement,gauss,epl_thick_input);
-		epl_transmitivity = EplTransmitivity(basalelement,gauss,epl_thick_input);
 
 		/*Loading term*/
@@ -362,6 +364,14 @@
 		scalar = Jdet*gauss->weight*(water_load+runoff_value);
 		if(dt!=0.) scalar = scalar*dt;
-		for(int i=0;i<numnodes;i++)pe->values[i]+=scalar*basis[i];
-
+		for(int i=0;i<numnodes;i++){
+			//This is the original
+			pe->values[i]+=scalar*basis[i];
+			//This is the noded version
+			/* basalelement->GetInputValue(&active_node,basalelement->nodes[i],HydrologydcMaskEplactiveNodeEnum); */
+			/* if(!reCast<bool>(active_node)){ */
+			/* 	pe->values[i]+=scalar*basis[i]; */
+			//}
+			//if(basalelement->nodes[i]->Sid()==42)_printf_("EPL uni Input "<<scalar*basis[i]<<"\n");
+		}
 		/*Transient and transfer terms*/
 		if(dt!=0.){
@@ -379,5 +389,5 @@
 	for(int iv=0;iv<numvertices;iv++){
 		gauss->GaussVertex(iv);
-		epl_transmitivity = EplTransmitivity(basalelement,gauss,epl_thick_input);
+		//epl_transmitivity = EplTransmitivity(basalelement,gauss,epl_thick_input);
 		connectivity = IssmDouble(basalelement->VertexConnectivity(iv));
 		residual_input->GetInputValue(&residual,gauss);
Index: /issm/trunk/src/c/analyses/HydrologyDCInefficientAnalysis.cpp
===================================================================
--- /issm/trunk/src/c/analyses/HydrologyDCInefficientAnalysis.cpp	(revision 28012)
+++ /issm/trunk/src/c/analyses/HydrologyDCInefficientAnalysis.cpp	(revision 28013)
@@ -326,10 +326,11 @@
 	/*Intermediaries */
 	bool       active_element,isefficientlayer;
-	int        smb_model,smbsubstepping;
-	int        hydrologysubstepping,smb_averaging;
-	IssmDouble dt,scalar,sediment_storing;
-	IssmDouble water_head,sediment_transmitivity;
+	int        smb_model,smb_averaging;
+	int        smbsubstepping, hydrologysubstepping;
+	IssmDouble dt,scalar,water_head;
+	IssmDouble sediment_storing,sediment_transmitivity;
 	IssmDouble water_load,runoff_value,transfer;
 	IssmDouble Jdet,time;
+	IssmDouble active_node;
 
 	IssmDouble *xyz_list             = NULL;
@@ -401,7 +402,5 @@
 			scalar = Jdet*gauss->weight*(water_load+runoff_value);
 			if(dt!=0.) scalar = scalar*dt;
-			for(int i=0;i<numnodes;i++){
-				pe->values[i]+=scalar*basis[i];
-			}
+			for(int i=0;i<numnodes;i++)pe->values[i]+=scalar*basis[i];
 		}
 		else{
@@ -414,5 +413,12 @@
 				if(dt!=0.) scalar = scalar*dt;
 				for(int i=0;i<numnodes;i++){
+					//This is the original
 					pe->values[i]+=scalar*basis[i];
+					//This is the noded version
+					/* basalelement->GetInputValue(&active_node,basalelement->nodes[i],HydrologydcMaskEplactiveNodeEnum); */
+					/* if(!reCast<bool>(active_node)){ */
+					/* 	pe->values[i]+=scalar*basis[i]; */
+					/* 	//if(basalelement->nodes[i]->Sid()==42)_printf_("IDS uni Input "<<scalar*basis[i]<<"\n"); */
+					//}
 				}
 			}
Index: /issm/trunk/src/c/analyses/HydrologyGlaDSAnalysis.cpp
===================================================================
--- /issm/trunk/src/c/analyses/HydrologyGlaDSAnalysis.cpp	(revision 28012)
+++ /issm/trunk/src/c/analyses/HydrologyGlaDSAnalysis.cpp	(revision 28013)
@@ -61,4 +61,5 @@
 	/*Create discrete loads for Moulins*/
 	CreateSingleNodeToElementConnectivity(iomodel);
+	if(iomodel->domaintype!=Domain2DhorizontalEnum && iomodel->domaintype!=Domain3DsurfaceEnum) iomodel->FetchData(1,"md.mesh.vertexonbase");
 	for(int i=0;i<iomodel->numberofvertices;i++){
 		if (iomodel->domaintype!=Domain3DEnum){
@@ -110,7 +111,9 @@
 
 	/*Fetch data needed: */
-	int    hydrology_model,frictionlaw;
+	int    hydrology_model;
 	iomodel->FindConstant(&hydrology_model,"md.hydrology.model");
-
+	int    meltflag;	
+	iomodel->FindConstant(&meltflag,"md.hydrology.melt_flag");
+	
 	/*Now, do we really want GlaDS?*/
 	if(hydrology_model!=HydrologyGlaDSEnum) return;
@@ -132,4 +135,7 @@
 	iomodel->FetchDataToInput(inputs,elements,"md.basalforcings.geothermalflux",BasalforcingsGeothermalfluxEnum);
 	iomodel->FetchDataToInput(inputs,elements,"md.basalforcings.groundedice_melting_rate",BasalforcingsGroundediceMeltingRateEnum);
+	if(meltflag==2){
+		iomodel->FetchDataToInput(inputs,elements,"md.smb.runoff",SmbRunoffEnum);
+	}
 	if(iomodel->domaintype!=Domain2DhorizontalEnum){
 		iomodel->FetchDataToInput(inputs,elements,"md.mesh.vertexonbase",MeshVertexonbaseEnum);
@@ -140,4 +146,5 @@
 	iomodel->FetchDataToInput(inputs,elements,"md.hydrology.bump_height",HydrologyBumpHeightEnum);
 	iomodel->FetchDataToInput(inputs,elements,"md.hydrology.sheet_conductivity",HydrologySheetConductivityEnum);
+	iomodel->FetchDataToInput(inputs,elements,"md.hydrology.channel_conductivity",HydrologyChannelConductivityEnum);
 	iomodel->FetchDataToInput(inputs,elements,"md.hydrology.neumannflux",HydrologyNeumannfluxEnum);
 	iomodel->FetchDataToInput(inputs,elements,"md.hydrology.moulin_input",HydrologyMoulinInputEnum);
@@ -149,116 +156,13 @@
 		iomodel->FetchDataToInput(inputs,elements,"md.initialization.vx",VxBaseEnum);
 		iomodel->FetchDataToInput(inputs,elements,"md.initialization.vy",VyBaseEnum);
-		iomodel->FindConstant(&frictionlaw,"md.friction.law");
 	}
 	else{
 		iomodel->FetchDataToInput(inputs,elements,"md.initialization.vx",VxEnum);
 		iomodel->FetchDataToInput(inputs,elements,"md.initialization.vy",VyEnum);
-		iomodel->FindConstant(&frictionlaw,"md.friction.law");
-	}
-
-	/*Friction law variables*/
-	int FrictionCoupling;
-	switch(frictionlaw){
-		case 1:
-			iomodel->FindConstant(&FrictionCoupling,"md.friction.coupling");
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.coefficient",FrictionCoefficientEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.p",FrictionPEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.q",FrictionQEnum);
-			if(FrictionCoupling==3){
-				iomodel->FetchDataToInput(inputs,elements,"md.friction.effective_pressure",FrictionEffectivePressureEnum);}
-			else if(FrictionCoupling==4){
-				iomodel->FetchDataToInput(inputs,elements,"md.friction.effective_pressure",EffectivePressureEnum);
-			}
-			break;
-		case 2:
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.C",FrictionCEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.m",FrictionMEnum);
-			break;
-		case 3:
-			iomodel->FindConstant(&FrictionCoupling,"md.friction.coupling");
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.C",FrictionCEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.As",FrictionAsEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.q",FrictionQEnum);
-			if(FrictionCoupling==3){
-				iomodel->FetchDataToInput(inputs,elements,"md.friction.effective_pressure",FrictionEffectivePressureEnum);}
-			else if(FrictionCoupling==4){
-				iomodel->FetchDataToInput(inputs,elements,"md.friction.effective_pressure",EffectivePressureEnum);
-			}
-			break;
-		case 4:
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.coefficient",FrictionCoefficientEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.p",FrictionPEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.q",FrictionQEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.initialization.pressure",PressureEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.initialization.temperature",TemperatureEnum);
-			iomodel->FindConstant(&FrictionCoupling,"md.friction.coupling");
-			break;
-		case 5:
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.coefficient",FrictionCoefficientEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.p",FrictionPEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.q",FrictionQEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.water_layer",FrictionWaterLayerEnum);
-			break;
-		case 6:
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.C",FrictionCEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.m",FrictionMEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.initialization.pressure",PressureEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.initialization.temperature",TemperatureEnum);
-			break;
-		case 7:
-			iomodel->FindConstant(&FrictionCoupling,"md.friction.coupling");
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.coefficient",FrictionCoefficientEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.coefficientcoulomb",FrictionCoefficientcoulombEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.p",FrictionPEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.q",FrictionQEnum);
-			if(FrictionCoupling==3){
-				iomodel->FetchDataToInput(inputs,elements,"md.friction.effective_pressure",FrictionEffectivePressureEnum);}
-			else if(FrictionCoupling==4){
-				iomodel->FetchDataToInput(inputs,elements,"md.friction.effective_pressure",EffectivePressureEnum);
-
-			}
-			break;
-		case 9:
-			iomodel->FetchDataToInput(inputs,elements,"md.initialization.temperature",TemperatureEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.coefficient",FrictionCoefficientEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.pressure_adjusted_temperature",FrictionPressureAdjustedTemperatureEnum);
-			InputUpdateFromConstantx(inputs,elements,1.,FrictionPEnum);
-			InputUpdateFromConstantx(inputs,elements,1.,FrictionQEnum);
-			break;
-		case 10:
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.till_friction_angle",FrictionTillFrictionAngleEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.sediment_compressibility_coefficient",FrictionSedimentCompressibilityCoefficientEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.hydrology.watercolumn_max",HydrologyWatercolumnMaxEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.initialization.watercolumn",WatercolumnEnum,0.);
-			break;
-		case 11:
-			iomodel->FindConstant(&FrictionCoupling,"md.friction.coupling");
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.m",FrictionMEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.C",FrictionCEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.Cmax",FrictionCmaxEnum);
-			if(FrictionCoupling==3){
-				iomodel->FetchDataToInput(inputs,elements,"md.friction.effective_pressure",FrictionEffectivePressureEnum);}
-			else if(FrictionCoupling==4){
-				iomodel->FetchDataToInput(inputs,elements,"md.friction.effective_pressure",EffectivePressureEnum);
-			}
-			break;
-		case 12:
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.m",FrictionMEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.C",FrictionCEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.f",FrictionfEnum);
-			break;
-		case 13:
-			iomodel->FindConstant(&FrictionCoupling,"md.friction.coupling");
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.C",FrictionCEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.m",FrictionMEnum);
-			if(FrictionCoupling==3){
-				iomodel->FetchDataToInput(inputs,elements,"md.friction.effective_pressure",FrictionEffectivePressureEnum);}
-			else if(FrictionCoupling==4){
-				iomodel->FetchDataToInput(inputs,elements,"md.friction.effective_pressure",EffectivePressureEnum);
-			}
-			break;
-		default:
-			_error_("friction law "<< frictionlaw <<" not supported");
-	}
+	}
+
+	/*Friction*/
+	FrictionUpdateInputs(elements, inputs, iomodel);
+
 }/*}}}*/
 void HydrologyGlaDSAnalysis::UpdateParameters(Parameters* parameters,IoModel* iomodel,int solution_enum,int analysis_enum){/*{{{*/
@@ -274,71 +178,19 @@
 
 	parameters->AddObject(new IntParam(HydrologyModelEnum,hydrology_model));
-	parameters->AddObject(iomodel->CopyConstantObject("md.friction.law",FrictionLawEnum));
 	parameters->AddObject(iomodel->CopyConstantObject("md.hydrology.pressure_melt_coefficient",HydrologyPressureMeltCoefficientEnum));
 	parameters->AddObject(iomodel->CopyConstantObject("md.hydrology.cavity_spacing",HydrologyCavitySpacingEnum));
 	parameters->AddObject(iomodel->CopyConstantObject("md.hydrology.ischannels",HydrologyIschannelsEnum));
 	parameters->AddObject(iomodel->CopyConstantObject("md.hydrology.melt_flag",HydrologyMeltFlagEnum));
-	parameters->AddObject(iomodel->CopyConstantObject("md.hydrology.channel_conductivity",HydrologyChannelConductivityEnum));
 	parameters->AddObject(iomodel->CopyConstantObject("md.hydrology.channel_sheet_width",HydrologyChannelSheetWidthEnum));
+	parameters->AddObject(iomodel->CopyConstantObject("md.hydrology.channel_alpha",HydrologyChannelAlphaEnum));
+	parameters->AddObject(iomodel->CopyConstantObject("md.hydrology.channel_beta",HydrologyChannelBetaEnum));
+	parameters->AddObject(iomodel->CopyConstantObject("md.hydrology.sheet_alpha",HydrologySheetAlphaEnum));
+	parameters->AddObject(iomodel->CopyConstantObject("md.hydrology.sheet_beta",HydrologySheetBetaEnum));
+	parameters->AddObject(iomodel->CopyConstantObject("md.hydrology.omega",HydrologyOmegaEnum));
+	parameters->AddObject(iomodel->CopyConstantObject("md.hydrology.istransition",HydrologyIsTransitionEnum));
 	parameters->AddObject(iomodel->CopyConstantObject("md.hydrology.englacial_void_ratio",HydrologyEnglacialVoidRatioEnum));
 
-	/*Deal with friction parameters*/
-	int frictionlaw;
-	iomodel->FindConstant(&frictionlaw,"md.friction.law");
-	switch(frictionlaw){
-		case 1:
-			parameters->AddObject(iomodel->CopyConstantObject("md.friction.coupling",FrictionCouplingEnum));
-			parameters->AddObject(iomodel->CopyConstantObject("md.friction.effective_pressure_limit",FrictionEffectivePressureLimitEnum));
-			break;
-		case 2:
-			break;
-		case 3:
-			parameters->AddObject(iomodel->CopyConstantObject("md.friction.coupling",FrictionCouplingEnum));
-			parameters->AddObject(iomodel->CopyConstantObject("md.friction.effective_pressure_limit",FrictionEffectivePressureLimitEnum));
-			break;
-		case 4:
-			parameters->AddObject(iomodel->CopyConstantObject("md.friction.gamma",FrictionGammaEnum));
-			parameters->AddObject(iomodel->CopyConstantObject("md.friction.coupling",FrictionCouplingEnum));
-			parameters->AddObject(iomodel->CopyConstantObject("md.friction.effective_pressure_limit",FrictionEffectivePressureLimitEnum));
-			break;
-		case 5:
-			parameters->AddObject(iomodel->CopyConstantObject("md.friction.f",FrictionFEnum));
-			break;
-		case 6:
-			parameters->AddObject(iomodel->CopyConstantObject("md.friction.gamma",FrictionGammaEnum));
-			break;
-		case 7:
-			parameters->AddObject(iomodel->CopyConstantObject("md.friction.coupling",FrictionCouplingEnum));
-			parameters->AddObject(iomodel->CopyConstantObject("md.friction.effective_pressure_limit",FrictionEffectivePressureLimitEnum));
-			break;
-		case 8:
-			break;
-		case 9:
-			parameters->AddObject(iomodel->CopyConstantObject("md.friction.gamma",FrictionGammaEnum));
-			parameters->AddObject(iomodel->CopyConstantObject("md.friction.effective_pressure_limit",FrictionEffectivePressureLimitEnum));
-			parameters->AddObject(new IntParam(FrictionCouplingEnum,0));
-			break;
-		case 10:
-			parameters->AddObject(new IntParam(FrictionCouplingEnum,2)); /*comment this line to use effective pressure from Beuler and Pelt (2015)*/
-			parameters->AddObject(new DoubleParam(FrictionEffectivePressureLimitEnum,0.));
-			parameters->AddObject(iomodel->CopyConstantObject("md.friction.pseudoplasticity_exponent",FrictionPseudoplasticityExponentEnum));
-			parameters->AddObject(iomodel->CopyConstantObject("md.friction.threshold_speed",FrictionThresholdSpeedEnum));
-			parameters->AddObject(iomodel->CopyConstantObject("md.friction.delta",FrictionDeltaEnum));
-			parameters->AddObject(iomodel->CopyConstantObject("md.friction.void_ratio",FrictionVoidRatioEnum));
-			break;
-		case 11:
-			parameters->AddObject(iomodel->CopyConstantObject("md.friction.coupling",FrictionCouplingEnum));
-			parameters->AddObject(iomodel->CopyConstantObject("md.friction.effective_pressure_limit",FrictionEffectivePressureLimitEnum));
-			break;
-		case 12:
-			parameters->AddObject(new IntParam(FrictionCouplingEnum,2));
-			parameters->AddObject(iomodel->CopyConstantObject("md.friction.effective_pressure_limit",FrictionEffectivePressureLimitEnum));
-			break;
-		case 13:
-			parameters->AddObject(iomodel->CopyConstantObject("md.friction.coupling",FrictionCouplingEnum));
-			parameters->AddObject(iomodel->CopyConstantObject("md.friction.effective_pressure_limit",FrictionEffectivePressureLimitEnum));
-			break;
-		default: _error_("Friction law "<<frictionlaw<<" not implemented yet");
-	}
+	/*Friction*/
+	FrictionUpdateParameters(parameters, iomodel);
 
 	/*Requested outputs*/
@@ -370,10 +222,7 @@
 	/*Intermediaries */
 	IssmDouble  Jdet,dphi[3],h,k;
+	IssmDouble  h_r;
 	IssmDouble  A,B,n,phi_old,phi,phi_0,H,b,v1;
 	IssmDouble* xyz_list = NULL;
-
-	/*Hard coded coefficients*/
-	const IssmPDouble alpha = 5./4.;
-	const IssmPDouble beta  = 3./2.;
 
 	/*Fetch number of nodes and dof for this finite element*/
@@ -389,9 +238,16 @@
 
 	/*Get all inputs and parameters*/
+	bool istransition;
+	element->FindParam(&istransition,HydrologyIsTransitionEnum);
+	IssmDouble alpha     = element->FindParam(HydrologySheetAlphaEnum);
+	IssmDouble beta      = element->FindParam(HydrologySheetBetaEnum);
+	IssmDouble omega     = element->FindParam(HydrologyOmegaEnum);
 	IssmDouble dt        = element->FindParam(TimesteppingTimeStepEnum);
 	IssmDouble rho_water = element->FindParam(MaterialsRhoFreshwaterEnum);
+	IssmDouble mu_water  = element->FindParam(MaterialsMuWaterEnum);
 	IssmDouble rho_ice   = element->FindParam(MaterialsRhoIceEnum);
 	IssmDouble g         = element->FindParam(ConstantsGEnum);
 	IssmDouble e_v       = element->FindParam(HydrologyEnglacialVoidRatioEnum);
+	Input* hr_input      = element->GetInput(HydrologyBumpHeightEnum);_assert_(hr_input);
 	Input* k_input   = element->GetInput(HydrologySheetConductivityEnum);_assert_(k_input);
 	Input* phi_input = element->GetInput(HydraulicPotentialEnum);      _assert_(phi_input);
@@ -416,12 +272,27 @@
 		B_input->GetInputValue(&B,gauss);
 		n_input->GetInputValue(&n,gauss);
+		hr_input->GetInputValue(&h_r,gauss);
 		b_input->GetInputValue(&b,gauss);
 		H_input->GetInputValue(&H,gauss);
 
+		/*Hard code B*/
+		B = Cuffey(273.15-2);
+		
 		/*Get norm of gradient of hydraulic potential and make sure it is >0*/
 		IssmDouble normgradphi = sqrt(dphi[0]*dphi[0] + dphi[1]*dphi[1]);
 		if(normgradphi < AEPS) normgradphi = AEPS;
-
-		IssmDouble coeff = k*pow(h,alpha)*pow(normgradphi,beta-2.);
+		
+		/*Use transition model if specified*/
+		IssmDouble nu = mu_water/rho_water;
+		IssmDouble coeff;
+		if(istransition==1 && omega>=AEPS){
+			IssmDouble hratio = fabs(h/h_r);
+			IssmDouble coarg = 1. + 4.*pow(hratio,3-2*alpha)*omega*k*pow(h,3)*normgradphi/nu;
+			coeff = nu/2./omega*pow(hratio,2*alpha-3) * (-1 + pow(coarg, 0.5))/normgradphi;
+		}
+		else {
+			/*If omega is zero, use standard model, otherwise transition model*/
+			coeff = k*pow(h,alpha)*pow(normgradphi,beta-2.);
+		}
 
 		/*Diffusive term*/
@@ -469,7 +340,7 @@
 
 	/*Intermediaries */
-	bool        meltflag;
+	int         meltflag;
 	IssmDouble  Jdet,w,v2,vx,vy,ub,h,h_r;
-	IssmDouble  G,m,frictionheat,alpha2;
+	IssmDouble  G,m,melt,RO,frictionheat,alpha2;
 	IssmDouble  A,B,n,phi_old,phi,phi_0;
 	IssmDouble  H,b;
@@ -498,5 +369,6 @@
 	Input* b_input      = element->GetInput(BedEnum); _assert_(b_input);
 	Input* G_input      = element->GetInput(BasalforcingsGeothermalfluxEnum);_assert_(G_input);
-	Input* m_input      = element->GetInput(BasalforcingsGroundediceMeltingRateEnum);_assert_(m_input);
+	Input* melt_input   = element->GetInput(BasalforcingsGroundediceMeltingRateEnum);_assert_(melt_input);
+	Input* RO_input     = NULL;
 	Input* B_input      = element->GetInput(MaterialsRheologyBEnum);         _assert_(B_input);
 	Input* n_input      = element->GetInput(MaterialsRheologyNEnum);         _assert_(n_input);
@@ -523,4 +395,8 @@
 		b_input->GetInputValue(&b,gauss);
 		H_input->GetInputValue(&H,gauss);
+		melt_input->GetInputValue(&melt,gauss);
+
+		/*Hard code B*/
+		B = Cuffey(273.15-2);
 
 		/*Get basal velocity*/
@@ -537,9 +413,14 @@
 
 		/*Compute melt (if necessary)*/
-		if(!meltflag){
+		if(meltflag == 0){
 			m = (G + frictionheat)/(rho_ice*L);
 		}
+		else if(meltflag == 1){
+			m = melt;
+		}
 		else{
-			m_input->GetInputValue(&m,gauss);
+			Input* RO_input = element->GetInput(SmbRunoffEnum);_assert_(RO_input);
+			RO_input->GetInputValue(&melt,gauss);
+			m = melt + RO;
 		}
 
@@ -580,10 +461,7 @@
 	/*Intermediaries*/
    IssmDouble  dphi[3],h,k,phi;
+   	IssmDouble  h_r;
 	IssmDouble  oceanLS,iceLS;
 	IssmDouble* xyz_list = NULL;
-
-	/*Hard coded coefficients*/
-	const IssmPDouble alpha = 5./4.;
-	const IssmPDouble beta  = 3./2.;
 
 	/*Fetch number vertices for this element*/
@@ -606,7 +484,15 @@
 
 	/*Retrieve all inputs and parameters*/
+	bool istransition;
+	element->FindParam(&istransition,HydrologyIsTransitionEnum);
+	IssmDouble alpha     = element->FindParam(HydrologySheetAlphaEnum);
+	IssmDouble beta      = element->FindParam(HydrologySheetBetaEnum);
+	IssmDouble omega     = element->FindParam(HydrologyOmegaEnum);
 	element->GetVerticesCoordinates(&xyz_list);
+	IssmDouble rho_water = element->FindParam(MaterialsRhoFreshwaterEnum);
+	IssmDouble mu_water  = element->FindParam(MaterialsMuWaterEnum);
 	Input *k_input       = element->GetInput(HydrologySheetConductivityEnum); _assert_(k_input);
 	Input *phi_input     = element->GetInput(HydraulicPotentialEnum);         _assert_(phi_input);
+	Input *hr_input      = element->GetInput(HydrologyBumpHeightEnum);        _assert_(hr_input);
 	Input *h_input       = element->GetInput(HydrologySheetThicknessEnum);    _assert_(h_input);
 	Input *oceanLS_input = element->GetInput(MaskOceanLevelsetEnum);          _assert_(oceanLS_input);
@@ -622,4 +508,5 @@
       phi_input->GetInputValue(&phi,gauss);
       h_input->GetInputValue(&h,gauss);
+      	hr_input->GetInputValue(&h_r,gauss); 
       k_input->GetInputValue(&k,gauss);
 		oceanLS_input->GetInputValue(&oceanLS,gauss);
@@ -636,6 +523,17 @@
          IssmDouble normgradphi = sqrt(dphi[0]*dphi[0] + dphi[1]*dphi[1]);
          if(normgradphi < AEPS) normgradphi = AEPS;
-
-         IssmDouble coeff = k*pow(h,alpha)*pow(normgradphi,beta-2.)/max(AEPS,h); // divide by h to get speed instead of discharge
+         
+         /*If omega is zero, use standard model, otherwise transition model*/
+         IssmDouble nu = mu_water/rho_water;
+	IssmDouble coeff;
+	if(istransition==1 && omega>=AEPS){
+		IssmDouble hratio = fabs(h/h_r);
+		IssmDouble coarg = 1. + 4.*pow(hratio,3-2*alpha)*omega*k*pow(h,3)*normgradphi/nu;
+		coeff = nu/2./omega*pow(hratio,2*alpha-3) * (-1 + pow(coarg, 0.5))/normgradphi/max(AEPS,h);  // divide by h to get speed instead of discharge
+	}
+	else {
+		coeff = k*pow(h,alpha)*pow(normgradphi,beta-2.)/max(AEPS,h);  // divide by h to get speed instead of discharge
+	}
+
 
 			vx[iv] = -coeff*dphi[0];
@@ -674,5 +572,5 @@
 		element->GetInputListOnNodes(&mask[0],MaskOceanLevelsetEnum);
 		element->GetInputListOnNodes(&bed[0],BaseEnum);
-		element->GetInputListOnNodes(&thickness[0],BaseEnum);
+		element->GetInputListOnNodes(&thickness[0],ThicknessEnum);
 		element->GetInputListOnNodes(&ls_active[0],HydrologyMaskNodeActivationEnum);
 
@@ -779,4 +677,7 @@
 		iceLS_input->GetInputValue(&iceLS,gauss);
 
+		/*Hard code B*/
+		B = Cuffey(273.15-2);
+
 		/*Set sheet thickness to zero if floating or no ice*/
 		if(oceanLS<0. || iceLS>0.){
@@ -793,5 +694,5 @@
 
 		/*Get A from B and n*/
-		A=pow(B,-n);
+		A = pow(B,-n);
 
 		/*Define alpha and beta*/
@@ -812,9 +713,4 @@
 	}
 	}
-
-	/*Force floating ice to have zero sheet thickness*/
-	/*if(!element->IsAllGrounded() || !element->IsIceInElement()){
-		for(int iv=0;iv<numvertices;iv++) h_new[iv] = 0.;
-	}*/
 
 	element->AddInput(HydrologySheetThicknessEnum,h_new,P1Enum);
@@ -878,5 +774,5 @@
 		p_i = rho_ice*g*H;
 
-		/*Copmute overburden potential*/
+		/*Compute overburden potential*/
 		phi_0 = phi_m + p_i;
 
Index: /issm/trunk/src/c/analyses/HydrologyShaktiAnalysis.cpp
===================================================================
--- /issm/trunk/src/c/analyses/HydrologyShaktiAnalysis.cpp	(revision 28012)
+++ /issm/trunk/src/c/analyses/HydrologyShaktiAnalysis.cpp	(revision 28013)
@@ -34,4 +34,5 @@
 	/*Create discrete loads for Moulins*/
 	CreateSingleNodeToElementConnectivity(iomodel);
+	if(iomodel->domaintype!=Domain2DhorizontalEnum && iomodel->domaintype!=Domain3DsurfaceEnum) iomodel->FetchData(1,"md.mesh.vertexonbase");
 	for(int i=0;i<iomodel->numberofvertices;i++){
 		if (iomodel->domaintype!=Domain3DEnum){
@@ -52,8 +53,17 @@
 	int M,N;
 	int *segments = NULL;
-	iomodel->FetchData(&segments,&M,&N,"md.mesh.segments");
+	if(iomodel->domaintype==Domain3DEnum){
+		iomodel->FetchData(&segments,&M,&N,"md.mesh.segments2d");
+	}
+	else if(iomodel->domaintype==Domain2DhorizontalEnum){
+		iomodel->FetchData(&segments,&M,&N,"md.mesh.segments");
+	}
+	else{
+		_error_("mesh type not supported yet");
+	}
 
 	/*Check that the size seem right*/
 	_assert_(N==3); _assert_(M>=3);
+
 	for(int i=0;i<M;i++){
 		if(iomodel->my_elements[segments[i*3+2]-1]){
@@ -123,19 +133,7 @@
 		iomodel->FetchDataToInput(inputs,elements,"md.initialization.vy",VyBaseEnum);
 	}
-	iomodel->FindConstant(&frictionlaw,"md.friction.law");
-
-	/*Friction law variables*/
-	switch(frictionlaw){
-		case 1:
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.coefficient",FrictionCoefficientEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.p",FrictionPEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.q",FrictionQEnum);
-			break;
-		case 8:
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.coefficient",FrictionCoefficientEnum);
-			break;
-		default:
-			_error_("Friction law "<< frictionlaw <<" not supported");
-	}
+
+	/*Friction*/
+	FrictionUpdateInputs(elements, inputs, iomodel);
 }/*}}}*/
 void HydrologyShaktiAnalysis::UpdateParameters(Parameters* parameters,IoModel* iomodel,int solution_enum,int analysis_enum){/*{{{*/
@@ -151,28 +149,6 @@
 
 	parameters->AddObject(new IntParam(HydrologyModelEnum,hydrology_model));
-	parameters->AddObject(iomodel->CopyConstantObject("md.friction.law",FrictionLawEnum));
    parameters->AddObject(iomodel->CopyConstantObject("md.hydrology.relaxation",HydrologyRelaxationEnum));
 	parameters->AddObject(iomodel->CopyConstantObject("md.hydrology.storage",HydrologyStorageEnum));
-
-	/*Deal with friction parameters*/
-	int frictionlaw;
-	iomodel->FindConstant(&frictionlaw,"md.friction.law");
-	if(frictionlaw==6){
-		parameters->AddObject(iomodel->CopyConstantObject("md.friction.gamma",FrictionGammaEnum));
-	}
-	if(frictionlaw==4){
-		parameters->AddObject(iomodel->CopyConstantObject("md.friction.gamma",FrictionGammaEnum));
-		parameters->AddObject(iomodel->CopyConstantObject("md.friction.coupling",FrictionCouplingEnum));
-		parameters->AddObject(iomodel->CopyConstantObject("md.friction.effective_pressure_limit",FrictionEffectivePressureLimitEnum));
-	}
-	if(frictionlaw==1 || frictionlaw==3 || frictionlaw==7){
-		parameters->AddObject(iomodel->CopyConstantObject("md.friction.coupling",FrictionCouplingEnum));
-		parameters->AddObject(iomodel->CopyConstantObject("md.friction.effective_pressure_limit",FrictionEffectivePressureLimitEnum));
-	}
-	if(frictionlaw==9){
-		parameters->AddObject(iomodel->CopyConstantObject("md.friction.gamma",FrictionGammaEnum));
-		parameters->AddObject(iomodel->CopyConstantObject("md.friction.effective_pressure_limit",FrictionEffectivePressureLimitEnum));
-		parameters->AddObject(new IntParam(FrictionCouplingEnum,0));
-	}
 
   /*Requested outputs*/
@@ -181,4 +157,7 @@
   if(numoutputs)parameters->AddObject(new StringArrayParam(HydrologyRequestedOutputsEnum,requestedoutputs,numoutputs));
   iomodel->DeleteData(&requestedoutputs,numoutputs,"md.hydrology.requested_outputs");
+
+	/*Friction*/
+	FrictionUpdateParameters(parameters, iomodel);
 }/*}}}*/
 
@@ -198,4 +177,9 @@
 }/*}}}*/
 ElementMatrix* HydrologyShaktiAnalysis::CreateKMatrix(Element* element){/*{{{*/
+
+	/* Check if ice in element */
+	if(element->IsAllFloating() || !element->IsIceInElement()) return NULL;
+	if(!element->IsOnBase()) return NULL;
+	Element* basalelement = element->SpawnBasalElement();
 
 	/*Intermediaries */
@@ -205,57 +189,55 @@
 
 	/*Fetch number of nodes and dof for this finite element*/
-	int numnodes = element->GetNumberOfNodes();
+	int numnodes = basalelement->GetNumberOfNodes();
 
 	/*Initialize Element vector and other vectors*/
-	ElementMatrix* Ke     = element->NewElementMatrix();
+	ElementMatrix* Ke     = basalelement->NewElementMatrix();
 	IssmDouble*    dbasis = xNew<IssmDouble>(2*numnodes);
 	IssmDouble*    basis  = xNew<IssmDouble>(numnodes);
 
 	/*Retrieve all inputs and parameters*/
-	element->GetVerticesCoordinates(&xyz_list);
+	basalelement->GetVerticesCoordinates(&xyz_list);
 
 	/*Get conductivity from inputs*/
-	IssmDouble conductivity = GetConductivity(element);
+	IssmDouble conductivity = GetConductivity(basalelement);
 
 	/*Get englacial storage coefficient*/
 	IssmDouble storage,dt;
-	element->FindParam(&storage,HydrologyStorageEnum);
-	element->FindParam(&dt,TimesteppingTimeStepEnum);
-
-        /*Get all inputs and parameters*/
-        element->FindParam(&rho_water,MaterialsRhoFreshwaterEnum);
-        element->FindParam(&rho_ice,MaterialsRhoIceEnum);
-        element->FindParam(&g,ConstantsGEnum);
-        Input* B_input = element->GetInput(MaterialsRheologyBEnum);         _assert_(B_input);
-        Input* n_input = element->GetInput(MaterialsRheologyNEnum);         _assert_(n_input);
-        Input* gap_input = element->GetInput(HydrologyGapHeightEnum);         _assert_(gap_input);
-        Input* thickness_input = element->GetInput(ThicknessEnum);                  _assert_(thickness_input);
-        Input* head_input = element->GetInput(HydrologyHeadEnum);              _assert_(head_input);
-        Input* base_input = element->GetInput(BaseEnum);                      _assert_(base_input);
+	basalelement->FindParam(&storage,HydrologyStorageEnum);
+	basalelement->FindParam(&dt,TimesteppingTimeStepEnum);
+
+	/*Get all inputs and parameters*/
+	basalelement->FindParam(&rho_water,MaterialsRhoFreshwaterEnum);
+	basalelement->FindParam(&rho_ice,MaterialsRhoIceEnum);
+	basalelement->FindParam(&g,ConstantsGEnum);
+	Input* B_input = basalelement->GetInput(MaterialsRheologyBEnum);         _assert_(B_input);
+	Input* n_input = basalelement->GetInput(MaterialsRheologyNEnum);         _assert_(n_input);
+	Input* gap_input = basalelement->GetInput(HydrologyGapHeightEnum);         _assert_(gap_input);
+	Input* thickness_input = basalelement->GetInput(ThicknessEnum);                  _assert_(thickness_input);
+	Input* head_input = basalelement->GetInput(HydrologyHeadEnum);              _assert_(head_input);
+	Input* base_input = basalelement->GetInput(BaseEnum);                      _assert_(base_input);
 
 	/* Start  looping on the number of gaussian points: */
-	Gauss* gauss=element->NewGauss(1);
+	Gauss* gauss=basalelement->NewGauss(1);
 	while(gauss->next()){
 
-		element->JacobianDeterminant(&Jdet,xyz_list,gauss);
-		element->NodalFunctionsDerivatives(dbasis,xyz_list,gauss);
-		element->NodalFunctions(basis,gauss);
-
-                base_input->GetInputValue(&bed,gauss);
-                thickness_input->GetInputValue(&thickness,gauss);
-                gap_input->GetInputValue(&gap,gauss);
-                head_input->GetInputValue(&head,gauss);
-
-                /*Get ice A parameter*/
-                B_input->GetInputValue(&B,gauss);
-                n_input->GetInputValue(&n,gauss);
-                A=pow(B,-n);
-
-                /*Get water and ice pressures*/
-                IssmDouble pressure_ice   = rho_ice*g*thickness;    _assert_(pressure_ice>0.);
-                IssmDouble pressure_water = rho_water*g*(head-bed);
-                if(pressure_water>pressure_ice) pressure_water = pressure_ice;
-
-
+		basalelement->JacobianDeterminant(&Jdet,xyz_list,gauss);
+		basalelement->NodalFunctionsDerivatives(dbasis,xyz_list,gauss);
+		basalelement->NodalFunctions(basis,gauss);
+
+		base_input->GetInputValue(&bed,gauss);
+		thickness_input->GetInputValue(&thickness,gauss);
+		gap_input->GetInputValue(&gap,gauss);
+		head_input->GetInputValue(&head,gauss);
+
+		/*Get ice A parameter*/
+		B_input->GetInputValue(&B,gauss);
+		n_input->GetInputValue(&n,gauss);
+		A=pow(B,-n);
+
+		/*Get water and ice pressures*/
+		IssmDouble pressure_ice   = rho_ice*g*thickness;    _assert_(pressure_ice>0.);
+		IssmDouble pressure_water = rho_water*g*(head-bed);
+		if(pressure_water>pressure_ice) pressure_water = pressure_ice;
 
 		for(int i=0;i<numnodes;i++){
@@ -263,5 +245,5 @@
 				Ke->values[i*numnodes+j] += conductivity*gauss->weight*Jdet*(dbasis[0*numnodes+i]*dbasis[0*numnodes+j] + dbasis[1*numnodes+i]*dbasis[1*numnodes+j])
 				  + gauss->weight*Jdet*storage/dt*basis[i]*basis[j]
-                			+gauss->weight*Jdet*A*(n)*(pow(fabs(pressure_ice-pressure_water),(n-1))*rho_water*g)*gap*basis[i]*basis[j];
+				  +gauss->weight*Jdet*A*(n)*(pow(fabs(pressure_ice-pressure_water),(n-1))*rho_water*g)*gap*basis[i]*basis[j];
 			}
 		}
@@ -273,4 +255,5 @@
 	xDelete<IssmDouble>(dbasis);
 	delete gauss;
+	if(basalelement->IsSpawnedElement()){basalelement->DeleteMaterials(); delete basalelement;};
 	return Ke;
 }/*}}}*/
@@ -278,5 +261,7 @@
 
 	/*Skip if water or ice shelf element*/
-	if(element->IsAllFloating()) return NULL;
+	if(element->IsAllFloating() || !element->IsIceInElement()) return NULL;
+	if(!element->IsOnBase()) return NULL;
+	Element* basalelement = element->SpawnBasalElement();
 
 	/*Intermediaries */
@@ -289,45 +274,45 @@
 
 	/*Fetch number of nodes and dof for this finite element*/
-	int numnodes = element->GetNumberOfNodes();
+	int numnodes = basalelement->GetNumberOfNodes();
 
 	/*Initialize Element vector and other vectors*/
-	ElementVector* pe    = element->NewElementVector();
+	ElementVector* pe    = basalelement->NewElementVector();
 	IssmDouble*    basis = xNew<IssmDouble>(numnodes);
 
 	/*Retrieve all inputs and parameters*/
-	element->GetVerticesCoordinates(&xyz_list);
-	IssmDouble  latentheat      = element->FindParam(MaterialsLatentheatEnum);
-	IssmDouble  g               = element->FindParam(ConstantsGEnum);
-	IssmDouble  rho_ice         = element->FindParam(MaterialsRhoIceEnum);
-	IssmDouble  rho_water       = element->FindParam(MaterialsRhoFreshwaterEnum);
-	Input* geothermalflux_input = element->GetInput(BasalforcingsGeothermalfluxEnum);_assert_(geothermalflux_input);
-	Input* head_input           = element->GetInput(HydrologyHeadEnum);              _assert_(head_input);
-	Input* gap_input            = element->GetInput(HydrologyGapHeightEnum);         _assert_(gap_input);
-	Input* thickness_input      = element->GetInput(ThicknessEnum);                  _assert_(thickness_input);
-	Input* base_input           = element->GetInput(BaseEnum);                       _assert_(base_input);
-	Input* B_input              = element->GetInput(MaterialsRheologyBEnum);         _assert_(B_input);
-	Input* n_input              = element->GetInput(MaterialsRheologyNEnum);         _assert_(n_input);
-	Input* englacial_input      = element->GetInput(HydrologyEnglacialInputEnum);    _assert_(englacial_input);
-	Input* lr_input             = element->GetInput(HydrologyBumpSpacingEnum);       _assert_(lr_input);
-	Input* br_input             = element->GetInput(HydrologyBumpHeightEnum);        _assert_(br_input);
-   Input* headold_input        = element->GetInput(HydrologyHeadOldEnum);           _assert_(headold_input);
+	basalelement->GetVerticesCoordinates(&xyz_list);
+	IssmDouble  latentheat      = basalelement->FindParam(MaterialsLatentheatEnum);
+	IssmDouble  g               = basalelement->FindParam(ConstantsGEnum);
+	IssmDouble  rho_ice         = basalelement->FindParam(MaterialsRhoIceEnum);
+	IssmDouble  rho_water       = basalelement->FindParam(MaterialsRhoFreshwaterEnum);
+	Input* geothermalflux_input = basalelement->GetInput(BasalforcingsGeothermalfluxEnum);_assert_(geothermalflux_input);
+	Input* head_input           = basalelement->GetInput(HydrologyHeadEnum);              _assert_(head_input);
+	Input* gap_input            = basalelement->GetInput(HydrologyGapHeightEnum);         _assert_(gap_input);
+	Input* thickness_input      = basalelement->GetInput(ThicknessEnum);                  _assert_(thickness_input);
+	Input* base_input           = basalelement->GetInput(BaseEnum);                       _assert_(base_input);
+	Input* B_input              = basalelement->GetInput(MaterialsRheologyBEnum);         _assert_(B_input);
+	Input* n_input              = basalelement->GetInput(MaterialsRheologyNEnum);         _assert_(n_input);
+	Input* englacial_input      = basalelement->GetInput(HydrologyEnglacialInputEnum);    _assert_(englacial_input);
+	Input* lr_input             = basalelement->GetInput(HydrologyBumpSpacingEnum);       _assert_(lr_input);
+	Input* br_input             = basalelement->GetInput(HydrologyBumpHeightEnum);        _assert_(br_input);
+   Input* headold_input        = basalelement->GetInput(HydrologyHeadOldEnum);           _assert_(headold_input);
 
 	/*Get conductivity from inputs*/
-	IssmDouble conductivity = GetConductivity(element);
+	IssmDouble conductivity = GetConductivity(basalelement);
 
 	/*Get englacial storage coefficient*/
 	IssmDouble storage,dt;
-   element->FindParam(&storage,HydrologyStorageEnum);
-   element->FindParam(&dt,TimesteppingTimeStepEnum);
-
-	/*Build friction element, needed later: */
-	Friction* friction=new Friction(element,2);
+   basalelement->FindParam(&storage,HydrologyStorageEnum);
+   basalelement->FindParam(&dt,TimesteppingTimeStepEnum);
+
+	/*Build friction basalelement, needed later: */
+	Friction* friction=new Friction(basalelement,2);
 
 	/* Start  looping on the number of gaussian points: */
-	Gauss* gauss=element->NewGauss(2);
+	Gauss* gauss=basalelement->NewGauss(2);
 	while(gauss->next()){
 
-		element->JacobianDeterminant(&Jdet,xyz_list,gauss);
-		element->NodalFunctions(basis,gauss);
+		basalelement->JacobianDeterminant(&Jdet,xyz_list,gauss);
+		basalelement->NodalFunctions(basis,gauss);
 		geothermalflux_input->GetInputValue(&G,gauss);
 		base_input->GetInputValue(&bed,gauss);
@@ -368,21 +353,21 @@
 
 		/*Compute change in sensible heat due to changes in pressure melting point*/
-   		dpressure_water[0] = rho_water*g*(dh[0] - dbed[0]);
+		dpressure_water[0] = rho_water*g*(dh[0] - dbed[0]);
 		dpressure_water[1] = rho_water*g*(dh[1] - dbed[1]);
 
-   	meltrate = 1/latentheat*(G+frictionheat+rho_water*g*conductivity*(dh[0]*dh[0]+dh[1]*dh[1]));
-
-                  for(int i=0;i<numnodes;i++) pe->values[i]+=Jdet*gauss->weight*
-                   (
-                    meltrate*(1/rho_water-1/rho_ice)
-                    +A*pow(fabs(pressure_ice - pressure_water),n-1)*(pressure_ice + rho_water*g*bed)*gap
-                    +(n-1)*A*pow(fabs(pressure_ice - pressure_water),n-1)*(rho_water*g*head)*gap
-                    -beta*sqrt(vx*vx+vy*vy)
-                    +ieb
-                    +storage*head_old/dt
-                    )*basis[i];
-
-	
-	}
+		meltrate = 1/latentheat*(G+frictionheat+rho_water*g*conductivity*(dh[0]*dh[0]+dh[1]*dh[1]));
+
+		for(int i=0;i<numnodes;i++) pe->values[i]+=Jdet*gauss->weight*
+		 (
+		  meltrate*(1/rho_water-1/rho_ice)
+		  +A*pow(fabs(pressure_ice - pressure_water),n-1)*(pressure_ice + rho_water*g*bed)*gap
+		  +(n-1)*A*pow(fabs(pressure_ice - pressure_water),n-1)*(rho_water*g*head)*gap
+		  -beta*sqrt(vx*vx+vy*vy)
+		  +ieb
+		  +storage*head_old/dt
+		 )*basis[i];
+
+	}
+
 	/*Clean up and return*/
 	xDelete<IssmDouble>(xyz_list);
@@ -390,4 +375,5 @@
 	delete friction;
 	delete gauss;
+	if(basalelement->IsSpawnedElement()){basalelement->DeleteMaterials(); delete basalelement;};
 	return pe;
 }/*}}}*/
@@ -399,4 +385,7 @@
 }/*}}}*/
 void           HydrologyShaktiAnalysis::InputUpdateFromSolution(IssmDouble* solution,Element* element){/*{{{*/
+
+	/*Only update if on base*/
+	if(!element->IsOnBase()) return;
 
 	/*Intermediary*/
@@ -441,5 +430,5 @@
 
 	/*Add input to the element: */
-	element->AddInput(HydrologyHeadEnum,values,element->GetElementType());
+	element->AddBasalInput(HydrologyHeadEnum,values,element->GetElementType());
 
 	/*Update reynolds number according to new solution*/
@@ -454,5 +443,5 @@
 
 	IssmDouble reynolds = conductivity*sqrt(dh[0]*dh[0]+dh[1]*dh[1])/NU;
-	element->AddInput(HydrologyReynoldsEnum,&reynolds,P0Enum);
+	element->AddBasalInput(HydrologyReynoldsEnum,&reynolds,P0Enum);
 
    /*Compute new effective pressure*/
@@ -468,5 +457,48 @@
 }/*}}}*/
 void           HydrologyShaktiAnalysis::UpdateConstraints(FemModel* femmodel){/*{{{*/
-	/*Default, do nothing*/
+	/*Update active elements based on ice levelset and ocean levelset*/
+	GetMaskOfIceVerticesLSMx(femmodel,true);
+	SetActiveNodesLSMx(femmodel,true);
+
+	IssmDouble rho_ice   = femmodel->parameters->FindParam(MaterialsRhoIceEnum);
+	IssmDouble rho_water = femmodel->parameters->FindParam(MaterialsRhoFreshwaterEnum);
+	IssmDouble g         = femmodel->parameters->FindParam(ConstantsGEnum);
+
+	/*Constrain all nodes that are grounded and unconstrain the ones that float*/
+	for(Object* & object : femmodel->elements->objects){
+
+		/*Get current element and return if not on base*/
+		Element *element  = xDynamicCast<Element*>(object);
+		if(!element->IsOnBase()) continue;
+
+		int         numnodes  = element->GetNumberOfNodes();
+		IssmDouble *mask      = xNew<IssmDouble>(numnodes);
+		IssmDouble *bed       = xNew<IssmDouble>(numnodes);
+		IssmDouble *thickness = xNew<IssmDouble>(numnodes);
+		IssmDouble *ls_active = xNew<IssmDouble>(numnodes);
+
+		element->GetInputListOnNodes(&mask[0],MaskOceanLevelsetEnum);
+		element->GetInputListOnNodes(&bed[0],BaseEnum);
+		element->GetInputListOnNodes(&thickness[0],ThicknessEnum);
+		element->GetInputListOnNodes(&ls_active[0],HydrologyMaskNodeActivationEnum);
+
+		//for(int in=0;in<numnodes;in++){ //
+		for(int in=0;in<3;in++){ //
+			Node* node=element->GetNode(in);
+			if(mask[in]>0. && ls_active[in]==1.){
+				node->Activate(); //Not sure if we need this!
+			}
+			else{
+				IssmDouble phi =  rho_ice*g*thickness[in] + rho_water*g*bed[in]; //FIXME this is correct!
+				node->Deactivate();// Not sure if we need this
+				node->ApplyConstraint(0,phi);
+			}
+		}
+		xDelete<IssmDouble>(mask);
+		xDelete<IssmDouble>(bed);
+		xDelete<IssmDouble>(thickness);
+		xDelete<IssmDouble>(ls_active);
+	}
+
 	return;
 }/*}}}*/
@@ -505,39 +537,41 @@
 
 	/*Skip if water or ice shelf element*/
-	if(element->IsAllFloating()) return;
+	if(element->IsAllFloating() || !element->IsIceInElement()) return;
+	if(!element->IsOnBase()) return;
+	Element* basalelement = element->SpawnBasalElement();
 
 	/*Intermediaries */
-	IssmDouble newgap = 0.;
-	IssmDouble  Jdet,meltrate,G,dh[2],B,A,n,dt;
+	IssmDouble  newgap = 0.;
+	IssmDouble  Jdet,meltrate,G,dh[3],B,A,n,dt;
 	IssmDouble  gap,bed,thickness,head;
 	IssmDouble  lr,br,vx,vy,beta,lc;
 	IssmDouble  alpha2,frictionheat;
 	IssmDouble* xyz_list = NULL;
-  	IssmDouble  dpressure_water[2],dbed[2],PMPheat,dissipation;
+	IssmDouble  dpressure_water[3],dbed[3],PMPheat,dissipation;
 	IssmDouble q = 0.;
-   	IssmDouble channelization = 0.;
+	IssmDouble channelization = 0.;
 
 	/*Retrieve all inputs and parameters*/
-	element->GetVerticesCoordinates(&xyz_list);
-	element->FindParam(&dt,TimesteppingTimeStepEnum);
-	IssmDouble  latentheat      = element->FindParam(MaterialsLatentheatEnum);
-	IssmDouble  g               = element->FindParam(ConstantsGEnum);
-	IssmDouble  rho_ice         = element->FindParam(MaterialsRhoIceEnum);
-	IssmDouble  rho_water       = element->FindParam(MaterialsRhoFreshwaterEnum);
-	Input* geothermalflux_input = element->GetInput(BasalforcingsGeothermalfluxEnum);_assert_(geothermalflux_input);
-	Input* head_input           = element->GetInput(HydrologyHeadEnum);              _assert_(head_input);
-	Input* gap_input            = element->GetInput(HydrologyGapHeightEnum);         _assert_(gap_input);
-	Input* thickness_input      = element->GetInput(ThicknessEnum);                  _assert_(thickness_input);
-	Input* base_input           = element->GetInput(BaseEnum);                       _assert_(base_input);
-	Input* B_input              = element->GetInput(MaterialsRheologyBEnum);         _assert_(B_input);
-	Input* n_input              = element->GetInput(MaterialsRheologyNEnum);         _assert_(n_input);
-	Input* lr_input             = element->GetInput(HydrologyBumpSpacingEnum);       _assert_(lr_input);
-	Input* br_input             = element->GetInput(HydrologyBumpHeightEnum);        _assert_(br_input);
+	basalelement->GetVerticesCoordinates(&xyz_list);
+	basalelement->FindParam(&dt,TimesteppingTimeStepEnum);
+	IssmDouble  latentheat      = basalelement->FindParam(MaterialsLatentheatEnum);
+	IssmDouble  g               = basalelement->FindParam(ConstantsGEnum);
+	IssmDouble  rho_ice         = basalelement->FindParam(MaterialsRhoIceEnum);
+	IssmDouble  rho_water       = basalelement->FindParam(MaterialsRhoFreshwaterEnum);
+	Input* geothermalflux_input = basalelement->GetInput(BasalforcingsGeothermalfluxEnum);_assert_(geothermalflux_input);
+	Input* head_input           = basalelement->GetInput(HydrologyHeadEnum);              _assert_(head_input);
+	Input* gap_input            = basalelement->GetInput(HydrologyGapHeightEnum);         _assert_(gap_input);
+	Input* thickness_input      = basalelement->GetInput(ThicknessEnum);                  _assert_(thickness_input);
+	Input* base_input           = basalelement->GetInput(BaseEnum);                       _assert_(base_input);
+	Input* B_input              = basalelement->GetInput(MaterialsRheologyBEnum);         _assert_(B_input);
+	Input* n_input              = basalelement->GetInput(MaterialsRheologyNEnum);         _assert_(n_input);
+	Input* lr_input             = basalelement->GetInput(HydrologyBumpSpacingEnum);       _assert_(lr_input);
+	Input* br_input             = basalelement->GetInput(HydrologyBumpHeightEnum);        _assert_(br_input);
 
 	/*Get conductivity from inputs*/
-	IssmDouble conductivity = GetConductivity(element);
-
-	/*Build friction element, needed later: */
-	Friction* friction=new Friction(element,2);
+	IssmDouble conductivity = GetConductivity(basalelement);
+
+	/*Build friction basalelement, needed later: */
+	Friction* friction=new Friction(basalelement,2);
 
 	/*Keep track of weights*/
@@ -545,8 +579,8 @@
 
 	/* Start  looping on the number of gaussian points: */
-	Gauss* gauss=element->NewGauss(2);
+	Gauss* gauss=basalelement->NewGauss(2);
 	while(gauss->next()){
 
-		element->JacobianDeterminant(&Jdet,xyz_list,gauss);
+		basalelement->JacobianDeterminant(&Jdet,xyz_list,gauss);
 
 		geothermalflux_input->GetInputValue(&G,gauss);
@@ -581,6 +615,6 @@
 		if(pressure_water>pressure_ice) pressure_water = pressure_ice;
 
-      /* Compute change in sensible heat due to changes in pressure melting point*/
-	   dpressure_water[0] = rho_water*g*(dh[0] - dbed[0]);
+		/* Compute change in sensible heat due to changes in pressure melting point*/
+		dpressure_water[0] = rho_water*g*(dh[0] - dbed[0]);
 		dpressure_water[1] = rho_water*g*(dh[1] - dbed[1]);
 		dissipation=rho_water*g*conductivity*(dh[0]*dh[0]+dh[1]*dh[1]);
@@ -588,15 +622,14 @@
 		meltrate = 1/latentheat*(G+frictionheat+rho_water*g*conductivity*(dh[0]*dh[0]+dh[1]*dh[1]));
 
-		element->AddInput(DummyEnum,&meltrate,P0Enum);
-		element->AddInput(EsaEmotionEnum,&frictionheat,P0Enum);
-		element->AddInput(EsaNmotionEnum,&dissipation,P0Enum);
-		element->AddInput(EsaUmotionEnum,&PMPheat,P0Enum);
-
+		//element->AddBasalInput(DummyEnum,&meltrate,P0Enum);
+		//element->AddBasalInput(EsaEmotionEnum,&frictionheat,P0Enum);
+		//element->AddBasalInput(EsaNmotionEnum,&dissipation,P0Enum);
+		//element->AddBasalInput(EsaUmotionEnum,&PMPheat,P0Enum);
 
 		newgap += gauss->weight*Jdet*(gap+dt*(
-					meltrate/rho_ice
-					-A*pow(fabs(pressure_ice-pressure_water),n-1)*(pressure_ice-pressure_water)*gap
-					+beta*sqrt(vx*vx+vy*vy)
-					));
+						meltrate/rho_ice
+						-A*pow(fabs(pressure_ice-pressure_water),n-1)*(pressure_ice-pressure_water)*gap
+						+beta*sqrt(vx*vx+vy*vy)
+						));
 
 
@@ -604,5 +637,5 @@
 
 		/* Compute basal water flux */
-      q += gauss->weight*Jdet*(conductivity*sqrt(dh[0]*dh[0]+dh[1]*dh[1]));
+		q += gauss->weight*Jdet*(conductivity*sqrt(dh[0]*dh[0]+dh[1]*dh[1]));
 
 		/* Compute "degree of channelization" (ratio of melt opening to opening by sliding) */
@@ -620,13 +653,13 @@
 
 	/*Add new gap as an input*/
-	element->AddInput(HydrologyGapHeightEnum,&newgap,P0Enum);
+	element->AddBasalInput(HydrologyGapHeightEnum,&newgap,P0Enum);
 
 	/*Divide by connectivity, add basal flux as an input*/
 	q = q/totalweights;
-	element->AddInput(HydrologyBasalFluxEnum,&q,P0Enum);
+	element->AddBasalInput(HydrologyBasalFluxEnum,&q,P0Enum);
 
 	/* Divide by connectivity, add degree of channelization as an input */
 	channelization = channelization/totalweights;
-	element->AddInput(DegreeOfChannelizationEnum,&channelization,P0Enum);
+	element->AddBasalInput(DegreeOfChannelizationEnum,&channelization,P0Enum);
 
 	/*Clean up and return*/
@@ -634,4 +667,5 @@
 	delete friction;
 	delete gauss;
+	if(basalelement->IsSpawnedElement()){basalelement->DeleteMaterials(); delete basalelement;};
 }/*}}}*/
 void HydrologyShaktiAnalysis::UpdateEffectivePressure(FemModel* femmodel){/*{{{*/
@@ -646,5 +680,6 @@
 
 	/*Skip if water or ice shelf element*/
-	if(element->IsAllFloating()) return;
+	if(element->IsAllFloating() || !element->IsIceInElement()) return;
+	if(!element->IsOnBase()) return;
 
 	/*Intermediaries*/
@@ -663,5 +698,4 @@
 	Input* base_input      = element->GetInput(BaseEnum);          _assert_(base_input);
 
-
    Gauss* gauss=element->NewGauss();
    for (int i=0;i<numnodes;i++){
@@ -676,5 +710,5 @@
 
 	/*Add new gap as an input*/
-	element->AddInput(EffectivePressureEnum,N,element->GetElementType());
+	element->AddBasalInput(EffectivePressureEnum,N,element->GetElementType());
 
 	/*Clean up and return*/
Index: /issm/trunk/src/c/analyses/HydrologyShreveAnalysis.cpp
===================================================================
--- /issm/trunk/src/c/analyses/HydrologyShreveAnalysis.cpp	(revision 28012)
+++ /issm/trunk/src/c/analyses/HydrologyShreveAnalysis.cpp	(revision 28013)
@@ -14,5 +14,5 @@
 	if(hydrology_model!=HydrologyshreveEnum) return;
 
-	IoModelToConstraintsx(constraints,iomodel,"md.hydrologyshreve.spcwatercolumn",HydrologyShreveAnalysisEnum,P1Enum);
+	IoModelToConstraintsx(constraints,iomodel,"md.hydrology.spcwatercolumn",HydrologyShreveAnalysisEnum,P1Enum);
 
 }/*}}}*/
Index: /issm/trunk/src/c/analyses/LevelsetAnalysis.cpp
===================================================================
--- /issm/trunk/src/c/analyses/LevelsetAnalysis.cpp	(revision 28012)
+++ /issm/trunk/src/c/analyses/LevelsetAnalysis.cpp	(revision 28013)
@@ -107,4 +107,8 @@
 			iomodel->FetchDataToInput(inputs,elements,"md.geometry.bed",BedEnum);
 			break;
+		case CalvingVonmisesADEnum:
+			iomodel->FetchDataToInput(inputs,elements,"md.calving.basin_id",CalvingBasinIdEnum);
+			iomodel->FetchDataToInput(inputs,elements,"md.geometry.bed",BedEnum);
+			break;
 		case CalvingDev2Enum:
 			iomodel->FetchDataToInput(inputs,elements,"md.calving.stress_threshold_groundedice",CalvingStressThresholdGroundediceEnum);
@@ -115,4 +119,6 @@
 		case CalvingParameterizationEnum:
 			iomodel->FetchDataToInput(inputs,elements,"md.geometry.bed",BedEnum);
+			break;
+		case CalvingCalvingMIPEnum:
 			break;
 
@@ -146,5 +152,5 @@
 		case FrontalForcingsDefaultEnum:
 			iomodel->FetchDataToInput(inputs,elements,"md.frontalforcings.meltingrate",CalvingMeltingrateEnum);
-			if (calvinglaw == CalvingParameterizationEnum) {
+			if ((calvinglaw == CalvingParameterizationEnum) || (calvinglaw == CalvingCalvingMIPEnum)) {
 				iomodel->FetchDataToInput(inputs,elements,"md.frontalforcings.ablationrate",CalvingAblationrateEnum);
 			}
@@ -153,9 +159,13 @@
          /*Retrieve thermal forcing only in the case of non-arma FrontalForcingsRignot*/
          iomodel->FetchDataToInput(inputs,elements,"md.frontalforcings.thermalforcing",ThermalForcingEnum);
-         /* Do not break here, still retrieve basin_ID,subglacial_discharge, etc.*/
-      case FrontalForcingsRignotarmaEnum:
          iomodel->FetchDataToInput(inputs,elements,"md.frontalforcings.basin_id",FrontalForcingsBasinIdEnum);
          iomodel->FetchDataToInput(inputs,elements,"md.frontalforcings.subglacial_discharge",FrontalForcingsSubglacialDischargeEnum);
-         break;	
+			break;
+		case FrontalForcingsRignotarmaEnum:
+			bool isdischargearma;
+			iomodel->FindConstant(&isdischargearma,"md.frontalforcings.isdischargearma");
+         iomodel->FetchDataToInput(inputs,elements,"md.frontalforcings.basin_id",FrontalForcingsBasinIdEnum);
+         if(isdischargearma==false) iomodel->FetchDataToInput(inputs,elements,"md.frontalforcings.subglacial_discharge",FrontalForcingsSubglacialDischargeEnum);
+			break;	
 		default:
 			_error_("Frontal forcings"<<EnumToStringx(melt_parameterization)<<" not supported yet");
@@ -183,4 +193,19 @@
 		case CalvingVonmisesEnum:
 			parameters->AddObject(iomodel->CopyConstantObject("md.calving.min_thickness",CalvingMinthicknessEnum));
+			break;
+		case CalvingVonmisesADEnum:
+			parameters->AddObject(iomodel->CopyConstantObject("md.calving.min_thickness",CalvingMinthicknessEnum));
+			parameters->AddObject(iomodel->CopyConstantObject("md.calving.num_basins",CalvingNumberofBasinsEnum));
+
+			iomodel->FetchData(&transparam,&M,&N,"md.calving.stress_threshold_groundedice");
+         _assert_(M>=1 && N>=1);
+         parameters->AddObject(new DoubleVecParam(CalvingADStressThresholdGroundediceEnum,transparam,M));
+         xDelete<IssmDouble>(transparam);
+
+         iomodel->FetchData(&transparam,&M,&N,"md.calving.stress_threshold_floatingice");
+         _assert_(M>=1 && N>=1);
+         parameters->AddObject(new DoubleVecParam(CalvingADStressThresholdFloatingiceEnum,transparam,M));
+         xDelete<IssmDouble>(transparam);
+         
 			break;
 		case CalvingMinthicknessEnum:
@@ -234,4 +259,8 @@
 			parameters->AddObject(iomodel->CopyConstantObject("md.calving.rc",CalvingRcEnum));
 			break;
+		case CalvingCalvingMIPEnum:
+			parameters->AddObject(iomodel->CopyConstantObject("md.calving.experiment",CalvingUseParamEnum));
+			parameters->AddObject(iomodel->CopyConstantObject("md.calving.min_thickness",CalvingMinthicknessEnum));
+			break;
 		default:
 			_error_("Calving law "<<EnumToStringx(calvinglaw)<<" not supported yet");
@@ -245,5 +274,6 @@
 			break;
 		case FrontalForcingsRignotarmaEnum:
-			/*Retrieve autoregressive parameters*/
+			parameters->AddObject(iomodel->CopyConstantObject("md.frontalforcings.num_basins",FrontalForcingsNumberofBasinsEnum));
+			/*Retrieve thermal forcing parameters*/
 			parameters->AddObject(iomodel->CopyConstantObject("md.frontalforcings.num_params",FrontalForcingsNumberofParamsEnum));
 			parameters->AddObject(iomodel->CopyConstantObject("md.frontalforcings.num_breaks",FrontalForcingsNumberofBreaksEnum));
@@ -273,5 +303,31 @@
          parameters->AddObject(new DoubleMatParam(FrontalForcingsARMAmonthtrendsEnum,transparam,M,N));
          xDelete<IssmDouble>(transparam);
-			/*Do not break here, generic FrontalForcingsRignot parameters still to be retrieved*/
+			parameters->AddObject(iomodel->CopyConstantObject("md.frontalforcings.isdischargearma",FrontalForcingsIsDischargeARMAEnum));
+			/*Retrieve subglacial discharge parameters */
+			bool isdischargearma;
+			parameters->FindParam(&isdischargearma,FrontalForcingsIsDischargeARMAEnum);
+			if(isdischargearma==true){
+				parameters->AddObject(iomodel->CopyConstantObject("md.frontalforcings.sd_num_params",FrontalForcingsSdNumberofParamsEnum));
+				parameters->AddObject(iomodel->CopyConstantObject("md.frontalforcings.sd_num_breaks",FrontalForcingsSdNumberofBreaksEnum));
+      	   parameters->AddObject(iomodel->CopyConstantObject("md.frontalforcings.sd_ar_order",FrontalForcingsSdarOrderEnum));
+      	   parameters->AddObject(iomodel->CopyConstantObject("md.frontalforcings.sd_ma_order",FrontalForcingsSdmaOrderEnum));
+      	   parameters->AddObject(iomodel->CopyConstantObject("md.frontalforcings.sd_arma_timestep",FrontalForcingsSdARMATimestepEnum));
+      	   iomodel->FetchData(&transparam,&M,&N,"md.frontalforcings.sd_polynomialparams");
+      	   parameters->AddObject(new DoubleMatParam(FrontalForcingsSdpolyparamsEnum,transparam,M,N));
+      	   xDelete<IssmDouble>(transparam);
+      	   iomodel->FetchData(&transparam,&M,&N,"md.frontalforcings.sd_datebreaks");
+      	   parameters->AddObject(new DoubleMatParam(FrontalForcingsSddatebreaksEnum,transparam,M,N));
+      	   xDelete<IssmDouble>(transparam);
+      	   iomodel->FetchData(&transparam,&M,&N,"md.frontalforcings.sd_arlag_coefs");
+      	   parameters->AddObject(new DoubleMatParam(FrontalForcingsSdarlagcoefsEnum,transparam,M,N));
+      	   xDelete<IssmDouble>(transparam);
+      	   iomodel->FetchData(&transparam,&M,&N,"md.frontalforcings.sd_malag_coefs");
+      	   parameters->AddObject(new DoubleMatParam(FrontalForcingsSdmalagcoefsEnum,transparam,M,N));
+      	   xDelete<IssmDouble>(transparam);
+				iomodel->FetchData(&transparam,&M,&N,"md.frontalforcings.sd_monthlyfrac");
+      	   parameters->AddObject(new DoubleMatParam(FrontalForcingsSdMonthlyFracEnum,transparam,M,N));
+      	   xDelete<IssmDouble>(transparam);
+			}
+			break;
 		case FrontalForcingsRignotEnum:
 			parameters->AddObject(iomodel->CopyConstantObject("md.frontalforcings.num_basins",FrontalForcingsNumberofBasinsEnum));
@@ -698,5 +754,5 @@
 
 	/*Apply minimum thickness criterion*/
-	if(calvinglaw==CalvingMinthicknessEnum || calvinglaw==CalvingVonmisesEnum || calvinglaw==CalvingParameterizationEnum){
+	if(calvinglaw==CalvingMinthicknessEnum || calvinglaw==CalvingVonmisesEnum || calvinglaw==CalvingParameterizationEnum || calvinglaw==CalvingVonmisesADEnum || calvinglaw==CalvingCalvingMIPEnum){
 
 		IssmDouble mig_max = femmodel->parameters->FindParam(MigrationMaxEnum);
Index: /issm/trunk/src/c/analyses/MasstransportAnalysis.cpp
===================================================================
--- /issm/trunk/src/c/analyses/MasstransportAnalysis.cpp	(revision 28012)
+++ /issm/trunk/src/c/analyses/MasstransportAnalysis.cpp	(revision 28013)
@@ -118,6 +118,6 @@
 	bool   isgroundingline;
 	bool   ismovingfront;
-	bool   isoceancoupling;
 	bool   issmb;
+	int    isoceancoupling;
 	int    grdmodel;
 
@@ -170,4 +170,5 @@
 		case FloatingMeltRateEnum:
 			iomodel->FetchDataToInput(inputs,elements,"md.basalforcings.floatingice_melting_rate",BasalforcingsFloatingiceMeltingRateEnum);
+			iomodel->FetchDataToInput(inputs,elements,"md.basalforcings.perturbation_melting_rate",BasalforcingsPerturbationMeltingRateEnum,0.);
 			if(isstochastic){
             iomodel->FetchDataToInput(inputs,elements,"md.stochasticforcing.default_id",StochasticForcingDefaultIdEnum);
@@ -208,5 +209,5 @@
 				if(iomodel->domaintype!=Domain2DhorizontalEnum && !element->IsOnBase()) continue;
 				for(int kk=0;kk<K;kk++){
-					element->DatasetInputAdd(BasalforcingsIsmip6TfEnum,array3d[kk],inputs,iomodel,Ms[kk],Ns[kk],1,BasalforcingsIsmip6TfEnum,7,kk);
+					element->DatasetInputAdd(BasalforcingsIsmip6TfEnum,array3d[kk],inputs,iomodel,Ms[kk],Ns[kk],1,BasalforcingsIsmip6TfEnum,kk);
 				}
 			}
@@ -622,5 +623,5 @@
 	IssmDouble  fraction1,fraction2;
 	IssmDouble  Jdet,dt;
-	IssmDouble  ms,mb,gmb,fmb,thickness;
+	IssmDouble  ms,mb,gmb,fmb,thickness,fmb_pert;
 	IssmDouble  vx,vy,vel,dvxdx,dvydy,xi,h,tau;
 	IssmDouble  dvx[2],dvy[2];
@@ -653,4 +654,5 @@
 	Input* gmb_input        = element->GetInput(BasalforcingsGroundediceMeltingRateEnum);  _assert_(gmb_input);
 	Input* fmb_input        = element->GetInput(BasalforcingsFloatingiceMeltingRateEnum);  _assert_(fmb_input);
+	//Input* fmb_pert_input   = element->GetInput(BasalforcingsPerturbationMeltingRateEnum); _assert_(fmb_pert_input);
 	Input* gllevelset_input = element->GetInput(MaskOceanLevelsetEnum);              _assert_(gllevelset_input);
 	Input* ms_input         = element->GetInput(SmbMassBalanceEnum);                       _assert_(ms_input);
@@ -659,9 +661,4 @@
 	Input* vyaverage_input  = element->GetInput(VyAverageEnum);										_assert_(vyaverage_input);
 
-//	if(element->Id()==9){
-//		gmb_input->Echo();
-//		_error_("S");
-//	}
-
 	h=element->CharacteristicLength();
 
@@ -685,4 +682,5 @@
 		gmb_input->GetInputValue(&gmb,gauss);
 		fmb_input->GetInputValue(&fmb,gauss);
+		//fmb_pert_input->GetInputValue(&fmb_pert,gauss);
 		gllevelset_input->GetInputValue(&gllevelset,gauss);
 		thickness_input->GetInputValue(&thickness,gauss);
@@ -697,5 +695,7 @@
 		}
 		else if(melt_style==NoMeltOnPartiallyFloatingEnum){
-			if (phi<0.00000001) mb=fmb;
+			if (phi<0.00000001){
+				mb=fmb;//+fmb_pert;
+			}
 			else mb=gmb;
 		}
Index: /issm/trunk/src/c/analyses/SealevelchangeAnalysis.cpp
===================================================================
--- /issm/trunk/src/c/analyses/SealevelchangeAnalysis.cpp	(revision 28012)
+++ /issm/trunk/src/c/analyses/SealevelchangeAnalysis.cpp	(revision 28013)
@@ -470,5 +470,5 @@
 			 *with steps equal to timeacc:*/
 			if(viscous){
-				nt=reCast<int>((final_time-start_time)/timeacc)+1;
+				nt=reCast<int,IssmDouble>((final_time-start_time)/timeacc)+1;
 #ifdef _HAVE_AD_
 				G_viscoelastic_interpolated=xNew<IssmDouble>(M*nt,"t");
Index: /issm/trunk/src/c/analyses/SmbAnalysis.cpp
===================================================================
--- /issm/trunk/src/c/analyses/SmbAnalysis.cpp	(revision 28012)
+++ /issm/trunk/src/c/analyses/SmbAnalysis.cpp	(revision 28013)
@@ -206,5 +206,7 @@
 			break;
 		case SMBsemicEnum:
-			iomodel->FetchDataToInput(inputs,elements,"md.thermal.spctemperature",ThermalSpctemperatureEnum);
+			int ismethod;
+			//if(VerboseSolution()) _printf0_("   smb semic: UpdateElements.\n");
+			//iomodel->FetchDataToInput(inputs,elements,"md.thermal.spctemperature",ThermalSpctemperatureEnum);
 			iomodel->FetchDataToInput(inputs,elements,"md.smb.s0gcm",SmbS0gcmEnum);
 			iomodel->FetchDataToInput(inputs,elements,"md.smb.dailysnowfall",SmbDailysnowfallEnum);
@@ -217,8 +219,44 @@
 			iomodel->FetchDataToInput(inputs,elements,"md.smb.dailyairhumidity",SmbDailyairhumidityEnum);
 			iomodel->FetchDataToInput(inputs,elements,"md.smb.dailytemperature",SmbDailytemperatureEnum);
-			break;
-		case SMBdebrisMLEnum:
-			iomodel->FetchDataToInput(inputs,elements,"md.initialization.debris",DebrisThicknessEnum);
-			break;
+			// assign initial SEMIC temperature from initialization class.
+			if(VerboseSolution()) _printf0_("   smb semic: UpdateElements - temperature.\n");
+			iomodel->FetchDataToInput(inputs,elements,"md.initialization.temperature",TemperatureEnum);
+
+			iomodel->FindConstant(&ismethod,"md.smb.ismethod");
+			if (ismethod == 1){
+				if(VerboseSolution()) _printf0_("   smb semic: UpdateElements - albedo.\n");
+				iomodel->FetchDataToInput(inputs,elements,"md.smb.albedo",SmbAlbedoInitEnum);
+				iomodel->FetchDataToInput(inputs,elements,"md.smb.albedo_snow",SmbAlbedoSnowInitEnum);
+				if(VerboseSolution()) _printf0_("   smb semic: UpdateElements - Hice/Hsnow.\n");
+				iomodel->FetchDataToInput(inputs,elements,"md.smb.hice",SmbHIceInitEnum);
+				iomodel->FetchDataToInput(inputs,elements,"md.smb.hsnow",SmbHSnowInitEnum);
+
+				// initial Temperature amplitude.
+				if(VerboseSolution()) _printf0_("   smb semic: UpdateElements - Tamp.\n");
+				iomodel->FetchDataToInput(inputs,elements,"md.smb.Tamp",SmbTampEnum);
+
+				// assign masking 
+				iomodel->FetchDataToInput(inputs,elements,"md.smb.mask",SmbMaskEnum);
+				iomodel->FetchDataToInput(inputs,elements,"md.smb.qmr",SmbSemicQmrInitEnum);
+				if(VerboseSolution()) _printf0_("   smb semic: UpdateElements - done.\n");
+			}
+			break;
+		case SMBdebrisEvattEnum:
+                        iomodel->FetchDataToInput(inputs,elements,"md.initialization.debris",DebrisThicknessEnum);
+                        iomodel->FetchDataToInput(inputs,elements,"md.smb.s0t",SmbS0tEnum);
+                        iomodel->FetchDataToInput(inputs,elements,"md.smb.snowheight",SmbSnowheightEnum);
+                        iomodel->FetchDataToDatasetInput(inputs,elements,"md.smb.temperature",SmbMonthlytemperaturesEnum);
+                        iomodel->FetchDataToDatasetInput(inputs,elements,"md.smb.precipitation",SmbPrecipitationEnum);
+                        iomodel->FetchDataToDatasetInput(inputs,elements,"md.smb.dsradiation",SmbMonthlydsradiationEnum);
+                        iomodel->FetchDataToDatasetInput(inputs,elements,"md.smb.dlradiation",SmbMonthlydlradiationEnum);
+                        iomodel->FetchDataToDatasetInput(inputs,elements,"md.smb.windspeed",SmbMonthlywindspeedEnum);
+                        iomodel->FetchDataToDatasetInput(inputs,elements,"md.smb.airhumidity",SmbMonthlyairhumidityEnum);
+                        iomodel->FetchDataToInput(inputs,elements,"md.smb.precipitation_anomaly",SmbPrecipitationsAnomalyEnum);
+                        iomodel->FetchDataToInput(inputs,elements,"md.smb.temperature_anomaly",SmbTemperaturesAnomalyEnum);
+                        iomodel->FetchDataToInput(inputs,elements,"md.smb.dsradiation_anomaly",SmbDsradiationAnomalyEnum);
+                        iomodel->FetchDataToInput(inputs,elements,"md.smb.dlradiation_anomaly",SmbDlradiationAnomalyEnum);
+                        iomodel->FetchDataToInput(inputs,elements,"md.smb.windspeed_anomaly",SmbWindspeedAnomalyEnum);
+                        iomodel->FetchDataToInput(inputs,elements,"md.smb.airhumidity_anomaly",SmbAirhumidityAnomalyEnum);
+                        break;
 		default:
 			_error_("Surface mass balance model "<<EnumToStringx(smb_model)<<" not supported yet");
@@ -420,7 +458,34 @@
 			break;
 		case SMBsemicEnum:
+			int ismethod;
+			parameters->FindParam(&ismethod,SmbSemicMethodEnum);
+			if (ismethod == 1){
+				parameters->AddObject(iomodel->CopyConstantObject("md.smb.desfacElevation",SmbDesfacElevEnum));
+				parameters->AddObject(iomodel->CopyConstantObject("md.smb.hcrit",SmbSemicHcritEnum));
+				parameters->AddObject(iomodel->CopyConstantObject("md.smb.rcrit",SmbSemicRcritEnum));
+				/*Define albedo parameters.*/
+				parameters->AddObject(iomodel->CopyConstantObject("md.smb.albedo_scheme",SmbAlbedoSchemeEnum));
+				parameters->AddObject(iomodel->CopyConstantObject("md.smb.alb_smax",SmbAlbedoSnowMaxEnum));
+				parameters->AddObject(iomodel->CopyConstantObject("md.smb.alb_smin",SmbAlbedoSnowMinEnum));
+				parameters->AddObject(iomodel->CopyConstantObject("md.smb.albi",SmbAlbedoIceEnum));
+				parameters->AddObject(iomodel->CopyConstantObject("md.smb.albl",SmbAlbedoLandEnum));
+
+				//albedo parameter - slatter
+				parameters->AddObject(iomodel->CopyConstantObject("md.smb.tmin",SmbSemicTmaxEnum));
+				parameters->AddObject(iomodel->CopyConstantObject("md.smb.tmax",SmbSemicTminEnum));
+
+				//albedo parameter - isba & denby
+				parameters->AddObject(iomodel->CopyConstantObject("md.smb.mcrit",SmbSemicMcritEnum));
+				//albedo parameter - isba
+				parameters->AddObject(iomodel->CopyConstantObject("md.smb.tau_a",SmbSemicTauAEnum)); 
+				parameters->AddObject(iomodel->CopyConstantObject("md.smb.tau_f",SmbSemicTauFEnum));
+				parameters->AddObject(iomodel->CopyConstantObject("md.smb.wcrit",SmbSemicWcritEnum));
+				//albedo parameter - alex
+				parameters->AddObject(iomodel->CopyConstantObject("md.smb.tmid",SmbSemicTmidEnum));
+				parameters->AddObject(iomodel->CopyConstantObject("md.smb.afac",SmbSemicAfacEnum));
+			}
 			/*Nothing to add to parameters*/
 			break;
-		case SMBdebrisMLEnum:
+		case SMBdebrisEvattEnum:
 			/*Nothing to add to parameters*/
 			break;
@@ -514,14 +579,16 @@
 		case SMBsemicEnum:
 			#ifdef _HAVE_SEMIC_
-			if(VerboseSolution())_printf0_("  call smb SEMIC module\n");
-			SmbSemicx(femmodel);
+			if(VerboseSolution())_printf0_("   call smb SEMIC module\n");
+			int ismethod;
+			femmodel->parameters->FindParam(&ismethod,SmbSemicMethodEnum);
+			SmbSemicx(femmodel,ismethod);
 			#else
 			_error_("SEMIC not installed");
 			#endif //_HAVE_SEMIC_
 			break;
-		case SMBdebrisMLEnum:
-			if(VerboseSolution())_printf0_("        call smb debris Mayer & Liculli module\n");
-			SmbDebrisMLx(femmodel);
-			break;
+		case SMBdebrisEvattEnum:
+                        if(VerboseSolution())_printf0_("        call smb Evatt debris module\n");
+                        SmbDebrisEvattx(femmodel);
+                        break;
 		default:
 			_error_("Surface mass balance model "<<EnumToStringx(smb_model)<<" not supported yet");
Index: /issm/trunk/src/c/analyses/StressbalanceAnalysis.cpp
===================================================================
--- /issm/trunk/src/c/analyses/StressbalanceAnalysis.cpp	(revision 28012)
+++ /issm/trunk/src/c/analyses/StressbalanceAnalysis.cpp	(revision 28013)
@@ -692,6 +692,5 @@
 	/*Intermediaries*/
 	int    materials_type,finiteelement,fe_FS;
-	int    approximation,frictionlaw;
-	int    FrictionCoupling;
+	int    approximation;
 	int*   finiteelement_list=NULL;
 	bool   isSSA,isL1L2,isMOLHO,isHO,isFS,iscoupling;
@@ -710,5 +709,4 @@
 	iomodel->FindConstant(&materials_type,"md.materials.type");
 	iomodel->FindConstant(&ismovingfront,"md.transient.ismovingfront");
-	iomodel->FindConstant(&frictionlaw,"md.friction.law");
 
 	/*return if no processing required*/
@@ -876,111 +874,6 @@
 	}
 
-	/*Friction law variables*/
-	switch(frictionlaw){
-		case 1:
-			iomodel->FindConstant(&FrictionCoupling,"md.friction.coupling");
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.coefficient",FrictionCoefficientEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.p",FrictionPEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.q",FrictionQEnum);
-			if(FrictionCoupling==3){
-				iomodel->FetchDataToInput(inputs,elements,"md.friction.effective_pressure",FrictionEffectivePressureEnum);}
-			else if(FrictionCoupling==4){
-				iomodel->FetchDataToInput(inputs,elements,"md.friction.effective_pressure",EffectivePressureEnum);
-			}
-			break;
-		case 2:
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.C",FrictionCEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.m",FrictionMEnum);
-			break;
-		case 3:
-			iomodel->FindConstant(&FrictionCoupling,"md.friction.coupling");
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.C",FrictionCEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.As",FrictionAsEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.q",FrictionQEnum);
-			if(FrictionCoupling==3){
-				iomodel->FetchDataToInput(inputs,elements,"md.friction.effective_pressure",FrictionEffectivePressureEnum);}
-			else if(FrictionCoupling==4){
-				iomodel->FetchDataToInput(inputs,elements,"md.friction.effective_pressure",EffectivePressureEnum);
-			}
-			break;
-		case 4:
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.coefficient",FrictionCoefficientEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.p",FrictionPEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.q",FrictionQEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.initialization.pressure",PressureEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.initialization.temperature",TemperatureEnum);
-			iomodel->FindConstant(&FrictionCoupling,"md.friction.coupling");
-			break;
-		case 5:
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.coefficient",FrictionCoefficientEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.p",FrictionPEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.q",FrictionQEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.water_layer",FrictionWaterLayerEnum);
-			break;
-		case 6:
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.C",FrictionCEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.m",FrictionMEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.initialization.pressure",PressureEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.initialization.temperature",TemperatureEnum);
-			break;
-		case 7:
-			iomodel->FindConstant(&FrictionCoupling,"md.friction.coupling");
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.coefficient",FrictionCoefficientEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.coefficientcoulomb",FrictionCoefficientcoulombEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.p",FrictionPEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.q",FrictionQEnum);
-			if(FrictionCoupling==3){
-				iomodel->FetchDataToInput(inputs,elements,"md.friction.effective_pressure",FrictionEffectivePressureEnum);}
-			else if(FrictionCoupling==4){
-				iomodel->FetchDataToInput(inputs,elements,"md.friction.effective_pressure",EffectivePressureEnum);
-
-			}
-			break;
-		case 9:
-			iomodel->FetchDataToInput(inputs,elements,"md.initialization.temperature",TemperatureEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.coefficient",FrictionCoefficientEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.pressure_adjusted_temperature",FrictionPressureAdjustedTemperatureEnum);
-			InputUpdateFromConstantx(inputs,elements,1.,FrictionPEnum);
-			InputUpdateFromConstantx(inputs,elements,1.,FrictionQEnum);
-			break;
-		case 10:
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.till_friction_angle",FrictionTillFrictionAngleEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.sediment_compressibility_coefficient",FrictionSedimentCompressibilityCoefficientEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.hydrology.watercolumn_max",HydrologyWatercolumnMaxEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.initialization.watercolumn",WatercolumnEnum,0.);
-			break;
-		case 11:
-			iomodel->FindConstant(&FrictionCoupling,"md.friction.coupling");
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.m",FrictionMEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.C",FrictionCEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.Cmax",FrictionCmaxEnum);
-			if(FrictionCoupling==3){
-				iomodel->FetchDataToInput(inputs,elements,"md.friction.effective_pressure",FrictionEffectivePressureEnum);}
-			else if(FrictionCoupling==4){
-				iomodel->FetchDataToInput(inputs,elements,"md.friction.effective_pressure",EffectivePressureEnum);
-			}
-			break;
-		case 12:
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.m",FrictionMEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.C",FrictionCEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.f",FrictionfEnum);
-			break;
-		case 13:
-			iomodel->FindConstant(&FrictionCoupling,"md.friction.coupling");
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.C",FrictionCEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.m",FrictionMEnum);
-			if(FrictionCoupling==3){
-				iomodel->FetchDataToInput(inputs,elements,"md.friction.effective_pressure",FrictionEffectivePressureEnum);}
-			else if(FrictionCoupling==4){
-				iomodel->FetchDataToInput(inputs,elements,"md.friction.effective_pressure",EffectivePressureEnum);
-			}
-			break;
-		default:
-			_error_("friction law "<< frictionlaw <<" not supported");
-	}
-
-#ifdef _HAVE_ANDROID_
-	inputs->DuplicateInput(FrictionCoefficientEnum,AndroidFrictionCoefficientEnum);
-#endif
+	/*Friction*/
+	FrictionUpdateInputs(elements, inputs, iomodel);
 
 	/*Free data: */
@@ -1014,5 +907,4 @@
 	parameters->AddObject(iomodel->CopyConstantObject("md.stressbalance.FSreconditioning",StressbalanceFSreconditioningEnum));
 	parameters->AddObject(iomodel->CopyConstantObject("md.stressbalance.shelf_dampening",StressbalanceShelfDampeningEnum));
-	parameters->AddObject(iomodel->CopyConstantObject("md.friction.law",FrictionLawEnum));
 
 	/*XTH LATH parameters*/
@@ -1037,62 +929,6 @@
 	iomodel->DeleteData(&requestedoutputs,numoutputs,"md.stressbalance.requested_outputs");
 
-	/*Deal with friction parameters*/
-	int frictionlaw;
-	iomodel->FindConstant(&frictionlaw,"md.friction.law");
-	switch(frictionlaw){
-		case 1:
-			parameters->AddObject(iomodel->CopyConstantObject("md.friction.coupling",FrictionCouplingEnum));
-			parameters->AddObject(iomodel->CopyConstantObject("md.friction.effective_pressure_limit",FrictionEffectivePressureLimitEnum));
-			break;
-		case 2:
-			break;
-		case 3:
-			parameters->AddObject(iomodel->CopyConstantObject("md.friction.coupling",FrictionCouplingEnum));
-			parameters->AddObject(iomodel->CopyConstantObject("md.friction.effective_pressure_limit",FrictionEffectivePressureLimitEnum));
-			break;
-		case 4:
-			parameters->AddObject(iomodel->CopyConstantObject("md.friction.gamma",FrictionGammaEnum));
-			parameters->AddObject(iomodel->CopyConstantObject("md.friction.coupling",FrictionCouplingEnum));
-			parameters->AddObject(iomodel->CopyConstantObject("md.friction.effective_pressure_limit",FrictionEffectivePressureLimitEnum));
-			break;
-		case 5:
-			parameters->AddObject(iomodel->CopyConstantObject("md.friction.f",FrictionFEnum));
-			break;
-		case 6:
-			parameters->AddObject(iomodel->CopyConstantObject("md.friction.gamma",FrictionGammaEnum));
-			break;
-		case 7:
-			parameters->AddObject(iomodel->CopyConstantObject("md.friction.coupling",FrictionCouplingEnum));
-			parameters->AddObject(iomodel->CopyConstantObject("md.friction.effective_pressure_limit",FrictionEffectivePressureLimitEnum));
-			break;
-		case 8:
-			break;
-		case 9:
-			parameters->AddObject(iomodel->CopyConstantObject("md.friction.gamma",FrictionGammaEnum));
-			parameters->AddObject(iomodel->CopyConstantObject("md.friction.effective_pressure_limit",FrictionEffectivePressureLimitEnum));
-			parameters->AddObject(new IntParam(FrictionCouplingEnum,0));
-			break;
-		case 10:
-			parameters->AddObject(new IntParam(FrictionCouplingEnum,2)); /*comment this line to use effective pressure from Beuler and Pelt (2015)*/
-			parameters->AddObject(new DoubleParam(FrictionEffectivePressureLimitEnum,0.));
-			parameters->AddObject(iomodel->CopyConstantObject("md.friction.pseudoplasticity_exponent",FrictionPseudoplasticityExponentEnum));
-			parameters->AddObject(iomodel->CopyConstantObject("md.friction.threshold_speed",FrictionThresholdSpeedEnum));
-			parameters->AddObject(iomodel->CopyConstantObject("md.friction.delta",FrictionDeltaEnum));
-			parameters->AddObject(iomodel->CopyConstantObject("md.friction.void_ratio",FrictionVoidRatioEnum));
-			break;
-		case 11:
-			parameters->AddObject(iomodel->CopyConstantObject("md.friction.coupling",FrictionCouplingEnum));
-			parameters->AddObject(iomodel->CopyConstantObject("md.friction.effective_pressure_limit",FrictionEffectivePressureLimitEnum));
-			break;
-		case 12:
-			parameters->AddObject(new IntParam(FrictionCouplingEnum,2));
-			parameters->AddObject(iomodel->CopyConstantObject("md.friction.effective_pressure_limit",FrictionEffectivePressureLimitEnum));
-			break;
-		case 13:
-			parameters->AddObject(iomodel->CopyConstantObject("md.friction.coupling",FrictionCouplingEnum));
-			parameters->AddObject(iomodel->CopyConstantObject("md.friction.effective_pressure_limit",FrictionEffectivePressureLimitEnum));
-			break;
-		default: _error_("Friction law "<<frictionlaw<<" not implemented yet");
-	}
+	/*Friction*/
+	FrictionUpdateParameters(parameters, iomodel);
 
 }/*}}}*/
Index: /issm/trunk/src/c/analyses/StressbalanceSIAAnalysis.cpp
===================================================================
--- /issm/trunk/src/c/analyses/StressbalanceSIAAnalysis.cpp	(revision 28012)
+++ /issm/trunk/src/c/analyses/StressbalanceSIAAnalysis.cpp	(revision 28013)
@@ -113,8 +113,6 @@
 	bool   isSIA;
 	bool   ismovingfront;
-	int    frictionlaw;
 	iomodel->FindConstant(&isSIA,"md.flowequation.isSIA");
 	iomodel->FindConstant(&ismovingfront,"md.transient.ismovingfront");
-	iomodel->FindConstant(&frictionlaw,"md.friction.law");
 
 	/*Now, is the flag SIA on? otherwise, do nothing: */
@@ -140,25 +138,4 @@
 	iomodel->DeleteData(1,"md.flowequation.element_equation");
 
-	/*Friction law variables*/
-	switch(frictionlaw){
-		case 1:
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.coefficient",FrictionCoefficientEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.p",FrictionPEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.q",FrictionQEnum);
-			break;
-		case 2:
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.C",FrictionCEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.m",FrictionMEnum);
-			break;
-		case 6:
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.C",FrictionCEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.m",FrictionMEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.initialization.pressure",PressureEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.initialization.temperature",TemperatureEnum);
-			break;
-		default:
-			_error_("not supported");
-	}
-
 	iomodel->FetchDataToInput(inputs,elements,"md.geometry.thickness",ThicknessEnum);
 	iomodel->FetchDataToInput(inputs,elements,"md.mask.ocean_levelset",MaskOceanLevelsetEnum);
@@ -168,8 +145,12 @@
 	}
 
+	/*Friction*/
+	FrictionUpdateInputs(elements, inputs, iomodel);
+
 }/*}}}*/
 void StressbalanceSIAAnalysis::UpdateParameters(Parameters* parameters,IoModel* iomodel,int solution_enum,int analysis_enum){/*{{{*/
 
-	/*No specific parameters*/
+	/*Friction*/
+	FrictionUpdateParameters(parameters, iomodel);
 
 }/*}}}*/
Index: /issm/trunk/src/c/analyses/ThermalAnalysis.cpp
===================================================================
--- /issm/trunk/src/c/analyses/ThermalAnalysis.cpp	(revision 28012)
+++ /issm/trunk/src/c/analyses/ThermalAnalysis.cpp	(revision 28013)
@@ -106,6 +106,6 @@
 void ThermalAnalysis::UpdateElements(Elements* elements,Inputs* inputs,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
 
-	int frictionlaw,basalforcing_model,materialstype;
-	int FrictionCoupling;
+	int basalforcing_model,materialstype;
+
 	/*Now, is the model 3d? otherwise, do nothing: */
 	if(iomodel->domaintype==Domain2DhorizontalEnum)return;
@@ -127,5 +127,4 @@
 	iomodel->FindConstant(&dakota_analysis,"md.qmu.isdakota");
 	iomodel->FindConstant(&ismovingfront,"md.transient.ismovingfront");
-	iomodel->FindConstant(&frictionlaw,"md.friction.law");
 	iomodel->FindConstant(&materialstype,"md.materials.type");
 
@@ -183,73 +182,7 @@
 			break;
 	}
-	/*Friction law variables*/
-	switch(frictionlaw){
-		case 1:
-			iomodel->FindConstant(&FrictionCoupling,"md.friction.coupling");
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.coefficient",FrictionCoefficientEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.p",FrictionPEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.q",FrictionQEnum);
-			if (FrictionCoupling==3){
-				iomodel->FetchDataToInput(inputs,elements,"md.friction.effective_pressure",FrictionEffectivePressureEnum);}
-			else if(FrictionCoupling==4){
-				iomodel->FetchDataToInput(inputs,elements,"md.friction.effective_pressure",EffectivePressureEnum);
-			}
-			break;
-		case 2:
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.C",FrictionCEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.m",FrictionMEnum);
-			break;
-		case 3:
-			iomodel->FindConstant(&FrictionCoupling,"md.friction.coupling");
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.C",FrictionCEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.As",FrictionAsEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.q",FrictionQEnum);
-			if (FrictionCoupling==3){
-				iomodel->FetchDataToInput(inputs,elements,"md.friction.effective_pressure",FrictionEffectivePressureEnum);}
-			else if(FrictionCoupling==4){
-				iomodel->FetchDataToInput(inputs,elements,"md.friction.effective_pressure",EffectivePressureEnum);
-			}
-			break;
-		case 4:
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.coefficient",FrictionCoefficientEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.p",FrictionPEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.q",FrictionQEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.initialization.pressure",PressureEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.initialization.temperature",TemperatureEnum);
-			iomodel->FindConstant(&FrictionCoupling,"md.friction.coupling");
-			break;
-		case 5:
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.coefficient",FrictionCoefficientEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.p",FrictionPEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.q",FrictionQEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.water_layer",FrictionWaterLayerEnum);
-			break;
-		case 6:
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.C",FrictionCEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.m",FrictionMEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.initialization.pressure",PressureEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.initialization.temperature",TemperatureEnum);
-			break;
-		case 7:
-			iomodel->FindConstant(&FrictionCoupling,"md.friction.coupling");
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.coefficient",FrictionCoefficientEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.coefficientcoulomb",FrictionCoefficientcoulombEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.p",FrictionPEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.q",FrictionQEnum);
-			if (FrictionCoupling==3){
-				iomodel->FetchDataToInput(inputs,elements,"md.friction.effective_pressure",FrictionEffectivePressureEnum);}
-			else if(FrictionCoupling==4){
-				iomodel->FetchDataToInput(inputs,elements,"md.friction.effective_pressure",EffectivePressureEnum);
-			}
-			break;
-		case 9:
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.coefficient",FrictionCoefficientEnum);
-			iomodel->FetchDataToInput(inputs,elements,"md.friction.pressure_adjusted_temperature",FrictionPressureAdjustedTemperatureEnum);
-			InputUpdateFromConstantx(inputs,elements,1.,FrictionPEnum);
-			InputUpdateFromConstantx(inputs,elements,1.,FrictionQEnum);
-			break;
-		default:
-			_error_("friction law not supported");
-	}
+
+	/*Friction*/
+	FrictionUpdateInputs(elements, inputs, iomodel);
 }/*}}}*/
 void ThermalAnalysis::UpdateParameters(Parameters* parameters,IoModel* iomodel,int solution_enum,int analysis_enum){/*{{{*/
@@ -265,5 +198,4 @@
 	parameters->AddObject(iomodel->CopyConstantObject("md.thermal.isenthalpy",ThermalIsenthalpyEnum));
 	parameters->AddObject(iomodel->CopyConstantObject("md.thermal.isdynamicbasalspc",ThermalIsdynamicbasalspcEnum));
-	parameters->AddObject(iomodel->CopyConstantObject("md.friction.law",FrictionLawEnum));
 
 	iomodel->FindConstant(&requestedoutputs,&numoutputs,"md.thermal.requested_outputs");
@@ -272,25 +204,6 @@
 	iomodel->DeleteData(&requestedoutputs,numoutputs,"md.thermal.requested_outputs");
 
-	/*Deal with friction parameters*/
-	int frictionlaw;
-	iomodel->FindConstant(&frictionlaw,"md.friction.law");
-	if(frictionlaw==6){
-		parameters->AddObject(iomodel->CopyConstantObject("md.friction.gamma",FrictionGammaEnum));
-	}
-	if(frictionlaw==4){
-		parameters->AddObject(iomodel->CopyConstantObject("md.friction.gamma",FrictionGammaEnum));
-		parameters->AddObject(iomodel->CopyConstantObject("md.friction.coupling",FrictionCouplingEnum));
-		parameters->AddObject(iomodel->CopyConstantObject("md.friction.effective_pressure_limit",FrictionEffectivePressureLimitEnum));
-	}
-	if(frictionlaw==1 || frictionlaw==3 || frictionlaw==7){
-		parameters->AddObject(iomodel->CopyConstantObject("md.friction.coupling",FrictionCouplingEnum));
-		parameters->AddObject(iomodel->CopyConstantObject("md.friction.effective_pressure_limit",FrictionEffectivePressureLimitEnum));
-	}
-	if(frictionlaw==9){
-		parameters->AddObject(iomodel->CopyConstantObject("md.friction.gamma",FrictionGammaEnum));
-		parameters->AddObject(iomodel->CopyConstantObject("md.friction.effective_pressure_limit",FrictionEffectivePressureLimitEnum));
-		parameters->AddObject(new IntParam(FrictionCouplingEnum,0));
-	}
-
+	/*Friction*/
+	FrictionUpdateParameters(parameters, iomodel);
 }/*}}}*/
 
Index: /issm/trunk/src/c/analyses/analyses.h
===================================================================
--- /issm/trunk/src/c/analyses/analyses.h	(revision 28012)
+++ /issm/trunk/src/c/analyses/analyses.h	(revision 28013)
@@ -28,4 +28,5 @@
 #include "./LoveAnalysis.h"
 #include "./EsaAnalysis.h"
+#include "./HydrologyArmapwAnalysis.h"
 #include "./HydrologyDCEfficientAnalysis.h"
 #include "./HydrologyDCInefficientAnalysis.h"
Index: /issm/trunk/src/c/classes/Cfdragcoeffabsgrad.cpp
===================================================================
--- /issm/trunk/src/c/classes/Cfdragcoeffabsgrad.cpp	(revision 28012)
+++ /issm/trunk/src/c/classes/Cfdragcoeffabsgrad.cpp	(revision 28013)
@@ -29,9 +29,9 @@
 	this->definitionenum = -1;
 	this->name = NULL;
-	this->weights_enum = UNDEF;
-	this->timepassedflag = false;
-}
-/*}}}*/
-Cfdragcoeffabsgrad::Cfdragcoeffabsgrad(char* in_name, int in_definitionenum, int in_weights_enum, bool in_timepassedflag){/*{{{*/
+	this->J = 0.;
+	this->firsttimepassed = false;
+}
+/*}}}*/
+Cfdragcoeffabsgrad::Cfdragcoeffabsgrad(char* in_name, int in_definitionenum){/*{{{*/
 
 	this->definitionenum=in_definitionenum;
@@ -40,6 +40,28 @@
 	xMemCpy<char>(this->name,in_name,strlen(in_name)+1);
 
-	this->weights_enum=in_weights_enum;
-	this->timepassedflag=in_timepassedflag;
+	this->J = 0.;
+	this->firsttimepassed = false;
+}
+/*}}}*/
+Cfdragcoeffabsgrad::Cfdragcoeffabsgrad(char* in_name, int in_definitionenum, IssmDouble in_J){/*{{{*/
+
+	this->definitionenum=in_definitionenum;
+
+	this->name		= xNew<char>(strlen(in_name)+1);
+	xMemCpy<char>(this->name,in_name,strlen(in_name)+1);
+
+	this->J = in_J;
+	this->firsttimepassed = false;
+}
+/*}}}*/
+Cfdragcoeffabsgrad::Cfdragcoeffabsgrad(char* in_name, int in_definitionenum, IssmDouble in_J, bool in_firsttimepassed){/*{{{*/
+
+	this->definitionenum=in_definitionenum;
+
+	this->name		= xNew<char>(strlen(in_name)+1);
+	xMemCpy<char>(this->name,in_name,strlen(in_name)+1);
+
+	this->J = in_J;
+	this->firsttimepassed = in_firsttimepassed;
 }
 /*}}}*/
@@ -50,5 +72,5 @@
 /*Object virtual function resolutoin: */
 Object* Cfdragcoeffabsgrad::copy() {/*{{{*/
-	Cfdragcoeffabsgrad* mf = new Cfdragcoeffabsgrad(this->name,this->definitionenum, this->weights_enum,this->timepassedflag);
+	Cfdragcoeffabsgrad* mf = new Cfdragcoeffabsgrad(this->name,this->definitionenum, this->J, this->firsttimepassed);
 	return (Object*) mf;
 }
@@ -60,6 +82,4 @@
 void Cfdragcoeffabsgrad::Echo(void){/*{{{*/
 	_printf_(" Cfdragcoeffabsgrad: " << name << " " << this->definitionenum << "\n");
-	_printf_("    weights_enum: " << weights_enum << " " << EnumToStringx(weights_enum) << "\n");
-	_printf_("	  timepassedflag: "<<timepassedflag<<"\n");
 }
 /*}}}*/
@@ -76,6 +96,6 @@
 	marshallhandle->call(this->definitionenum);
 	marshallhandle->call(this->name);
-	marshallhandle->call(this->weights_enum);
-	marshallhandle->call(this->timepassedflag);
+	marshallhandle->call(this->J);
+	marshallhandle->call(this->firsttimepassed);
 } 
 /*}}}*/
@@ -99,22 +119,24 @@
 
 	/*recover parameters: */
-	IssmDouble J=0.;
+	IssmDouble J_part=0.;
 	IssmDouble J_sum=0.;
 
-	for(Object* & object : femmodel->elements->objects){
-		Element* element=xDynamicCast<Element*>(object);
-		J+=this->Cfdragcoeffabsgrad_Calculation(element,weights_enum);
-	}
-
-	ISSM_MPI_Allreduce ( (void*)&J,(void*)&J_sum,1,ISSM_MPI_DOUBLE,ISSM_MPI_SUM,IssmComm::GetComm());
-	ISSM_MPI_Bcast(&J_sum,1,ISSM_MPI_DOUBLE,0,IssmComm::GetComm());
-	J=J_sum;
-
-	timepassedflag = true;
-	return J;
+	if (!this->firsttimepassed){
+		for(Object* & object : femmodel->elements->objects){
+			Element* element=xDynamicCast<Element*>(object);
+			J_part+=this->Cfdragcoeffabsgrad_Calculation(element);
+		}
+
+		ISSM_MPI_Allreduce ( (void*)&J_part,(void*)&J_sum,1,ISSM_MPI_DOUBLE,ISSM_MPI_SUM,IssmComm::GetComm());
+		ISSM_MPI_Bcast(&J_sum,1,ISSM_MPI_DOUBLE,0,IssmComm::GetComm());
+		this->J=J_sum;
+
+		this->firsttimepassed = true;
+	}
+	return this->J;
 }/*}}}*/
-IssmDouble Cfdragcoeffabsgrad::Cfdragcoeffabsgrad_Calculation(Element* element, int weights_enum){/*{{{*/
-
-	int        domaintype,numcomponents;
+IssmDouble Cfdragcoeffabsgrad::Cfdragcoeffabsgrad_Calculation(Element* element){/*{{{*/
+
+	int        domaintype,numcomponents,frictionlaw;
 	IssmDouble Jelem=0.;
 	IssmDouble Jdet;
@@ -145,5 +167,19 @@
 	/*Get input if it already exists*/
 	DatasetInput *datasetinput = basalelement->GetDatasetInput(definitionenum);  _assert_(datasetinput);
-	Input        *drag_input   = basalelement->GetInput(FrictionCoefficientEnum); _assert_(drag_input);
+	Input        *drag_input   = NULL;
+
+	/* get the friction law: if 2-Weertman, 11-Schoof, 14-RegularizedCoulomb 15-RegularizedCoulomb2, which has a different names of C */
+	element->FindParam(&frictionlaw, FrictionLawEnum);
+	switch(frictionlaw) {
+		case 2:
+		case 11:
+		case 14:
+		case 15:
+			drag_input = basalelement->GetInput(FrictionCEnum); _assert_(drag_input);
+			break;
+		default:
+			drag_input = basalelement->GetInput(FrictionCoefficientEnum); _assert_(drag_input);
+	}
+
 
 	/* Start  looping on the number of gaussian points: */
Index: /issm/trunk/src/c/classes/Cfdragcoeffabsgrad.h
===================================================================
--- /issm/trunk/src/c/classes/Cfdragcoeffabsgrad.h	(revision 28012)
+++ /issm/trunk/src/c/classes/Cfdragcoeffabsgrad.h	(revision 28013)
@@ -17,26 +17,28 @@
 
 		int         definitionenum;
-		char*       name;
-		int         weights_enum;
-		bool			timepassedflag;
+		char       *name;
+		bool			firsttimepassed;
+		IssmDouble  J;
 
 		/*Cfdragcoeffabsgrad constructors, destructors :*/
 		Cfdragcoeffabsgrad();
-		Cfdragcoeffabsgrad(char* in_name, int in_definitionenum, int in_weights_enum,  bool timepassedflag);
+		Cfdragcoeffabsgrad(char* in_name, int in_definitionenum);
+		Cfdragcoeffabsgrad(char* in_name, int in_definitionenum, IssmDouble in_J);
+		Cfdragcoeffabsgrad(char* in_name, int in_definitionenum, IssmDouble in_J, bool in_firsttimepassed);
 		~Cfdragcoeffabsgrad();
 
 		/*Object virtual function resolutoin: */
-		Object* copy();
-		void DeepEcho(void);
-		void Echo(void);
-		int Id(void);
-		void Marshall(MarshallHandle* marshallhandle);
-		int ObjectEnum(void);
+		Object *copy();
+		void    DeepEcho(void);
+		void    Echo(void);
+		int     Id(void);
+		void    Marshall(MarshallHandle  *marshallhandle);
+		int     ObjectEnum(void);
 
 		/*Definition virtual function resolutoin: */
-		int DefinitionEnum();
-		char* Name();
-		IssmDouble Response(FemModel* femmodel);
-		IssmDouble Cfdragcoeffabsgrad_Calculation(Element* element, int weights_enum);
+		int         DefinitionEnum();
+		char       *Name();
+		IssmDouble  Response(FemModel                       *femmodel);
+		IssmDouble  Cfdragcoeffabsgrad_Calculation(Element  *element);
 };
 #endif  /* _CFDRAGCOEFFABSGRAD_H_ */
Index: /issm/trunk/src/c/classes/Cfdragcoeffabsgradtransient.cpp
===================================================================
--- /issm/trunk/src/c/classes/Cfdragcoeffabsgradtransient.cpp	(revision 28013)
+++ /issm/trunk/src/c/classes/Cfdragcoeffabsgradtransient.cpp	(revision 28013)
@@ -0,0 +1,243 @@
+/*!\file Cfdragcoeffabsgradtransient.cpp
+ * \brief: Cfdragcoeffabsgradtransient Object
+ */
+
+/*Headers:*/
+/*{{{*/
+#ifdef HAVE_CONFIG_H
+   #include <config.h>
+#else
+#error "Cannot compile with HAVE_CONFIG_H symbol! run configure first!"
+#endif
+
+#include "./classes.h"
+#include "./ExternalResults/ExternalResult.h"
+#include "./ExternalResults/Results.h"
+#include "../datastructures/datastructures.h"
+#include "./Elements/Element.h"
+#include "./Elements/Elements.h"
+#include "./FemModel.h"
+#include "../modules/SurfaceAreax/SurfaceAreax.h"
+#include "../classes/Params/Parameters.h"
+#include "../classes/gauss/Gauss.h"
+#include "./Inputs/DatasetInput.h"
+/*}}}*/
+
+/*Cfdragcoeffabsgradtransient constructors, destructors :*/
+Cfdragcoeffabsgradtransient::Cfdragcoeffabsgradtransient(){/*{{{*/
+
+	this->definitionenum = -1;
+	this->name = NULL;
+	this->datatimes         = NULL;
+	this->passedflags   = NULL;
+	this->J = 0.;
+}
+/*}}}*/
+Cfdragcoeffabsgradtransient::Cfdragcoeffabsgradtransient(char* in_name, int in_definitionenum, int in_num_datatimes, IssmDouble* in_datatimes, bool* in_passedflags, IssmDouble in_J){/*{{{*/
+
+	this->definitionenum=in_definitionenum;
+
+	this->name		= xNew<char>(strlen(in_name)+1);
+	xMemCpy<char>(this->name,in_name,strlen(in_name)+1);
+	
+	this->num_datatimes = in_num_datatimes;
+
+	/*Allocate arrays*/
+	_assert_(this->num_datatimes>0);
+	this->datatimes   = xNew<IssmDouble>(this->num_datatimes);
+	this->passedflags = xNew<bool>(this->num_datatimes);
+	xMemCpy<IssmDouble>(this->datatimes,in_datatimes,this->num_datatimes);
+	xMemCpy<bool>(this->passedflags,in_passedflags,this->num_datatimes);
+
+	this->J = in_J;
+}
+/*}}}*/
+Cfdragcoeffabsgradtransient::Cfdragcoeffabsgradtransient(char* in_name, int in_definitionenum, int in_num_datatimes, IssmDouble* in_datatimes){/*{{{*/
+
+	this->definitionenum=in_definitionenum;
+
+	this->name		= xNew<char>(strlen(in_name)+1);
+	xMemCpy<char>(this->name,in_name,strlen(in_name)+1);
+
+	this->num_datatimes = in_num_datatimes;
+
+	/*Allocate arrays*/
+	_assert_(this->num_datatimes>0);
+	this->datatimes   = xNew<IssmDouble>(this->num_datatimes);
+	this->passedflags = xNew<bool>(this->num_datatimes);
+	xMemCpy<IssmDouble>(this->datatimes,in_datatimes,this->num_datatimes);
+
+	/*initialize passedtimes to false*/
+	for(int i=0;i<this->num_datatimes;i++) this->passedflags[i]= false;
+	this->J = 0;
+}
+/*}}}*/
+Cfdragcoeffabsgradtransient::~Cfdragcoeffabsgradtransient(){/*{{{*/
+	if(this->name)xDelete(this->name);
+	if(this->datatimes) xDelete(this->datatimes);
+	if(this->passedflags) xDelete(this->passedflags);
+}
+/*}}}*/
+/*Object virtual function resolutoin: */
+Object* Cfdragcoeffabsgradtransient::copy() {/*{{{*/
+	Cfdragcoeffabsgradtransient* mf = new Cfdragcoeffabsgradtransient(this->name,this->definitionenum, this->num_datatimes, this->datatimes,this->passedflags, this->J);
+	return (Object*) mf;
+}
+/*}}}*/
+void Cfdragcoeffabsgradtransient::DeepEcho(void){/*{{{*/
+	this->Echo();
+}
+/*}}}*/
+void Cfdragcoeffabsgradtransient::Echo(void){/*{{{*/
+	_printf_(" Cfdragcoeffabsgradtransient: " << name << " " << this->definitionenum << "\n");
+	_error_("not implemented yet");
+}
+/*}}}*/
+int Cfdragcoeffabsgradtransient::Id(void){/*{{{*/
+	return -1;
+}
+/*}}}*/
+void Cfdragcoeffabsgradtransient::Marshall(MarshallHandle* marshallhandle){/*{{{*/
+
+	/*ok, marshall operations: */
+   int object_enum=CfdragcoeffabsgradtransientEnum;
+   marshallhandle->call(object_enum);
+
+	marshallhandle->call(this->definitionenum);
+	marshallhandle->call(this->name);
+	marshallhandle->call(this->num_datatimes);
+   marshallhandle->call(this->datatimes,this->num_datatimes);
+   marshallhandle->call(this->passedflags,this->num_datatimes);
+   marshallhandle->call(this->J);
+} 
+/*}}}*/
+int Cfdragcoeffabsgradtransient::ObjectEnum(void){/*{{{*/
+	return CfdragcoeffabsgradtransientEnum;
+}
+/*}}}*/
+/*Definition virtual function resolutoin: */
+int Cfdragcoeffabsgradtransient::DefinitionEnum(){/*{{{*/
+	return this->definitionenum;
+}
+/*}}}*/
+char* Cfdragcoeffabsgradtransient::Name(){/*{{{*/
+	char* name2=xNew<char>(strlen(this->name)+1);
+	xMemCpy(name2,this->name,strlen(this->name)+1);
+
+	return name2;
+}
+/*}}}*/
+IssmDouble Cfdragcoeffabsgradtransient::Response(FemModel* femmodel){/*{{{*/
+
+	/*recover time parameters: */
+	IssmDouble time;
+	femmodel->parameters->FindParam(&time,TimeEnum);
+	
+	/*Find closest datatime that is less than time*/
+	int pos=-1;
+	for(int i=0;i<this->num_datatimes;i++){
+		if(this->datatimes[i]<=time){
+			pos = i;
+		}
+		else{
+			break;
+		}
+	}
+
+	/*if pos=-1, time is earlier than the first data observation in this dataset*/
+	if(pos==-1){
+		_assert_(this->J==0.);
+		return 0.;
+	}
+
+	/*Check that we have not yet calculated this cost function*/
+	if(this->passedflags[pos]){
+		return this->J;
+	}
+	
+	/*Calculate cost function for this time slice*/
+	IssmDouble J_part=0.;
+	IssmDouble J_sum=0.;
+
+	for(Object* & object : femmodel->elements->objects){
+		Element* element=xDynamicCast<Element*>(object);
+		J_part+=this->Cfdragcoeffabsgradtransient_Calculation(element);
+	}
+
+	ISSM_MPI_Allreduce ( (void*)&J_part,(void*)&J_sum,1,ISSM_MPI_DOUBLE,ISSM_MPI_SUM,IssmComm::GetComm());
+	ISSM_MPI_Bcast(&J_sum,1,ISSM_MPI_DOUBLE,0,IssmComm::GetComm());
+
+	this->passedflags[pos]= true;
+	this->J += J_sum;
+	
+	return this->J;
+}/*}}}*/
+IssmDouble Cfdragcoeffabsgradtransient::Cfdragcoeffabsgradtransient_Calculation(Element* element){/*{{{*/
+
+	int        domaintype,numcomponents,frictionlaw;
+	IssmDouble Jelem=0.;
+	IssmDouble Jdet;
+	IssmDouble dp[2],weight;
+	IssmDouble* xyz_list = NULL;
+
+	/*Get basal element*/
+	if(!element->IsOnBase()) return 0.;
+
+	/*If on water, return 0: */
+	if(!element->IsIceInElement()) return 0.;
+
+	/*Get problem dimension*/
+	element->FindParam(&domaintype,DomainTypeEnum);
+	switch(domaintype){
+		case Domain2DverticalEnum:   numcomponents   = 1; break;
+		case Domain3DEnum:           numcomponents   = 2; break;
+		case Domain2DhorizontalEnum: numcomponents   = 2; break;
+		default: _error_("not supported yet");
+	}
+
+	/*Spawn surface element*/
+	Element* basalelement = element->SpawnBasalElement();
+
+	/* Get node coordinates*/
+	basalelement->GetVerticesCoordinates(&xyz_list);
+
+	/*Get input if it already exists*/
+	DatasetInput *datasetinput = basalelement->GetDatasetInput(definitionenum);  _assert_(datasetinput);
+	Input        *drag_input   = NULL;
+
+	/* get the friction law: if 2-Weertman, 11-Schoof, 14-RegularizedCoulomb 15-RegularizedCoulomb2, which has a different names of C */
+	element->FindParam(&frictionlaw, FrictionLawEnum);
+	switch(frictionlaw) {
+		case 2:
+		case 11:
+		case 14:
+		case 15:
+			drag_input = basalelement->GetInput(FrictionCEnum); _assert_(drag_input);
+			break;
+		default:
+			drag_input = basalelement->GetInput(FrictionCoefficientEnum); _assert_(drag_input);
+	}
+
+
+	/* Start  looping on the number of gaussian points: */
+	Gauss* gauss=basalelement->NewGauss(2);
+	while(gauss->next()){
+
+		/* Get Jacobian determinant: */
+		basalelement->JacobianDeterminant(&Jdet,xyz_list,gauss);
+
+		/*Get all parameters at gaussian point*/
+		datasetinput->GetInputValue(&weight,gauss,WeightsSurfaceObservationEnum);
+		drag_input->GetInputDerivativeValue(&dp[0],xyz_list,gauss);
+
+		/*Add to cost function*/
+		Jelem+=weight*.5*dp[0]*dp[0]*Jdet*gauss->weight;
+		if(numcomponents==2) Jelem+=weight*.5*dp[1]*dp[1]*Jdet*gauss->weight;
+	}
+
+	/*clean up and Return: */
+	if(basalelement->IsSpawnedElement()){basalelement->DeleteMaterials(); delete basalelement;};
+	xDelete<IssmDouble>(xyz_list);
+	delete gauss;
+	return Jelem;
+}/*}}}*/
Index: /issm/trunk/src/c/classes/Cfdragcoeffabsgradtransient.h
===================================================================
--- /issm/trunk/src/c/classes/Cfdragcoeffabsgradtransient.h	(revision 28013)
+++ /issm/trunk/src/c/classes/Cfdragcoeffabsgradtransient.h	(revision 28013)
@@ -0,0 +1,45 @@
+/*!\file Cfdragcoeffabsgradtransient.h
+ * \brief: header file for Cfdragcoeffabsgradtransient object
+ */
+
+#ifndef _CFDRAGCOEFFABSGRADTRANSIENT_H_
+#define _CFDRAGCOEFFABSGRADTRANSIENT_H_
+
+/*Headers:*/
+#include "./Definition.h"
+class FemModel;
+
+IssmDouble OutputDefinitionsResponsex(FemModel* femmodel,int output_enum);
+
+class Cfdragcoeffabsgradtransient: public Object, public Definition{
+
+	public: 
+
+		int         definitionenum;
+		char       *name;
+		int         num_datatimes;
+		IssmDouble *datatimes;
+		bool       *passedflags;
+		IssmDouble  J;
+
+		/*Cfdragcoeffabsgradtransient constructors, destructors :*/
+		Cfdragcoeffabsgradtransient();
+		Cfdragcoeffabsgradtransient(char* in_name, int in_definitionenum, int num_datatimes, IssmDouble* in_datatime);
+		Cfdragcoeffabsgradtransient(char* in_name, int in_definitionenum, int num_datatimes, IssmDouble* in_datatime, bool* in_timepassedflag, IssmDouble in_J);
+		~Cfdragcoeffabsgradtransient();
+
+		/*Object virtual function resolutoin: */
+		Object *copy();
+		void    DeepEcho(void);
+		void    Echo(void);
+		int     Id(void);
+		void    Marshall(MarshallHandle  *marshallhandle);
+		int     ObjectEnum(void);
+
+		/*Definition virtual function resolutoin: */
+		int         DefinitionEnum();
+		char       *Name();
+		IssmDouble  Response(FemModel                       *femmodel);
+		IssmDouble  Cfdragcoeffabsgradtransient_Calculation(Element  *element);
+};
+#endif  /* _CFDRAGCOEFFABSGRADTRANSIENT_H_ */
Index: /issm/trunk/src/c/classes/Cflevelsetmisfit.cpp
===================================================================
--- /issm/trunk/src/c/classes/Cflevelsetmisfit.cpp	(revision 28012)
+++ /issm/trunk/src/c/classes/Cflevelsetmisfit.cpp	(revision 28013)
@@ -30,11 +30,10 @@
 	this->name = NULL;
 	this->model_enum = UNDEF;
-	this->observation_enum = UNDEF;
-	this->weights_enum = UNDEF;
 	this->datatime=0.;
 	this->timepassedflag = false;
-}
-/*}}}*/
-Cflevelsetmisfit::Cflevelsetmisfit(char* in_name, int in_definitionenum, int in_model_enum, int in_observation_enum, int in_weights_enum, IssmDouble in_datatime, bool in_timepassedflag){/*{{{*/
+	this->J = 0.;
+}
+/*}}}*/
+Cflevelsetmisfit::Cflevelsetmisfit(char* in_name, int in_definitionenum, int in_model_enum, IssmDouble in_datatime){/*{{{*/
 
 	this->definitionenum=in_definitionenum;
@@ -44,8 +43,20 @@
 
 	this->model_enum=in_model_enum;
-	this->observation_enum=in_observation_enum;
-	this->weights_enum=in_weights_enum;
+	this->datatime=in_datatime;
+	this->timepassedflag=false;
+	this->J = 0.;
+}
+/*}}}*/
+Cflevelsetmisfit::Cflevelsetmisfit(char* in_name, int in_definitionenum, int in_model_enum, IssmDouble in_datatime, bool in_timepassedflag, IssmDouble in_J){/*{{{*/
+
+	this->definitionenum=in_definitionenum;
+
+	this->name		= xNew<char>(strlen(in_name)+1);
+	xMemCpy<char>(this->name,in_name,strlen(in_name)+1);
+
+	this->model_enum=in_model_enum;
 	this->datatime=in_datatime;
 	this->timepassedflag=in_timepassedflag;
+	this->J = in_J;
 }
 /*}}}*/
@@ -56,5 +67,5 @@
 /*Object virtual function resolutoin: */
 Object* Cflevelsetmisfit::copy() {/*{{{*/
-	Cflevelsetmisfit* mf = new Cflevelsetmisfit(this->name,this->definitionenum, this->model_enum,this->observation_enum,this->weights_enum,this->datatime,this->timepassedflag);
+	Cflevelsetmisfit* mf = new Cflevelsetmisfit(this->name,this->definitionenum, this->model_enum,this->datatime,this->timepassedflag, this->J);
 	return (Object*) mf;
 }
@@ -67,6 +78,4 @@
 	_printf_(" Cflevelsetmisfit: " << name << " " << this->definitionenum << "\n");
 	_printf_("    model_enum: " << model_enum << " " << EnumToStringx(model_enum) << "\n");
-	_printf_("    observation_enum: " << observation_enum << " " << EnumToStringx(observation_enum) << "\n");
-	_printf_("    weights_enum: " << weights_enum << " " << EnumToStringx(weights_enum) << "\n");
 	_printf_("    datatime: " << datatime << "\n");
 	_printf_("	  timepassedflag: "<<timepassedflag<<"\n");
@@ -86,8 +95,7 @@
 	marshallhandle->call(this->model_enum);
 	marshallhandle->call(this->name);
-	marshallhandle->call(this->observation_enum);
-	marshallhandle->call(this->weights_enum);
 	marshallhandle->call(this->datatime);
 	marshallhandle->call(this->timepassedflag);
+	marshallhandle->call(this->J);
 } 
 /*}}}*/
@@ -114,25 +122,24 @@
 	 /*recover time parameters: */
 	 femmodel->parameters->FindParam(&time,TimeEnum);
-
-	 IssmDouble J=0.;
-	 IssmDouble J_sum=0.;
-
 	 if(datatime<=time && !timepassedflag){
+
+		 IssmDouble J_part = 0.;
+		 IssmDouble J_sum  = 0.;
+
 		 for(Object* & object : femmodel->elements->objects){
 			 Element* element=xDynamicCast<Element*>(object);
-			 J+=this->Cflevelsetmisfit_Calculation(element,model_enum,observation_enum,weights_enum);
+			 J_part+=this->Cflevelsetmisfit_Calculation(element,model_enum);
 		 }
 
-		 ISSM_MPI_Allreduce ( (void*)&J,(void*)&J_sum,1,ISSM_MPI_DOUBLE,ISSM_MPI_SUM,IssmComm::GetComm());
+		 ISSM_MPI_Allreduce ( (void*)&J_part,(void*)&J_sum,1,ISSM_MPI_DOUBLE,ISSM_MPI_SUM,IssmComm::GetComm());
 		 ISSM_MPI_Bcast(&J_sum,1,ISSM_MPI_DOUBLE,0,IssmComm::GetComm());
-		 J=J_sum;
-
-		 timepassedflag = true;
-		 return J;
+
+		 this->timepassedflag = true;
+		 this->J = J_sum;
 	 }
-	 else return J;
- }
-	/*}}}*/
-IssmDouble Cflevelsetmisfit::Cflevelsetmisfit_Calculation(Element* element, int model_enum, int observation_enum, int weights_enum){/*{{{*/
+
+	 return this->J;
+ }/*}}}*/
+IssmDouble Cflevelsetmisfit::Cflevelsetmisfit_Calculation(Element* element, int model_enum){/*{{{*/
 
 	int        domaintype,numcomponents;
Index: /issm/trunk/src/c/classes/Cflevelsetmisfit.h
===================================================================
--- /issm/trunk/src/c/classes/Cflevelsetmisfit.h	(revision 28012)
+++ /issm/trunk/src/c/classes/Cflevelsetmisfit.h	(revision 28013)
@@ -18,13 +18,13 @@
 		int         definitionenum;
 		int         model_enum;
-		char*       name;
-		int         observation_enum;
-		int         weights_enum;
-		IssmDouble	datatime;
-		bool			timepassedflag;
+		char       *name;
+		IssmDouble  datatime;
+		bool        timepassedflag;
+		IssmDouble  J;
 
 		/*Cflevelsetmisfit constructors, destructors :*/
 		Cflevelsetmisfit();
-		Cflevelsetmisfit(char* in_name, int in_definitionenum, int in_model_enum, int in_observation_enum, int in_weights_enum, IssmDouble in_datatime, bool timepassedflag);
+		Cflevelsetmisfit(char* in_name, int in_definitionenum, int in_model_enum, IssmDouble in_datatime);
+		Cflevelsetmisfit(char* in_name, int in_definitionenum, int in_model_enum, IssmDouble in_datatime, bool timepassedflag, IssmDouble in_J);
 		~Cflevelsetmisfit();
 
@@ -41,5 +41,5 @@
 		char* Name();
 		IssmDouble Response(FemModel* femmodel);
-		IssmDouble Cflevelsetmisfit_Calculation(Element* element, int model_enum, int observation_enum, int weights_enum);
+		IssmDouble Cflevelsetmisfit_Calculation(Element* element, int model_enum);
 		IssmDouble Heaviside(IssmDouble x);
 };
Index: /issm/trunk/src/c/classes/Cfrheologybbarabsgrad.cpp
===================================================================
--- /issm/trunk/src/c/classes/Cfrheologybbarabsgrad.cpp	(revision 28013)
+++ /issm/trunk/src/c/classes/Cfrheologybbarabsgrad.cpp	(revision 28013)
@@ -0,0 +1,192 @@
+/*!\file Cfrheologybbarabsgrad.cpp
+ * \brief: Cfrheologybbarabsgrad Object
+ */
+
+/*Headers:*/
+/*{{{*/
+#ifdef HAVE_CONFIG_H
+   #include <config.h>
+#else
+#error "Cannot compile with HAVE_CONFIG_H symbol! run configure first!"
+#endif
+
+#include "./classes.h"
+#include "./ExternalResults/ExternalResult.h"
+#include "./ExternalResults/Results.h"
+#include "../datastructures/datastructures.h"
+#include "./Elements/Element.h"
+#include "./Elements/Elements.h"
+#include "./FemModel.h"
+#include "../modules/SurfaceAreax/SurfaceAreax.h"
+#include "../classes/Params/Parameters.h"
+#include "../classes/gauss/Gauss.h"
+#include "./Inputs/DatasetInput.h"
+/*}}}*/
+
+/*Cfrheologybbarabsgrad constructors, destructors :*/
+Cfrheologybbarabsgrad::Cfrheologybbarabsgrad(){/*{{{*/
+
+	this->definitionenum = -1;
+	this->name = NULL;
+	this->J = 0.;
+	this->firsttimepassed = false;
+}
+/*}}}*/
+Cfrheologybbarabsgrad::Cfrheologybbarabsgrad(char* in_name, int in_definitionenum){/*{{{*/
+
+	this->definitionenum=in_definitionenum;
+
+	this->name		= xNew<char>(strlen(in_name)+1);
+	xMemCpy<char>(this->name,in_name,strlen(in_name)+1);
+
+	this->J=0;
+	this->firsttimepassed = false;
+}
+/*}}}*/
+Cfrheologybbarabsgrad::Cfrheologybbarabsgrad(char* in_name, int in_definitionenum, IssmDouble in_J){/*{{{*/
+
+	this->definitionenum=in_definitionenum;
+
+	this->name		= xNew<char>(strlen(in_name)+1);
+	xMemCpy<char>(this->name,in_name,strlen(in_name)+1);
+
+	this->J=in_J;
+	this->firsttimepassed = false;
+}
+/*}}}*/
+Cfrheologybbarabsgrad::Cfrheologybbarabsgrad(char* in_name, int in_definitionenum, IssmDouble in_J, bool in_firsttimepassed){/*{{{*/
+
+	this->definitionenum=in_definitionenum;
+
+	this->name		= xNew<char>(strlen(in_name)+1);
+	xMemCpy<char>(this->name,in_name,strlen(in_name)+1);
+
+	this->J=in_J;
+	this->firsttimepassed = in_firsttimepassed;
+}
+/*}}}*/
+Cfrheologybbarabsgrad::~Cfrheologybbarabsgrad(){/*{{{*/
+	if(this->name)xDelete(this->name);
+}
+/*}}}*/
+/*Object virtual function resolutoin: */
+Object* Cfrheologybbarabsgrad::copy() {/*{{{*/
+	Cfrheologybbarabsgrad* mf = new Cfrheologybbarabsgrad(this->name,this->definitionenum, this->J, this->firsttimepassed);
+	return (Object*) mf;
+}
+/*}}}*/
+void Cfrheologybbarabsgrad::DeepEcho(void){/*{{{*/
+	this->Echo();
+}
+/*}}}*/
+void Cfrheologybbarabsgrad::Echo(void){/*{{{*/
+	_printf_(" Cfrheologybbarabsgrad: " << name << " " << this->definitionenum << "\n");
+}
+/*}}}*/
+int Cfrheologybbarabsgrad::Id(void){/*{{{*/
+	return -1;
+}
+/*}}}*/
+void Cfrheologybbarabsgrad::Marshall(MarshallHandle* marshallhandle){/*{{{*/
+	
+	/*ok, marshall operations: */
+	int object_enum=CfrheologybbarabsgradEnum;
+	marshallhandle->call(object_enum);
+
+	marshallhandle->call(this->definitionenum);
+	marshallhandle->call(this->name);
+	marshallhandle->call(this->J);
+	marshallhandle->call(this->firsttimepassed);
+} 
+/*}}}*/
+int Cfrheologybbarabsgrad::ObjectEnum(void){/*{{{*/
+	return CfrheologybbarabsgradEnum;
+}
+/*}}}*/
+/*Definition virtual function resolutoin: */
+int Cfrheologybbarabsgrad::DefinitionEnum(){/*{{{*/
+	return this->definitionenum;
+}
+/*}}}*/
+char* Cfrheologybbarabsgrad::Name(){/*{{{*/
+	char* name2=xNew<char>(strlen(this->name)+1);
+	xMemCpy(name2,this->name,strlen(this->name)+1);
+
+	return name2;
+}
+/*}}}*/
+IssmDouble Cfrheologybbarabsgrad::Response(FemModel* femmodel){/*{{{*/
+
+	/*recover parameters: */
+	IssmDouble J_part=0.;
+	IssmDouble J_sum=0.;
+
+	if (!this->firsttimepassed){
+		for(Object* & object : femmodel->elements->objects){
+			Element* element=xDynamicCast<Element*>(object);
+			J_part+=this->Cfrheologybbarabsgrad_Calculation(element);
+		}
+
+		ISSM_MPI_Allreduce( (void*)&J_part,(void*)&J_sum,1,ISSM_MPI_DOUBLE,ISSM_MPI_SUM,IssmComm::GetComm());
+		ISSM_MPI_Bcast(&J_sum,1,ISSM_MPI_DOUBLE,0,IssmComm::GetComm());
+		this->J = J_sum;
+
+		this->firsttimepassed = true;
+	}
+	return this->J;
+}/*}}}*/
+IssmDouble Cfrheologybbarabsgrad::Cfrheologybbarabsgrad_Calculation(Element* element){/*{{{*/
+
+	int        domaintype,numcomponents;
+	IssmDouble Jelem=0.;
+	IssmDouble Jdet;
+	IssmDouble dp[2],weight;
+	IssmDouble* xyz_list = NULL;
+
+	/*Get basal element*/
+	if(!element->IsOnBase()) return 0.;
+
+	/*If on water, return 0: */
+	if(!element->IsIceInElement()) return 0.;
+
+	/*Get problem dimension*/
+	element->FindParam(&domaintype,DomainTypeEnum);
+	switch(domaintype){
+		case Domain2DverticalEnum:   numcomponents   = 1; break;
+		case Domain3DEnum:           numcomponents   = 2; break;
+		case Domain2DhorizontalEnum: numcomponents   = 2; break;
+		default: _error_("not supported yet");
+	}
+
+	/*Spawn surface element*/
+	Element* basalelement = element->SpawnBasalElement();
+
+	/* Get node coordinates*/
+	basalelement->GetVerticesCoordinates(&xyz_list);
+
+	/*Get input if it already exists*/
+	DatasetInput *datasetinput = basalelement->GetDatasetInput(definitionenum);  _assert_(datasetinput);
+	Input* rheologyb_input=basalelement->GetInput(MaterialsRheologyBbarEnum);                  _assert_(rheologyb_input);
+
+	/* Start  looping on the number of gaussian points: */
+	Gauss* gauss=basalelement->NewGauss(2);
+	while(gauss->next()){
+
+		/* Get Jacobian determinant: */
+		basalelement->JacobianDeterminant(&Jdet,xyz_list,gauss);
+
+		/*Get all parameters at gaussian point*/
+		datasetinput->GetInputValue(&weight,gauss,WeightsSurfaceObservationEnum);
+		rheologyb_input->GetInputDerivativeValue(&dp[0],xyz_list,gauss);
+
+		/*Add to cost function*/
+		Jelem+=weight*.5*dp[0]*dp[0]*Jdet*gauss->weight;
+		if(numcomponents==2) Jelem+=weight*.5*dp[1]*dp[1]*Jdet*gauss->weight;
+	}
+
+	/*clean up and Return: */
+	if(basalelement->IsSpawnedElement()){basalelement->DeleteMaterials(); delete basalelement;};
+	xDelete<IssmDouble>(xyz_list);
+	delete gauss;
+	return Jelem;
+}/*}}}*/
Index: /issm/trunk/src/c/classes/Cfrheologybbarabsgrad.h
===================================================================
--- /issm/trunk/src/c/classes/Cfrheologybbarabsgrad.h	(revision 28013)
+++ /issm/trunk/src/c/classes/Cfrheologybbarabsgrad.h	(revision 28013)
@@ -0,0 +1,45 @@
+/*!\file Cfrheologybbarabsgrad.h
+ * \brief: header file for Cfrheologybbarabsgrad object
+ */
+
+#ifndef _CFRHEOLOGYBBARABSGRAD_H_
+#define _CFRHEOLOGYBBARABSGRAD_H_
+
+/*Headers:*/
+#include "./Definition.h"
+#include "./FemModel.h"
+
+IssmDouble OutputDefinitionsResponsex(FemModel* femmodel,int output_enum);
+void  GetVectorFromInputsx( IssmDouble** pvector, int* pvector_size, FemModel* femmodel,int name);
+
+class Cfrheologybbarabsgrad: public Object, public Definition{
+
+	public: 
+
+		int         definitionenum;
+		char*       name;
+		bool			firsttimepassed;
+		IssmDouble  J;
+
+		/*Cfrheologybbarabsgrad constructors, destructors :*/
+		Cfrheologybbarabsgrad();
+		Cfrheologybbarabsgrad(char* in_name, int in_definitionenum);
+		Cfrheologybbarabsgrad(char* in_name, int in_definitionenum, IssmDouble in_J);
+		Cfrheologybbarabsgrad(char* in_name, int in_definitionenum, IssmDouble in_J, bool in_firsttimepassed);
+		~Cfrheologybbarabsgrad();
+
+		/*Object virtual function resolutoin: */
+		Object* copy();
+		void DeepEcho(void);
+		void Echo(void);
+		int Id(void);
+		void Marshall(MarshallHandle* marshallhandle);
+		int ObjectEnum(void);
+
+		/*Definition virtual function resolutoin: */
+		int DefinitionEnum();
+		char* Name();
+		IssmDouble Response(FemModel* femmodel);
+		IssmDouble Cfrheologybbarabsgrad_Calculation(Element* element);
+};
+#endif  /* _CFRHEOLOGYBBARABSGRAD_H_ */
Index: /issm/trunk/src/c/classes/Cfrheologybbarabsgradtransient.cpp
===================================================================
--- /issm/trunk/src/c/classes/Cfrheologybbarabsgradtransient.cpp	(revision 28013)
+++ /issm/trunk/src/c/classes/Cfrheologybbarabsgradtransient.cpp	(revision 28013)
@@ -0,0 +1,227 @@
+/*!\file Cfrheologybbarabsgradtransient.cpp
+ * \brief: Cfrheologybbarabsgradtransient Object
+ */
+
+/*Headers:*/
+/*{{{*/
+#ifdef HAVE_CONFIG_H
+   #include <config.h>
+#else
+#error "Cannot compile with HAVE_CONFIG_H symbol! run configure first!"
+#endif
+
+#include "./classes.h"
+#include "./ExternalResults/ExternalResult.h"
+#include "./ExternalResults/Results.h"
+#include "../datastructures/datastructures.h"
+#include "./Elements/Element.h"
+#include "./Elements/Elements.h"
+#include "./FemModel.h"
+#include "../modules/SurfaceAreax/SurfaceAreax.h"
+#include "../classes/Params/Parameters.h"
+#include "../classes/gauss/Gauss.h"
+#include "./Inputs/DatasetInput.h"
+/*}}}*/
+
+/*Cfrheologybbarabsgradtransient constructors, destructors :*/
+Cfrheologybbarabsgradtransient::Cfrheologybbarabsgradtransient(){/*{{{*/
+
+	this->definitionenum = -1;
+	this->name = NULL;
+	this->datatimes         = NULL;
+	this->passedflags   = NULL;
+	this->J = 0.;
+}
+/*}}}*/
+Cfrheologybbarabsgradtransient::Cfrheologybbarabsgradtransient(char* in_name, int in_definitionenum, int in_num_datatimes, IssmDouble* in_datatimes, bool* in_passedflags, IssmDouble in_J){/*{{{*/
+
+	this->definitionenum=in_definitionenum;
+
+	this->name		= xNew<char>(strlen(in_name)+1);
+	xMemCpy<char>(this->name,in_name,strlen(in_name)+1);
+
+	this->num_datatimes = in_num_datatimes;
+
+	/*Allocate arrays*/
+	_assert_(this->num_datatimes>0);
+	this->datatimes   = xNew<IssmDouble>(this->num_datatimes);
+	this->passedflags = xNew<bool>(this->num_datatimes);
+	xMemCpy<IssmDouble>(this->datatimes,in_datatimes,this->num_datatimes);
+	xMemCpy<bool>(this->passedflags,in_passedflags,this->num_datatimes);
+
+	this->J = in_J;
+}
+/*}}}*/
+Cfrheologybbarabsgradtransient::Cfrheologybbarabsgradtransient(char* in_name, int in_definitionenum, int in_num_datatimes, IssmDouble* in_datatimes){/*{{{*/
+
+	this->definitionenum=in_definitionenum;
+
+	this->name		= xNew<char>(strlen(in_name)+1);
+	xMemCpy<char>(this->name,in_name,strlen(in_name)+1);
+
+	this->num_datatimes = in_num_datatimes;
+
+	/*Allocate arrays*/
+	_assert_(this->num_datatimes>0);
+	this->datatimes   = xNew<IssmDouble>(this->num_datatimes);
+	this->passedflags = xNew<bool>(this->num_datatimes);
+	xMemCpy<IssmDouble>(this->datatimes,in_datatimes,this->num_datatimes);
+
+	/*initialize passedtimes to false*/
+	for(int i=0;i<this->num_datatimes;i++) this->passedflags[i]= false;
+	this->J = 0;
+}
+/*}}}*/
+Cfrheologybbarabsgradtransient::~Cfrheologybbarabsgradtransient(){/*{{{*/
+	if(this->name)xDelete(this->name);
+}
+/*}}}*/
+/*Object virtual function resolutoin: */
+Object* Cfrheologybbarabsgradtransient::copy() {/*{{{*/
+	Cfrheologybbarabsgradtransient* mf = new Cfrheologybbarabsgradtransient(this->name,this->definitionenum, this->num_datatimes, this->datatimes, this->passedflags, this->J);
+	return (Object*) mf;
+}
+/*}}}*/
+void Cfrheologybbarabsgradtransient::DeepEcho(void){/*{{{*/
+	this->Echo();
+}
+/*}}}*/
+void Cfrheologybbarabsgradtransient::Echo(void){/*{{{*/
+	_printf_(" Cfrheologybbarabsgradtransient: " << name << " " << this->definitionenum << "\n");
+	_error_("not implemented yet");
+}
+/*}}}*/
+int Cfrheologybbarabsgradtransient::Id(void){/*{{{*/
+	return -1;
+}
+/*}}}*/
+void Cfrheologybbarabsgradtransient::Marshall(MarshallHandle* marshallhandle){/*{{{*/
+	
+	/*ok, marshall operations: */
+	int object_enum=CfrheologybbarabsgradtransientEnum;
+	marshallhandle->call(object_enum);
+
+	marshallhandle->call(this->definitionenum);
+	marshallhandle->call(this->name);
+	marshallhandle->call(this->num_datatimes);
+   marshallhandle->call(this->datatimes,this->num_datatimes);
+   marshallhandle->call(this->passedflags,this->num_datatimes);
+   marshallhandle->call(this->J);
+} 
+/*}}}*/
+int Cfrheologybbarabsgradtransient::ObjectEnum(void){/*{{{*/
+	return CfrheologybbarabsgradtransientEnum;
+}
+/*}}}*/
+/*Definition virtual function resolutoin: */
+int Cfrheologybbarabsgradtransient::DefinitionEnum(){/*{{{*/
+	return this->definitionenum;
+}
+/*}}}*/
+char* Cfrheologybbarabsgradtransient::Name(){/*{{{*/
+	char* name2=xNew<char>(strlen(this->name)+1);
+	xMemCpy(name2,this->name,strlen(this->name)+1);
+
+	return name2;
+}
+/*}}}*/
+IssmDouble Cfrheologybbarabsgradtransient::Response(FemModel* femmodel){/*{{{*/
+
+	/*recover time parameters: */
+	IssmDouble time;
+	femmodel->parameters->FindParam(&time,TimeEnum);
+	
+	/*Find closest datatime that is less than time*/
+	int pos=-1;
+	for(int i=0;i<this->num_datatimes;i++){
+		if(this->datatimes[i]<=time){
+			pos = i;
+		}
+		else{
+			break;
+		}
+	}
+
+	/*if pos=-1, time is earlier than the first data observation in this dataset*/
+	if(pos==-1){
+		_assert_(this->J==0.);
+		return 0.;
+	}
+	
+	/*Check that we have not yet calculated this cost function*/
+	if(this->passedflags[pos]){
+		return this->J;
+	}
+	
+	/*Calculate cost function for this time slice*/
+	IssmDouble J_part=0.;
+	IssmDouble J_sum=0.;
+
+	for(Object* & object : femmodel->elements->objects){
+		Element* element=xDynamicCast<Element*>(object);
+		J_part+=this->Cfrheologybbarabsgradtransient_Calculation(element);
+	}
+
+	ISSM_MPI_Allreduce( (void*)&J_part,(void*)&J_sum,1,ISSM_MPI_DOUBLE,ISSM_MPI_SUM,IssmComm::GetComm());
+	ISSM_MPI_Bcast(&J_sum,1,ISSM_MPI_DOUBLE,0,IssmComm::GetComm());
+
+	this->passedflags[pos]= true;
+	this->J += J_sum;
+
+	return this->J;
+}/*}}}*/
+IssmDouble Cfrheologybbarabsgradtransient::Cfrheologybbarabsgradtransient_Calculation(Element* element){/*{{{*/
+
+	int        domaintype,numcomponents;
+	IssmDouble Jelem=0.;
+	IssmDouble Jdet;
+	IssmDouble dp[2],weight;
+	IssmDouble* xyz_list = NULL;
+
+	/*Get basal element*/
+	if(!element->IsOnBase()) return 0.;
+
+	/*If on water, return 0: */
+	if(!element->IsIceInElement()) return 0.;
+
+	/*Get problem dimension*/
+	element->FindParam(&domaintype,DomainTypeEnum);
+	switch(domaintype){
+		case Domain2DverticalEnum:   numcomponents   = 1; break;
+		case Domain3DEnum:           numcomponents   = 2; break;
+		case Domain2DhorizontalEnum: numcomponents   = 2; break;
+		default: _error_("not supported yet");
+	}
+
+	/*Spawn surface element*/
+	Element* basalelement = element->SpawnBasalElement();
+
+	/* Get node coordinates*/
+	basalelement->GetVerticesCoordinates(&xyz_list);
+
+	/*Get input if it already exists*/
+	DatasetInput *datasetinput = basalelement->GetDatasetInput(definitionenum);  _assert_(datasetinput);
+	Input* rheologyb_input=basalelement->GetInput(MaterialsRheologyBbarEnum);                  _assert_(rheologyb_input);
+
+	/* Start  looping on the number of gaussian points: */
+	Gauss* gauss=basalelement->NewGauss(2);
+	while(gauss->next()){
+
+		/* Get Jacobian determinant: */
+		basalelement->JacobianDeterminant(&Jdet,xyz_list,gauss);
+
+		/*Get all parameters at gaussian point*/
+		datasetinput->GetInputValue(&weight,gauss,WeightsSurfaceObservationEnum);
+		rheologyb_input->GetInputDerivativeValue(&dp[0],xyz_list,gauss);
+
+		/*Add to cost function*/
+		Jelem+=weight*.5*dp[0]*dp[0]*Jdet*gauss->weight;
+		if(numcomponents==2) Jelem+=weight*.5*dp[1]*dp[1]*Jdet*gauss->weight;
+	}
+
+	/*clean up and Return: */
+	if(basalelement->IsSpawnedElement()){basalelement->DeleteMaterials(); delete basalelement;};
+	xDelete<IssmDouble>(xyz_list);
+	delete gauss;
+	return Jelem;
+}/*}}}*/
Index: /issm/trunk/src/c/classes/Cfrheologybbarabsgradtransient.h
===================================================================
--- /issm/trunk/src/c/classes/Cfrheologybbarabsgradtransient.h	(revision 28013)
+++ /issm/trunk/src/c/classes/Cfrheologybbarabsgradtransient.h	(revision 28013)
@@ -0,0 +1,45 @@
+/*!\file Cfrheologybbarabsgradtransient.h
+ * \brief: header file for Cfrheologybbarabsgradtransient object
+ */
+
+#ifndef _CFRHEOLOGYBBARABSGRADTRANSIENT_H_
+#define _CFRHEOLOGYBBARABSGRADTRANSIENT_H_
+
+/*Headers:*/
+#include "./Definition.h"
+class FemModel;
+
+IssmDouble OutputDefinitionsResponsex(FemModel* femmodel,int output_enum);
+
+class Cfrheologybbarabsgradtransient: public Object, public Definition{
+
+	public: 
+
+		int         definitionenum;
+		char       *name;
+		int         num_datatimes;
+		IssmDouble *datatimes;
+		bool       *passedflags;
+		IssmDouble  J;
+
+		/*Cfrheologybbarabsgradtransient constructors, destructors :*/
+		Cfrheologybbarabsgradtransient();
+		Cfrheologybbarabsgradtransient(char* in_name, int in_definitionenum, int num_datatimes, IssmDouble* in_datatime);
+		Cfrheologybbarabsgradtransient(char* in_name, int in_definitionenum, int num_datatimes, IssmDouble* in_datatime, bool* in_timepassedflag, IssmDouble in_J);
+		~Cfrheologybbarabsgradtransient();
+
+		/*Object virtual function resolutoin: */
+		Object* copy();
+		void DeepEcho(void);
+		void Echo(void);
+		int Id(void);
+		void Marshall(MarshallHandle* marshallhandle);
+		int ObjectEnum(void);
+
+		/*Definition virtual function resolutoin: */
+		int DefinitionEnum();
+		char* Name();
+		IssmDouble Response(FemModel* femmodel);
+		IssmDouble Cfrheologybbarabsgradtransient_Calculation(Element* element);
+};
+#endif  /* _CFRHEOLOGYBBARABSGRADTRANSIENT_H_ */
Index: /issm/trunk/src/c/classes/Cfsurfacelogvel.cpp
===================================================================
--- /issm/trunk/src/c/classes/Cfsurfacelogvel.cpp	(revision 28012)
+++ /issm/trunk/src/c/classes/Cfsurfacelogvel.cpp	(revision 28013)
@@ -31,8 +31,9 @@
 	this->datatime=0.;
 	this->timepassedflag = false;
-
-}
-/*}}}*/
-Cfsurfacelogvel::Cfsurfacelogvel(char* in_name, int in_definitionenum, IssmDouble in_datatime, bool in_timepassedflag){/*{{{*/
+	this->J = 0.;
+
+}
+/*}}}*/
+Cfsurfacelogvel::Cfsurfacelogvel(char* in_name, int in_definitionenum, IssmDouble in_datatime){/*{{{*/
 
 	this->definitionenum=in_definitionenum;
@@ -42,5 +43,20 @@
 
 	this->datatime=in_datatime;
+
+	this->timepassedflag=false;
+	this->J=0.;
+
+}
+/*}}}*/
+Cfsurfacelogvel::Cfsurfacelogvel(char* in_name, int in_definitionenum, IssmDouble in_datatime, bool in_timepassedflag,IssmDouble in_J){/*{{{*/
+
+	this->definitionenum=in_definitionenum;
+
+	this->name		= xNew<char>(strlen(in_name)+1);
+	xMemCpy<char>(this->name,in_name,strlen(in_name)+1);
+
+	this->datatime=in_datatime;
 	this->timepassedflag=in_timepassedflag;
+	this->J=in_J;
 
 }
@@ -52,5 +68,5 @@
 /*Object virtual function resolutoin: */
 Object* Cfsurfacelogvel::copy() {/*{{{*/
-	Cfsurfacelogvel* mf = new Cfsurfacelogvel(this->name,this->definitionenum,this->datatime,this->timepassedflag);
+	Cfsurfacelogvel* mf = new Cfsurfacelogvel(this->name,this->definitionenum,this->datatime,this->timepassedflag, this->J);
 	return (Object*) mf;
 }
@@ -64,4 +80,5 @@
 	_printf_("    datatime: " << datatime << "\n");
 	_printf_("	  timepassedflag: "<<timepassedflag<<"\n");
+	_printf_("	  J: "<<J<<"\n");
 }
 /*}}}*/
@@ -78,4 +95,5 @@
 	marshallhandle->call(this->datatime);
 	marshallhandle->call(this->timepassedflag);
+	marshallhandle->call(this->J);
 } 
 /*}}}*/
@@ -104,22 +122,20 @@
 	if(this->datatime<=time && !this->timepassedflag){
 
-		IssmDouble J=0.;
+		IssmDouble J_part=0.;
 		IssmDouble J_sum=0.;
 
 		for(Object* & object : femmodel->elements->objects){
 			Element* element=xDynamicCast<Element*>(object);
-			J+=this->Cfsurfacelogvel_Calculation(element,definitionenum);
-		}
-
-		ISSM_MPI_Allreduce ( (void*)&J,(void*)&J_sum,1,ISSM_MPI_DOUBLE,ISSM_MPI_SUM,IssmComm::GetComm());
+			J_part+=this->Cfsurfacelogvel_Calculation(element,definitionenum);
+		}
+
+		ISSM_MPI_Allreduce ( (void*)&J_part,(void*)&J_sum,1,ISSM_MPI_DOUBLE,ISSM_MPI_SUM,IssmComm::GetComm());
 		ISSM_MPI_Bcast(&J_sum,1,ISSM_MPI_DOUBLE,0,IssmComm::GetComm());
-		J=J_sum;
 
 		this->timepassedflag = true;
-		return J_sum;
-	}
-	else{
-		return 0.;
-	}
+		this->J = J_sum;
+	}
+
+	return this->J;
 }/*}}}*/
 IssmDouble Cfsurfacelogvel::Cfsurfacelogvel_Calculation(Element* element, int definitionenum){/*{{{*/
Index: /issm/trunk/src/c/classes/Cfsurfacelogvel.h
===================================================================
--- /issm/trunk/src/c/classes/Cfsurfacelogvel.h	(revision 28012)
+++ /issm/trunk/src/c/classes/Cfsurfacelogvel.h	(revision 28013)
@@ -20,17 +20,19 @@
 		IssmDouble	datatime;
 		bool			timepassedflag;
+		IssmDouble  J;
 
 		/*Cfsurfacelogvel constructors, destructors :*/
 		Cfsurfacelogvel();
-		Cfsurfacelogvel(char* in_name, int in_definitionenum, IssmDouble in_datatime, bool timepassedflag);
+		Cfsurfacelogvel(char* in_name, int in_definitionenum, IssmDouble in_datatime);
+		Cfsurfacelogvel(char* in_name, int in_definitionenum, IssmDouble in_datatime, bool timepassedflag, IssmDouble in_J);
 		~Cfsurfacelogvel();
 
 		/*Object virtual function resolutoin: */
-		Object* copy();
-		void DeepEcho(void);
-		void Echo(void);
-		int Id(void);
-		void Marshall(MarshallHandle* marshallhandle);
-		int ObjectEnum(void);
+		Object *copy();
+		void    DeepEcho(void);
+		void    Echo(void);
+		int     Id(void);
+		void    Marshall(MarshallHandle  *marshallhandle);
+		int     ObjectEnum(void);
 
 		/*Definition virtual function resolutoin: */
Index: /issm/trunk/src/c/classes/Cfsurfacesquare.cpp
===================================================================
--- /issm/trunk/src/c/classes/Cfsurfacesquare.cpp	(revision 28012)
+++ /issm/trunk/src/c/classes/Cfsurfacesquare.cpp	(revision 28013)
@@ -30,11 +30,10 @@
 	this->name             = NULL;
 	this->model_enum       = UNDEF;
-	this->observation_enum = UNDEF;
-	this->weights_enum     = UNDEF;
 	this->datatime         = 0.;
 	this->timepassedflag   = false;
-}
-/*}}}*/
-Cfsurfacesquare::Cfsurfacesquare(char* in_name, int in_definitionenum, int in_model_enum, int in_observation_enum, int in_weights_enum, IssmDouble in_datatime, bool in_timepassedflag){/*{{{*/
+	this->J                = 0.;
+}
+/*}}}*/
+Cfsurfacesquare::Cfsurfacesquare(char* in_name, int in_definitionenum, int in_model_enum, IssmDouble in_datatime){/*{{{*/
 
 	this->definitionenum=in_definitionenum;
@@ -44,8 +43,20 @@
 
 	this->model_enum=in_model_enum;
-	this->observation_enum=in_observation_enum;
-	this->weights_enum=in_weights_enum;
+	this->datatime=in_datatime;
+	this->timepassedflag=false;
+	this->J=0.;
+}
+/*}}}*/
+Cfsurfacesquare::Cfsurfacesquare(char* in_name, int in_definitionenum, int in_model_enum, IssmDouble in_datatime, bool in_timepassedflag, IssmDouble in_J){/*{{{*/
+
+	this->definitionenum=in_definitionenum;
+
+	this->name		= xNew<char>(strlen(in_name)+1);
+	xMemCpy<char>(this->name,in_name,strlen(in_name)+1);
+
+	this->model_enum=in_model_enum;
 	this->datatime=in_datatime;
 	this->timepassedflag=in_timepassedflag;
+	this->J=in_J;
 }
 /*}}}*/
@@ -57,5 +68,5 @@
 /*Object virtual function resolutoin: */
 Object* Cfsurfacesquare::copy() {/*{{{*/
-	Cfsurfacesquare* mf = new Cfsurfacesquare(this->name,this->definitionenum, this->model_enum,this->observation_enum,this->weights_enum,this->datatime,this->timepassedflag);
+	Cfsurfacesquare* mf = new Cfsurfacesquare(this->name,this->definitionenum, this->model_enum,this->datatime,this->timepassedflag,this->J);
 	return (Object*) mf;
 }
@@ -68,8 +79,7 @@
 	_printf_(" Cfsurfacesquare: " << name << " " << this->definitionenum << "\n");
 	_printf_("    model_enum: " << model_enum << " " << EnumToStringx(model_enum) << "\n");
-	_printf_("    observation_enum: " << observation_enum << " " << EnumToStringx(observation_enum) << "\n");
-	_printf_("    weights_enum: " << weights_enum << " " << EnumToStringx(weights_enum) << "\n");
 	_printf_("    datatime: " << datatime << "\n");
 	_printf_("	  timepassedflag: "<<timepassedflag<<"\n");
+	_printf_("	  J: "<<J<<"\n");
 }
 /*}}}*/
@@ -84,11 +94,9 @@
 
 	marshallhandle->call(this->definitionenum);
-	marshallhandle->call(this->local);
 	marshallhandle->call(this->model_enum);
 	marshallhandle->call(this->name);
-	marshallhandle->call(this->observation_enum);
-	marshallhandle->call(this->weights_enum);
 	marshallhandle->call(this->datatime);
 	marshallhandle->call(this->timepassedflag);
+	marshallhandle->call(this->J);
 } 
 /*}}}*/
@@ -119,24 +127,23 @@
 	if(this->datatime<=time && !this->timepassedflag){
 
-		IssmDouble J=0.;
+		IssmDouble J_part=0.;
 		IssmDouble J_sum=0.;
 
 		for(Object* & object : femmodel->elements->objects){
 			Element* element=xDynamicCast<Element*>(object);
-			J+=this->Cfsurfacesquare_Calculation(element,model_enum,observation_enum,weights_enum);
+			J_part+=this->Cfsurfacesquare_Calculation(element,model_enum);
 		}
 
-		ISSM_MPI_Allreduce ( (void*)&J,(void*)&J_sum,1,ISSM_MPI_DOUBLE,ISSM_MPI_SUM,IssmComm::GetComm());
+		ISSM_MPI_Allreduce ( (void*)&J_part,(void*)&J_sum,1,ISSM_MPI_DOUBLE,ISSM_MPI_SUM,IssmComm::GetComm());
 		ISSM_MPI_Bcast(&J_sum,1,ISSM_MPI_DOUBLE,0,IssmComm::GetComm());
 
 		this->timepassedflag = true;
-		return J_sum;
+		this->J = J_sum;
 	}
-	else{
-		return 0.;
-	}
-}
-/*}}}*/
-IssmDouble Cfsurfacesquare::Cfsurfacesquare_Calculation(Element* element, int model_enum, int observation_enum, int weights_enum){/*{{{*/
+
+	return this->J;
+}
+/*}}}*/
+IssmDouble Cfsurfacesquare::Cfsurfacesquare_Calculation(Element* element, int model_enum){/*{{{*/
 
 	int        domaintype,numcomponents;
Index: /issm/trunk/src/c/classes/Cfsurfacesquare.h
===================================================================
--- /issm/trunk/src/c/classes/Cfsurfacesquare.h	(revision 28012)
+++ /issm/trunk/src/c/classes/Cfsurfacesquare.h	(revision 28013)
@@ -17,30 +17,29 @@
 
 		int         definitionenum;
-		int         local;     
 		int         model_enum;
-		char*       name;
-		int         observation_enum;
-		int         weights_enum;
-		IssmDouble	datatime;
-		bool			timepassedflag;
+		char       *name;
+		IssmDouble  datatime;
+		bool        timepassedflag;
+		IssmDouble  J;
 
 		/*Cfsurfacesquare constructors, destructors :*/
 		Cfsurfacesquare();
-		Cfsurfacesquare(char* in_name, int in_definitionenum, int in_model_enum, int in_observation_enum, int in_weights_enum, IssmDouble in_datatime, bool timepassedflag);
+		Cfsurfacesquare(char* in_name, int in_definitionenum, int in_model_enum, IssmDouble in_datatime);
+		Cfsurfacesquare(char* in_name, int in_definitionenum, int in_model_enum, IssmDouble in_datatime, bool timepassedflag, IssmDouble in_J);
 		~Cfsurfacesquare();
 
 		/*Object virtual function resolutoin: */
-		Object* copy();
-		void DeepEcho(void);
-		void Echo(void);
-		int Id(void);
-		void Marshall(MarshallHandle* marshallhandle);
-		int ObjectEnum(void);
+		Object *copy();
+		void    DeepEcho(void);
+		void    Echo(void);
+		int     Id(void);
+		void    Marshall(MarshallHandle  *marshallhandle);
+		int     ObjectEnum(void);
 
 		/*Definition virtual function resolutoin: */
-		int DefinitionEnum();
-		char* Name();
-		IssmDouble Response(FemModel* femmodel);
-		IssmDouble Cfsurfacesquare_Calculation(Element* element, int model_enum, int observation_enum, int weights_enum);
+		int         DefinitionEnum();
+		char       *Name();
+		IssmDouble  Response(FemModel                    *femmodel);
+		IssmDouble  Cfsurfacesquare_Calculation(Element  *element, int model_enum);
 };
 #endif  /* _CFSURFACESQUARE_H_ */
Index: /issm/trunk/src/c/classes/Cfsurfacesquaretransient.cpp
===================================================================
--- /issm/trunk/src/c/classes/Cfsurfacesquaretransient.cpp	(revision 28013)
+++ /issm/trunk/src/c/classes/Cfsurfacesquaretransient.cpp	(revision 28013)
@@ -0,0 +1,235 @@
+/*!\file Cfsurfacesquaretransient.cpp
+ * \brief: Cfsurfacesquaretransient Object
+ */
+
+/*Headers:*/
+/*{{{*/
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#else
+#error "Cannot compile with HAVE_CONFIG_H symbol! run configure first!"
+#endif
+
+#include "./classes.h"
+#include "./ExternalResults/ExternalResult.h"
+#include "./ExternalResults/Results.h"
+#include "../datastructures/datastructures.h"
+#include "./Elements/Element.h"
+#include "./Elements/Elements.h"
+#include "./FemModel.h"
+#include "../modules/SurfaceAreax/SurfaceAreax.h"
+#include "../classes/Params/Parameters.h"
+#include "../classes/gauss/Gauss.h"
+#include "./Inputs/DatasetInput.h"
+/*}}}*/
+
+/*Cfsurfacesquaretransient constructors, destructors :*/
+Cfsurfacesquaretransient::Cfsurfacesquaretransient(){/*{{{*/
+
+	this->definitionenum = -1;
+	this->name           = NULL;
+	this->model_enum     = UNDEF;
+	this->datatimes      = NULL;
+	this->passedflags    = NULL;
+	this->J              = 0.;
+}
+/*}}}*/
+Cfsurfacesquaretransient::Cfsurfacesquaretransient(char* in_name, int in_definitionenum, int in_model_enum, int in_num_datatimes, IssmDouble* in_datatimes, bool* in_passedflags, IssmDouble in_J){/*{{{*/
+
+	this->definitionenum=in_definitionenum;
+
+	this->name = xNew<char>(strlen(in_name)+1);
+	xMemCpy<char>(this->name,in_name,strlen(in_name)+1);
+
+	this->model_enum=in_model_enum;
+	this->num_datatimes = in_num_datatimes;
+
+	/*Allocate arrays*/
+	_assert_(this->num_datatimes>0);
+	this->datatimes   = xNew<IssmDouble>(this->num_datatimes);
+	this->passedflags = xNew<bool>(this->num_datatimes);
+	xMemCpy<IssmDouble>(this->datatimes,in_datatimes,this->num_datatimes);
+	xMemCpy<bool>(this->passedflags,in_passedflags,this->num_datatimes);
+
+	this->J = in_J;
+}
+/*}}}*/
+Cfsurfacesquaretransient::Cfsurfacesquaretransient(char* in_name, int in_definitionenum, int in_model_enum, int in_num_datatimes, IssmDouble* in_datatimes){/*{{{*/
+
+	this->definitionenum=in_definitionenum;
+
+	this->name = xNew<char>(strlen(in_name)+1);
+	xMemCpy<char>(this->name,in_name,strlen(in_name)+1);
+
+	this->model_enum=in_model_enum;
+	this->num_datatimes = in_num_datatimes;
+
+	/*Allocate arrays*/
+	_assert_(this->num_datatimes>0);
+	this->datatimes   = xNew<IssmDouble>(this->num_datatimes);
+	this->passedflags = xNew<bool>(this->num_datatimes);
+	xMemCpy<IssmDouble>(this->datatimes,in_datatimes,this->num_datatimes);
+
+	/*initialize passedtimes to false*/
+	for(int i=0;i<this->num_datatimes;i++) this->passedflags[i]= false;
+	this->J = 0;
+}
+/*}}}*/
+Cfsurfacesquaretransient::~Cfsurfacesquaretransient(){/*{{{*/
+	if(this->name) xDelete(this->name);
+	if(this->datatimes) xDelete(this->datatimes);
+	if(this->passedflags) xDelete(this->passedflags);
+}
+/*}}}*/
+
+/*Object virtual function resolutoin: */
+Object* Cfsurfacesquaretransient::copy() {/*{{{*/
+	Cfsurfacesquaretransient* output = new Cfsurfacesquaretransient(this->name,this->definitionenum, this->model_enum, this->num_datatimes, this->datatimes,this->passedflags, this->J);
+	return (Object*)output;
+}
+/*}}}*/
+void Cfsurfacesquaretransient::DeepEcho(void){/*{{{*/
+	this->Echo();
+}
+/*}}}*/
+void Cfsurfacesquaretransient::Echo(void){/*{{{*/
+	_printf_(" Cfsurfacesquaretransient: " << name << " " << this->definitionenum << "\n");
+	_printf_("    model_enum: " << model_enum << " " << EnumToStringx(model_enum) << "\n");
+	_error_("not implemented yet");
+}
+/*}}}*/
+int Cfsurfacesquaretransient::Id(void){/*{{{*/
+	return -1;
+}
+/*}}}*/
+void Cfsurfacesquaretransient::Marshall(MarshallHandle* marshallhandle){/*{{{*/
+
+	int object_enum=CfsurfacesquaretransientEnum;
+	marshallhandle->call(object_enum);
+
+	marshallhandle->call(this->definitionenum);
+	marshallhandle->call(this->model_enum);
+	marshallhandle->call(this->name);
+	marshallhandle->call(this->num_datatimes);
+	marshallhandle->call(this->datatimes,this->num_datatimes);
+	marshallhandle->call(this->passedflags,this->num_datatimes);
+	marshallhandle->call(this->J);
+} 
+/*}}}*/
+int Cfsurfacesquaretransient::ObjectEnum(void){/*{{{*/
+	return CfsurfacesquaretransientEnum;
+}
+/*}}}*/
+
+/*Definition virtual function resolutoin: */
+int Cfsurfacesquaretransient::DefinitionEnum(){/*{{{*/
+	return this->definitionenum;
+}
+/*}}}*/
+char* Cfsurfacesquaretransient::Name(){/*{{{*/
+	char* name2=xNew<char>(strlen(this->name)+1);
+	xMemCpy(name2,this->name,strlen(this->name)+1);
+
+	return name2;
+}
+/*}}}*/
+IssmDouble Cfsurfacesquaretransient::Response(FemModel* femmodel){/*{{{*/
+
+	/*recover model time parameters: */
+	IssmDouble time;
+	femmodel->parameters->FindParam(&time,TimeEnum);
+
+	/*Find closest datatime that is less than time*/
+	int pos=-1;
+	for(int i=0;i<this->num_datatimes;i++){
+		if(this->datatimes[i]<=time){
+			pos = i;
+		}
+		else{
+			break;
+		}
+	}
+
+	/*if pos=-1, time is earlier than the first data observation in this dataset*/
+	if(pos==-1){
+		_assert_(this->J==0.);
+		return 0.;
+	}
+
+	/*Check that we have not yet calculated this cost function*/
+	if(this->passedflags[pos]){
+		return this->J;
+	}
+
+	/*Calculate cost function for this time slice*/
+	IssmDouble J_part=0.;
+	for(Object* & object : femmodel->elements->objects){
+		Element* element=xDynamicCast<Element*>(object);
+		J_part+=this->Cfsurfacesquaretransient_Calculation(element,model_enum);
+	}
+
+	/*Sum across partition*/
+	IssmDouble J_sum;
+	ISSM_MPI_Allreduce((void*)&J_part,(void*)&J_sum,1,ISSM_MPI_DOUBLE,ISSM_MPI_SUM,IssmComm::GetComm());
+	ISSM_MPI_Bcast(&J_sum,1,ISSM_MPI_DOUBLE,0,IssmComm::GetComm());
+
+	/*Record this cost function so that we do not recalculate it later*/
+	this->passedflags[pos]= true;
+	this->J += J_sum;
+
+	/*Return full cost function this far*/
+	return this->J;
+}/*}}}*/
+IssmDouble Cfsurfacesquaretransient::Cfsurfacesquaretransient_Calculation(Element* element, int model_enum){/*{{{*/
+
+	IssmDouble Jelem=0.;
+	IssmDouble misfit,Jdet;
+	IssmDouble model,obs,weight;
+	IssmDouble* xyz_list = NULL;
+
+	/*Get basal element*/
+	if(!element->IsOnSurface()) return 0.;
+
+	/*If on water, return 0: */
+	if(!element->IsIceInElement()) return 0.;
+
+	/*Spawn surface element*/
+	Element* topelement = element->SpawnTopElement();
+
+	/* Get node coordinates*/
+	topelement->GetVerticesCoordinates(&xyz_list);
+
+	/*Retrieve all inputs we will be needing: */
+	DatasetInput *datasetinput = topelement->GetDatasetInput(definitionenum); _assert_(datasetinput);
+	Input        *model_input  = topelement->GetInput(model_enum);            _assert_(model_input);
+
+	/* Start  looping on the number of gaussian points: */
+	Gauss* gauss=topelement->NewGauss(2);
+	while(gauss->next()){
+
+		/* Get Jacobian determinant: */
+		topelement->JacobianDeterminant(&Jdet,xyz_list,gauss);
+
+		/*Get all parameters at gaussian point*/
+		datasetinput->GetInputValue(&weight,gauss,WeightsSurfaceObservationEnum);
+		datasetinput->GetInputValue(&obs,gauss,SurfaceObservationEnum);
+		model_input->GetInputValue(&model,gauss);
+
+		/*Compute Misfit
+		 *     
+		 *       1  [           2 ]
+		 *  J = --- | (x - x   )  |
+		 *       2  [       obs   ]
+		 **/
+		misfit=0.5*(model-obs)*(model-obs);
+
+		/*Add to cost function*/
+		Jelem+=misfit*weight*Jdet*gauss->weight;
+	}
+
+	/*clean up and Return: */
+	if(topelement->IsSpawnedElement()){topelement->DeleteMaterials(); delete topelement;};
+	xDelete<IssmDouble>(xyz_list);
+	delete gauss;
+	return Jelem;
+}/*}}}*/
Index: /issm/trunk/src/c/classes/Cfsurfacesquaretransient.h
===================================================================
--- /issm/trunk/src/c/classes/Cfsurfacesquaretransient.h	(revision 28013)
+++ /issm/trunk/src/c/classes/Cfsurfacesquaretransient.h	(revision 28013)
@@ -0,0 +1,46 @@
+/*!\file Cfsurfacesquaretransient.h
+ * \brief: header file for Cfsurfacesquaretransient object
+ */
+
+#ifndef _CFSURFACESQUARETRANSIENT_H_
+#define _CFSURFACESQUARETRANSIENT_H_
+
+/*Headers:*/
+#include "./Definition.h"
+class FemModel;
+
+IssmDouble OutputDefinitionsResponsex(FemModel* femmodel,int output_enum);
+
+class Cfsurfacesquaretransient: public Object, public Definition{
+
+	public: 
+
+		int         definitionenum;
+		int         model_enum;
+		char       *name;
+		int         num_datatimes;
+		IssmDouble *datatimes;
+		bool       *passedflags;
+		IssmDouble  J;
+
+		/*Cfsurfacesquaretransient constructors, destructors :*/
+		Cfsurfacesquaretransient();
+		Cfsurfacesquaretransient(char* in_name, int in_definitionenum, int in_model_enum,int num_datatimes, IssmDouble* in_datatime);
+		Cfsurfacesquaretransient(char* in_name, int in_definitionenum, int in_model_enum,int num_datatimes, IssmDouble* in_datatime, bool* in_timepassedflag, IssmDouble in_J);
+		~Cfsurfacesquaretransient();
+
+		/*Object virtual function resolutoin: */
+		Object *copy();
+		void    DeepEcho(void);
+		void    Echo(void);
+		int     Id(void);
+		void    Marshall(MarshallHandle  *marshallhandle);
+		int     ObjectEnum(void);
+
+		/*Definition virtual function resolutoin: */
+		int         DefinitionEnum();
+		char       *Name();
+		IssmDouble  Response(FemModel *femmodel);
+		IssmDouble  Cfsurfacesquaretransient_Calculation(Element  *element, int model_enum);
+};
+#endif  /* _CFSURFACESQUARE_H_ */
Index: /issm/trunk/src/c/classes/DependentObject.cpp
===================================================================
--- /issm/trunk/src/c/classes/DependentObject.cpp	(revision 28012)
+++ /issm/trunk/src/c/classes/DependentObject.cpp	(revision 28013)
@@ -16,19 +16,16 @@
 DependentObject::DependentObject(){/*{{{*/
 	this->name=NULL;
-	this->index=-1;
 	this->response_value=0.;
 }
 /*}}}*/
-DependentObject::DependentObject(char* in_name,int in_index){/*{{{*/
+DependentObject::DependentObject(char* in_name){/*{{{*/
 
 	this->name=xNew<char>(strlen(in_name)+1); xMemCpy<char>(this->name,in_name,strlen(in_name)+1);
-	this->index=in_index;
 	this->response_value=0.;
 
 }/*}}}*/
-DependentObject::DependentObject(char* in_name,int in_index,IssmDouble in_response){/*{{{*/
+DependentObject::DependentObject(char* in_name,IssmDouble in_response){/*{{{*/
 
 	this->name=xNew<char>(strlen(in_name)+1); xMemCpy<char>(this->name,in_name,strlen(in_name)+1);
-	this->index=in_index;
 	this->response_value=in_response;
 
@@ -40,5 +37,5 @@
 /*Object virtual functions definitions:*/
 Object* DependentObject::copy(void) { /*{{{*/
-	return new DependentObject(name,index,response_value);
+	return new DependentObject(name,response_value);
 } /*}}}*/
 void DependentObject::DeepEcho(void){/*{{{*/
@@ -50,5 +47,4 @@
 	_printf_("DependentObject:\n");
 	_printf_("   name: " << this->name << "\n");
-	if(this->index>=0) _printf_("   index: " << this->index << "\n");
 	_printf_("   response_value: " << this->response_value<< "\n");
 }
@@ -70,16 +66,17 @@
 	marshallhandle->call(this->name);
 
-	marshallhandle->call(this->index);
 	marshallhandle->call(this->response_value);
 }/*}}}*/
 
 /*DependentObject methods: */
-void  DependentObject::Responsex(IssmDouble* poutput_value,FemModel* femmodel){/*{{{*/
+void  DependentObject::RecordResponsex(FemModel* femmodel){/*{{{*/
 
 	/*Is this some special type of response for which we need to go in the output definitions? :*/
 	if (StringToEnumx(this->name,false)==-1){
-		*poutput_value=OutputDefinitionsResponsex(femmodel,this->name);
+		this->response_value = OutputDefinitionsResponsex(femmodel,this->name);
 	}
-	else femmodel->Responsex(poutput_value,this->name);
+	else{
+		femmodel->Responsex(&this->response_value, this->name);
+	}
 }
 /*}}}*/
@@ -88,8 +85,4 @@
 }
 /*}}}*/
-void DependentObject::AddValue(IssmDouble in_value){/*{{{*/
-	this->response_value+=in_value;
-}
-/*}}}*/
 void DependentObject::ResetResponseValue(){/*{{{*/
 	this->response_value=0.;
Index: /issm/trunk/src/c/classes/DependentObject.h
===================================================================
--- /issm/trunk/src/c/classes/DependentObject.h	(revision 28012)
+++ /issm/trunk/src/c/classes/DependentObject.h	(revision 28013)
@@ -18,11 +18,10 @@
 
 		char* name;
-		int   index;  /*0: scalar, 1: vertex*/
 		IssmDouble response_value;
 
 		/*DependentObject constructors, destructors */
 		DependentObject();
-		DependentObject(char* name,int index);
-		DependentObject(char* name,int index,IssmDouble in_response);
+		DependentObject(char* name);
+		DependentObject(char* name, IssmDouble in_response);
 		~DependentObject();
 
@@ -36,7 +35,6 @@
 
 		/*DependentObject methods: */
-		void       Responsex(IssmDouble *poutput_value,FemModel*femmodel);
+		void       RecordResponsex(FemModel*femmodel);
 		IssmDouble GetValue(void);
-		void       AddValue(IssmDouble in_value);
 		void       ResetResponseValue(void);
 
Index: /issm/trunk/src/c/classes/Elements/Element.cpp
===================================================================
--- /issm/trunk/src/c/classes/Elements/Element.cpp	(revision 28012)
+++ /issm/trunk/src/c/classes/Elements/Element.cpp	(revision 28013)
@@ -30,4 +30,17 @@
 extern "C" void run_semic_(IssmDouble *sf_in, IssmDouble *rf_in, IssmDouble *swd_in, IssmDouble *lwd_in, IssmDouble *wind_in, IssmDouble *sp_in, IssmDouble *rhoa_in,
 			IssmDouble *qq_in, IssmDouble *tt_in, IssmDouble *tsurf_out, IssmDouble *smb_out, IssmDouble *saccu_out, IssmDouble *smelt_out);
+
+extern "C" void run_semic_transient_(int *nx, int *ntime, int *nloop, 
+			IssmDouble *sf_in, IssmDouble *rf_in, IssmDouble *swd_in, 
+			IssmDouble *lwd_in, IssmDouble *wind_in, IssmDouble *sp_in, IssmDouble *rhoa_in,
+			IssmDouble *qq_in, IssmDouble *tt_in, IssmDouble *tsurf_in, IssmDouble *qmr_in,
+			IssmDouble *tstic,
+			IssmDouble *hcrit, IssmDouble *rcrit,
+			IssmDouble *mask, IssmDouble *hice, IssmDouble *hsnow,
+			IssmDouble *albedo_in, IssmDouble *albedo_snow_in,
+			int *alb_scheme, IssmDouble *alb_smax, IssmDouble *alb_smin, IssmDouble *albi, IssmDouble *albl,
+			IssmDouble *Tamp, 
+			IssmDouble *tmin, IssmDouble *tmax, IssmDouble *tmid, IssmDouble *mcrit, IssmDouble *wcrit, IssmDouble *tau_a, IssmDouble* tau_f, IssmDouble *afac, bool *verbose,
+			IssmDouble *tsurf_out, IssmDouble *smb_out, IssmDouble *smbi_out, IssmDouble *smbs_out, IssmDouble *saccu_out, IssmDouble *smelt_out, IssmDouble *refr_out, IssmDouble *albedo_out, IssmDouble *albedo_snow_out, IssmDouble *hsnow_out, IssmDouble *hice_out, IssmDouble *qmr_out);
 #endif
 // _HAVE_SEMIC_
@@ -99,5 +112,19 @@
          outenum_type   = BasalforcingsSpatialDeepwaterMeltingRateEnum;
          break;
-   }
+		case(FrontalForcingsSubglacialDischargearmaEnum):
+         arenum_type    = SubglacialdischargeValuesAutoregressionEnum;
+         maenum_type    = SubglacialdischargeValuesMovingaverageEnum;
+         basinenum_type = FrontalForcingsBasinIdEnum;
+         noiseenum_type = SubglacialdischargeARMANoiseEnum;
+         outenum_type   = FrontalForcingsSubglacialDischargeEnum;
+         break;
+		case(HydrologyarmapwEnum):
+         arenum_type    = WaterPressureValuesAutoregressionEnum;
+         maenum_type    = WaterPressureValuesMovingaverageEnum;
+         basinenum_type = HydrologyBasinsIdEnum;
+         noiseenum_type = FrictionWaterPressureNoiseEnum;
+         outenum_type   = WaterPressureArmaPerturbationEnum;
+         break;
+	}
 
 	/*Get time parameters*/
@@ -126,13 +153,14 @@
 			indperiod = 0;
 			for(int i=0;i<numbreaks;i++){
-				if(tatstep>datebreaks_basin[i]) indperiod = i+1;
+				if(tatstep>=datebreaks_basin[i]) indperiod = i+1;
 			}
 			/*Compute polynomial with parameters of indperiod*/
-			if(indperiod==0) telapsed_break = tatstep;
+			if(indperiod==0) telapsed_break = tatstep-starttime;
 			else             telapsed_break = tatstep-datebreaks_basin[indperiod-1];
 			for(int j=0;j<numparams;j++)   sumpoly[s] = sumpoly[s]+polyparams_basin[indperiod+j*numperiods]*pow(telapsed_break,j);
 		}
-		else for(int j=0;j<numparams;j++) sumpoly[s] = sumpoly[s]+polyparams_basin[j*numperiods]*pow(tatstep,j);
-	}
+		else for(int j=0;j<numparams;j++) sumpoly[s] = sumpoly[s]+polyparams_basin[j*numperiods]*pow(tatstep-starttime,j);
+	}
+
 	/*Initialze autoregressive and moving-average values at first time step*/
 	if(time<=starttime+dt){
@@ -174,5 +202,10 @@
 			/*Stochastic variable value*/
          varlist[v] = sumpoly[0]+autoregressionterm+movingaverageterm+noiseterm;
-      }
+      
+			/*Impose zero-bound*/
+			if(outenum_type == ThermalForcingEnum || outenum_type == FrontalForcingsSubglacialDischargeEnum) varlist[v] = max(varlist[v],0.0);
+
+		}
+
       /*Update autoregression and moving-average values*/
       IssmDouble* temparrayar = xNew<IssmDouble>(numvertices*arorder);
@@ -911,5 +944,5 @@
 
 	/*Get material parameters :*/
-	IssmDouble rho_water = this->FindParam(MaterialsRhoSeawaterEnum);
+	IssmDouble rho_water = this->FindParam(MaterialsRhoFreshwaterEnum);
 	IssmDouble rho_ice   = this->FindParam(MaterialsRhoIceEnum);
 
@@ -1218,4 +1251,50 @@
 	/*Return: */
 	return this->FloatingArea(scaled);
+}
+/*}}}*/
+void       Element::FrictionAlpha2CreateInput(void){/*{{{*/
+
+	/*Return if element is inactive*/
+	if(this->IsAllFloating() || !this->IsIceInElement()) return;
+
+	/*Intermediaries*/
+	int      domaintype, dim;
+	Element* basalelement = NULL;
+
+	/*Get basal element*/
+	this->FindParam(&domaintype,DomainTypeEnum);
+	switch(domaintype){
+		case Domain2DhorizontalEnum:
+			basalelement = this;
+			dim = 2;
+			break;
+		case Domain3DEnum: case Domain2DverticalEnum:
+			if(!this->IsOnBase()) return;
+			basalelement = this->SpawnBasalElement(true);
+			dim = 2;
+			break;
+		default: _error_("mesh "<<EnumToStringx(domaintype)<<" not supported yet");
+	}
+
+	int numvertices = basalelement->GetNumberOfVertices();
+
+	/*build friction object, used later on: */
+	Friction* friction=new Friction(basalelement,dim);
+	IssmDouble alpha2_list[MAXVERTICES];
+	IssmDouble alpha2;
+
+	Gauss* gauss=basalelement->NewGauss();
+	for(int i=0;i<numvertices;i++){
+		gauss->GaussVertex(i);
+
+		friction->GetAlpha2(&alpha2,gauss);
+		alpha2_list[i] = alpha2;
+	}
+
+	/*Clean up and return*/
+	delete gauss;
+	delete friction;
+	if(basalelement->IsSpawnedElement()){basalelement->DeleteMaterials(); delete basalelement;};
+	this->AddBasalInput(FrictionAlpha2Enum,&alpha2_list[0],P1Enum);
 }
 /*}}}*/
@@ -1870,8 +1949,11 @@
 		/*Are we in transient or static? */
 		if(M==1){
-			values[0]=vector[0];
+			if(N!=1) _error_("Size of Input "<<EnumToStringx(vector_enum)<<" "<<M<<"x"<<N<<" not supported");
+			_assert_(N==1);
 			this->SetElementInput(inputs,vector_enum,vector[0]);
 		}
+
 		else if(M==iomodel->numberofvertices){
+			if(N!=1) _error_("Size of Input "<<EnumToStringx(vector_enum)<<" "<<M<<"x"<<N<<" not supported");
 			for(int i=0;i<NUM_VERTICES;i++) values[i]=vector[vertexids[i]-1];
 			this->SetElementInput(inputs,NUM_VERTICES,vertexlids,values,vector_enum);
@@ -1912,5 +1994,5 @@
 			}
 			else{
-				_error_("Patch interpolation not supported yet");
+				_error_("Size of Input "<<EnumToStringx(vector_enum)<<" "<<M<<"x"<<N<<" not supported");
 			}
 			xDelete<IssmDouble>(evalues);
@@ -1918,13 +2000,34 @@
 		}
 		else{
-			_error_("nodal vector is either numberofvertices or numberofvertices+1 long. Field provided (" << EnumToStringx(vector_enum) << ") is " << M << " long");
+			_error_("Size of Input "<<EnumToStringx(vector_enum)<<" "<<M<<"x"<<N<<" not supported");
 		}
 	}
 	else if(vector_type==2){ //element vector
 
-		IssmDouble value;
-
 		/*Are we in transient or static? */
-		if(M==iomodel->numberofelements){
+		if(M==1){
+			if(N!=1) _error_("Size of Input "<<EnumToStringx(vector_enum)<<" "<<M<<"x"<<N<<" not supported");
+			this->SetElementInput(inputs,vector_enum,vector[0]);
+		}
+		else if(M==2){
+			/*create transient input: */
+			IssmDouble* times = xNew<IssmDouble>(N);
+			for(int t=0;t<N;t++) times[t] = vector[(M-1)*N+t];
+
+			inputs->SetTransientInput(vector_enum,times,N);
+			TransientInput* transientinput = inputs->GetTransientInput(vector_enum);
+
+			for(int t=0;t<N;t++){
+				IssmDouble value=vector[t]; //values are on the first line, times are on the second line
+				switch(this->ObjectEnum()){
+					case TriaEnum:  transientinput->AddTriaTimeInput( t,1,&(this->lid),&value,P0Enum); break;
+					case PentaEnum: transientinput->AddPentaTimeInput(t,1,&(this->lid),&value,P0Enum); break;
+					default: _error_("Not implemented yet");
+				}
+			}
+			xDelete<IssmDouble>(times);
+		}
+		else if(M==iomodel->numberofelements){
+			if(N!=1) _error_("Size of Input "<<EnumToStringx(vector_enum)<<" "<<M<<"x"<<N<<" not supported");
 			if (code==5){ //boolean
 				this->SetBoolInput(inputs,vector_enum,reCast<bool>(vector[this->Sid()]));
@@ -1945,5 +2048,5 @@
 			TransientInput* transientinput = inputs->GetTransientInput(vector_enum);
 			for(int t=0;t<N;t++){
-				value=vector[N*this->Sid()+t];
+				IssmDouble value=vector[N*this->Sid()+t];
 				switch(this->ObjectEnum()){
 					case TriaEnum:  transientinput->AddTriaTimeInput( t,1,&(this->lid),&value,P0Enum); break;
@@ -1954,24 +2057,8 @@
 			xDelete<IssmDouble>(times);
 		}
-		else if(M==1 || M==2){
-			/*create transient input: */
-			IssmDouble* times = xNew<IssmDouble>(N);
-			if(M==1)times[0]=0;
-			if(M==2)for(int t=0;t<N;t++) times[t] = vector[(M-1)*N+t];
-
-			inputs->SetTransientInput(vector_enum,times,N);
-			TransientInput* transientinput = inputs->GetTransientInput(vector_enum);
-
-			for(int t=0;t<N;t++){
-				value=vector[t]; //values are on the first line, times are on the second line
-				switch(this->ObjectEnum()){
-					case TriaEnum:  transientinput->AddTriaTimeInput( t,1,&(this->lid),&value,P0Enum); break;
-					case PentaEnum: transientinput->AddPentaTimeInput(t,1,&(this->lid),&value,P0Enum); break;
-					default: _error_("Not implemented yet");
-				}
-			}
-			xDelete<IssmDouble>(times);
-		}
-		else _error_("element vector is either numberofelements or numberofelements+1 long. Field provided (" << EnumToStringx(vector_enum) << ") is " << M << " long");
+
+		else{
+			_error_("Size of Input "<<EnumToStringx(vector_enum)<<" "<<M<<"x"<<N<<" not supported");
+		}
 	}
 	else if(vector_type==3){ //Double array matrix
@@ -1984,7 +2071,11 @@
 			xDelete<IssmDouble>(layers);
 		}
-		else _error_("element vector is either numberofelements or numberofelements+1 long. Field provided (" << EnumToStringx(vector_enum) << ") is " << M << " long");
-	}
-	else _error_("Cannot add input for vector type " << vector_type << " (not supported)");
+		else{
+			_error_("Size of Input "<<EnumToStringx(vector_enum)<<" "<<M<<"x"<<N<<" not supported");
+		}
+	}
+	else{
+		_error_("Cannot add input for vector type " << vector_type << " (not supported)");
+	}
 }
 /*}}}*/
@@ -2114,10 +2205,9 @@
 	else _error_("not currently supported type of M and N attempted");
 }/*}}}*/
-void       Element::DatasetInputAdd(int enum_type,IssmDouble* vector,Inputs* inputs,IoModel* iomodel,int M,int N,int vector_type,int input_enum,int code,int input_id){/*{{{*/
+void       Element::DatasetInputAdd(int enum_type,IssmDouble* vector,Inputs* inputs,IoModel* iomodel,int M,int N,int vector_type,int input_enum,int input_id){/*{{{*/
 	/*enum_type: the name of the DatasetInput (eg Outputdefinition1)
 	 * vector: information being stored (eg observations)
 	 * vector_type: is if by element or by vertex
 	 * input_enum: is the name of the vector being stored
-	 * code: what type of data is in the vector (booleans, ints, doubles)
 	 */
 
@@ -2178,35 +2268,8 @@
 		/*Are we in transient or static? */
 		if(M==iomodel->numberofelements){
-			if (code==5){ //boolean
-				_error_("not implemented");
-				//datasetinput->AddInput(new BoolInput(input_enum,reCast<bool>(vector[this->Sid()])),input_id);
-			}
-			else if (code==6){ //integer
-				_error_("not implemented");
-				//datasetinput->AddInput(new IntInput(input_enum,reCast<int>(vector[this->Sid()])),input_id);
-			}
-			else if (code==7){ //IssmDouble
-				_error_("not implemented");
-				//datasetinput->AddInput(new DoubleInput(input_enum,vector[this->Sid()]),input_id);
-			}
-			else _error_("could not recognize nature of vector from code " << code);
+			_error_("not implemented");
 		}
 		else if(M==iomodel->numberofelements+1){
 			_error_("not supported");
-			///*create transient input: */
-			//IssmDouble* times = xNew<IssmDouble>(N);
-			//for(t=0;t<N;t++) times[t] = vector[(M-1)*N+t];
-			//TransientInput* transientinput=new TransientInput(input_enum,times,N);
-			//TriaInput* bof=NULL;
-			//for(t=0;t<N;t++){
-			//	value=vector[N*this->Sid()+t];
-			//	switch(this->ObjectEnum()){
-			//		case TriaEnum:  transientinput->AddTimeInput(new TriaInput( input_enum,&value,P0Enum)); break;
-			//		case PentaEnum: transientinput->AddTimeInput(new PentaInput(input_enum,&value,P0Enum)); break;
-			//		case TetraEnum: transientinput->AddTimeInput(new TetraInput(input_enum,&value,P0Enum)); break;
-			//		default: _error_("Not implemented yet");
-			//	}
-			//}
-			//xDelete<IssmDouble>(times);
 		}
 		else _error_("element vector is either numberofelements or numberofelements+1 long. Field provided (" << EnumToStringx(input_enum) << ") is " << M << " long");
@@ -2340,4 +2403,17 @@
 	Input* input=this->GetInput(MaskOceanLevelsetEnum); _assert_(input);
 	return (input->GetInputMax()<=0.);
+}
+/*}}}*/
+bool       Element::IsAllMinThicknessInElement(){/*{{{*/
+
+	IssmDouble minthickness = this->FindParam(MasstransportMinThicknessEnum);
+
+	Input* input=this->GetInput(ThicknessEnum); _assert_(input);
+	if(input->GetInputMax()<=(minthickness+0.00000001)){
+		return true;
+	}
+	else{
+                return false;
+        }
 }
 /*}}}*/
@@ -2411,12 +2487,23 @@
    const int numvertices = this->GetNumberOfVertices();
    bool isadjustsmb = false;
-	int basinid,bb1,bb2;
-   IssmDouble ela,refelevation_b;
+	int basinid,bb1,bb2,mindex;
+	IssmDouble ela,refelevation_b,time,dt,fracyear,yts;
+   IssmDouble monthsteps[12]  = {0.,1./12,2./12,3./12,4./12,5./12,6./12,7./12,8./12,9./12,10./12,11./12};
    IssmDouble* surfacelist  = xNew<IssmDouble>(numvertices);
    IssmDouble* smblist      = xNew<IssmDouble>(numvertices);
-   /* numelevbins values of lapse rates */
+   /* numelevbins values of lapse rates at current month */
 	IssmDouble* lapserates_b = xNew<IssmDouble>(numelevbins);
-   /* (numelevbins-1) limits between elevation bins (be cautious with indexing) */
+   /* (numelevbins-1) limits between elevation bins at current month (be cautious with indexing) */
 	IssmDouble* elevbins_b   = xNew<IssmDouble>(numelevbins-1);
+
+	/*Find month of current time step*/
+	this->parameters->FindParam(&yts,ConstantsYtsEnum);
+   this->parameters->FindParam(&time,TimeEnum);
+   this->parameters->FindParam(&dt,TimesteppingTimeStepEnum); 
+   fracyear     = time/yts-floor(time/yts);
+   for(int i=1;i<12;i++){
+		if(fracyear>=monthsteps[i-1]) mindex = i-1;
+	}
+   if(fracyear>=monthsteps[11]) mindex = 11;
 
    /*Retrieve SMB values non-adjusted for SMB lapse rate*/
@@ -2427,21 +2514,24 @@
    this->GetInputValue(&basinid,SmbBasinsIdEnum);
    refelevation_b = refelevation[basinid];
-	for(int ii=0;ii<(numelevbins-1);ii++) elevbins_b[ii] = elevbins[basinid*(numelevbins-1)+ii];
+	/*Retrieve bins and laps rates for this basin at this month*/
+	for(int ii=0;ii<(numelevbins-1);ii++) elevbins_b[ii] = elevbins[basinid*(numelevbins-1)*12+mindex*(numelevbins-1)+ii];
 	for(int ii=0;ii<numelevbins;ii++){
-		lapserates_b[ii] = lapserates[basinid*numelevbins+ii];
+		lapserates_b[ii] = lapserates[basinid*numelevbins*12+mindex*numelevbins+ii];
 		if(lapserates_b[ii]!=0) isadjustsmb=true;
 	}
+	
 	/*Adjust SMB if any lapse rate value is non-zero*/
 	if(isadjustsmb){
-	
+
+		_assert_(dt<yts);
 	   for(int v=0;v<numvertices;v++){
 	      /*Find elevation bin of Reference elevation and of Vertex*/
+			bb1 = 0;
+			bb2 = 0;
 			for(int ii=0;ii<(numelevbins-1);ii++){
-				if(surfacelist[v]<=elevbins_b[ii]) bb1 = ii;	
-				if(refelevation_b<=elevbins_b[ii]) bb2 = ii;
+				if(surfacelist[v]>=elevbins_b[ii]) bb1 = ii+1;
+				if(refelevation_b>=elevbins_b[ii]) bb2 = ii+1;
 			}
-			/*Check for elevations above highest bin limit */
-			if(surfacelist[v]>elevbins_b[numelevbins-1-1]) bb1 = numelevbins-1;
-			if(refelevation_b>elevbins_b[numelevbins-1-1]) bb2 = numelevbins-1;
+
 			/*Vertex and Reference elevation in same elevation bin*/
 			if(bb1==bb2){
@@ -2478,4 +2568,5 @@
 	IssmDouble deepwaterel,upperwaterel,deepwatermelt,upperwatermelt;
 	IssmDouble base[MAXVERTICES];
+	IssmDouble perturbation[MAXVERTICES];
 	IssmDouble values[MAXVERTICES];
 	IssmDouble time;
@@ -2489,4 +2580,5 @@
 
 	this->GetInputListOnVertices(&base[0],BaseEnum);
+	this->GetInputListOnVertices(&perturbation[0],BasalforcingsPerturbationMeltingRateEnum);
 	for(int i=0;i<NUM_VERTICES;i++){
 		if(base[i]>=upperwaterel){
@@ -2500,4 +2592,7 @@
 			values[i]=deepwatermelt*alpha+(1.-alpha)*upperwatermelt;
 		}
+
+		/*Add perturbation*/
+		values[i] += perturbation[i];
 	}
 
@@ -2717,4 +2812,86 @@
 
 }/*}}}*/
+void       Element::MonthlyFactorBasin(IssmDouble* monthlyfac,int enum_type){/*{{{*/
+	
+	/*Variable declaration*/
+	bool ratevariable;
+   const int numvertices = this->GetNumberOfVertices();
+	int basinid,mindex,mindexnext,basinenum_type,varenum_type,indperiod;
+   IssmDouble time,dt,fracyear,fracyearnext,fracmonth,fracmonthnext,yts; 
+   IssmDouble monthsteps[12]  = {0.,1./12,2./12,3./12,4./12,5./12,6./12,7./12,8./12,9./12,10./12,11./12};
+   IssmDouble* monthlyfac_b   = xNew<IssmDouble>(12);
+   IssmDouble* monthlyrate_b  = xNew<IssmDouble>(12);
+	IssmDouble* fracdtinmonth  = xNew<IssmDouble>(12);
+	IssmDouble* rateinmonth    = xNew<IssmDouble>(numvertices*12);
+	IssmDouble* varlistinput   = xNew<IssmDouble>(numvertices);
+	IssmDouble* varlist        = xNewZeroInit<IssmDouble>(numvertices);
+
+	/*Get field-specific enums*/
+   switch(enum_type){
+      case(FrontalForcingsSubglacialDischargearmaEnum):
+         basinenum_type = FrontalForcingsBasinIdEnum;
+         varenum_type   = FrontalForcingsSubglacialDischargeEnum;
+         ratevariable   = true;
+			break;
+		case(HydrologyarmapwEnum):
+         basinenum_type = HydrologyBasinsIdEnum;
+         varenum_type   = FrictionWaterPressureEnum;
+         ratevariable   = false;
+			break;
+	}
+	
+	/*Evaluate the month index now and at (now-timestepjump)*/
+	this->parameters->FindParam(&yts,ConstantsYtsEnum);
+	this->parameters->FindParam(&time,TimeEnum);
+   this->parameters->FindParam(&dt,TimesteppingTimeStepEnum); _assert_(dt<yts);
+	fracyear     = time/yts-floor(time/yts);
+	fracyearnext = (time+dt)/yts-floor((time+dt)/yts);
+	for(int i=1;i<12;i++){
+		if(fracyear>=monthsteps[i-1])     mindex     = i-1;
+		if(fracyearnext>=monthsteps[i-1]) mindexnext = i-1;
+	}
+	if(fracyear>=monthsteps[11])         mindex     = 11;
+	if(fracyearnext>=monthsteps[11])     mindexnext = 11;
+
+	/*Calculate fraction of the time step spent in each month*/
+	for(int i=0;i<12;i++){
+		if(mindex<i && mindexnext>i)                            fracdtinmonth[i] = 1.0/dt*yts/12.0;
+		else if(mindex<i && mindexnext<i && mindexnext<mindex)  fracdtinmonth[i] = 1.0/dt*yts/12.0;
+		else if(mindex>i && mindexnext<mindex && mindexnext>i)  fracdtinmonth[i] = 1.0/dt*yts/12.0;
+		else if(mindex>i && mindexnext<mindex && mindexnext==i) fracdtinmonth[i] = 1.0/dt*yts*(fracyearnext-monthsteps[i]);
+		else if(mindex==i && mindexnext==i)                     fracdtinmonth[i] = 1.0/dt*yts*(fracyearnext-fracyear); 
+		else if(mindex==i && mindexnext!=mindex)                fracdtinmonth[i] = 1.0/dt*yts*(1.0/12-(fracyear-monthsteps[i]));
+		else if(mindexnext==i && mindex!=mindexnext)            fracdtinmonth[i] = 1.0/dt*yts*(fracyearnext-monthsteps[i]);
+		else	                                                  fracdtinmonth[i] = 0.0;
+	}
+
+	/*Get basin-specific parameters of the element*/
+   this->GetInputValue(&basinid,basinenum_type);
+	for(int i=0;i<12;i++) monthlyfac_b[i]   = monthlyfac[basinid*12+i];
+
+	/*Retrieve input*/
+	this->GetInputListOnVertices(varlistinput,varenum_type);
+
+	/*Calculate monthly rate for each month and weight-average it for application over dt*/
+	for(int v=0;v<numvertices;v++){
+		for(int i=0;i<12;i++){
+			if(ratevariable){
+				rateinmonth[v*12+i] = varlistinput[v]*monthlyfac_b[i]*12;
+				varlist[v]          = varlist[v]+fracdtinmonth[i]*rateinmonth[v*12+i];
+			}
+			else varlist[v]       = varlist[v]+fracdtinmonth[i]*monthlyfac_b[i]*varlistinput[v];
+		}
+	}
+	/*Update input*/
+   this->AddInput(varenum_type,varlist,P1DGEnum);
+
+	/*Clean-up*/
+	xDelete<IssmDouble>(fracdtinmonth);
+	xDelete<IssmDouble>(rateinmonth);
+	xDelete<IssmDouble>(monthlyfac_b);
+	xDelete<IssmDouble>(monthlyrate_b);
+	xDelete<IssmDouble>(varlist);
+	xDelete<IssmDouble>(varlistinput);
+}/*}}}*/
 void       Element::MonthlyPiecewiseLinearEffectBasin(int nummonthbreaks,IssmDouble* monthlyintercepts,IssmDouble* monthlytrends,IssmDouble* monthlydatebreaks,int enum_type){/*{{{*/
 	
@@ -2726,6 +2903,6 @@
    IssmDouble monthsteps[12] = {0.,1./12,2./12,3./12,4./12,5./12,6./12,7./12,8./12,9./12,10./12,11./12};
    IssmDouble* datebreaks_b  = xNew<IssmDouble>(nummonthbreaks);
-	IssmDouble* intercepts_b  = xNew<IssmDouble>(nummonthbreaks*12);
-	IssmDouble* trends_b      = xNew<IssmDouble>(nummonthbreaks*12);
+	IssmDouble* intercepts_b  = xNew<IssmDouble>(numperiods*12);
+	IssmDouble* trends_b      = xNew<IssmDouble>(numperiods*12);
 	IssmDouble* varlist       = xNew<IssmDouble>(numvertices);
 
@@ -3610,4 +3787,340 @@
 }
 /*}}}*/
+void       Element::SmbDebrisEvatt(){/*{{{*/
+
+        const int NUM_VERTICES          = this->GetNumberOfVertices();
+        const int NUM_VERTICES_DAYS_PER_YEAR  = NUM_VERTICES * 365; // 365 FIXME
+
+        int             i,vertexlids[MAXVERTICES];;
+        IssmDouble* smb=xNew<IssmDouble>(NUM_VERTICES);
+        IssmDouble* melt=xNew<IssmDouble>(NUM_VERTICES);
+        IssmDouble* summermelt=xNew<IssmDouble>(NUM_VERTICES); 
+        IssmDouble* albedo=xNew<IssmDouble>(NUM_VERTICES);
+        IssmDouble* summeralbedo=xNew<IssmDouble>(NUM_VERTICES); 
+        IssmDouble* accu=xNew<IssmDouble>(NUM_VERTICES);
+        
+        // climate inputs
+        IssmDouble* temperature=xNew<IssmDouble>(NUM_VERTICES_DAYS_PER_YEAR);
+        IssmDouble* precip=xNew<IssmDouble>(NUM_VERTICES_DAYS_PER_YEAR);
+        IssmDouble* lw=xNew<IssmDouble>(NUM_VERTICES_DAYS_PER_YEAR);
+        IssmDouble* sw=xNew<IssmDouble>(NUM_VERTICES_DAYS_PER_YEAR);
+        IssmDouble* wind=xNew<IssmDouble>(NUM_VERTICES_DAYS_PER_YEAR);
+        IssmDouble* humidity=xNew<IssmDouble>(NUM_VERTICES_DAYS_PER_YEAR);
+        IssmDouble* yearlytemperatures=xNew<IssmDouble>(NUM_VERTICES); memset(yearlytemperatures, 0., NUM_VERTICES*sizeof(IssmDouble));
+        IssmDouble* p_ampl=xNew<IssmDouble>(NUM_VERTICES);
+        IssmDouble* t_ampl=xNew<IssmDouble>(NUM_VERTICES);
+        IssmDouble* lw_ampl=xNew<IssmDouble>(NUM_VERTICES);
+        IssmDouble* sw_ampl=xNew<IssmDouble>(NUM_VERTICES);
+        IssmDouble* wind_ampl=xNew<IssmDouble>(NUM_VERTICES);
+        IssmDouble* humidity_ampl=xNew<IssmDouble>(NUM_VERTICES);
+
+        IssmDouble* surface=xNew<IssmDouble>(NUM_VERTICES);
+        IssmDouble* s0t=xNew<IssmDouble>(NUM_VERTICES);
+        IssmDouble* snowheight=xNew<IssmDouble>(NUM_VERTICES);
+        IssmDouble* debriscover=xNew<IssmDouble>(NUM_VERTICES);
+        IssmDouble rho_water,rho_ice,Tf,debris,debris_here;
+        IssmDouble qlaps,rlaps,dsgrad,dlgrad,windspeedgrad,humiditygrad,Tm;
+        IssmDouble inv_twelve=1./365.;
+        IssmDouble time,yts,time_yr,lambda;
+        IssmDouble DailyMelt,CleanIceDailyMelt, CumDailyMelt=0,CleanIceMelt,CumDailySummerMelt=0;
+        IssmDouble MeanAlbedo=0, MeanSummerAlbedo=0;
+        bool isdebris,isAnderson,iscryokarst;
+        this->parameters->FindParam(&isdebris,TransientIsdebrisEnum);
+        this->parameters->FindParam(&isAnderson,SmbDebrisIsAndersonEnum);
+        this->parameters->FindParam(&iscryokarst,SmbDebrisIsCryokarstEnum);
+        IssmDouble PhiD=0.,p;
+        IssmDouble icealbedo=this->FindParam(SmbIcealbedoEnum);
+        IssmDouble snowalbedo=this->FindParam(SmbSnowalbedoEnum);
+        IssmDouble debrisalbedo=this->FindParam(SmbDebrisalbedoEnum);
+        IssmDouble Lm=this->FindParam(MaterialsLatentheatEnum); 
+        IssmDouble D0=this->FindParam(SmbDebrisAndersonD0Enum);
+        int step;
+        this->FindParam(&step,StepEnum);
+
+        // cryokarst
+        int dim=1,domaintype;
+        this->parameters->FindParam(&domaintype,DomainTypeEnum);
+        if(domaintype!=Domain2DverticalEnum){
+                        dim=2;
+        }
+        IssmDouble taud_plus=110e3, taud_minus=60e3;
+        IssmDouble taud, slope, gravity, taudx, taudy;
+        this->parameters->FindParam(&gravity,ConstantsGEnum);
+        IssmDouble* slopex         = xNew<IssmDouble>(NUM_VERTICES);
+        IssmDouble* slopey         = xNew<IssmDouble>(NUM_VERTICES);
+        IssmDouble* icethickness   = xNew<IssmDouble>(NUM_VERTICES);
+
+        /*Get material parameters :*/
+        rho_water=this->FindParam(MaterialsRhoSeawaterEnum);
+        rho_ice=this->FindParam(MaterialsRhoIceEnum);
+        IssmDouble sconv=(rho_water/rho_ice); 
+        Tf=this->FindParam(MaterialsMeltingpointEnum);
+
+        /*Get parameters for height corrections*/
+        qlaps=this->FindParam(SmbDesfacEnum); // comment MR; on alpine galciers we dont have the desertification effect
+        rlaps=this->FindParam(SmbRlapsEnum);
+        dsgrad=this->FindParam(SmbSWgradEnum);
+        dlgrad=this->FindParam(SmbLWgradEnum);
+        windspeedgrad=this->FindParam(SmbWindspeedgradEnum);
+        humiditygrad=this->FindParam(SmbHumiditygradEnum);
+
+        /* Get time */
+        this->parameters->FindParam(&time,TimeEnum);
+        this->parameters->FindParam(&yts,ConstantsYtsEnum);
+        time_yr=floor(time/yts)*yts;
+
+        /*Get inputs*/
+        DatasetInput* tempday     =this->GetDatasetInput(SmbMonthlytemperaturesEnum); _assert_(tempday);
+        DatasetInput* precipday   =this->GetDatasetInput(SmbPrecipitationEnum);       _assert_(precipday);
+        DatasetInput* lwday       =this->GetDatasetInput(SmbMonthlydlradiationEnum); _assert_(lwday);
+        DatasetInput* swday       =this->GetDatasetInput(SmbMonthlydsradiationEnum);       _assert_(swday);
+        DatasetInput* windday     =this->GetDatasetInput(SmbMonthlywindspeedEnum); _assert_(windday);
+        DatasetInput* humidityday =this->GetDatasetInput(SmbMonthlyairhumidityEnum); _assert_(humidityday);
+
+        /*loop over vertices: */
+        Gauss* gauss=this->NewGauss();
+        for(int month=0;month<365;month++){
+                for(int iv=0;iv<NUM_VERTICES;iv++){
+                        gauss->GaussVertex(iv);
+                        tempday->GetInputValue(&temperature[iv*365+month],gauss,month);
+                        temperature[iv*365+month]=temperature[iv*365+month]-Tf; // conversion from Kelvin to celcius for PDD module
+                        precipday->GetInputValue(&precip[iv*365+month],gauss,month);
+                        precip[iv*365+month]=precip[iv*365+month]*yts; // from m/s to m/a
+                        lwday->GetInputValue(&lw[iv*365+month],gauss,month);
+                        swday->GetInputValue(&sw[iv*365+month],gauss,month);
+                        windday->GetInputValue(&wind[iv*365+month],gauss,month);
+                        humidityday->GetInputValue(&humidity[iv*365+month],gauss,month);
+                }
+        }
+
+        /*Recover info at the vertices: */
+        GetInputListOnVertices(&surface[0],SurfaceEnum);
+        GetInputListOnVertices(&s0t[0],SmbS0tEnum);
+        GetInputListOnVertices(&snowheight[0],SmbSnowheightEnum);
+        GetInputListOnVertices(&debriscover[0],DebrisThicknessEnum);
+        GetInputListOnVertices(&t_ampl[0],SmbTemperaturesAnomalyEnum);
+        GetInputListOnVertices(&p_ampl[0],SmbPrecipitationsAnomalyEnum);
+        GetInputListOnVertices(&lw_ampl[0],SmbDsradiationAnomalyEnum);
+        GetInputListOnVertices(&sw_ampl[0],SmbDlradiationAnomalyEnum);
+        GetInputListOnVertices(&wind_ampl[0],SmbWindspeedAnomalyEnum);
+        GetInputListOnVertices(&humidity_ampl[0],SmbAirhumidityAnomalyEnum);
+        if(iscryokarst){
+                GetInputListOnVertices(&slopex[0],SurfaceSlopeXEnum);
+                GetInputListOnVertices(&icethickness[0],ThicknessEnum);
+                if(dim==2){
+                        GetInputListOnVertices(&slopey[0],SurfaceSlopeYEnum);
+                }
+                taudx=rho_ice*gravity*icethickness[i]*slopex[i];
+                if(dim==2) taudy=rho_ice*gravity*icethickness[i]*slopey[i];
+                taud=sqrt(taudx*taudx+taudy*taudy);
+        }
+        IssmDouble Alphaeff,Alphaeff_cleanice;
+
+        /*measure the surface mass balance*/
+        for (int iv = 0; iv<NUM_VERTICES; iv++){
+
+                IssmDouble st=(surface[iv]-s0t[iv])/1000.;
+
+                int ismb_end=1;
+                if(isdebris & !isAnderson) ismb_end=2;
+                for (int ismb=0;ismb<ismb_end;ismb++){
+                        if(ismb==0){
+                                // calc a reference smb to identify accum and melt region; debris only develops in ablation area
+                                debris=0.;
+                                PhiD=0.;
+                                if(isAnderson) debris_here=debriscover[iv]; // store debris for later
+                        }else{
+                                // debris only develops in ablation area
+                                /*if((accu[iv]/yts-CleanIceMelt)<(-1e-2)/yts){
+                                        debris=debriscover[iv];
+                                }else{
+                                        debris=0.;
+                                }*/
+                                debris=0.;
+                                if(debris<=0.) debris=0.;
+                                if(isdebris) PhiD=FindParam(DebrisPackingFractionEnum);
+                                CumDailyMelt=0;
+                                CumDailySummerMelt=0;
+                                debris_here=debriscover[iv];
+                        }
+
+                        /* Now run the debris part */
+
+                        // Climate inputs
+                        IssmDouble Tm;          // C air temperature
+                        IssmDouble In;          // Wm^-2 incoming long wave
+                        IssmDouble Q;           // Wm^-2 incoming short wave
+                        IssmDouble Um;          // ms^-1 measured wind speed
+                        IssmDouble Humidity;    // relative humidity
+                        IssmDouble P;           // precip
+
+                        // other parameters
+                        IssmDouble Qh=0.006;   // kg m^-3      saturated humidity level // not used
+                        IssmDouble Qm=0.8*Qh;  // kg m^-3      measured humiditiy level // not used
+                        IssmDouble Rhoaa=1.22; // kgm^-3       air densitiy
+                        IssmDouble K=0.585;    // Wm^-1K^-1    thermal conductivity          0.585
+                        IssmDouble Xr=0.01;    // ms^-1        surface roughness             0.01
+                        IssmDouble Ustar=0.16; // ms^-1        friction velocity             0.16
+                        IssmDouble Ca=1000;    // jkg^-1K^-1   specific heat capacity of air
+                        IssmDouble Lv=2.50E+06;// jkg^-1K^-1   latent heat of evaporation
+                        IssmDouble Eps=0.95;   //              thermal emissivity
+                        IssmDouble Sigma=5.67E-08;// Wm^-2K^-4    Stefan Boltzmann constant
+                        IssmDouble Gamma=180.;    // m^-1         wind speed attenuation        234
+                
+                        // Calculate effective albedo
+                        IssmDouble Alphaeff,Alphaeff_cleanice;
+                        IssmDouble mean_ela,delta=2000;
+                        
+                        // compute cleanice albedo based on previous SMB distribution
+                        //if(step==1){
+                                mean_ela=3000; //FIXME
+                        //}else{
+                        //        mean_ela=FindParam(SmbMeanElaEnum);
+                        //}
+                        Alphaeff_cleanice=icealbedo+(snowalbedo-icealbedo)*(1+tanh(PI*(surface[iv]-mean_ela)/delta))/2;
+                        Alphaeff=Alphaeff_cleanice; // will be updated below
+
+                        
+                        accu[iv]=0.;
+                        for (int iday=0;iday<365;iday++) {
+
+                                Tm=temperature[iv*365+iday]-st*rlaps;//+t_ampl[iv];//+(rand()%10-5)/5;
+                                In=lw[iv*365+iday]-st*dlgrad+lw_ampl[iv];
+                                Q=sw[iv*365+iday]+st*dsgrad+sw_ampl[iv];
+                                Humidity=humidity[iv*365+iday]-st*humiditygrad+humidity_ampl[iv];
+                                Um=wind[iv*365+iday]-st*windspeedgrad+wind_ampl[iv];
+                                P=(qlaps*st*precip[iv*365+iday]+precip[iv*365+iday]+p_ampl[iv])*sconv/365.; // convert precip from w.e. -> i.e
+
+                                /*Partition of precip in solid and liquid parts */
+                                IssmDouble temp_plus=1; 
+                                IssmDouble temp_minus=-1.;
+                                IssmDouble frac_solid;
+                                if(Tm>=temp_plus){
+                                        frac_solid=0;
+                                }else if(Tm<=temp_minus){
+                                        frac_solid=1;
+                                }else{
+                                        frac_solid=1*(1-cos(PI*(temp_plus-Tm)/(temp_plus-temp_minus)))/2;
+                                }
+
+                                /*Get yearly temperatures and accumulation */
+                                yearlytemperatures[iv]=yearlytemperatures[iv]+((temperature[iv*365+iday]-rlaps*st+Tf+t_ampl[iv]))/365; // Has to be in Kelvin
+                                accu[iv]=accu[iv]+P*frac_solid;
+                                if(yearlytemperatures[iv]>Tf) yearlytemperatures[iv]=Tf;
+
+                                CleanIceDailyMelt=((In-(Eps*Sigma*(Tf*Tf*Tf*Tf))+
+                                        Q*(1.-Alphaeff)+
+                                        (Rhoaa*Ca*Ustar*Ustar)/(Um-Ustar*(2.-(exp(Gamma*Xr))))*Tm)/((1-PhiD)*rho_ice*Lm)/(1.+
+                                        ((Rhoaa*Ca*Ustar*Ustar)/(Um-Ustar*(2.-(exp(Gamma*Xr))))+4.*Eps*Sigma*(Tf*Tf*Tf))/
+                                        K*debris)-(Lv*Ustar*Ustar*((Humidity))*(exp(-Gamma*Xr)))/((1.-PhiD)*
+                                        rho_ice*Lm*Ustar)/(((Um
+                                        -2.*Ustar)*exp(-Gamma*Xr))/Ustar+exp(Gamma*debris)));
+                                if(CleanIceDailyMelt<0) CleanIceDailyMelt=0.;
+                                DailyMelt=CleanIceDailyMelt;
+
+                                if(ismb==1){
+
+                                        //snowheight[iv]=snowheight[iv]+(P-CleanIceDailyMelt*yts/365);
+                                        IssmDouble sn_prev;
+                                        sn_prev=snowheight[iv];
+                                        snowheight[iv]=sn_prev+(-CleanIceDailyMelt*yts/365);//P
+                                        
+                                        if(snowheight[iv]<=0) snowheight[iv]=0.;
+                                        if(snowheight[iv]<=0.0001){
+                                                p=debris_here*PhiD/(2*0.2*0.01); //Eq. 51 from Evatt et al 2015 without source term g*t
+                                                if(p>1.) p=1.;
+                                                if(p>=0.999){
+                                                        Alphaeff=debrisalbedo;
+                                                } else {
+                                                        Alphaeff=Alphaeff_cleanice+p*(debrisalbedo-Alphaeff_cleanice);
+                                                }
+                                                debris=debris_here;
+                                                DailyMelt=((In-(Eps*Sigma*(Tf*Tf*Tf*Tf))+
+                                                        Q*(1.-Alphaeff)+
+                                                        (Rhoaa*Ca*Ustar*Ustar)/(Um-Ustar*(2.-(exp(Gamma*Xr))))*Tm)/((1-PhiD)*rho_ice*Lm)/(1.+
+                                                        ((Rhoaa*Ca*Ustar*Ustar)/(Um-Ustar*(2.-(exp(Gamma*Xr))))+4.*Eps*Sigma*(Tf*Tf*Tf))/
+                                                        K*debris)-(Lv*Ustar*Ustar*((Humidity))*(exp(-Gamma*Xr)))/((1.-PhiD)*
+                                                        rho_ice*Lm*Ustar)/(((Um-2.*Ustar)*exp(-Gamma*Xr))/Ustar+exp(Gamma*debris)));
+                                                if(DailyMelt<0) DailyMelt=0.;
+                                                MeanSummerAlbedo=MeanSummerAlbedo+Alphaeff;
+                                                CumDailySummerMelt=CumDailySummerMelt+DailyMelt/365;
+                                        }
+                                }
+                                CumDailyMelt=CumDailyMelt+DailyMelt/365;
+                        }
+                        MeanAlbedo=MeanAlbedo+Alphaeff;
+                        if(ismb==0) CleanIceMelt=CumDailyMelt;
+                }
+
+                if(iscryokarst){
+                        if(taud>=taud_plus){
+                                lambda=0;
+                        }else if(taud>=taud_minus & taud<taud_plus){
+                                lambda=0.1*(1-cos(PI*(taud_plus-taud)/(taud_plus-taud_minus)))/2;
+                        }else if(taud<taud_minus){
+                                lambda=0.1;
+                        }
+                }
+
+                // update values
+                melt[iv]=CumDailyMelt; // is already in m/s
+                accu[iv]=accu[iv]/yts;
+                if(isAnderson){
+                        smb[iv]=(accu[iv]-melt[iv])*D0/(D0+debris_here);
+                        if(iscryokarst){ 
+                                smb[iv]=lambda*(accu[iv]-melt[iv])+(1-lambda)*(accu[iv]-melt[iv])*D0/(D0+debris_here);
+                        }else{
+                                smb[iv]=(accu[iv]-melt[iv])*D0/(D0+debris_here);
+                        }
+                }else{
+                        if(iscryokarst){ 
+                                smb[iv]=lambda*(accu[iv]-CleanIceMelt)+(1-lambda)*(accu[iv]-melt[iv]);
+                        }else{
+                                smb[iv]=(accu[iv]-melt[iv]);
+                        }
+                }
+                albedo[iv]=MeanAlbedo;
+                summeralbedo[iv]=MeanSummerAlbedo;
+                summermelt[iv]=CumDailySummerMelt;
+        }
+
+        this->AddInput(SmbMassBalanceEnum,smb,P1Enum);
+        this->AddInput(SmbAccumulationEnum,accu,P1Enum);
+        this->AddInput(SmbMeltEnum,melt,P1Enum);
+        this->AddInput(SmbSummerMeltEnum,summermelt,P1Enum);
+        this->AddInput(SmbSnowheightEnum,snowheight,P1Enum);
+        this->AddInput(SmbAlbedoEnum,albedo,P1Enum);
+        this->AddInput(SmbSummerAlbedoEnum,summeralbedo,P1Enum);
+        this->AddInput(TemperaturePDDEnum,yearlytemperatures,P1Enum); // TemperaturePDD is wrong here, but don't want to create new Enum ...
+
+        /*clean-up*/
+        xDelete<IssmDouble>(temperature);
+        xDelete<IssmDouble>(precip);
+        xDelete<IssmDouble>(lw);
+        xDelete<IssmDouble>(sw);
+        xDelete<IssmDouble>(wind);
+        xDelete<IssmDouble>(humidity);
+        xDelete<IssmDouble>(smb);
+        xDelete<IssmDouble>(surface);
+        xDelete<IssmDouble>(melt);
+        xDelete<IssmDouble>(summermelt);
+        xDelete<IssmDouble>(albedo);
+        xDelete<IssmDouble>(summeralbedo);
+        xDelete<IssmDouble>(accu);
+        xDelete<IssmDouble>(yearlytemperatures);
+        xDelete<IssmDouble>(s0t);
+        xDelete<IssmDouble>(snowheight);
+        xDelete<IssmDouble>(debriscover);
+        xDelete<IssmDouble>(t_ampl);
+        xDelete<IssmDouble>(p_ampl);
+        xDelete<IssmDouble>(lw_ampl);
+        xDelete<IssmDouble>(sw_ampl);
+        xDelete<IssmDouble>(humidity_ampl);
+        xDelete<IssmDouble>(wind_ampl);
+        xDelete<IssmDouble>(slopex);
+        xDelete<IssmDouble>(slopey);
+        xDelete<IssmDouble>(icethickness);
+}
+/*}}}*/
 void       Element::ResultInterpolation(int* pinterpolation,int* pnodesperelement,int* parray_size, int output_enum){/*{{{*/
 
@@ -3615,4 +4128,5 @@
 	switch(output_enum){
 		case ViscousHeatingEnum: this->ViscousHeatingCreateInput(); break;
+		case FrictionAlpha2Enum: this->FrictionAlpha2CreateInput(); break;
 		case StressMaxPrincipalEnum: this->StressMaxPrincipalCreateInput(); break;
 		case StressTensorxxEnum:
@@ -3666,4 +4180,7 @@
 														  this->CalvingRateVonmises();
 														  break;
+													  case CalvingVonmisesADEnum:
+														  this->CalvingRateVonmisesAD();
+														  break;
 													  case CalvingCrevasseDepthEnum:
 														  this->CalvingCrevasseDepth();
@@ -3671,4 +4188,7 @@
 													  case CalvingParameterizationEnum:
 														  this->CalvingRateParameterization();
+														  break;
+													  case CalvingCalvingMIPEnum:
+														  this->CalvingRateCalvingMIP();
 														  break;
 													  case CalvingTestEnum:
@@ -3837,12 +4357,12 @@
 
    /* Start looping on the number of vertices: */
-   GaussTria gauss;
+	Gauss* gauss=this->NewGauss();
    for(int iv=0;iv<numvertices;iv++){
-      gauss.GaussVertex(iv);
+		gauss->GaussVertex(iv);
 
       /* Get variables */
-      bed_input->GetInputValue(&bed,&gauss);
-      qsg_input->GetInputValue(&qsg,&gauss);
-      TF_input->GetInputValue(&TF,&gauss);
+      bed_input->GetInputValue(&bed,gauss);
+      qsg_input->GetInputValue(&qsg,gauss);
+		TF_input->GetInputValue(&TF,gauss);
 
       if(basin_icefront_area[basinid]==0.) meltrates[iv]=0.;
@@ -3863,5 +4383,6 @@
 
    /*Cleanup and return*/
-   xDelete<IssmDouble>(basin_icefront_area);
+   delete gauss;
+	xDelete<IssmDouble>(basin_icefront_area);
 }
 /*}}}*/
@@ -4082,4 +4603,321 @@
 	xDelete<IssmDouble>(st);
 	xDelete<IssmDouble>(s0gcm);
+}
+/*}}}*/
+void       Element::SmbSemicTransient(){/*{{{*/
+
+	bool isverbose=VerboseSmb();
+	if(isverbose && this->Sid()==0){
+		_printf0_("smb core: initialize.\n");
+	}
+	/*only compute SMB at the surface: */
+	if (!IsOnSurface()) return;
+
+	const int NUM_VERTICES 					= this->GetNumberOfVertices();
+
+	// daily forcing inputs
+	IssmDouble* dailyrainfall   =xNew<IssmDouble>(NUM_VERTICES);
+	IssmDouble* dailysnowfall   =xNew<IssmDouble>(NUM_VERTICES);
+	IssmDouble* dailydlradiation=xNew<IssmDouble>(NUM_VERTICES);
+	IssmDouble* dailydsradiation=xNew<IssmDouble>(NUM_VERTICES);
+	IssmDouble* dailywindspeed  =xNew<IssmDouble>(NUM_VERTICES);
+	IssmDouble* dailypressure   =xNew<IssmDouble>(NUM_VERTICES);
+	IssmDouble* dailyairdensity =xNew<IssmDouble>(NUM_VERTICES);
+	IssmDouble* dailyairhumidity=xNew<IssmDouble>(NUM_VERTICES);
+	IssmDouble* dailytemperature=xNew<IssmDouble>(NUM_VERTICES);
+
+	// inputs: geometry
+	IssmDouble* s=xNew<IssmDouble>(NUM_VERTICES);
+	IssmDouble* s0gcm=xNew<IssmDouble>(NUM_VERTICES);
+	IssmDouble* st=xNew<IssmDouble>(NUM_VERTICES);
+
+	// inputs
+	IssmDouble* tsurf_in        =xNew<IssmDouble>(NUM_VERTICES); 
+	IssmDouble* mask_in         =xNew<IssmDouble>(NUM_VERTICES); 
+	IssmDouble* Tamp_in         =xNew<IssmDouble>(NUM_VERTICES); 
+	IssmDouble* albedo_in       =xNew<IssmDouble>(NUM_VERTICES); 
+	IssmDouble* albedo_snow_in  =xNew<IssmDouble>(NUM_VERTICES); 
+	IssmDouble* hice_in         =xNew<IssmDouble>(NUM_VERTICES); 
+	IssmDouble* hsnow_in        =xNew<IssmDouble>(NUM_VERTICES); 
+	IssmDouble* qmr_in          =xNew<IssmDouble>(NUM_VERTICES); 
+
+	// outputs
+	IssmDouble* tsurf_out  =xNew<IssmDouble>(NUM_VERTICES); memset(tsurf_out, 0., NUM_VERTICES*sizeof(IssmDouble));
+	IssmDouble* smb_out    =xNew<IssmDouble>(NUM_VERTICES); memset(smb_out, 0., NUM_VERTICES*sizeof(IssmDouble));
+	IssmDouble* smbi_out   =xNew<IssmDouble>(NUM_VERTICES); memset(smb_out, 0., NUM_VERTICES*sizeof(IssmDouble));
+	IssmDouble* smbs_out   =xNew<IssmDouble>(NUM_VERTICES); memset(smb_out, 0., NUM_VERTICES*sizeof(IssmDouble));
+	IssmDouble* saccu_out  =xNew<IssmDouble>(NUM_VERTICES); memset(saccu_out, 0., NUM_VERTICES*sizeof(IssmDouble));
+	IssmDouble* smelt_out  =xNew<IssmDouble>(NUM_VERTICES); memset(smelt_out, 0., NUM_VERTICES*sizeof(IssmDouble));
+	IssmDouble* refr_out  =xNew<IssmDouble>(NUM_VERTICES); memset(refr_out, 0., NUM_VERTICES*sizeof(IssmDouble));
+	IssmDouble* albedo_out =xNew<IssmDouble>(NUM_VERTICES); memset(albedo_out, 0., NUM_VERTICES*sizeof(IssmDouble));
+	IssmDouble* albedo_snow_out =xNew<IssmDouble>(NUM_VERTICES); memset(albedo_snow_out, 0., NUM_VERTICES*sizeof(IssmDouble));
+	IssmDouble* hsnow_out   =xNew<IssmDouble>(NUM_VERTICES); memset(hsnow_out, 0., NUM_VERTICES*sizeof(IssmDouble));
+	IssmDouble* hice_out    =xNew<IssmDouble>(NUM_VERTICES); memset(hice_out, 0., NUM_VERTICES*sizeof(IssmDouble));
+	IssmDouble* qmr_out     =xNew<IssmDouble>(NUM_VERTICES); memset(qmr_out, 0., NUM_VERTICES*sizeof(IssmDouble)); 
+
+	IssmDouble rho_water,rho_ice,desfac,desfacElev,rlaps,rdl;
+	IssmDouble alb_smax, alb_smin, albi, albl;
+	IssmDouble hcrit, rcrit; // parameters for ? and refreezing.
+	int alb_scheme;
+	// albedo parameters - slatter
+	IssmDouble tmin, tmax;
+	// albedo parameters - isba
+	IssmDouble mcrit, tau_a, tau_f, wcrit;
+	// albedo parameters - alex
+	IssmDouble tmid, afac;
+
+	IssmDouble tstart, time,yts,time_yr,dt;
+
+	/* Get time: */
+	this->parameters->FindParam(&time,TimeEnum);
+	this->parameters->FindParam(&dt,TimesteppingTimeStepEnum);
+	this->parameters->FindParam(&yts,ConstantsYtsEnum);
+	this->parameters->FindParam(&tstart,TimesteppingStartTimeEnum);
+	time_yr=floor(time/yts)*yts;
+	//dt = dt * yts;
+
+	/*Get material parameters :*/
+	rho_water=this->FindParam(MaterialsRhoFreshwaterEnum);
+	rho_ice=this->FindParam(MaterialsRhoIceEnum);
+	desfac=this->FindParam(SmbDesfacEnum);
+	desfacElev=this->FindParam(SmbDesfacElevEnum);
+	rlaps=this->FindParam(SmbRlapsEnum);
+	rdl=this->FindParam(SmbRdlEnum);
+
+	this->FindParam(&alb_scheme,SmbAlbedoSchemeEnum);
+	this->FindParam(&hcrit,SmbSemicHcritEnum);
+	this->FindParam(&rcrit,SmbSemicRcritEnum);
+	alb_smax=this->FindParam(SmbAlbedoSnowMaxEnum);
+	alb_smin=this->FindParam(SmbAlbedoSnowMinEnum);
+	albi=this->FindParam(SmbAlbedoIceEnum);
+	albl=this->FindParam(SmbAlbedoLandEnum);
+
+	// albedo parameters
+	this->FindParam(&tmid,SmbSemicTmidEnum);
+	this->FindParam(&tmin,SmbSemicTminEnum);
+	this->FindParam(&tmax,SmbSemicTmaxEnum);
+	this->FindParam(&mcrit,SmbSemicMcritEnum);
+	this->FindParam(&wcrit,SmbSemicWcritEnum);
+	this->FindParam(&tau_a,SmbSemicTauAEnum);
+	this->FindParam(&tau_f,SmbSemicTauFEnum);
+	this->FindParam(&afac,SmbSemicAfacEnum);
+
+	/* Recover info at the vertices: */
+	GetInputListOnVertices(&s[0],SurfaceEnum);
+	GetInputListOnVertices(&s0gcm[0],SmbS0gcmEnum);
+
+	if(isverbose && this->Sid()==0){
+		_printf0_("smb core: allocate inputs.\n");
+		_printf0_("smb core: time_yr  : " << time_yr/yts <<"\n");
+		_printf0_("smb core: time     : " << time <<"\n");
+		_printf0_("smb core: dt       : " << dt <<"\n");
+	}
+	/* loop over vertices and days */
+	Gauss* gauss=this->NewGauss();
+	/* Retrieve inputs: */
+	Input* dailysnowfall_input    = this->GetInput(SmbDailysnowfallEnum,time); _assert_(dailysnowfall_input);
+	Input* dailyrainfall_input    = this->GetInput(SmbDailyrainfallEnum,time); _assert_(dailyrainfall_input);
+	Input* dailydlradiation_input = this->GetInput(SmbDailydlradiationEnum,time); _assert_(dailydlradiation_input);
+	Input* dailydsradiation_input = this->GetInput(SmbDailydsradiationEnum,time); _assert_(dailydsradiation_input);
+	Input* dailywindspeed_input   = this->GetInput(SmbDailywindspeedEnum,time); _assert_(dailywindspeed_input);
+	Input* dailypressure_input    = this->GetInput(SmbDailypressureEnum,time); _assert_(dailypressure_input);
+	Input* dailyairdensity_input  = this->GetInput(SmbDailyairdensityEnum,time); _assert_(dailyairdensity_input);
+	Input* dailyairhumidity_input = this->GetInput(SmbDailyairhumidityEnum,time); _assert_(dailyairhumidity_input);
+	Input* dailytemperature_input = this->GetInput(SmbDailytemperatureEnum,time); _assert_(dailytemperature_input);
+
+	/*temporal Enum depending on time*/
+	int enum_temp       =TemperatureSEMICEnum;
+	int enum_hice       =SmbHIceEnum;
+	int enum_hsnow      =SmbHSnowEnum;
+	int enum_albedo     =SmbAlbedoEnum;
+	int enum_albedo_snow=SmbAlbedoSnowEnum;
+	int enum_qmr        =SmbSemicQmrEnum;
+	if (tstart+dt == time) {
+		/* Load inital value at first time step*/
+		enum_temp=TemperatureEnum;
+		enum_hice=SmbHIceInitEnum;
+		enum_hsnow=SmbHSnowInitEnum;
+		enum_albedo=SmbAlbedoInitEnum;
+		enum_albedo_snow=SmbAlbedoSnowInitEnum;
+		enum_qmr        =SmbSemicQmrInitEnum;
+	} 
+	//if(isverbose && this->Sid()==0)_printf0_("smb core: assign temp.\n");
+	Input* tsurf_input       = this->GetInput(enum_temp); _assert_(tsurf_in);
+	//if(isverbose && this->Sid()==0)_printf0_("smb core: assign mask.\n");
+	Input* mask_input        = this->GetInput(SmbMaskEnum); _assert_(mask_input);
+	//if(isverbose && this->Sid()==0)_printf0_("smb core: assign Tamp.\n");
+	Input* Tamp_input        = this->GetInput(SmbTampEnum); _assert_(Tamp_input);
+	//if(isverbose && this->Sid()==0)_printf0_("smb core: assign albedo.\n");
+	Input* albedo_input      = this->GetInput(enum_albedo); _assert_(albedo_input);
+	Input* albedo_snow_input = this->GetInput(enum_albedo_snow); _assert_(albedo_snow_input);
+	Input* hice_input        = this->GetInput(enum_hice); _assert_(hice_input);
+	Input* hsnow_input       = this->GetInput(enum_hsnow); _assert_(hsnow_input);
+	Input* qmr_input         = this->GetInput(enum_qmr); _assert_(qmr_input);
+
+	if(isverbose && this->Sid()==0)_printf0_("smb core: assign inputs done....\n");
+	for(int iv=0;iv<NUM_VERTICES;iv++){
+		gauss->GaussVertex(iv);
+		/* get forcing */
+		dailyrainfall_input->GetInputValue(&dailyrainfall[iv],gauss);
+		dailysnowfall_input->GetInputValue(&dailysnowfall[iv],gauss);
+		dailydlradiation_input->GetInputValue(&dailydlradiation[iv],gauss);
+		dailydsradiation_input->GetInputValue(&dailydsradiation[iv],gauss);
+		dailywindspeed_input->GetInputValue(&dailywindspeed[iv],gauss);
+		dailypressure_input->GetInputValue(&dailypressure[iv],gauss);
+		dailyairdensity_input->GetInputValue(&dailyairdensity[iv],gauss);
+		dailyairhumidity_input->GetInputValue(&dailyairhumidity[iv],gauss);
+		dailytemperature_input->GetInputValue(&dailytemperature[iv],gauss);
+		tsurf_input->GetInputValue(&tsurf_in[iv],gauss);
+
+		/* Get Albedo information */
+		albedo_input->GetInputValue(&albedo_in[iv],gauss);
+		albedo_snow_input->GetInputValue(&albedo_snow_in[iv],gauss);
+		mask_input->GetInputValue(&mask_in[iv],gauss);
+		Tamp_input->GetInputValue(&Tamp_in[iv],gauss);
+
+		hsnow_input->GetInputValue(&hsnow_in[iv],gauss);
+		hice_input->GetInputValue(&hice_in[iv],gauss);
+		qmr_input->GetInputValue(&qmr_in[iv],gauss);
+
+		/* Surface temperature correction */
+		st[iv]=(s[iv]-s0gcm[iv])/1000.;
+		dailytemperature[iv]=dailytemperature[iv]-rlaps *st[iv];
+
+		/* Precipitation correction (Vizcaino et al. 2010) */
+		if (s0gcm[iv] < desfacElev) {
+			dailysnowfall[iv] = dailysnowfall[iv]*exp(desfac*(max(s[iv],desfacElev)-desfacElev));
+			dailyrainfall[iv] = dailyrainfall[iv]*exp(desfac*(max(s[iv],desfacElev)-desfacElev));
+		}else{
+			dailysnowfall[iv] = dailysnowfall[iv]*exp(desfac*(max(s[iv],desfacElev)-s0gcm[iv]));
+			dailyrainfall[iv] = dailyrainfall[iv]*exp(desfac*(max(s[iv],desfacElev)-s0gcm[iv]));
+		}
+
+		/* downward longwave radiation correction (Marty et al. 2002) */
+		st[iv]=(s[iv]-s0gcm[iv])/1000.;
+		dailydlradiation[iv]=dailydlradiation[iv]+rdl*st[iv];
+	}
+	if(isverbose && this->Sid()==0){
+		_printf0_("smb core: assign tsurf_in        :" << tsurf_in[0] << "\n");
+		_printf0_("smb core: assign dailytemperature:" << dailytemperature[0] << "\n");
+		_printf0_("smb core: assign hsnow           :" << hsnow_in[0] << "\n");
+		_printf0_("smb core: assign hice            :" << hice_in[0] << "\n");
+		_printf0_("smb core: assign mask            :" << mask_in[0] << "\n");
+		_printf0_("smb core: assign Tamp            :" << Tamp_in[0] << "\n");
+		_printf0_("smb core: assign albedo          :" << albedo_in[0] << "\n");
+		_printf0_("smb core: assign albedo_snow     :" << albedo_snow_in[0] << "\n");
+		_printf0_("smb core: assign albedo_scheme   :" << alb_scheme  << "\n");
+		_printf0_("smb core: assign qmr             :" << qmr_in[0]  << "\n");
+	}
+
+	if(isverbose && this->Sid()==0)_printf0_("smb core: call run_semic_transient module.\n");
+	/* call semic */
+	int nx=NUM_VERTICES, ntime=1, nloop=1;
+	bool semic_verbose=false; //VerboseSmb();
+	run_semic_transient_(&nx, &ntime, &nloop,
+			dailysnowfall,  dailyrainfall, dailydsradiation, dailydlradiation,
+			dailywindspeed, dailypressure, dailyairdensity,  dailyairhumidity, dailytemperature, tsurf_in, qmr_in, 
+			&dt,
+			&hcrit, &rcrit, 
+			mask_in, hice_in, hsnow_in, 
+			albedo_in, albedo_snow_in,
+			&alb_scheme, &alb_smax, &alb_smin, &albi, &albl,
+			Tamp_in,
+			&tmin, &tmax, &tmid, &mcrit, &wcrit, &tau_a, &tau_f, &afac, &semic_verbose,
+			tsurf_out, smb_out, smbi_out, smbs_out, saccu_out, smelt_out, refr_out, albedo_out, albedo_snow_out, hsnow_out, hice_out, qmr_out);
+
+	for (int iv = 0; iv<NUM_VERTICES; iv++){
+		/* 
+		 unit conversion: water -> ice
+		 w.e. : water equivalenet.
+		 */
+		smb_out[iv]  = smb_out[iv]*yts;  // w.e. m/sec -> m/yr
+		smbi_out[iv] = smbi_out[iv]*rho_water/rho_ice; // w.e. m/sec -> ice m/yr
+		smbs_out[iv] = smbs_out[iv]*yts; // w.e. m/sec -> m/yr
+		saccu_out[iv] = saccu_out[iv]*yts; // w.e. m/sec -> m/yr
+		smelt_out[iv] = smelt_out[iv]*rho_water/rho_ice; // w.e. m/sec -> ice m/yr
+		refr_out[iv]  = refr_out[iv]*rho_water/rho_ice; // w.e. m/sec -> ice m/yr
+	}
+
+	if(isverbose && this->Sid()==0){
+		_printf0_("smb core: tsurf_out " << tsurf_out[0] << " " << tsurf_out[1] << " " << tsurf_out[2] << "\n");
+		_printf0_("smb core: hice_out  " << hice_out[0] << " " <<  hice_out[1] << " " << hice_out[2] << "\n");
+		_printf0_("smb core: hsnow_out " << hsnow_out[0] << "\n");
+		_printf0_("smb core: smb_ice   " << smbi_out[0]*yts << "\n");
+		_printf0_("smb core: smb_ice   " << albedo_out[0] <<" "<<albedo_out[1] << " " << albedo_out[2] << "\n");
+	}
+
+	switch(this->ObjectEnum()){
+		case TriaEnum:
+			this->AddInput(TemperatureSEMICEnum,  &tsurf_out[0],P1DGEnum);
+			// SMBout = SMB_ice + SMB_snow values.
+			//this->AddInput(SmbMassBalanceTotalEnum,&smb_out[0],P1DGEnum);
+			// water equivalent SMB ice to ice equivalent.
+			this->AddInput(SmbMassBalanceEnum,     &smbi_out[0],P1DGEnum);
+			this->AddInput(SmbMassBalanceIceEnum,  &smbi_out[0],P1DGEnum);
+			this->AddInput(SmbMassBalanceSnowEnum, &smbs_out[0],P1DGEnum);
+			this->AddInput(SmbMassBalanceSemicEnum,&smb_out[0],P1DGEnum);
+			//this->AddInput(SmbMassBalanceSnowEnum,&smbs_out[0],P1DGEnum);
+			// saccu - accumulation of snow.
+			this->AddInput(SmbAccumulationEnum,&saccu_out[0],P1DGEnum);
+			// smelt 
+			this->AddInput(SmbMeltEnum,        &smelt_out[0],P1DGEnum);
+			this->AddInput(SmbRefreezeEnum,    &refr_out[0],P1DGEnum);
+			this->AddInput(SmbAlbedoEnum,      &albedo_out[0],P1DGEnum);
+			this->AddInput(SmbAlbedoSnowEnum,  &albedo_snow_out[0],P1DGEnum);
+			this->AddInput(SmbHSnowEnum,       &hsnow_out[0],P1DGEnum);
+			this->AddInput(SmbHIceEnum,        &hice_out[0],P1DGEnum);
+			this->AddInput(SmbSemicQmrEnum,    &qmr_out[0],P1DGEnum);
+			break;
+		case PentaEnum:
+			// TODO
+			break;
+		case TetraEnum:
+			// TODO
+			break;
+		default: _error_("Not implemented yet");
+	}
+
+	/*clean-up {{{*/
+	delete gauss;
+	xDelete<IssmDouble>(dailysnowfall);
+	xDelete<IssmDouble>(dailyrainfall);
+	xDelete<IssmDouble>(dailydlradiation);
+	xDelete<IssmDouble>(dailydsradiation);
+	xDelete<IssmDouble>(dailywindspeed);
+	xDelete<IssmDouble>(dailypressure);
+	xDelete<IssmDouble>(dailyairdensity);
+	xDelete<IssmDouble>(dailyairhumidity);
+	xDelete<IssmDouble>(dailypressure);
+	xDelete<IssmDouble>(dailytemperature);
+
+	/*for outputs*/
+	xDelete<IssmDouble>(tsurf_out);
+	xDelete<IssmDouble>(smb_out);
+	xDelete<IssmDouble>(smbi_out);
+	xDelete<IssmDouble>(smbs_out);
+	xDelete<IssmDouble>(saccu_out);
+	xDelete<IssmDouble>(smelt_out);
+	xDelete<IssmDouble>(refr_out);
+	xDelete<IssmDouble>(albedo_out);
+	xDelete<IssmDouble>(albedo_snow_out);
+	xDelete<IssmDouble>(hsnow_out);
+	xDelete<IssmDouble>(hice_out);
+	xDelete<IssmDouble>(qmr_out);
+
+	/*for inputs*/
+	xDelete<IssmDouble>(hsnow_in);
+	xDelete<IssmDouble>(hice_in);
+	xDelete<IssmDouble>(mask_in);
+	xDelete<IssmDouble>(Tamp_in);
+	xDelete<IssmDouble>(albedo_in);
+	xDelete<IssmDouble>(albedo_snow_in);
+	xDelete<IssmDouble>(tsurf_in);
+	xDelete<IssmDouble>(qmr_in);
+
+	/* for inputs:geometry */
+	xDelete<IssmDouble>(s);
+	xDelete<IssmDouble>(st);
+	xDelete<IssmDouble>(s0gcm);
+	/*}}}*/
 }
 /*}}}*/
@@ -4626,4 +5464,32 @@
 }
 /*}}}*/
+void       Element::SubglacialWaterPressure(int output_enum){/*{{{*/
+
+	bool ispwHydroArma;
+   int M;
+   int numvertices = this->GetNumberOfVertices();
+   IssmDouble p_water[numvertices];
+   IssmDouble* perturbationvalues = xNew<IssmDouble>(numvertices);
+   Gauss* gauss=this->NewGauss();
+   Friction* friction = new Friction(this);
+   /*Calculate subglacial water pressure*/
+   for(int i=0;i<numvertices;i++){
+         gauss->GaussVertex(i);
+         p_water[i] = friction->SubglacialWaterPressure(gauss);
+   }
+   /*Add perturbation in water pressure if HydrologyIsWaterPressureArmaEnum is true*/
+   this->parameters->FindParam(&ispwHydroArma,HydrologyIsWaterPressureArmaEnum);
+   if(ispwHydroArma){
+      this->GetInputListOnVertices(perturbationvalues,WaterPressureArmaPerturbationEnum);
+		for(int i=0;i<numvertices;i++) p_water[i] = p_water[i]+perturbationvalues[i];
+   }
+   /*Save*/
+   this->AddInput(output_enum,p_water,P1DGEnum);
+   /*Clean-up*/
+   delete gauss;
+   delete friction;
+   xDelete<IssmDouble>(perturbationvalues);
+
+}/*}}}*/
 void       Element::StrainRateESA(IssmDouble* epsilon,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input){/*{{{*/
 
@@ -4933,4 +5799,30 @@
 }
 /*}}}*/
+IssmDouble Element::TotalSmbMelt(IssmDouble* mask, bool scaled){/*{{{*/
+
+	/*Retrieve values of the mask defining the element: */
+	for(int i=0;i<this->GetNumberOfVertices();i++){
+		if(mask[this->vertices[i]->Sid()]<=0.){
+			return 0.;
+		}
+	}
+
+	/*Return: */
+	return this->TotalSmbMelt(scaled);
+}
+/*}}}*/
+IssmDouble Element::TotalSmbRefreeze(IssmDouble* mask, bool scaled){/*{{{*/
+
+	/*Retrieve values of the mask defining the element: */
+	for(int i=0;i<this->GetNumberOfVertices();i++){
+		if(mask[this->vertices[i]->Sid()]<=0.){
+			return 0.;
+		}
+	}
+
+	/*Return: */
+	return this->TotalSmbRefreeze(scaled);
+}
+/*}}}*/
 void       Element::TransformInvStiffnessMatrixCoord(ElementMatrix* Ke,int transformenum){/*{{{*/
 
Index: /issm/trunk/src/c/classes/Elements/Element.h
===================================================================
--- /issm/trunk/src/c/classes/Elements/Element.h	(revision 28012)
+++ /issm/trunk/src/c/classes/Elements/Element.h	(revision 28013)
@@ -140,5 +140,5 @@
 		void               InputCreateP1FromConstant(Inputs* inputs,IoModel* iomodel,IssmDouble value,int vector_enum);
 		void               ControlInputCreate(IssmDouble* doublearray,IssmDouble* independents_min,IssmDouble* independents_max,Inputs*inputs,IoModel* iomodel,int M,int N,IssmDouble scale,int input_enum,int id);
-		void					 DatasetInputAdd(int enum_type,IssmDouble* vector,Inputs* inputs,IoModel* iomodel,int M,int N,int vector_type,int vector_enum,int code,int input_enum);
+		void					 DatasetInputAdd(int enum_type,IssmDouble* vector,Inputs* inputs,IoModel* iomodel,int M,int N,int vector_type,int vector_enum,int input_enum);
 		void               InputUpdateFromConstant(IssmDouble constant, int name);
 		void               InputUpdateFromConstant(int constant, int name);
@@ -155,4 +155,5 @@
 		bool               IsOceanInElement();
 		bool               IsOceanOnlyInElement();
+		bool		   IsAllMinThicknessInElement();
 		bool               IsLandInElement();
 		void               Ismip6FloatingiceMeltingRate();
@@ -164,4 +165,5 @@
 		void               MigrateGroundingLine(IssmDouble* sheet_ungrounding);
 		void               MismipFloatingiceMeltingRate();
+		void               MonthlyFactorBasin(IssmDouble* monthlyfac,int enum_type); 
 		void               MonthlyPiecewiseLinearEffectBasin(int nummonthbreaks,IssmDouble* monthlyintercepts,IssmDouble* monthlytrends,IssmDouble* monthlydatebreaks,int enum_type); 
 		void               BeckmannGoosseFloatingiceMeltingRate();
@@ -175,4 +177,5 @@
 		void               PositiveDegreeDay(IssmDouble* pdds,IssmDouble* pds,IssmDouble signorm,bool ismungsm,bool issetpddfac);
 		void               PositiveDegreeDaySicopolis(bool isfirnwarming);
+		void               SmbDebrisEvatt();
 		void               RignotMeltParameterization();
 		void               ResultInterpolation(int* pinterpolation,int*nodesperelement,int* parray_size, int output_enum);
@@ -185,4 +188,5 @@
 		void               SetIntInput(Inputs* inputs,int enum_in,int value);
 		void               SmbSemic();
+		void               SmbSemicTransient();
 		int                Sid();
 		void               SmbGemb(IssmDouble timeinputs, int count);
@@ -195,7 +199,10 @@
 		void               StrainRateSSA1d(IssmDouble* epsilon,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input);
 		void               StressMaxPrincipalCreateInput(void);
+		void               SubglacialWaterPressure(int output_enum);
 		IssmDouble         TotalFloatingBmb(IssmDouble* mask, bool scaled);
 		IssmDouble         TotalGroundedBmb(IssmDouble* mask, bool scaled);
 		IssmDouble         TotalSmb(IssmDouble* mask, bool scaled);
+		IssmDouble         TotalSmbMelt(IssmDouble* mask, bool scaled);
+		IssmDouble         TotalSmbRefreeze(IssmDouble* mask, bool scaled);
 		void               TransformInvStiffnessMatrixCoord(ElementMatrix* Ke,int cs_enum);
 		void               TransformInvStiffnessMatrixCoord(ElementMatrix* Ke,Node** nodes,int numnodes,int cs_enum);
@@ -218,4 +225,5 @@
 		void               TransformStiffnessMatrixCoord(ElementMatrix* Ke,Node** nodes,int numnodes,int* cs_array);
 		void               TransformStiffnessMatrixCoord(ElementMatrix* Ke,int numnodes,int* transformenum_list){_error_("not implemented yet");};/*Tiling only*/
+		void               FrictionAlpha2CreateInput(void);
 		void               ViscousHeatingCreateInput(void);
 		void               ThermalToEnthalpy(IssmDouble * penthalpy,IssmDouble temperature,IssmDouble waterfraction,IssmDouble pressure);
@@ -234,5 +242,7 @@
 		virtual void		 BasalNodeIndices(int* pnumindices,int** pindices,int finiteelement){_error_("not implemented yet");};
 		virtual void       CalvingRateParameterization(void){_error_("not implemented yet");};
+		virtual void       CalvingRateCalvingMIP(void){_error_("not implemented yet");};
 		virtual void       CalvingRateVonmises(void){_error_("not implemented yet");};
+		virtual void       CalvingRateVonmisesAD(void){_error_("not implemented yet");};
 		virtual void       CalvingRateTest(void){_error_("not implemented yet");};
 		virtual void       CalvingCrevasseDepth(void){_error_("not implemented yet");};
@@ -381,4 +391,6 @@
 		virtual IssmDouble TotalGroundedBmb(bool scaled)=0;
 		virtual IssmDouble TotalSmb(bool scaled)=0;
+		virtual IssmDouble TotalSmbMelt(bool scaled)=0;
+		virtual IssmDouble TotalSmbRefreeze(bool scaled)=0;
 		virtual void       Update(Inputs* inputs,int index, IoModel* iomodel,int analysis_counter,int analysis_type,int finite_element)=0;
 		virtual void       UpdateConstraintsExtrudeFromBase(void)=0;
Index: /issm/trunk/src/c/classes/Elements/Penta.cpp
===================================================================
--- /issm/trunk/src/c/classes/Elements/Penta.cpp	(revision 28012)
+++ /issm/trunk/src/c/classes/Elements/Penta.cpp	(revision 28013)
@@ -177,5 +177,13 @@
 			}
 		}
-		else _error_("not implemented yet");
+		else if(interpolation_enum==P0Enum){
+			Penta* penta=this;
+			for(;;){
+				penta->AddInput(input_enum,&values[0],interpolation_enum);
+				if (penta->IsOnSurface()) break;
+				penta=penta->GetUpperPenta(); _assert_(penta->Id()!=this->id);
+			}
+		}
+		else _error_("Interpolation "<<EnumToStringx(interpolation_enum)<<" not implemented yet");
 	}
 
@@ -709,5 +717,5 @@
 	IssmDouble  xyz_list[NUMVERTICES][3];
 	IssmDouble  viscosity;
-	IssmDouble  epsilon[6]; /* epsilon=[exx,eyy,exy];*/
+	IssmDouble  epsilon[6]; /* epsilon=[exx,eyy,ezz,exy,exz,eyz];*/
 	IssmDouble  tau_xx[NUMVERTICES];
 	IssmDouble	tau_yy[NUMVERTICES];
@@ -722,7 +730,7 @@
 
 	/*Retrieve all inputs we will be needing: */
-	Input* vx_input=this->GetInput(VxEnum);             _assert_(vx_input);
-	Input* vy_input=this->GetInput(VyEnum);             _assert_(vy_input);
-	Input* vz_input=this->GetInput(VzEnum);             _assert_(vz_input);
+	Input* vx_input=this->GetInput(VxEnum); _assert_(vx_input);
+	Input* vy_input=this->GetInput(VyEnum); _assert_(vy_input);
+	Input* vz_input=this->GetInput(VzEnum); _assert_(vz_input);
 
 	/* Start looping on the number of vertices: */
@@ -1443,103 +1451,15 @@
 IssmDouble Penta::GetIcefrontArea(){/*{{{*/
 
-	IssmDouble  bed[NUMVERTICES]; //basinId[NUMVERTICES];
-	IssmDouble	Haverage,frontarea;
-	IssmDouble  x1,y1,x2,y2,distance;
-	IssmDouble lsf[NUMVERTICES], Haux[NUMVERTICES], surfaces[NUMVERTICES], bases[NUMVERTICES];
-	int* indices=NULL;
-	IssmDouble* H=NULL;;
-	int nrfrontbed,numiceverts;
-
+	/*We need to be on base and cross the levelset*/
+	if(!IsZeroLevelset(MaskIceLevelsetEnum)) return 0;
 	if(!this->IsOnBase()) return 0;
-	if(!IsZeroLevelset(MaskIceLevelsetEnum)) return 0;
-
-	/*Retrieve all inputs and parameters*/
-	Element::GetInputListOnVertices(&bed[0],BedEnum);
-	Element::GetInputListOnVertices(&surfaces[0],SurfaceEnum);
-	Element::GetInputListOnVertices(&bases[0],BaseEnum);
-	Element::GetInputListOnVertices(&lsf[0],MaskIceLevelsetEnum);
-
-	nrfrontbed=0;
-	for(int i=0;i<NUMVERTICES2D;i++){
-		/*Find if bed<0*/
-		if(bed[i]<0.) nrfrontbed++;
-	}
-
-	if(nrfrontbed==3){
-		/*2. Find coordinates of where levelset crosses 0*/
-		int         numiceverts;
-		IssmDouble  s[2],x[2],y[2];
-		this->GetLevelsetIntersectionBase(&indices, &numiceverts,&s[0],MaskIceLevelsetEnum,0.);
-		_assert_(numiceverts);
-
-		/*3 Write coordinates*/
-		IssmDouble  xyz_list[NUMVERTICES][3];
-		::GetVerticesCoordinates(&xyz_list[0][0],this->vertices,NUMVERTICES);
-		int counter = 0;
-		if((numiceverts>0) && (numiceverts<NUMVERTICES2D)){
-			for(int i=0;i<numiceverts;i++){
-				for(int n=numiceverts;n<NUMVERTICES2D;n++){ // iterate over no-ice vertices
-					x[counter] = xyz_list[indices[i]][0]+s[counter]*(xyz_list[indices[n]][0]-xyz_list[indices[i]][0]);
-					y[counter] = xyz_list[indices[i]][1]+s[counter]*(xyz_list[indices[n]][1]-xyz_list[indices[i]][1]);
-					counter++;
-				}
-			}
-		}
-		else if(numiceverts==NUMVERTICES2D){ //NUMVERTICES ice vertices: calving front lies on element edge
-
-			for(int i=0;i<NUMVERTICES2D;i++){
-				if(lsf[indices[i]]==0.){
-					x[counter]=xyz_list[indices[i]][0];
-					y[counter]=xyz_list[indices[i]][1];
-					counter++;
-				}
-				if(counter==2) break;
-			}
-			if(counter==1){
-				/*We actually have only 1 vertex on levelset, write a single point as a segment*/
-				x[counter]=x[0];
-				y[counter]=y[0];
-				counter++;
-			}
-		}
-		else{
-			_error_("not sure what's going on here...");
-		}
-		x1=x[0]; y1=y[0]; x2=x[1]; y2=y[1];
-		distance=sqrt(pow((x1-x2),2)+pow((y1-y2),2));
-
-		int numthk=numiceverts+2;
-		H=xNew<IssmDouble>(numthk);
-		for(int iv=0;iv<NUMVERTICES2D;iv++) Haux[iv]=-bed[indices[iv]]; //sort bed in ice/noice
-
-		switch(numiceverts){
-			case 1: // average over triangle
-				H[0]=Haux[0];
-				H[1]=Haux[0]+s[0]*(Haux[1]-Haux[0]);
-				H[2]=Haux[0]+s[1]*(Haux[2]-Haux[0]);
-				Haverage=(H[1]+H[2])/2;
-				break;
-			case 2: // average over quadrangle
-				H[0]=Haux[0];
-				H[1]=Haux[1];
-				H[2]=Haux[0]+s[0]*(Haux[2]-Haux[0]);
-				H[3]=Haux[1]+s[1]*(Haux[2]-Haux[1]);
-				Haverage=(H[2]+H[3])/2;
-				break;
-			default:
-				_error_("Number of ice covered vertices wrong in Tria::GetIceFrontArea(void)");
-				break;
-		}
-		frontarea=distance*Haverage;
-	}
-	else return 0;
-
-	xDelete<int>(indices);
-	xDelete<IssmDouble>(H);
-
-	_assert_(frontarea>0);
+
+	/*Spawn Tria element from the base of the Penta: */
+	Tria* tria=(Tria*)SpawnTria(0,1,2);
+	IssmDouble frontarea = tria->GetIcefrontArea();
+	delete tria->material; delete tria;
+
 	return frontarea;
-}
-/*}}}*/
+}/*}}}*/
 void       Penta::GetIcefrontCoordinates(IssmDouble** pxyz_front,IssmDouble* xyz_list,int levelsetenum){/*{{{*/
 
@@ -3474,4 +3394,19 @@
 }
 /*}}}*/
+void       Penta::SetElementInput(int enum_in,IssmDouble value,int type){/*{{{*/
+
+	if(type==P0Enum){
+		this->inputs->SetPentaInput(enum_in,P0Enum,this->lid,value);
+	}
+	else if(type==P1Enum){
+		IssmDouble values[6]; 
+		for(int i=0;i<6;i++)values[i]=value;
+		int lidlist[6];
+		this->GetVerticesLidList(&lidlist[0]);
+		this->inputs->SetPentaInput(enum_in,P1Enum,6,&lidlist[0],&values[0]);
+	}
+	else _error_("interpolation type not supported yet");
+}
+/*}}}*/
 void       Penta::SetControlInputsFromVector(IssmDouble* vector,int control_enum,int control_index,int offset, int M, int N){/*{{{*/
 
@@ -3490,7 +3425,4 @@
 		if(!IsOnBase()) return;
 	}
-
-	/*Get out if this is not an element input*/
-	if(!IsInputEnum(control_enum)) return;
 
 	/*Prepare index list*/
@@ -4353,4 +4285,140 @@
 	/*Return: */
 	return Total_Smb;
+}
+/*}}}*/
+IssmDouble Penta::TotalSmbMelt(bool scaled){/*{{{*/
+
+	/*The smbmelt[Gt yr-1] of one element is area[m2] * smb [ m ice yr^-1] * rho_ice [kg m-3] / 1e+10^12 */
+	IssmDouble base,smbmelt,rho_ice,scalefactor;
+	IssmDouble Total_SmbMelt=0;
+	IssmDouble lsf[NUMVERTICES];
+	IssmDouble xyz_list[NUMVERTICES][3];
+
+	/*Get material parameters :*/
+	rho_ice=FindParam(MaterialsRhoIceEnum);
+
+	if(!IsIceInElement() || !IsOnSurface()) return 0.;
+
+	::GetVerticesCoordinates(&xyz_list[0][0],vertices,NUMVERTICES);
+
+	/*First calculate the area of the base (cross section triangle)
+	 * http://en.wikipedia.org/wiki/Triangle
+	 * base = 1/2 abs((xA-xC)(yB-yA)-(xA-xB)(yC-yA))*/
+	base = 1./2. * fabs((xyz_list[0][0]-xyz_list[2][0])*(xyz_list[1][1]-xyz_list[0][1]) - (xyz_list[0][0]-xyz_list[1][0])*(xyz_list[2][1]-xyz_list[0][1]));
+
+	/*Now get the average SMB over the element*/
+	Element::GetInputListOnVertices(&lsf[0],MaskIceLevelsetEnum);
+   if(lsf[0]*lsf[1]<=0 || lsf[0]*lsf[2]<=0 || lsf[1]*lsf[2]<=0){
+		/*Partially ice-covered element*/
+		bool mainlyice;
+      int point;
+      IssmDouble* smbmelt_vertices = xNew<IssmDouble>(NUMVERTICES);
+		IssmDouble  weights[NUMVERTICES2D];
+		IssmDouble  lsf2d[NUMVERTICES2D];
+      IssmDouble f1,f2,phi;
+      Element::GetInputListOnVertices(&smbmelt_vertices[0],SmbMeltEnum);
+		for(int i=0;i<NUMVERTICES2D;i++) lsf2d[i] = lsf[i];
+		GetFractionGeometry2D(weights,&phi,&point,&f1,&f2,&mainlyice,lsf2d);
+		smbmelt = 0.0;
+		for(int i=0;i<NUMVERTICES2D;i++) smbmelt += weights[i]*smbmelt_vertices[i];
+
+		if(scaled==true){
+         IssmDouble* scalefactor_vertices   = xNew<IssmDouble>(NUMVERTICES);
+         Element::GetInputListOnVertices(&scalefactor_vertices[0],MeshScaleFactorEnum);
+         /*Compute loop only over lower vertices: i<NUMVERTICES2D*/
+         scalefactor = 0.0;
+         for(int i=0;i<NUMVERTICES2D;i++) scalefactor += weights[i]/phi*scalefactor_vertices[i];
+         xDelete<IssmDouble>(scalefactor_vertices);
+		}
+		else scalefactor = 1.0;
+
+		/*Cleanup*/
+      xDelete<IssmDouble>(smbmelt_vertices);
+	}
+
+	else{
+		/*Fully ice-covered element*/
+		Input* smbmelt_input = this->GetInput(SmbMeltEnum); _assert_(smbmelt_input);
+		smbmelt_input->GetInputAverage(&smbmelt);
+
+		if(scaled==true){
+			Input* scalefactor_input = this->GetInput(MeshScaleFactorEnum); _assert_(scalefactor_input);
+			scalefactor_input->GetInputAverage(&scalefactor);// average scalefactor on element
+		}
+		else scalefactor=1.0;
+	}
+
+	Total_SmbMelt=rho_ice*base*smbmelt*scalefactor;// smbmelt on element in kg s-1
+
+	/*Return: */
+	return Total_SmbMelt;
+}
+/*}}}*/
+IssmDouble Penta::TotalSmbRefreeze(bool scaled){/*{{{*/
+
+	/*The smbrefreeze[Gt yr-1] of one element is area[m2] * smb [ m ice yr^-1] * rho_ice [kg m-3] / 1e+10^12 */
+	IssmDouble base,smbrefreeze,rho_ice,scalefactor;
+	IssmDouble Total_SmbRefreeze=0;
+	IssmDouble lsf[NUMVERTICES];
+	IssmDouble xyz_list[NUMVERTICES][3];
+
+	/*Get material parameters :*/
+	rho_ice=FindParam(MaterialsRhoIceEnum);
+
+	if(!IsIceInElement() || !IsOnSurface()) return 0.;
+
+	::GetVerticesCoordinates(&xyz_list[0][0],vertices,NUMVERTICES);
+
+	/*First calculate the area of the base (cross section triangle)
+	 * http://en.wikipedia.org/wiki/Triangle
+	 * base = 1/2 abs((xA-xC)(yB-yA)-(xA-xB)(yC-yA))*/
+	base = 1./2. * fabs((xyz_list[0][0]-xyz_list[2][0])*(xyz_list[1][1]-xyz_list[0][1]) - (xyz_list[0][0]-xyz_list[1][0])*(xyz_list[2][1]-xyz_list[0][1]));
+
+	/*Now get the average SMB over the element*/
+	Element::GetInputListOnVertices(&lsf[0],MaskIceLevelsetEnum);
+   if(lsf[0]*lsf[1]<=0 || lsf[0]*lsf[2]<=0 || lsf[1]*lsf[2]<=0){
+		/*Partially ice-covered element*/
+		bool mainlyice;
+      int point;
+      IssmDouble* smbrefreeze_vertices = xNew<IssmDouble>(NUMVERTICES);
+		IssmDouble  weights[NUMVERTICES2D];
+		IssmDouble  lsf2d[NUMVERTICES2D];
+      IssmDouble f1,f2,phi;
+      Element::GetInputListOnVertices(&smbrefreeze_vertices[0],SmbRefreezeEnum);
+		for(int i=0;i<NUMVERTICES2D;i++) lsf2d[i] = lsf[i];
+		GetFractionGeometry2D(weights,&phi,&point,&f1,&f2,&mainlyice,lsf2d);
+		smbrefreeze = 0.0;
+		for(int i=0;i<NUMVERTICES2D;i++) smbrefreeze += weights[i]*smbrefreeze_vertices[i];
+
+		if(scaled==true){
+         IssmDouble* scalefactor_vertices   = xNew<IssmDouble>(NUMVERTICES);
+         Element::GetInputListOnVertices(&scalefactor_vertices[0],MeshScaleFactorEnum);
+         /*Compute loop only over lower vertices: i<NUMVERTICES2D*/
+         scalefactor = 0.0;
+         for(int i=0;i<NUMVERTICES2D;i++) scalefactor += weights[i]/phi*scalefactor_vertices[i];
+         xDelete<IssmDouble>(scalefactor_vertices);
+		}
+		else scalefactor = 1.0;
+
+		/*Cleanup*/
+      xDelete<IssmDouble>(smbrefreeze_vertices);
+	}
+
+	else{
+		/*Fully ice-covered element*/
+		Input* smbrefreeze_input = this->GetInput(SmbRefreezeEnum); _assert_(smbrefreeze_input);
+		smbrefreeze_input->GetInputAverage(&smbrefreeze);
+
+		if(scaled==true){
+			Input* scalefactor_input = this->GetInput(MeshScaleFactorEnum); _assert_(scalefactor_input);
+			scalefactor_input->GetInputAverage(&scalefactor);// average scalefactor on element
+		}
+		else scalefactor=1.0;
+	}
+
+	Total_SmbRefreeze=rho_ice*base*smbrefreeze*scalefactor;// smbrefreeze on element in kg s-1
+
+	/*Return: */
+	return Total_SmbRefreeze;
 }
 /*}}}*/
Index: /issm/trunk/src/c/classes/Elements/Penta.h
===================================================================
--- /issm/trunk/src/c/classes/Elements/Penta.h	(revision 28012)
+++ /issm/trunk/src/c/classes/Elements/Penta.h	(revision 28013)
@@ -64,4 +64,5 @@
 		void           ComputeSigmaNN(){_error_("not implemented yet");};
 		void           ComputeStressTensor();
+		//void           ComputeMeanEla(IssmDouble* paltitude, int* pcounter);
 		void           Configure(Elements* elements,Loads* loads,Nodes* nodes,Vertices* vertices,Materials* materials,Parameters* parameters,Inputs* inputsin);
 		void           ControlInputSetGradient(IssmDouble* gradient,int enum_type,int control_index,int offset,int M,int N,int interp);
@@ -175,5 +176,5 @@
 		void           ResetHooks();
 		void           SetElementInput(int enum_in,IssmDouble value);
-		void           SetElementInput(int enum_in,IssmDouble value,int type){_error_("not implemented yet");};
+		void           SetElementInput(int enum_in,IssmDouble value,int type);
 		void           SetElementInput(Inputs* inputs,int enum_in,IssmDouble value);
 		void           SetElementInput(Inputs* inputs,int enum_in,IssmDouble value,int type){_error_("not implemented yet");};
@@ -200,4 +201,6 @@
 		IssmDouble     TotalGroundedBmb(bool scaled);
 		IssmDouble     TotalSmb(bool scaled);
+		IssmDouble     TotalSmbMelt(bool scaled);
+		IssmDouble     TotalSmbRefreeze(bool scaled);
 		void           Update(Inputs* inputs,int index, IoModel* iomodel,int analysis_counter,int analysis_type,int finitelement);
 		void           UpdateConstraintsExtrudeFromBase(void);
Index: /issm/trunk/src/c/classes/Elements/PentaRef.cpp
===================================================================
--- /issm/trunk/src/c/classes/Elements/PentaRef.cpp	(revision 28012)
+++ /issm/trunk/src/c/classes/Elements/PentaRef.cpp	(revision 28013)
@@ -307,4 +307,5 @@
 			basis[6]=27.*gauss->coord1*gauss->coord2*gauss->coord3*(1.+zeta)*(1.-zeta);
 			return;
+			#ifndef _HAVE_AD_
 		case P2xP1Enum:
 			/*Corner nodes*/
@@ -459,4 +460,5 @@
 			basis[14]=gauss->coord3*(-8./3.)*(zeta-1.0)*zeta*(zeta+0.5)*(zeta+1.);
 			return;
+			#endif
 		default:
 			_error_("Element type "<<EnumToStringx(finiteelement)<<" not supported yet");
@@ -571,4 +573,5 @@
 			dbasis[NUMNODESP1b*2+6] = -54*gauss->coord1*gauss->coord2*gauss->coord3*zeta;
 			return;
+			#ifndef _HAVE_AD_
 		case P2xP1Enum:
 			/*Nodal function 1*/
@@ -1057,4 +1060,5 @@
 
 			return;
+			#endif
 		default:
 			_error_("Element type "<<EnumToStringx(finiteelement)<<" not supported yet");
Index: /issm/trunk/src/c/classes/Elements/Seg.h
===================================================================
--- /issm/trunk/src/c/classes/Elements/Seg.h	(revision 28012)
+++ /issm/trunk/src/c/classes/Elements/Seg.h	(revision 28013)
@@ -158,4 +158,6 @@
 		IssmDouble  TotalGroundedBmb(bool scaled){_error_("not implemented yet");};
 		IssmDouble  TotalSmb(bool scaled){_error_("not implemented yet");};
+		IssmDouble  TotalSmbMelt(bool scaled){_error_("not implemented yet");};
+		IssmDouble  TotalSmbRefreeze(bool scaled){_error_("not implemented yet");};
 		void        Update(Inputs* inputs,int index, IoModel* iomodel,int analysis_counter,int analysis_type,int finitelement){_error_("not implemented yet");};
 		void        UpdateConstraintsExtrudeFromBase(){_error_("not implemented");};
Index: /issm/trunk/src/c/classes/Elements/Tetra.h
===================================================================
--- /issm/trunk/src/c/classes/Elements/Tetra.h	(revision 28012)
+++ /issm/trunk/src/c/classes/Elements/Tetra.h	(revision 28013)
@@ -167,4 +167,6 @@
 		IssmDouble  TotalGroundedBmb(bool scaled){_error_("not implemented yet");};
 		IssmDouble  TotalSmb(bool scaled){_error_("not implemented yet");};
+		IssmDouble  TotalSmbMelt(bool scaled){_error_("not implemented yet");};
+		IssmDouble  TotalSmbRefreeze(bool scaled){_error_("not implemented yet");};
 		void        Update(Inputs* inputs,int index, IoModel* iomodel,int analysis_counter,int analysis_type,int finitelement);
 		void        UpdateConstraintsExtrudeFromBase(){_error_("not implemented");};
Index: /issm/trunk/src/c/classes/Elements/Tria.cpp
===================================================================
--- /issm/trunk/src/c/classes/Elements/Tria.cpp	(revision 28012)
+++ /issm/trunk/src/c/classes/Elements/Tria.cpp	(revision 28013)
@@ -29,5 +29,5 @@
 #define NUMVERTICES   3
 #define NUMVERTICES1D 2
-//#define MICI          1 //1 = DeConto & Pollard, 2 = DOMINOS
+//#define MICI          0 //1 = DeConto & Pollard, 2 = Anna Crawford DOMINOS
 
 /*Constructors/destructor/copy*/
@@ -345,4 +345,68 @@
 		smax_fl_input->GetInputValue(&sigma_max_floating,&gauss);
 		smax_gr_input->GetInputValue(&sigma_max_grounded,&gauss);
+		sl_input->GetInputValue(&sealevel,&gauss);
+
+		/*Tensile stress threshold*/
+		if(groundedice<0)
+		 sigma_max = sigma_max_floating;
+		else
+		 sigma_max = sigma_max_grounded;
+
+		/*Assign values*/
+		if(bed>sealevel){
+			calvingrate[iv] = 0.;
+		}
+		else{
+			calvingrate[iv] = sqrt(vx*vx+vy*vy)*sigma_vm/sigma_max;
+		}
+	}
+
+	/*Add input*/
+	this->AddInput(CalvingCalvingrateEnum,&calvingrate[0],P1DGEnum);
+   this->CalvingRateToVector();
+}
+/*}}}*/
+void       Tria::CalvingRateVonmisesAD(){/*{{{*/
+
+	/*First, compute Von Mises Stress*/
+	this->ComputeSigmaVM();
+
+	/*Now compute calving rate*/
+	IssmDouble  calvingrate[NUMVERTICES];
+	IssmDouble  sigma_vm,vx,vy;
+	IssmDouble  sigma_max,sigma_max_floating,sigma_max_grounded,n;
+	IssmDouble  groundedice,bed,sealevel;
+	int M;
+	int basinid;
+	IssmDouble* sigma_max_floating_basin=NULL;
+	IssmDouble* sigma_max_grounded_basin=NULL;
+
+	/*Retrieve all inputs and parameters we will need*/
+	Input* vx_input       = this->GetInput(VxEnum); _assert_(vx_input);
+	Input* vy_input       = this->GetInput(VyEnum); _assert_(vy_input);
+	Input* gr_input       = this->GetInput(MaskOceanLevelsetEnum); _assert_(gr_input);
+	Input* bs_input       = this->GetInput(BaseEnum);                    _assert_(bs_input);
+	Input* sl_input       = this->GetInput(SealevelEnum); _assert_(sl_input);
+	Input* sigma_vm_input = this->GetInput(SigmaVMEnum); _assert_(sigma_vm_input);
+
+	this->Element::GetInputValue(&basinid,CalvingBasinIdEnum);
+
+	parameters->FindParam(&sigma_max_floating_basin,&M,CalvingADStressThresholdFloatingiceEnum);
+	parameters->FindParam(&sigma_max_grounded_basin,&M,CalvingADStressThresholdGroundediceEnum);
+
+	sigma_max_floating = sigma_max_floating_basin[basinid];
+	sigma_max_grounded = sigma_max_grounded_basin[basinid];
+
+	/* Start looping on the number of vertices: */
+	GaussTria gauss;
+	for(int iv=0;iv<NUMVERTICES;iv++){
+		gauss.GaussVertex(iv);
+
+		/*Get velocity components and thickness*/
+		sigma_vm_input->GetInputValue(&sigma_vm,&gauss);
+		vx_input->GetInputValue(&vx,&gauss);
+		vy_input->GetInputValue(&vy,&gauss);
+		gr_input->GetInputValue(&groundedice,&gauss);
+		bs_input->GetInputValue(&bed,&gauss);
 		sl_input->GetInputValue(&sealevel,&gauss);
 
@@ -1054,4 +1118,84 @@
 	/*Clean up and return*/
 	delete gauss;
+}
+/*}}}*/
+void       Tria::CalvingRateCalvingMIP(){/*{{{*/
+
+	IssmDouble  calvingrate[NUMVERTICES];
+	IssmDouble  calvingratex[NUMVERTICES];
+	IssmDouble  calvingratey[NUMVERTICES];
+	int			experiment = 1;  /* exp:1 by default */
+	int         dim, domaintype;
+	IssmDouble	vx, vy, vel, c, wrate;
+	IssmDouble  time, groundedice, yts;
+
+	/*Get problem dimension and whether there is moving front or not*/
+	this->FindParam(&domaintype,DomainTypeEnum);
+	this->FindParam(&time,TimeEnum);
+	this->FindParam(&yts,ConstantsYtsEnum);
+
+	switch(domaintype){
+		case Domain2DverticalEnum:   dim = 1; break;
+		case Domain2DhorizontalEnum: dim = 2; break;
+		case Domain3DEnum:           dim = 2; break;
+		default: _error_("mesh "<<EnumToStringx(domaintype)<<" not supported yet");
+	}
+	if(dim==1) _error_("not implemented in 1D...");
+
+	/*Retrieve all inputs and parameters we will need*/
+	Input *vx_input      = this->GetInput(VxEnum);                                _assert_(vx_input);
+	Input *vy_input      = this->GetInput(VyEnum);                                _assert_(vy_input);
+	Input *wrate_input   = this->GetInput(CalvingAblationrateEnum);               _assert_(wrate_input); 
+	Input* gr_input      = this->GetInput(MaskOceanLevelsetEnum);						_assert_(gr_input);
+
+	/* Use which experiment: use existing Enum */
+	this->FindParam(&experiment, CalvingUseParamEnum);
+
+	/* Start looping on the number of vertices: */
+	GaussTria gauss;
+	for(int iv=0;iv<NUMVERTICES;iv++){
+		gauss.GaussVertex(iv);
+
+		/*Get velocity components */
+		vx_input->GetInputValue(&vx,&gauss);
+		vy_input->GetInputValue(&vy,&gauss);
+		vel=sqrt(vx*vx+vy*vy)+1.e-14;
+
+		/* no calving for grounded ice in EXP4 */
+		gr_input->GetInputValue(&groundedice,&gauss);
+
+		switch (experiment) { 
+			case 1:
+			case 3:
+				/* Exp 1 and 3: set c=v-wrate, wrate=0, so that w=0 */
+				wrate = 0.0;
+				break;
+			case 2:
+				/* Exp 2: set c=v-wrate(given)*/
+				wrate_input->GetInputValue(&wrate,&gauss);
+				break;
+			case 4:
+				/* Exp 4: set c=v-wrate(given), for the first 500 years, then c=0 for the second 500 years*/
+				if((groundedice<0) && (time<=500.0*yts)) {
+				//	wrate_input->GetInputValue(&wrate,&gauss);
+					wrate = -750*sin(2.0*M_PI*time/yts/1000)/yts;  // m/a -> m/s
+				}
+				else {
+					/* no calving on the grounded ice*/
+					wrate = vel;
+				}
+				break;
+			default:
+				_error_("The experiment is not supported yet!");
+		}
+
+		calvingrate[iv] = vel - wrate;
+		calvingratex[iv] = vx - wrate*vx/vel;
+		calvingratey[iv] = vy - wrate*vy/vel;
+	}
+	/*Add input*/
+	this->AddInput(CalvingCalvingrateEnum,&calvingrate[0],P1DGEnum);
+	this->AddInput(CalvingratexEnum,&calvingratex[0],P1DGEnum);
+	this->AddInput(CalvingrateyEnum,&calvingratey[0],P1DGEnum);
 }
 /*}}}*/
@@ -2628,13 +2772,13 @@
 IssmDouble Tria::GetIcefrontArea(){/*{{{*/
 
-	IssmDouble  bed[NUMVERTICES]; //basinId[NUMVERTICES];
+	IssmDouble  bed[NUMVERTICES];
 	IssmDouble	Haverage,frontarea;
 	IssmDouble  x1,y1,x2,y2,distance;
 	IssmDouble lsf[NUMVERTICES], Haux[NUMVERTICES], surfaces[NUMVERTICES], bases[NUMVERTICES];
 	int* indices=NULL;
-	IssmDouble* H=NULL;;
-	int nrfrontbed,numiceverts;
-
+
+	/*Return if no ice front present*/
 	if(!IsZeroLevelset(MaskIceLevelsetEnum)) return 0;
+	//if(!this->IsIcefront()) return 0.;
 
 	/*Retrieve all inputs and parameters*/
@@ -2644,81 +2788,87 @@
 	Element::GetInputListOnVertices(&lsf[0],MaskIceLevelsetEnum);
 
-	nrfrontbed=0;
-	for(int i=0;i<NUMVERTICES;i++){
-		/*Find if bed<0*/
-		if(bed[i]<0.) nrfrontbed++;
-	}
-
-	if(nrfrontbed==3){
-		/*2. Find coordinates of where levelset crosses 0*/
-		int         numiceverts;
-		IssmDouble  s[2],x[2],y[2];
-		this->GetLevelsetIntersection(&indices, &numiceverts,&s[0],MaskIceLevelsetEnum,0.);
-		_assert_(numiceverts);
-
-		/*3 Write coordinates*/
-		IssmDouble  xyz_list[NUMVERTICES][3];
-		::GetVerticesCoordinates(&xyz_list[0][0],this->vertices,NUMVERTICES);
-		int counter = 0;
-		if((numiceverts>0) && (numiceverts<NUMVERTICES)){
-			for(int i=0;i<numiceverts;i++){
-				for(int n=numiceverts;n<NUMVERTICES;n++){ // iterate over no-ice vertices
-					x[counter] = xyz_list[indices[i]][0]+s[counter]*(xyz_list[indices[n]][0]-xyz_list[indices[i]][0]);
-					y[counter] = xyz_list[indices[i]][1]+s[counter]*(xyz_list[indices[n]][1]-xyz_list[indices[i]][1]);
-					counter++;
+	/*Only continue if all 3 vertices are below sea level*/
+	for(int i=0;i<NUMVERTICES;i++) if(bed[i]>=0.) return 0.;
+
+	/*2. Find coordinates of where levelset crosses 0*/
+	int         numiceverts;
+	IssmDouble  s[2],x[2],y[2];
+	this->GetLevelsetIntersection(&indices, &numiceverts, &s[0],MaskIceLevelsetEnum,0.);
+	_assert_(numiceverts);
+	if(numiceverts>2){
+		Input* ls_input = this->GetInput(MaskIceLevelsetEnum);
+		ls_input->Echo();
+	}
+
+	/*3 Write coordinates*/
+	IssmDouble  xyz_list[NUMVERTICES][3];
+	::GetVerticesCoordinates(&xyz_list[0][0],this->vertices,NUMVERTICES);
+	int counter = 0;
+	if((numiceverts>0) && (numiceverts<NUMVERTICES)){
+		for(int i=0;i<numiceverts;i++){
+			for(int n=numiceverts;n<NUMVERTICES;n++){ // iterate over no-ice vertices
+				x[counter] = xyz_list[indices[i]][0]+s[counter]*(xyz_list[indices[n]][0]-xyz_list[indices[i]][0]);
+				y[counter] = xyz_list[indices[i]][1]+s[counter]*(xyz_list[indices[n]][1]-xyz_list[indices[i]][1]);
+				counter++;
+			}
+		}
+	}
+	else if(numiceverts==NUMVERTICES){ //NUMVERTICES ice vertices: calving front lies on element edge
+
+		for(int i=0;i<NUMVERTICES;i++){
+			if(lsf[indices[i]]==0.){
+				x[counter]=xyz_list[indices[i]][0];
+				y[counter]=xyz_list[indices[i]][1];
+				counter++;
+			}
+			if(counter==2) break;
+		}
+		if(counter==1){
+			/*We actually have only 1 vertex on levelset, write a single point as a segment*/
+			x[counter]=x[0];
+			y[counter]=y[0];
+			counter++;
+		}
+	}
+	else{
+		_error_("not sure what's going on here...");
+	}
+	x1=x[0]; y1=y[0]; x2=x[1]; y2=y[1];
+	distance=sqrt(pow((x1-x2),2)+pow((y1-y2),2));
+	if(distance<1e-3) return 0.;
+
+	IssmDouble H[4];
+	for(int iv=0;iv<NUMVERTICES;iv++) Haux[iv]=-bed[indices[iv]]; //sort bed in ice/noice
+	xDelete<int>(indices);
+
+	switch(numiceverts){
+		case 1: // average over triangle
+			H[0]=Haux[0];
+			H[1]=Haux[0]+s[0]*(Haux[1]-Haux[0]);
+			H[2]=Haux[0]+s[1]*(Haux[2]-Haux[0]);
+			Haverage=(H[1]+H[2])/2;
+			break;
+		case 2: // average over quadrangle
+			H[0]=Haux[0];
+			H[1]=Haux[1];
+			H[2]=Haux[0]+s[0]*(Haux[2]-Haux[0]);
+			H[3]=Haux[1]+s[1]*(Haux[2]-Haux[1]);
+			Haverage=(H[2]+H[3])/2;
+			break;
+		case 3:
+			if(counter==1) distance = 0; //front has 0 width on this element because levelset is 0 at a single vertex
+			else if(counter==2){ //two vertices with levelset=0: averaging ice front depth over both
+				Haverage = 0;
+				for(int i=0;i<NUMVERTICES;i++){
+					if(lsf[indices[i]]==0.) Haverage -= Haux[indices[i]]/2;
+					if(Haverage<Haux[indices[i]]/2-1e-3) break; //done with the two vertices
 				}
 			}
-		}
-		else if(numiceverts==NUMVERTICES){ //NUMVERTICES ice vertices: calving front lies on element edge
-
-			for(int i=0;i<NUMVERTICES;i++){
-				if(lsf[indices[i]]==0.){
-					x[counter]=xyz_list[indices[i]][0];
-					y[counter]=xyz_list[indices[i]][1];
-					counter++;
-				}
-				if(counter==2) break;
-			}
-			if(counter==1){
-				/*We actually have only 1 vertex on levelset, write a single point as a segment*/
-				x[counter]=x[0];
-				y[counter]=y[0];
-				counter++;
-			}
-		}
-		else{
-			_error_("not sure what's going on here...");
-		}
-		x1=x[0]; y1=y[0]; x2=x[1]; y2=y[1];
-		distance=sqrt(pow((x1-x2),2)+pow((y1-y2),2));
-
-		int numthk=numiceverts+2;
-		H=xNew<IssmDouble>(numthk);
-		for(int iv=0;iv<NUMVERTICES;iv++) Haux[iv]=-bed[indices[iv]]; //sort bed in ice/noice
-
-		switch(numiceverts){
-			case 1: // average over triangle
-				H[0]=Haux[0];
-				H[1]=Haux[0]+s[0]*(Haux[1]-Haux[0]);
-				H[2]=Haux[0]+s[1]*(Haux[2]-Haux[0]);
-				Haverage=(H[1]+H[2])/2;
-				break;
-			case 2: // average over quadrangle
-				H[0]=Haux[0];
-				H[1]=Haux[1];
-				H[2]=Haux[0]+s[0]*(Haux[2]-Haux[0]);
-				H[3]=Haux[1]+s[1]*(Haux[2]-Haux[1]);
-				Haverage=(H[2]+H[3])/2;
-				break;
-			default:
-				_error_("Number of ice covered vertices wrong in Tria::GetIceFrontArea(void)");
-				break;
-		}
-		frontarea=distance*Haverage;
-	}
-	else return 0;
-
-	xDelete<int>(indices);
-	xDelete<IssmDouble>(H);
+			break;
+		default:
+			_error_("Number of ice covered vertices wrong in Tria::GetIceFrontArea(void)");
+			break;
+	}
+	frontarea=distance*Haverage;
 
 	_assert_(frontarea>0);
@@ -4414,8 +4564,10 @@
 		case DefaultCalvingEnum:
 		case CalvingVonmisesEnum:
+		case CalvingVonmisesADEnum:
 		case CalvingLevermannEnum:
 		case CalvingPollardEnum:
 		case CalvingTestEnum:
 		case CalvingParameterizationEnum:
+		case CalvingCalvingMIPEnum:
 			calvingratex_input=this->GetInput(CalvingratexEnum); _assert_(calvingratex_input);
 			calvingratey_input=this->GetInput(CalvingrateyEnum); _assert_(calvingratey_input);
@@ -4458,8 +4610,10 @@
 			case DefaultCalvingEnum:
 			case CalvingVonmisesEnum:
+			case CalvingVonmisesADEnum:
 			case CalvingTestEnum:
 			case CalvingParameterizationEnum:
 			case CalvingLevermannEnum:
 			case CalvingPollardEnum:
+			case CalvingCalvingMIPEnum:
 				calvingratex_input->GetInputValue(&c[0],&gauss);
 				calvingratey_input->GetInputValue(&c[1],&gauss);
@@ -4575,6 +4729,11 @@
 
 		/*Do we assume that the calving front does not move if MICI is not engaged?*/
-		movingfrontvx[iv] = 0.;
-		movingfrontvy[iv] = 0.;
+		bool regrowth = false;
+		bool apply_as_retreat = true;
+		if(!regrowth){
+			movingfrontvx[iv] = 0.;
+			movingfrontvy[iv] = 0.;
+		}
+
 		//movingfrontvx[iv] = -2000./(365*24*3600.)*dlsf[0]/norm_dlsf;
 		//movingfrontvy[iv] = -2000./(365*24*3600.)*dlsf[1]/norm_dlsf;
@@ -4588,4 +4747,8 @@
 		}
 		else if (MICI==2 && Hc>135. && bed<0. && fabs(ls)<100.e3){ // Crawford et all
+
+			/*if 1: RETREAT rate
+			 *if 0: calving rate*/
+			if(0) v[0]=0.; v[1]=0.;
 
 			/*5C Bn (worst case scenario)*/
@@ -4597,4 +4760,10 @@
 			movingfrontvx[iv] = v[0] -C*dlsf[0]/norm_dlsf;
 			movingfrontvy[iv] = v[1] -C*dlsf[1]/norm_dlsf;
+
+			/*disable regrowth if calving rate is too low*/
+			if(!regrowth && C<vel){
+				movingfrontvx[iv] = 0.;
+				movingfrontvy[iv] = 0.;
+			}
 		}
 	}
@@ -4966,7 +5135,4 @@
 		}
 	}
-
-	/*Get out if this is not an element input*/
-	if(!IsInputEnum(control_enum)) return;
 
 	/*Get list of ids for this element and this control*/
@@ -5721,4 +5887,136 @@
 	/*Return: */
 	return Total_Smb;
+}
+/*}}}*/
+IssmDouble Tria::TotalSmbMelt(bool scaled){/*{{{*/
+
+	/*The smbmelt[kg yr-1] of one element is area[m2] * smbmelt [kg m^-2 yr^-1]*/
+	IssmDouble base,smbmelt,rho_ice,scalefactor;
+	IssmDouble Total_Melt=0;
+	IssmDouble lsf[NUMVERTICES];
+	IssmDouble xyz_list[NUMVERTICES][3];
+
+	/*Get material parameters :*/
+	rho_ice=FindParam(MaterialsRhoIceEnum);
+
+   if(!IsIceInElement())return 0;
+
+	::GetVerticesCoordinates(&xyz_list[0][0],vertices,NUMVERTICES);
+
+	/*First calculate the area of the base (cross section triangle)
+	 * http://en.wikipedia.org/wiki/Triangle
+	 * base = 1/2 abs((xA-xC)(yB-yA)-(xA-xB)(yC-yA))*/
+	base = 1./2. * fabs((xyz_list[0][0]-xyz_list[2][0])*(xyz_list[1][1]-xyz_list[0][1]) - (xyz_list[0][0]-xyz_list[1][0])*(xyz_list[2][1]-xyz_list[0][1]));	// area of element in m2
+	
+	/*Now get the average SMB over the element*/
+	Element::GetInputListOnVertices(&lsf[0],MaskIceLevelsetEnum);
+	if(lsf[0]*lsf[1]<=0 || lsf[0]*lsf[2]<=0 || lsf[1]*lsf[2]<=0){
+		/*Partially ice-covered element*/
+		bool mainlyice;
+      int point;
+      IssmDouble* weights       = xNew<IssmDouble>(NUMVERTICES);
+      IssmDouble* smbmelt_vertices  = xNew<IssmDouble>(NUMVERTICES);
+      IssmDouble f1,f2,phi;
+		
+		Element::GetInputListOnVertices(&smbmelt_vertices[0],SmbMeltEnum);
+		GetFractionGeometry(weights,&phi,&point,&f1,&f2,&mainlyice,lsf);
+		smbmelt = 0.0;
+		for(int i=0;i<NUMVERTICES;i++) smbmelt += weights[i]*smbmelt_vertices[i];
+	
+		if(scaled==true){
+         IssmDouble* scalefactor_vertices = xNew<IssmDouble>(NUMVERTICES);
+         Element::GetInputListOnVertices(&scalefactor_vertices[0],MeshScaleFactorEnum);
+         scalefactor = 0.0;
+         for(int i=0;i<NUMVERTICES;i++) scalefactor += weights[i]/phi*scalefactor_vertices[i];
+         xDelete<IssmDouble>(scalefactor_vertices);
+      }
+		else scalefactor = 1.0;
+
+		/*Cleanup*/
+      xDelete<IssmDouble>(weights);
+      xDelete<IssmDouble>(smbmelt_vertices);
+	}
+	else{
+		/*Fully ice-covered element*/
+		Input* smbmelt_input = this->GetInput(SmbMeltEnum); _assert_(smbmelt_input);
+		smbmelt_input->GetInputAverage(&smbmelt);   // average smbmelt on element in m ice s-1
+
+		if(scaled==true){
+			Input* scalefactor_input = this->GetInput(MeshScaleFactorEnum); _assert_(scalefactor_input);
+			scalefactor_input->GetInputAverage(&scalefactor);// average scalefactor on element
+		}
+		else scalefactor=1.0;
+	}
+	
+   Total_Melt=rho_ice*base*smbmelt*scalefactor;	// smbmelt on element in kg s-1
+
+	/*Return: */
+	return Total_Melt;
+}
+/*}}}*/
+IssmDouble Tria::TotalSmbRefreeze(bool scaled){/*{{{*/
+
+	/*The smb[kg yr-1] of one element is area[m2] * smb [kg m^-2 yr^-1]*/
+	IssmDouble base,smbrefreeze,rho_ice,scalefactor;
+	IssmDouble Total_Refreeze=0;
+	IssmDouble lsf[NUMVERTICES];
+	IssmDouble xyz_list[NUMVERTICES][3];
+
+	/*Get material parameters :*/
+	rho_ice=FindParam(MaterialsRhoIceEnum);
+
+   if(!IsIceInElement())return 0;
+
+	::GetVerticesCoordinates(&xyz_list[0][0],vertices,NUMVERTICES);
+
+	/*First calculate the area of the base (cross section triangle)
+	 * http://en.wikipedia.org/wiki/Triangle
+	 * base = 1/2 abs((xA-xC)(yB-yA)-(xA-xB)(yC-yA))*/
+	base = 1./2. * fabs((xyz_list[0][0]-xyz_list[2][0])*(xyz_list[1][1]-xyz_list[0][1]) - (xyz_list[0][0]-xyz_list[1][0])*(xyz_list[2][1]-xyz_list[0][1]));	// area of element in m2
+	
+	/*Now get the average SMB over the element*/
+	Element::GetInputListOnVertices(&lsf[0],MaskIceLevelsetEnum);
+	if(lsf[0]*lsf[1]<=0 || lsf[0]*lsf[2]<=0 || lsf[1]*lsf[2]<=0){
+		/*Partially ice-covered element*/
+		bool mainlyice;
+      int point;
+      IssmDouble* weights       = xNew<IssmDouble>(NUMVERTICES);
+      IssmDouble* smbrefreeze_vertices  = xNew<IssmDouble>(NUMVERTICES);
+      IssmDouble f1,f2,phi;
+		
+		Element::GetInputListOnVertices(&smbrefreeze_vertices[0],SmbRefreezeEnum);
+		GetFractionGeometry(weights,&phi,&point,&f1,&f2,&mainlyice,lsf);
+		smbrefreeze = 0.0;
+		for(int i=0;i<NUMVERTICES;i++) smbrefreeze += weights[i]*smbrefreeze_vertices[i];
+	
+		if(scaled==true){
+         IssmDouble* scalefactor_vertices = xNew<IssmDouble>(NUMVERTICES);
+         Element::GetInputListOnVertices(&scalefactor_vertices[0],MeshScaleFactorEnum);
+         scalefactor = 0.0;
+         for(int i=0;i<NUMVERTICES;i++) scalefactor += weights[i]/phi*scalefactor_vertices[i];
+         xDelete<IssmDouble>(scalefactor_vertices);
+      }
+		else scalefactor = 1.0;
+
+		/*Cleanup*/
+      xDelete<IssmDouble>(weights);
+      xDelete<IssmDouble>(smbrefreeze_vertices);
+	}
+	else{
+		/*Fully ice-covered element*/
+		Input* smbrefreeze_input = this->GetInput(SmbRefreezeEnum); _assert_(smbrefreeze_input);
+		smbrefreeze_input->GetInputAverage(&smbrefreeze);   // average smbrefreeze on element in m ice s-1
+
+		if(scaled==true){
+			Input* scalefactor_input = this->GetInput(MeshScaleFactorEnum); _assert_(scalefactor_input);
+			scalefactor_input->GetInputAverage(&scalefactor);// average scalefactor on element
+		}
+		else scalefactor=1.0;
+	}
+	
+   Total_Refreeze=rho_ice*base*smbrefreeze*scalefactor;	// smbrefreeze on element in kg s-1
+
+	/*Return: */
+	return Total_Refreeze;
 }
 /*}}}*/
@@ -7215,5 +7513,5 @@
 	barycontrib->Set(this->Sid(),bslcice,bslchydro,bslcbp);
 
-	/*Free ressources*/
+	/*Free resources*/
 	xDelete<IssmDouble>(areae);
 
@@ -7590,5 +7888,5 @@
 		b=i*nt;
 		c=av*nel;
-		for (int ae=0;ae<loads->nactiveloads;ae++){
+		for(ae=0;ae<loads->nactiveloads;ae++){
 			e=loads->combined_loads_index[ae];
 			a=AlphaIndex[c+e]*viscousnumsteps;
@@ -7695,5 +7993,5 @@
 		//get projection
 		if (spatial_component==1){ //north
-			for (int ae=0;ae<loads->nactiveloads;ae++){
+			for(ae=0;ae<loads->nactiveloads;ae++){
 				e=loads->combined_loads_index[ae];
 				horiz_projection[ae]=cos(2.0*M_PI*reCast<IssmDouble,int>(AzimIndex[av*nel+e])/65535.0); // 65535=2^16-1 = max value of 16 bits unsigned int
@@ -7701,5 +7999,5 @@
 			for(l=0;l<SLGEOM_NUMLOADS;l++){
 				nbar=slgeom->nbar[l];
-				for (ae=0;ae<loads->nactivesubloads[l];ae++){
+				for(ae=0;ae<loads->nactivesubloads[l];ae++){
 					e=loads->combined_subloads_index[l][ae];
 					horiz_projectionsub[l][ae]=cos(2.0*M_PI*reCast<IssmDouble,int>(AzimIndexsub[l][av*nbar+e])/65535.0);
@@ -7708,5 +8006,5 @@
 		}
 		else if (spatial_component==2){ //east
-			for (int ae=0;ae<loads->nactiveloads;ae++){
+			for(ae=0;ae<loads->nactiveloads;ae++){
 				e=loads->combined_loads_index[ae];
 				horiz_projection[ae]=sin(2.0*M_PI*reCast<IssmDouble,int>(AzimIndex[av*nel+e])/65535.0);
@@ -7714,5 +8012,5 @@
 			for(l=0;l<SLGEOM_NUMLOADS;l++){
 				nbar=slgeom->nbar[l];
-				for (ae=0;ae<loads->nactivesubloads[l];ae++){
+				for(ae=0;ae<loads->nactivesubloads[l];ae++){
 					e=loads->combined_subloads_index[l][ae];
 					horiz_projectionsub[l][ae]=sin(2.0*M_PI*reCast<IssmDouble,int>(AzimIndexsub[l][av*nbar+e])/65535.0);
@@ -7722,10 +8020,10 @@
 
 		//project load in the right direction 
-		for (int ae=0;ae<loads->nactiveloads;ae++){
+		for (ae=0;ae<loads->nactiveloads;ae++){
 			projected_loads[ae]=loads->combined_loads[ae]*horiz_projection[ae];
 		}
 		for(l=0;l<SLGEOM_NUMLOADS;l++){
 			nbar=slgeom->nbar[l];
-			for (ae=0;ae<loads->nactivesubloads[l];ae++){
+			for(ae=0;ae<loads->nactivesubloads[l];ae++){
 				projected_subloads[l][ae]=loads->combined_subloads[l][ae]*horiz_projectionsub[l][ae];
 			}
@@ -7734,5 +8032,5 @@
 		//do the convolution
 		c=av*nel;
-		for (int ae=0;ae<loads->nactiveloads;ae++){
+		for(ae=0;ae<loads->nactiveloads;ae++){
 			e=loads->combined_loads_index[ae];
 			a=AlphaIndex[c+e]*viscousnumsteps;
@@ -7744,5 +8042,5 @@
 			nbar=slgeom->nbar[l];
 			c=av*nbar;
-			for (ae=0;ae<loads->nactivesubloads[l];ae++){
+			for(ae=0;ae<loads->nactivesubloads[l];ae++){
 				e=loads->combined_subloads_index[l][ae];
 				a=AlphaIndexsub[l][c+e]*viscousnumsteps;
@@ -7757,5 +8055,4 @@
 
 	//free resources
-
 	xDelete<IssmDouble>(horiz_projection);
 	xDelete<IssmDouble>(projected_loads);
Index: /issm/trunk/src/c/classes/Elements/Tria.h
===================================================================
--- /issm/trunk/src/c/classes/Elements/Tria.h	(revision 28012)
+++ /issm/trunk/src/c/classes/Elements/Tria.h	(revision 28013)
@@ -55,4 +55,5 @@
 		void        AverageOntoPartition(Vector<IssmDouble>* partition_contributions,Vector<IssmDouble>* partition_areas,IssmDouble* vertex_response,IssmDouble* qmu_part);
 		void			CalvingRateVonmises();
+		void			CalvingRateVonmisesAD();
 		void			CalvingRateTest();
 		void        CalvingCrevasseDepth();
@@ -62,4 +63,5 @@
 		void			CalvingMeltingFluxLevelset();
 		void			CalvingRateParameterization();
+		void			CalvingRateCalvingMIP();
 		IssmDouble  CharacteristicLength(void);
 		void        ComputeBasalStress(void);
@@ -156,4 +158,6 @@
 		IssmDouble  TotalGroundedBmb(bool scaled);
 		IssmDouble  TotalSmb(bool scaled);
+		IssmDouble  TotalSmbMelt(bool scaled);
+		IssmDouble  TotalSmbRefreeze(bool scaled);
 		void        Update(Inputs* inputs,int index, IoModel* iomodel,int analysis_counter,int analysis_type,int finitelement);
 		int         UpdatePotentialUngrounding(IssmDouble* vertices_potentially_ungrounding,Vector<IssmDouble>* vec_nodes_on_iceshelf,IssmDouble* nodes_on_iceshelf);
Index: /issm/trunk/src/c/classes/FemModel.cpp
===================================================================
--- /issm/trunk/src/c/classes/FemModel.cpp	(revision 28012)
+++ /issm/trunk/src/c/classes/FemModel.cpp	(revision 28013)
@@ -347,4 +347,7 @@
 	#endif
 
+	/*Cleanup toolkit*/
+	ToolkitOptions::Delete();
+
 	/*Clean up*/
 	xDelete<char>(lockfilename);
@@ -793,4 +796,7 @@
 				analyses_temp[numanalyses++]=L2ProjectionEPLAnalysisEnum;
 				analyses_temp[numanalyses++]=L2ProjectionBaseAnalysisEnum;
+			}
+			if(hydrology_model==HydrologyarmapwEnum){
+				analyses_temp[numanalyses++]=HydrologyArmapwAnalysisEnum;
 			}
 		}
@@ -915,4 +921,7 @@
 			}
 			if(isdebris){
+				analyses_temp[numanalyses++]=L2ProjectionBaseAnalysisEnum;
+				analyses_temp[numanalyses++]=SmbAnalysisEnum;
+				analyses_temp[numanalyses++]=ExtrudeFromTopAnalysisEnum;
 				analyses_temp[numanalyses++]=DebrisAnalysisEnum;
 			}
@@ -1445,19 +1454,20 @@
 
 	/*Now send and receive vector for vertices on partition edge*/
-	#ifdef _HAVE_AD_
-	IssmDouble* buffer = xNew<IssmDouble>(this->vertices->Size(),"t"); //only one alloc, "t" is required by adolc
-	#else
-	IssmDouble* buffer = xNew<IssmDouble>(this->vertices->Size());
-	#endif
+	IssmDouble **send_buffers = xNewZeroInit<IssmDouble*>(num_procs);
+	IssmDouble  *recv_buffer  = xNewZeroInit<IssmDouble>(this->vertices->Size());
+	ISSM_MPI_Request  *send_requests = xNew<ISSM_MPI_Request>(num_procs);
+	for (int rank = 0;rank<num_procs;rank++) send_requests[rank] = ISSM_MPI_REQUEST_NULL;
+
 	for(int rank=0;rank<num_procs;rank++){
 		if(this->vertices->common_send[rank]){
 			int  numids = this->vertices->common_send[rank];
+			send_buffers[rank] = xNew<IssmDouble>(numids,"t"); //only one alloc, "t" is required by adolc
 			for(int i=0;i<numids;i++){
 				int   master_lid = this->vertices->common_send_ids[rank][i];
 				Vertex* vertex=xDynamicCast<Vertex*>(this->vertices->GetObjectByOffset(master_lid));
 				_assert_(!vertex->clone);
-				buffer[i] = local_vector[vertex->lid];
-			}
-			ISSM_MPI_Send(buffer,numids,ISSM_MPI_DOUBLE,rank,0,IssmComm::GetComm());
+            send_buffers[rank][i] = local_vector[vertex->lid];
+			}
+         ISSM_MPI_Isend(send_buffers[rank],numids,ISSM_MPI_DOUBLE,rank,0,IssmComm::GetComm(),&send_requests[rank]);
 		}
 	}
@@ -1465,14 +1475,20 @@
 		if(this->vertices->common_recv[rank]){
 			int  numids = this->vertices->common_recv[rank];
-			ISSM_MPI_Recv(buffer,numids,ISSM_MPI_DOUBLE,rank,0,IssmComm::GetComm(),&status);
+         ISSM_MPI_Recv(recv_buffer,numids,ISSM_MPI_DOUBLE,rank,0,IssmComm::GetComm(),&status);
 			for(int i=0;i<numids;i++){
 				int   master_lid = this->vertices->common_recv_ids[rank][i];
 				Vertex* vertex=xDynamicCast<Vertex*>(this->vertices->GetObjectByOffset(master_lid));
 				_assert_(vertex->clone);
-				local_vector[vertex->lid] = buffer[i];
-			}
-		}
-	}
-	xDelete<IssmDouble>(buffer);
+            local_vector[vertex->lid] = recv_buffer[i];
+			}
+		}
+	}
+   xDelete<IssmDouble>(recv_buffer);
+   for(int rank=0;rank<num_procs;rank++){
+		if(this->vertices->common_send[rank]) ISSM_MPI_Wait(&send_requests[rank],&status);
+		xDelete<IssmDouble>(send_buffers[rank]);
+   }
+   xDelete<IssmDouble*>(send_buffers);
+   xDelete<ISSM_MPI_Request>(send_requests);
 }/*}}}*/
 void FemModel::SyncLocalVectorWithClonesVerticesAdd(IssmDouble* local_vector){/*{{{*/
@@ -1484,9 +1500,8 @@
 
 	/*Now send and receive vector for vertices on partition edge*/
-	#ifdef _HAVE_AD_
-	IssmDouble* buffer = xNew<IssmDouble>(this->vertices->Size(),"t"); //only one alloc, "t" is required by adolc
-	#else
-	IssmDouble* buffer = xNew<IssmDouble>(this->vertices->Size());
-	#endif
+	IssmDouble **send_buffers = xNewZeroInit<IssmDouble*>(num_procs);
+	IssmDouble  *recv_buffer  = xNewZeroInit<IssmDouble>(this->vertices->Size());
+	ISSM_MPI_Request  *send_requests = xNew<ISSM_MPI_Request>(num_procs);
+	for (int rank = 0;rank<num_procs;rank++) send_requests[rank] = ISSM_MPI_REQUEST_NULL;
 
 	/*1st: add slaves to master values (reverse of what we usually do)*/
@@ -1494,11 +1509,12 @@
 		if(this->vertices->common_recv[rank]){
 			int  numids = this->vertices->common_recv[rank];
+         send_buffers[rank] = xNew<IssmDouble>(numids,"t"); //only one alloc, "t" is required by adolc
 			for(int i=0;i<numids;i++){
 				int   master_lid = this->vertices->common_recv_ids[rank][i];
 				Vertex* vertex=xDynamicCast<Vertex*>(this->vertices->GetObjectByOffset(master_lid));
 				_assert_(vertex->clone);
-				buffer[i] = local_vector[vertex->lid];
-			}
-			ISSM_MPI_Send(buffer,numids,ISSM_MPI_DOUBLE,rank,0,IssmComm::GetComm());
+				send_buffers[rank][i] = local_vector[vertex->lid];
+			}
+			ISSM_MPI_Isend(send_buffers[rank],numids,ISSM_MPI_DOUBLE,rank,0,IssmComm::GetComm(),&send_requests[rank]);
 		}
 	}
@@ -1506,12 +1522,17 @@
 		if(this->vertices->common_send[rank]){
 			int  numids = this->vertices->common_send[rank];
-			ISSM_MPI_Recv(buffer,numids,ISSM_MPI_DOUBLE,rank,0,IssmComm::GetComm(),&status);
+			ISSM_MPI_Recv(recv_buffer,numids,ISSM_MPI_DOUBLE,rank,0,IssmComm::GetComm(),&status);
 			for(int i=0;i<numids;i++){
 				int   master_lid = this->vertices->common_send_ids[rank][i];
 				Vertex* vertex=xDynamicCast<Vertex*>(this->vertices->GetObjectByOffset(master_lid));
 				_assert_(!vertex->clone);
-				local_vector[vertex->lid] += buffer[i];
-			}
-		}
+				local_vector[vertex->lid] += recv_buffer[i];
+			}
+		}
+	}
+
+	/*Wait until MPI is done*/
+	for(int rank=0;rank<num_procs;rank++){
+		if(this->vertices->common_send[rank]) ISSM_MPI_Wait(&send_requests[rank],&status);
 	}
 
@@ -1520,11 +1541,13 @@
 		if(this->vertices->common_send[rank]){
 			int  numids = this->vertices->common_send[rank];
+			xDelete<IssmDouble>(send_buffers[rank]);
+			send_buffers[rank] = xNew<IssmDouble>(numids,"t"); //only one alloc, "t" is required by adolc
 			for(int i=0;i<numids;i++){
 				int   master_lid = this->vertices->common_send_ids[rank][i];
 				Vertex* vertex=xDynamicCast<Vertex*>(this->vertices->GetObjectByOffset(master_lid));
 				_assert_(!vertex->clone);
-				buffer[i] = local_vector[vertex->lid];
-			}
-			ISSM_MPI_Send(buffer,numids,ISSM_MPI_DOUBLE,rank,0,IssmComm::GetComm());
+				send_buffers[rank][i] = local_vector[vertex->lid];
+			}
+			ISSM_MPI_Isend(send_buffers[rank],numids,ISSM_MPI_DOUBLE,rank,0,IssmComm::GetComm(),&send_requests[rank]);
 		}
 	}
@@ -1532,14 +1555,21 @@
 		if(this->vertices->common_recv[rank]){
 			int  numids = this->vertices->common_recv[rank];
-			ISSM_MPI_Recv(buffer,numids,ISSM_MPI_DOUBLE,rank,0,IssmComm::GetComm(),&status);
+			ISSM_MPI_Recv(recv_buffer,numids,ISSM_MPI_DOUBLE,rank,0,IssmComm::GetComm(),&status);
+
 			for(int i=0;i<numids;i++){
 				int   master_lid = this->vertices->common_recv_ids[rank][i];
 				Vertex* vertex=xDynamicCast<Vertex*>(this->vertices->GetObjectByOffset(master_lid));
 				_assert_(vertex->clone);
-				local_vector[vertex->lid] = buffer[i];
-			}
-		}
-	}
-	xDelete<IssmDouble>(buffer);
+				local_vector[vertex->lid] = recv_buffer[i];
+			}
+		}
+	}
+	xDelete<IssmDouble>(recv_buffer);
+	for(int rank=0;rank<num_procs;rank++){
+		if(this->vertices->common_send[rank]) ISSM_MPI_Wait(&send_requests[rank],&status);
+		xDelete<IssmDouble>(send_buffers[rank]);
+	}
+	xDelete<IssmDouble*>(send_buffers);
+	xDelete<ISSM_MPI_Request>(send_requests);
 }/*}}}*/
 void FemModel::GetLocalVectorWithClonesNodes(IssmDouble** plocal_vector,Vector<IssmDouble> *vector){/*{{{*/
@@ -1567,19 +1597,20 @@
 
 	/*Now send and receive vector for nodes on partition edge*/
-	#ifdef _HAVE_AD_
-	IssmDouble* buffer = xNew<IssmDouble>(this->nodes->Size(),"t"); //only one alloc, "t" is required by adolc
-	#else
-	IssmDouble* buffer = xNew<IssmDouble>(this->nodes->Size());
-	#endif
+	IssmDouble **send_buffers = xNewZeroInit<IssmDouble*>(num_procs);
+	IssmDouble  *recv_buffer  = xNewZeroInit<IssmDouble>(this->nodes->Size(),"t"); //only one alloc, "t" is required by adolc
+	ISSM_MPI_Request  *send_requests = xNew<ISSM_MPI_Request>(num_procs);
+	for (int rank = 0;rank<num_procs;rank++) send_requests[rank] = ISSM_MPI_REQUEST_NULL;
+
 	for(int rank=0;rank<num_procs;rank++){
 		if(this->nodes->common_send[rank]){
 			int  numids = this->nodes->common_send[rank];
+			send_buffers[rank] = xNew<IssmDouble>(numids,"t"); //only one alloc, "t" is required by adolc
 			for(int i=0;i<numids;i++){
 				int   master_lid = this->nodes->common_send_ids[rank][i];
 				Node* vertex=xDynamicCast<Node*>(this->nodes->GetObjectByOffset(master_lid));
 				_assert_(!vertex->clone);
-				buffer[i] = local_vector[vertex->lid];
-			}
-			ISSM_MPI_Send(buffer,numids,ISSM_MPI_DOUBLE,rank,0,IssmComm::GetComm());
+				send_buffers[rank][i] = local_vector[vertex->lid];
+			}
+			ISSM_MPI_Isend(send_buffers[rank],numids,ISSM_MPI_DOUBLE,rank,0,IssmComm::GetComm(),&send_requests[rank]);
 		}
 	}
@@ -1587,14 +1618,21 @@
 		if(this->nodes->common_recv[rank]){
 			int  numids = this->nodes->common_recv[rank];
-			ISSM_MPI_Recv(buffer,numids,ISSM_MPI_DOUBLE,rank,0,IssmComm::GetComm(),&status);
+			ISSM_MPI_Recv(recv_buffer,numids,ISSM_MPI_DOUBLE,rank,0,IssmComm::GetComm(),&status);
 			for(int i=0;i<numids;i++){
 				int   master_lid = this->nodes->common_recv_ids[rank][i];
 				Node* vertex=xDynamicCast<Node*>(this->nodes->GetObjectByOffset(master_lid));
 				_assert_(vertex->clone);
-				local_vector[vertex->lid] = buffer[i];
-			}
-		}
-	}
-	xDelete<IssmDouble>(buffer);
+				local_vector[vertex->lid] = recv_buffer[i];
+			}
+		}
+	}
+
+	xDelete<IssmDouble>(recv_buffer);
+	for(int rank=0;rank<num_procs;rank++){
+		if(this->nodes->common_send[rank]) ISSM_MPI_Wait(&send_requests[rank],&status);
+		xDelete<IssmDouble>(send_buffers[rank]);
+	}
+	xDelete<IssmDouble*>(send_buffers);
+	xDelete<ISSM_MPI_Request>(send_requests);
 
 	/*Assign output pointer*/
@@ -1638,5 +1676,5 @@
 		basin_icefront_area[basin]=total_icefront_area;
 	}
-	
+
 	this->parameters->AddObject(new DoubleVecParam(FrontalForcingsBasinIcefrontAreaEnum,basin_icefront_area,numbasins));
 
@@ -2297,21 +2335,16 @@
 void FemModel::RequestedDependentsx(void){/*{{{*/
 
-	bool        isautodiff      = false;
-	IssmDouble  output_value;
-
-	int         num_dependents;
-	IssmPDouble *dependents;
-	DataSet*    dependent_objects=NULL;
-	int my_rank=IssmComm::GetRank();
-
 	/*AD mode on?: */
+	bool isautodiff;
 	parameters->FindParam(&isautodiff,AutodiffIsautodiffEnum);
 
 	if(isautodiff){
 		#ifdef _HAVE_AD_
+		int      num_dependents;
+		DataSet* dependent_objects=NULL;
 		parameters->FindParam(&num_dependents,AutodiffNumDependentsEnum);
 		parameters->FindParam(&dependent_objects,AutodiffDependentObjectsEnum);
 		if(num_dependents){
-			dependents=xNew<IssmPDouble>(num_dependents);
+			IssmPDouble* dependents=xNew<IssmPDouble>(num_dependents);
 
 			#if defined(_HAVE_CODIPACK_)
@@ -2326,8 +2359,10 @@
 
 			/*Go through our dependent variables, and compute the response:*/
+			int my_rank=IssmComm::GetRank();
 			int i = 0;
 			for(Object* & object : dependent_objects->objects){
 				DependentObject* dep=(DependentObject*)object;
-				dep->Responsex(&output_value,this);
+				dep->RecordResponsex(this);
+				IssmDouble output_value = dep->GetValue();
 				if (my_rank==0) {
 					#if defined(_HAVE_CODIPACK_)
@@ -2347,7 +2382,7 @@
 				i++;
 			}
+			xDelete<IssmPDouble>(dependents);
 		}
 		delete dependent_objects;
-		if(num_dependents)xDelete<IssmPDouble>(dependents);
 		#else
 		_error_("Should not be requesting dependents when an AD library is not available!");
@@ -2437,4 +2472,6 @@
 					case TotalGroundedBmbScaledEnum:         this->TotalGroundedBmbx(&double_result,true);          break;
 					case TotalSmbEnum:                       this->TotalSmbx(&double_result,false);                 break;
+					case TotalSmbMeltEnum:                   this->TotalSmbMeltx(&double_result,false);             break;
+					case TotalSmbRefreezeEnum:               this->TotalSmbRefreezex(&double_result,false);         break;
 					case TotalSmbScaledEnum:                 this->TotalSmbx(&double_result,true);                  break;
 
@@ -2509,4 +2546,11 @@
 							InputDuplicatex(this,DamageDbarEnum,DamageDbarOldEnum);
 							this->ElementOperationx(&Element::ComputeNewDamage);
+						}
+						else if(output_enum==FrictionAlpha2Enum){
+							for(Object* & object : this->elements->objects){
+								Element* element=xDynamicCast<Element*>(object);
+								element->SetElementInput(FrictionAlpha2Enum,0.,P1Enum);
+							}
+							this->ElementOperationx(&Element::FrictionAlpha2CreateInput);
 						}
 
@@ -2702,4 +2746,6 @@
 		case TotalGroundedBmbScaledEnum:			  this->TotalGroundedBmbx(responses, true); break;
 		case TotalSmbEnum:					        this->TotalSmbx(responses, false); break;
+		case TotalSmbMeltEnum:					     this->TotalSmbMeltx(responses, false); break;
+		case TotalSmbRefreezeEnum:					  this->TotalSmbRefreezex(responses, false); break;
 		case TotalSmbScaledEnum:					  this->TotalSmbx(responses, true); break;
 		case MaterialsRheologyBbarEnum:          this->ElementResponsex(responses,MaterialsRheologyBbarEnum); break;
@@ -3092,4 +3138,36 @@
 	/*Assign output pointers: */
 	*pSmb=total_smb;
+
+}/*}}}*/
+void FemModel::TotalSmbMeltx(IssmDouble* pSmbMelt, bool scaled){/*{{{*/
+
+	IssmDouble local_smbmelt = 0;
+	IssmDouble total_smbmelt;
+
+	for(Object* & object : this->elements->objects){
+		Element* element = xDynamicCast<Element*>(object);
+		local_smbmelt+=element->TotalSmbMelt(scaled);
+	}
+	ISSM_MPI_Reduce(&local_smbmelt,&total_smbmelt,1,ISSM_MPI_DOUBLE,ISSM_MPI_SUM,0,IssmComm::GetComm() );
+	ISSM_MPI_Bcast(&total_smbmelt,1,ISSM_MPI_DOUBLE,0,IssmComm::GetComm());
+
+	/*Assign output pointers: */
+	*pSmbMelt=total_smbmelt;
+
+}/*}}}*/
+void FemModel::TotalSmbRefreezex(IssmDouble* pSmbRefreeze, bool scaled){/*{{{*/
+
+	IssmDouble local_smbrefreeze = 0;
+	IssmDouble total_smbrefreeze;
+
+	for(Object* & object : this->elements->objects){
+		Element* element = xDynamicCast<Element*>(object);
+		local_smbrefreeze+=element->TotalSmbRefreeze(scaled);
+	}
+	ISSM_MPI_Reduce(&local_smbrefreeze,&total_smbrefreeze,1,ISSM_MPI_DOUBLE,ISSM_MPI_SUM,0,IssmComm::GetComm() );
+	ISSM_MPI_Bcast(&total_smbrefreeze,1,ISSM_MPI_DOUBLE,0,IssmComm::GetComm());
+
+	/*Assign output pointers: */
+	*pSmbRefreeze=total_smbrefreeze;
 
 }/*}}}*/
@@ -4570,8 +4648,6 @@
 void FemModel::DakotaResponsesx(double* d_responses,char** responses_descriptors,int numresponsedescriptors,int d_numresponses){/*{{{*/
 
-	int        i,j;
-	int        my_rank;
-
 	/*intermediary: */
+	int    i,j;
 	char   root[50];
 	int    index;
@@ -4593,5 +4669,5 @@
 
 	/*retrieve my_rank: */
-	my_rank=IssmComm::GetRank();
+	int my_rank=IssmComm::GetRank();
 
 	/*save the d_responses pointer: */
Index: /issm/trunk/src/c/classes/FemModel.h
===================================================================
--- /issm/trunk/src/c/classes/FemModel.h	(revision 28012)
+++ /issm/trunk/src/c/classes/FemModel.h	(revision 28013)
@@ -143,4 +143,6 @@
 		void TotalGroundedBmbx(IssmDouble* pGbmb, bool scaled);
 		void TotalSmbx(IssmDouble* pSmb, bool scaled);
+		void TotalSmbMeltx(IssmDouble* pSmbMelt, bool scaled);
+		void TotalSmbRefreezex(IssmDouble* pSmbRefreeze, bool scaled);
 		#ifdef  _HAVE_DAKOTA_
 		void DakotaResponsesx(double* d_responses,char** responses_descriptors,int numresponsedescriptors,int d_numresponses);
Index: /issm/trunk/src/c/classes/Inputs/ControlInput.cpp
===================================================================
--- /issm/trunk/src/c/classes/Inputs/ControlInput.cpp	(revision 28012)
+++ /issm/trunk/src/c/classes/Inputs/ControlInput.cpp	(revision 28013)
@@ -227,4 +227,8 @@
 }
 /*}}}*/
+void ControlInput::AverageAndReplace(void){/*{{{*/
+	this->values->AverageAndReplace();
+}
+/*}}}*/
 TriaInput* ControlInput::GetTriaInput(){/*{{{*/
 
Index: /issm/trunk/src/c/classes/Inputs/ControlInput.h
===================================================================
--- /issm/trunk/src/c/classes/Inputs/ControlInput.h	(revision 28012)
+++ /issm/trunk/src/c/classes/Inputs/ControlInput.h	(revision 28013)
@@ -48,4 +48,5 @@
 		TriaInput* GetTriaInput();
 		PentaInput* GetPentaInput();
+		void AverageAndReplace(void);
 };
 #endif  /* _CONTROLINPUT_H */
Index: /issm/trunk/src/c/classes/Inputs/Input.h
===================================================================
--- /issm/trunk/src/c/classes/Inputs/Input.h	(revision 28012)
+++ /issm/trunk/src/c/classes/Inputs/Input.h	(revision 28013)
@@ -48,4 +48,5 @@
 		virtual void   Pow(IssmDouble scale_factor){_error_("Not implemented yet");};
 		virtual void   Scale(IssmDouble scale_factor){_error_("Not implemented yet");};
+		virtual void   AverageAndReplace(void){_error_("Not implemented yet");};
 
 		virtual int  GetResultArraySize(void){_error_("Not implemented yet");};
Index: /issm/trunk/src/c/classes/Inputs/Inputs.cpp
===================================================================
--- /issm/trunk/src/c/classes/Inputs/Inputs.cpp	(revision 28012)
+++ /issm/trunk/src/c/classes/Inputs/Inputs.cpp	(revision 28013)
@@ -302,5 +302,5 @@
 }
 /*}}}*/
-void     Inputs::Shift(int xenum, IssmDouble alpha){/*{{{*/
+void Inputs::Shift(int xenum, IssmDouble alpha){/*{{{*/
 
 	_assert_(this);
@@ -314,4 +314,15 @@
 	/*Shift: */
 	this->inputs[index_x]->Shift(alpha);
+}
+/*}}}*/
+void Inputs::AverageAndReplace(int inputenum){/*{{{*/
+
+	_assert_(this);
+
+	/*Get indices from enums*/
+	int index = EnumToIndex(inputenum);
+	if(!this->inputs[index]) _error_("Input "<<EnumToStringx(inputenum)<<" not found");
+
+	this->inputs[index]->AverageAndReplace();
 }
 /*}}}*/
Index: /issm/trunk/src/c/classes/Inputs/Inputs.h
===================================================================
--- /issm/trunk/src/c/classes/Inputs/Inputs.h	(revision 28012)
+++ /issm/trunk/src/c/classes/Inputs/Inputs.h	(revision 28013)
@@ -50,4 +50,5 @@
 		void     AXPY(IssmDouble alpha, int xenum, int yenum);
 		void     Shift(int inputenum, IssmDouble alpha);
+		void     AverageAndReplace(int inputenum);
 		void     DeepEcho(void);
 		void     DeepEcho(int enum_in);
Index: /issm/trunk/src/c/classes/Inputs/TransientInput.cpp
===================================================================
--- /issm/trunk/src/c/classes/Inputs/TransientInput.cpp	(revision 28012)
+++ /issm/trunk/src/c/classes/Inputs/TransientInput.cpp	(revision 28013)
@@ -435,4 +435,5 @@
 	/*First, recover current time from parameters: */
 	bool linear_interp,average,cycle;
+	int  timestepping;
 	IssmDouble dt;
 	this->parameters->FindParam(&linear_interp,TimesteppingInterpForcingEnum);
@@ -440,16 +441,20 @@
 	this->parameters->FindParam(&cycle,TimesteppingCycleForcingEnum);
 	this->parameters->FindParam(&dt,TimesteppingTimeStepEnum);          /*transient core time step*/
-
-	/*Change input time if we cycle through the forcing*/
-	IssmDouble time0 = this->timesteps[0];
-	IssmDouble time1 = this->timesteps[this->numtimesteps - 1];
-
-	/*We need the end time to be the last timestep that would be taken*/
-	/* i.e., the case where GEMB has time stamps (finer timestep) after the last timestep */
-	IssmDouble nsteps = reCast<int,IssmDouble>(time1/dt);
-	if (reCast<IssmDouble>(nsteps)<time1/dt) nsteps=nsteps+1;
-	time1 = nsteps*dt;
+	this->parameters->FindParam(&timestepping,TimesteppingTypeEnum);
 
 	if(cycle){
+
+		/*Change input time if we cycle through the forcing*/
+		IssmDouble time0 = this->timesteps[0];
+		IssmDouble time1 = this->timesteps[this->numtimesteps - 1];
+
+		if(timestepping!=AdaptiveTimesteppingEnum){
+			/*We need the end time to be the last timestep that would be taken*/
+			/* i.e., the case where GEMB has time stamps (finer timestep) after the last timestep */
+			/* warning: this assumes dt = constant!!*/
+			IssmDouble nsteps = reCast<int,IssmDouble>(time1/dt);
+			if (reCast<IssmDouble>(nsteps)<time1/dt) nsteps=nsteps+1;
+			time1 = nsteps*dt;
+		}
 
 		/*See by how many intervals we have to offset time*/
Index: /issm/trunk/src/c/classes/Inputs/TriaInput.cpp
===================================================================
--- /issm/trunk/src/c/classes/Inputs/TriaInput.cpp	(revision 28012)
+++ /issm/trunk/src/c/classes/Inputs/TriaInput.cpp	(revision 28013)
@@ -406,4 +406,28 @@
 }
 /*}}}*/
+void TriaInput::AverageAndReplace(void){/*{{{*/
+
+	if(this->M!=this->numberofelements_local) _error_("not implemented for P1");
+
+	/*Get local sum and local size*/
+	IssmDouble sum  = 0.;
+	int        weight;
+	for(int i=0;i<this->M*this->N;i++) sum += this->values[i];
+	weight = this->M*this->N;
+
+	/*Get sum across all procs*/
+	IssmDouble all_sum;
+	int        all_weight;
+	ISSM_MPI_Allreduce((void*)&sum,(void*)&all_sum,1,ISSM_MPI_DOUBLE,ISSM_MPI_SUM,IssmComm::GetComm());
+	ISSM_MPI_Allreduce((void*)&weight,(void*)&all_weight,1,ISSM_MPI_INT,ISSM_MPI_SUM,IssmComm::GetComm());
+
+	/*Divide by number of procs*/
+	IssmDouble newvalue = all_sum/reCast<IssmPDouble>(all_weight);
+
+	/*Now replace existing input*/
+	this->Reset(P0Enum);
+	for(int i=0;i<this->M*this->N;i++) this->values[i] = newvalue;
+}
+/*}}}*/
 
 /*Object functions*/
Index: /issm/trunk/src/c/classes/Inputs/TriaInput.h
===================================================================
--- /issm/trunk/src/c/classes/Inputs/TriaInput.h	(revision 28012)
+++ /issm/trunk/src/c/classes/Inputs/TriaInput.h	(revision 28013)
@@ -41,4 +41,5 @@
 		void AXPY(Input* xinput,IssmDouble scalar);
 		void Shift(IssmDouble scalar);
+		void AverageAndReplace(void);
 		void PointWiseMult(Input* xinput);
 		void Serve(int numindices,int* indices);
Index: /issm/trunk/src/c/classes/IoModel.cpp
===================================================================
--- /issm/trunk/src/c/classes/IoModel.cpp	(revision 28012)
+++ /issm/trunk/src/c/classes/IoModel.cpp	(revision 28013)
@@ -480,7 +480,6 @@
 		this->FetchData(&num_independent_objects,"md.autodiff.num_independent_objects");
 		if(num_independent_objects){
-			this->FetchData(&names,&temp,"md.autodiff.independent_object_names");
-			_assert_(temp==num_independent_objects);
-			this->FetchData(&types,NULL,NULL,"md.autodiff.independent_object_types");
+			this->FetchMultipleData(&names,&temp,"md.autodiff.independent_name"); _assert_(temp==num_independent_objects);
+			this->FetchMultipleData(&types,NULL,"md.autodiff.independent_type");
 
 			/*create independent objects, and at the same time, fetch the corresponding independent variables,
@@ -1023,4 +1022,33 @@
 }
 /*}}}*/
+void  IoModel::FetchData(IssmDouble** pscalar, const char* data_name){/*{{{*/
+
+   /*output: */
+   IssmPDouble *scalar = NULL;
+   int          code   = 0;
+
+   /*recover my_rank:*/
+   int my_rank=IssmComm::GetRank();
+
+   /*Set file pointer to beginning of the data: */
+   fid=this->SetFilePointerToData(&code,NULL,data_name);
+   if(code!=3)_error_("expecting a IssmDouble for \""<<data_name<<"\"");
+
+   /*Now fetch: */
+
+   /*We have to read a matrix from disk. First read the dimensions of the matrix, then the whole matrix: */
+
+   /*Now allocate matrix: */
+   /*Read matrix on node 0, then broadcast: */
+   scalar=xNew<IssmPDouble>(1);
+   if(my_rank==0) if(fread(scalar,sizeof(IssmPDouble),1,fid)!=1) _error_("could not read matrix ");
+   ISSM_MPI_Bcast(scalar,1,ISSM_MPI_PDOUBLE,0,IssmComm::GetComm());
+
+   _printf0_("scalar: " << *scalar << "\n");
+   *pscalar=xNew<IssmDouble>(1);
+   *pscalar[0]=scalar[0];
+   xDelete<IssmPDouble>(scalar);
+}
+/*}}}*/
 void  IoModel::FetchData(char** pstring,const char* data_name){/*{{{*/
 
@@ -1122,4 +1150,69 @@
 	*pstrings = strings;
 	if(pnumstrings) *pnumstrings = numstrings;
+}
+/*}}}*/
+void  IoModel::FetchData(bool** pmatrix,int* pM,int* pN,const char* data_name){/*{{{*/
+
+	/*output: */
+	int M,N;
+	IssmPDouble* matrix=NULL;
+	bool*        bool_matrix=NULL;
+	int code=0;
+
+	/*recover my_rank:*/
+	int my_rank=IssmComm::GetRank();
+
+	/*Set file pointer to beginning of the data: */
+	fid=this->SetFilePointerToData(&code,NULL,data_name);
+
+	if(code!=5 && code!=6 && code!=7)_error_("expecting a IssmDouble, integer or boolean matrix for \""<<data_name<<"\""<<" (Code is "<<code<<")");
+
+	/*Now fetch: */
+
+	/*We have to read a matrix from disk. First read the dimensions of the matrix, then the whole matrix: */
+	/*numberofelements: */
+	if(my_rank==0){
+		if(fread(&M,sizeof(int),1,fid)!=1) _error_("could not read number of rows for matrix ");
+	}
+
+	ISSM_MPI_Bcast(&M,1,ISSM_MPI_INT,0,IssmComm::GetComm());
+
+	if(my_rank==0){
+		if(fread(&N,sizeof(int),1,fid)!=1) _error_("could not read number of columns for matrix ");
+	}
+	ISSM_MPI_Bcast(&N,1,ISSM_MPI_INT,0,IssmComm::GetComm());
+
+	/*Now allocate matrix: */
+	if(M*N){
+		matrix=xNew<IssmPDouble>(M*N);
+
+		/*Read matrix on node 0, then broadcast: */
+		if(my_rank==0){
+			if(fread(matrix,M*N*sizeof(IssmPDouble),1,fid)!=1) _error_("could not read matrix ");
+		}
+
+		ISSM_MPI_Bcast(matrix,M*N,ISSM_MPI_PDOUBLE,0,IssmComm::GetComm());
+	}
+
+	/*Now cast to bool: */
+	if(M*N){
+		bool_matrix=xNew<bool>(M*N);
+		for(int i=0;i<M;i++){
+			for(int j=0;j<N;j++){
+				bool_matrix[i*N+j]=(bool)matrix[i*N+j];
+			}
+		}
+	}
+	else{
+		bool_matrix=NULL;
+	}
+	/*Free resources:*/
+	xDelete<IssmPDouble>(matrix);
+
+	/*Assign output pointers: */
+	*pmatrix=bool_matrix;
+	if (pM)*pM=M;
+	if (pN)*pN=N;
+
 }
 /*}}}*/
@@ -1361,4 +1454,30 @@
 	if(pM) *pM=M;
 	if(pN) *pN=N;
+}
+/*}}}*/
+void  IoModel::FetchData(IssmPDouble** pscalar,const char* data_name){/*{{{*/
+
+   /*output: */
+   IssmPDouble   *scalar = NULL;
+   int      code;
+
+   /*recover my_rank:*/
+   int my_rank=IssmComm::GetRank();
+
+   /*Set file pointer to beginning of the data: */
+   fid=this->SetFilePointerToData(&code,NULL,data_name);
+
+   if(code!=3)_error_("expecting a IssmDouble for \""<<data_name<<"\"");
+
+   /*We have to read a scalar from disk. First read the dimensions of the scalar, then the scalar: */
+   scalar=xNew<IssmPDouble>(1);
+   if(my_rank==0){
+      if(fread(scalar,sizeof(IssmPDouble),1,fid)!=1)_error_("could not read scalar ");
+   }
+   ISSM_MPI_Bcast(scalar,1,ISSM_MPI_PDOUBLE,0,IssmComm::GetComm());
+
+
+   /*Assign output pointers: */
+   *pscalar=scalar;
 }
 /*}}}*/
@@ -2266,5 +2385,5 @@
 	/*Assign output pointers: */
 	*pstrings=strings;
-	*pnumstrings=num_instances;
+	if(pnumstrings) *pnumstrings=num_instances;
 }
 /*}}}*/
@@ -2278,7 +2397,7 @@
 
 	/*intermediary: */
-	int          integer;
-	int         *codes   = NULL;
-	int          code;
+	int  integer;
+	int *codes   = NULL;
+	int  code;
 
 	/*recover my_rank:*/
@@ -2319,5 +2438,5 @@
 	/*Assign output pointers: */
 	*pvector=vector;
-	*pnum_instances=num_instances;
+	if(pnum_instances) *pnum_instances=num_instances;
 }
 /*}}}*/
@@ -2473,5 +2592,7 @@
 		xDelete<int>(ndims);
 	}
-	*pnumrecords=num_instances;
+	if(pnumrecords){
+		*pnumrecords=num_instances;
+	}
 }
 /*}}}*/
Index: /issm/trunk/src/c/classes/IoModel.h
===================================================================
--- /issm/trunk/src/c/classes/IoModel.h	(revision 28012)
+++ /issm/trunk/src/c/classes/IoModel.h	(revision 28013)
@@ -133,10 +133,13 @@
 		void        FetchData(int* pinteger,const char* data_name);
 		void        FetchData(IssmDouble* pscalar,const char* data_name);
+		void        FetchData(IssmDouble** pscalar, const char* data_name);	
 		void        FetchData(char** pstring,const char* data_name);
 		void        FetchData(char*** pstrings,int* pnumstrings,const char* data_name);
 		void        FetchData(int** pmatrix,int* pM,int* pN,const char* data_name);
+		void        FetchData(bool**  pboolmatrix,int* pM,int* pN,const char* data_name);
 		void        FetchData(IssmDouble**  pscalarmatrix,int* pM,int* pN,const char* data_name);
 #if _HAVE_AD_  && !defined(_WRAPPERS_)
 		void        FetchData(IssmPDouble**  pscalarmatrix,int* pM,int* pN,const char* data_name);
+		void        FetchData(IssmPDouble** pscalar,const char* data_name);
 #endif
 		void        FetchData(IssmDouble*** pmatrixarray,int** pmdims,int** pndims, int* pnumrecords,const char* data_name);
@@ -151,5 +154,5 @@
 		void        FetchMultipleData(int*** pmatrices,int** pmdims,int** pndims, int* pnumrecords,const char* data_name);
 		void        FetchMultipleData(int** pvector, int* pnum_instances,const char* data_name);
-		void        FetchMultipleData(IssmDouble** pvector, int* pnum_instances,const char* data_name);
+		void        FetchMultipleData(IssmDouble** pvector, int* pM,const char* data_name);
 		fpos_t*     SetFilePointersToData(int** pcodes,int** pvector_types, int* pnum_instances, const char* data_name);
 		FILE*       SetFilePointerToData(int* pcode,int* pvector_type, const char* data_name);
Index: /issm/trunk/src/c/classes/Loads/Channel.cpp
===================================================================
--- /issm/trunk/src/c/classes/Loads/Channel.cpp	(revision 28012)
+++ /issm/trunk/src/c/classes/Loads/Channel.cpp	(revision 28013)
@@ -20,9 +20,4 @@
 
 #define C_W         4.22e3   /*specific heat capacity of water (J/kg/K)*/
-#define ALPHA_C     5./4.
-#define BETA_C      3./2.
-/*Make sure these are the same as in HydrologyGlaDSAnalysis::CreateKMatrix*/
-#define ALPHA_S     5./4.
-#define BETA_S      3./2.
 #define AEPS        2.2204460492503131E-015
 
@@ -366,5 +361,6 @@
 	/*Intermediaries */
 	IssmDouble  Jdet,v1,qc,fFactor,Afactor,Bfactor,Xifactor;
-	IssmDouble  A,B,n,phi_old,phi,phi_0,dPw,ks,Ngrad;
+	IssmDouble  A,B,n,phi_old,phi,phi_0,dPw,ks,kc,Ngrad;
+	IssmDouble  h_r;
 	IssmDouble  H,h,b,dphi[2],dphids,dphimds,db[2],dbds;
 	IssmDouble  xyz_list[NUMVERTICES][3];
@@ -382,19 +378,28 @@
 	GetVerticesCoordinates(&xyz_list_tria[0][0],tria->vertices,3);
 
+	bool istransition;
+	element->FindParam(&istransition,HydrologyIsTransitionEnum);
 	IssmDouble L         = element->FindParam(MaterialsLatentheatEnum);
+	IssmDouble mu_water  = element->FindParam(MaterialsMuWaterEnum);
 	IssmDouble rho_ice   = element->FindParam(MaterialsRhoIceEnum);
 	IssmDouble rho_water = element->FindParam(MaterialsRhoFreshwaterEnum);
 	IssmDouble g         = element->FindParam(ConstantsGEnum);
-	IssmDouble kc        = element->FindParam(HydrologyChannelConductivityEnum);
 	IssmDouble lc        = element->FindParam(HydrologyChannelSheetWidthEnum);
 	IssmDouble c_t       = element->FindParam(HydrologyPressureMeltCoefficientEnum);
-
-	Input* h_input      = element->GetInput(HydrologySheetThicknessEnum);_assert_(h_input);
-	Input* H_input      = element->GetInput(ThicknessEnum); _assert_(H_input);
-	Input* b_input      = element->GetInput(BedEnum); _assert_(b_input);
-	Input* B_input      = element->GetInput(MaterialsRheologyBEnum);         _assert_(B_input);
-	Input* n_input      = element->GetInput(MaterialsRheologyNEnum);         _assert_(n_input);
-	Input* ks_input     = element->GetInput(HydrologySheetConductivityEnum); _assert_(ks_input);
-	Input* phi_input    = element->GetInput(HydraulicPotentialEnum);         _assert_(phi_input);
+	IssmDouble alpha_c   = element->FindParam(HydrologyChannelAlphaEnum);
+	IssmDouble beta_c    = element->FindParam(HydrologyChannelBetaEnum);
+	IssmDouble alpha_s   = element->FindParam(HydrologySheetAlphaEnum);
+	IssmDouble beta_s    = element->FindParam(HydrologySheetBetaEnum);
+	IssmDouble omega     = element->FindParam(HydrologyOmegaEnum);
+
+	Input* h_input      = element->GetInput(HydrologySheetThicknessEnum);      _assert_(h_input);
+	Input* H_input      = element->GetInput(ThicknessEnum);                    _assert_(H_input);
+	Input* b_input      = element->GetInput(BedEnum);                          _assert_(b_input);
+	Input* B_input      = element->GetInput(MaterialsRheologyBEnum);           _assert_(B_input);
+	Input* n_input      = element->GetInput(MaterialsRheologyNEnum);           _assert_(n_input);
+	Input* ks_input     = element->GetInput(HydrologySheetConductivityEnum);   _assert_(ks_input);
+	Input* kc_input     = element->GetInput(HydrologyChannelConductivityEnum); _assert_(kc_input);
+	Input* hr_input     = element->GetInput(HydrologyBumpHeightEnum);_assert_(hr_input);
+	Input* phi_input    = element->GetInput(HydraulicPotentialEnum);           _assert_(phi_input);
 
 	/*Get tangent vector*/
@@ -421,8 +426,13 @@
 		h_input->GetInputValue(&h,gauss);
 		ks_input->GetInputValue(&ks,gauss);
+		kc_input->GetInputValue(&kc,gauss);
+		hr_input->GetInputValue(&h_r,gauss);
 		B_input->GetInputValue(&B,gauss);
 		n_input->GetInputValue(&n,gauss);
 		b_input->GetInputValue(&b,gauss);
 		H_input->GetInputValue(&H,gauss);
+
+		/*Hard code B*/
+		B = Cuffey(273.15-2);
 
 		/*Get values for a few potentials*/
@@ -432,8 +442,19 @@
 		Ngrad   = fabs(dphids);
 		if(Ngrad<AEPS) Ngrad = AEPS;
-
-		/*Compute the effective conductivity Kc = k h^alpha |grad Phi|^{beta-2} (same for sheet)*/
-		IssmDouble Kc = kc * pow(this->S,ALPHA_C) * pow(Ngrad,BETA_C-2.);
-		IssmDouble Ks = ks * pow(h      ,ALPHA_S) * pow(Ngrad,BETA_S-2.);
+		
+		/*Compute the effective conductivity Kc = k h^alpha |grad Phi|^{beta-2} (same for sheet) and use transition model if specified*/
+		IssmDouble Kc;
+		IssmDouble Ks;
+		IssmDouble nu = mu_water/rho_water;
+		if(istransition==1 && omega>=AEPS){
+			IssmDouble hratio = h/h_r;
+			IssmDouble coarg = 1. + 4.*omega*pow(hratio,3-2*alpha_s)*ks*pow(h,3)*Ngrad/nu;
+			Ks = nu/2./omega*pow(hratio,2*alpha_s-3) * (-1 + pow(coarg, 0.5))/Ngrad;
+			Kc = kc * pow(this->S,alpha_c) * pow(Ngrad,beta_c-2.);
+		}
+		else {
+			Ks = ks*pow(h,alpha_s)*pow(Ngrad,beta_s-2.);
+			Kc = kc * pow(this->S,alpha_c) * pow(Ngrad,beta_c-2.);
+		}
 
 		/*Approx. discharge in the sheet flowing folwing in the direction of the channel ofver a width lc*/
@@ -499,5 +520,6 @@
 	IssmDouble  Jdet,v2,Afactor,Bfactor,fFactor;
 	IssmDouble  A,B,n,phi_old,phi,phi_0,dphimds,dphi[2];
-	IssmDouble  H,h,b,db[2],dphids,qc,dPw,ks,Ngrad;
+	IssmDouble  H,h,b,db[2],dphids,qc,dPw,ks,kc,Ngrad;
+	IssmDouble  h_r;
 	IssmDouble  xyz_list[NUMVERTICES][3];
 	IssmDouble  xyz_list_tria[3][3];
@@ -512,5 +534,8 @@
 	GetVerticesCoordinates(&xyz_list_tria[0][0],tria->vertices,3);
 
+	bool istransition;
+	element->FindParam(&istransition,HydrologyIsTransitionEnum);
 	IssmDouble L         = element->FindParam(MaterialsLatentheatEnum);
+	IssmDouble mu_water  = element->FindParam(MaterialsMuWaterEnum);
 	IssmDouble rho_ice   = element->FindParam(MaterialsRhoIceEnum);
 	IssmDouble rho_water = element->FindParam(MaterialsRhoFreshwaterEnum);
@@ -518,12 +543,17 @@
 	IssmDouble lc        = element->FindParam(HydrologyChannelSheetWidthEnum);
 	IssmDouble c_t       = element->FindParam(HydrologyPressureMeltCoefficientEnum);
-
-	Input* h_input      = element->GetInput(HydrologySheetThicknessEnum);_assert_(h_input);
-	Input* H_input      = element->GetInput(ThicknessEnum); _assert_(H_input);
-	Input* b_input      = element->GetInput(BedEnum); _assert_(b_input);
-	Input* B_input      = element->GetInput(MaterialsRheologyBEnum);         _assert_(B_input);
-	Input* n_input      = element->GetInput(MaterialsRheologyNEnum);         _assert_(n_input);
-	Input* ks_input     = element->GetInput(HydrologySheetConductivityEnum); _assert_(ks_input);
-	Input* phi_input    = element->GetInput(HydraulicPotentialEnum);         _assert_(phi_input);
+	IssmDouble alpha_s   = element->FindParam(HydrologySheetAlphaEnum);
+	IssmDouble beta_s    = element->FindParam(HydrologySheetBetaEnum);
+	IssmDouble omega     = element->FindParam(HydrologyOmegaEnum);
+
+	Input* h_input      = element->GetInput(HydrologySheetThicknessEnum);      _assert_(h_input);
+	Input* H_input      = element->GetInput(ThicknessEnum);                    _assert_(H_input);
+	Input* b_input      = element->GetInput(BedEnum);                          _assert_(b_input);
+	Input* B_input      = element->GetInput(MaterialsRheologyBEnum);           _assert_(B_input);
+	Input* n_input      = element->GetInput(MaterialsRheologyNEnum);           _assert_(n_input);
+	Input* ks_input     = element->GetInput(HydrologySheetConductivityEnum);   _assert_(ks_input);
+	Input* kc_input     = element->GetInput(HydrologyChannelConductivityEnum); _assert_(kc_input);
+	Input* phi_input    = element->GetInput(HydraulicPotentialEnum);           _assert_(phi_input);
+	Input* hr_input     = element->GetInput(HydrologyBumpHeightEnum);_assert_(hr_input);
 
 	/*Get tangent vector*/
@@ -546,4 +576,5 @@
 		h_input->GetInputValue(&h,gauss);
 		ks_input->GetInputValue(&ks,gauss);
+		kc_input->GetInputValue(&kc,gauss);
 		B_input->GetInputValue(&B,gauss);
 		n_input->GetInputValue(&n,gauss);
@@ -551,5 +582,9 @@
 		b_input->GetInputValue(&b,gauss);
 		H_input->GetInputValue(&H,gauss);
-
+		hr_input->GetInputValue(&h_r,gauss);
+
+		/*Hard code B*/
+		B = Cuffey(273.15-2);
+		
 		/*Get values for a few potentials*/
 		phi_0   = rho_water*g*b + rho_ice*g*H;
@@ -559,6 +594,16 @@
 		if(Ngrad<AEPS) Ngrad = AEPS;
 
-		/*Compute the effective conductivity Ks = k h^alpha |grad Phi|^{beta-2} (same for sheet)*/
-		IssmDouble Ks = ks * pow(h,ALPHA_S) * pow(Ngrad,BETA_S-2.);
+
+		/*Approx. discharge in the sheet flowing folwing in the direction of the channel ofver a width lc, use transition model if specified*/
+		IssmDouble Ks;
+		if (istransition==1 && omega>=AEPS){
+		IssmDouble hratio = h/h_r;
+			IssmDouble nu = mu_water/rho_water;
+			IssmDouble coarg = 1. + 4.*omega*pow(hratio,3-2*alpha_s)*ks*pow(h,3)*Ngrad/nu;
+			Ks = nu/2./omega*pow(hratio,2*alpha_s-3) * (-1 + pow(coarg, 0.5))/Ngrad;
+		}
+		else {
+			Ks = ks * pow(h,alpha_s) * pow(Ngrad,beta_s-2.);
+		}
 
 		/*Approx. discharge in the sheet flowing folwing in the direction of the channel ofver a width lc*/
@@ -626,5 +671,6 @@
 
 	/*Intermediaries */
-	IssmDouble  A,B,n,phi,phi_0,ks,Ngrad;
+	IssmDouble  A,B,n,phi,phi_0,ks,kc,Ngrad;
+	IssmDouble  h_r;
 	IssmDouble  H,h,b,dphi[2],dphids,dphimds,db[2],dbds;
 	IssmDouble  xyz_list[NUMVERTICES][3];
@@ -635,20 +681,29 @@
 	GetVerticesCoordinates(&xyz_list_tria[0][0],tria->vertices,3);
 
+	bool istransition;
+	element->FindParam(&istransition,HydrologyIsTransitionEnum);
 	IssmDouble L         = element->FindParam(MaterialsLatentheatEnum);
 	IssmDouble rho_ice   = element->FindParam(MaterialsRhoIceEnum);
 	IssmDouble rho_water = element->FindParam(MaterialsRhoFreshwaterEnum);
+	IssmDouble mu_water  = element->FindParam(MaterialsMuWaterEnum);
 	IssmDouble g         = element->FindParam(ConstantsGEnum);
-	IssmDouble kc        = element->FindParam(HydrologyChannelConductivityEnum);
 	IssmDouble lc        = element->FindParam(HydrologyChannelSheetWidthEnum);
 	IssmDouble c_t       = element->FindParam(HydrologyPressureMeltCoefficientEnum);
 	IssmDouble dt        = element->FindParam(TimesteppingTimeStepEnum);
-
-	Input* h_input      = element->GetInput(HydrologySheetThicknessEnum);_assert_(h_input);
-	Input* H_input      = element->GetInput(ThicknessEnum); _assert_(H_input);
-	Input* b_input      = element->GetInput(BedEnum); _assert_(b_input);
-	Input* B_input      = element->GetInput(MaterialsRheologyBEnum);         _assert_(B_input);
-	Input* n_input      = element->GetInput(MaterialsRheologyNEnum);         _assert_(n_input);
-	Input* ks_input     = element->GetInput(HydrologySheetConductivityEnum); _assert_(ks_input);
-	Input* phi_input    = element->GetInput(HydraulicPotentialEnum);         _assert_(phi_input);
+	IssmDouble alpha_c   = element->FindParam(HydrologyChannelAlphaEnum);
+	IssmDouble beta_c    = element->FindParam(HydrologyChannelBetaEnum);
+	IssmDouble alpha_s   = element->FindParam(HydrologySheetAlphaEnum);
+	IssmDouble beta_s    = element->FindParam(HydrologySheetBetaEnum);
+	IssmDouble omega     = element->FindParam(HydrologyOmegaEnum);
+
+	Input* h_input      = element->GetInput(HydrologySheetThicknessEnum);      _assert_(h_input);
+	Input* H_input      = element->GetInput(ThicknessEnum);                    _assert_(H_input);
+	Input* b_input      = element->GetInput(BedEnum);                          _assert_(b_input);
+	Input* B_input      = element->GetInput(MaterialsRheologyBEnum);           _assert_(B_input);
+	Input* n_input      = element->GetInput(MaterialsRheologyNEnum);           _assert_(n_input);
+	Input* ks_input     = element->GetInput(HydrologySheetConductivityEnum);   _assert_(ks_input);
+	Input* kc_input     = element->GetInput(HydrologyChannelConductivityEnum); _assert_(kc_input);
+	Input* phi_input    = element->GetInput(HydraulicPotentialEnum);           _assert_(phi_input);
+	Input* hr_input     = element->GetInput(HydrologyBumpHeightEnum);_assert_(hr_input);
 
 	/*Get tangent vector*/
@@ -664,4 +719,5 @@
 	h_input->GetInputValue(&h,gauss);
 	ks_input->GetInputValue(&ks,gauss);
+	kc_input->GetInputValue(&kc,gauss);
 	B_input->GetInputValue(&B,gauss);
 	n_input->GetInputValue(&n,gauss);
@@ -669,4 +725,9 @@
 	b_input->GetInputDerivativeValue(&db[0],&xyz_list_tria[0][0],gauss);
 	H_input->GetInputValue(&H,gauss);
+	hr_input->GetInputValue(&h_r,gauss);
+
+
+	/*Hard code B*/
+	B = Cuffey(273.15-2);
 
 	/*Get values for a few potentials*/
@@ -680,6 +741,15 @@
 	IssmDouble dPw = dphids - dphimds;
 
-	/*Approx. discharge in the sheet flowing folwing in the direction of the channel ofver a width lc*/
-	IssmDouble qc = - ks * pow(h,ALPHA_S) * pow(Ngrad,BETA_S-2.) * dphids;
+	/*Approx. discharge in the sheet flowing folwing in the direction of the channel ofver a width lc, use transition model if necessary*/
+	IssmDouble qc;
+	if (istransition==1 && omega>=AEPS){
+	IssmDouble hratio = h/h_r;
+		IssmDouble nu = mu_water/rho_water;
+		IssmDouble coarg = 1. + 4.*omega*pow(hratio,3-2*alpha_s)*ks*pow(h,3)*fabs(Ngrad)/nu;
+		qc = -nu/2./omega*pow(hratio,2*alpha_s-3) * (-1 + pow(coarg, 0.5))*dphids/Ngrad;
+	}
+	else {
+		qc = - ks * pow(h,alpha_s) * pow(Ngrad,beta_s-2.) * dphids;
+	}
 
 	/*Ice rate factor*/
@@ -687,5 +757,5 @@
 
 	IssmDouble C = C_W*c_t*rho_water;
-	IssmDouble Qprime = -kc * pow(Ngrad,BETA_C-2.)*dphids;
+	IssmDouble Qprime = -kc * pow(Ngrad,beta_c-2.)*dphids;
 	IssmDouble N = phi_0 - phi;
 
@@ -704,6 +774,6 @@
 
 		IssmDouble alpha = 1./(rho_ice*L)*(
-					fabs(Qprime*pow(Snew,ALPHA_C-1.)*dphids)
-					+ C*Qprime*pow(Snew,ALPHA_C-1.)*dPw
+					fabs(Qprime*pow(Snew,alpha_c-1.)*dphids)
+					+ C*Qprime*pow(Snew,alpha_c-1.)*dPw
 					) - 2./pow(n,n)*A*pow(fabs(N),n-1.)*N;
 
@@ -716,5 +786,8 @@
 		/*Constrain the cross section to be between 0 and 500 m^2*/
 		if(this->S<0.)   this->S = 0.;
-		if(this->S>500.) this->S = 500.;
+		if(this->S>100.) this->S = 100.;
+		
+		/*Do not allow channels to grow in areas with no sheet thickness*/
+		if(H<200.) this->S = 0.;
 
 		count++;
@@ -724,5 +797,5 @@
 
 	/*Compute new channel discharge for output only*/
-	IssmDouble Kc = kc * pow(this->S,ALPHA_C) * pow(Ngrad,BETA_C-2.);
+	IssmDouble Kc = kc * pow(this->S,alpha_c) * pow(Ngrad,beta_c-2.);
 	this->discharge = -Kc*dphids;
 
Index: /issm/trunk/src/c/classes/Loads/Friction.cpp
===================================================================
--- /issm/trunk/src/c/classes/Loads/Friction.cpp	(revision 28012)
+++ /issm/trunk/src/c/classes/Loads/Friction.cpp	(revision 28013)
@@ -13,4 +13,5 @@
 #include "../classes.h"
 #include "shared/shared.h"
+#include "../../modules/InputUpdateFromConstantx/InputUpdateFromConstantx.h"
 /*}}}*/
 
@@ -18,10 +19,13 @@
 Friction::Friction(){/*{{{*/
 	this->element=NULL;
-	this->law=0;
-	this->apply_dim = 1.;
-	this->domaintype=-1;
+	this->law        = 0;
+	this->linearize  = 0;
+	this->apply_dim  = 1.;
+	this->domaintype = -1;
 	this->vx_input=NULL;
 	this->vy_input=NULL;
 	this->vz_input=NULL;
+	this->alpha2_list=NULL;
+	this->alpha2_complement_list=NULL;
 }
 /*}}}*/
@@ -29,5 +33,10 @@
 	/* Determine the dimension according to the domain type automatically. 
 	 * There are exceptions, e.g. HO, which needs the user to specify the dimension used in Friciton.*/
+
+	/*Intermediaries*/
+	int linearization_type;
+
 	this->element=element_in;
+	this->linearize  = 0;
 
 	/* Load necessary parameters */
@@ -39,21 +48,53 @@
 		case Domain2DhorizontalEnum: 
 			this->apply_dim = 2.;
-			this->vx_input = element_in->GetInput(VxBaseEnum);			_assert_(this->vx_input); 
-			this->vy_input = element_in->GetInput(VyBaseEnum);			_assert_(this->vy_input);
+			this->vx_input = element_in->GetInput(VxBaseEnum);	_assert_(this->vx_input); 
+			this->vy_input = element_in->GetInput(VyBaseEnum);	_assert_(this->vy_input);
 			this->vz_input = NULL;
 			break;
       case Domain2DverticalEnum:
 			this->apply_dim = 2.;
-			this->vx_input = element_in->GetInput(VxEnum);				_assert_(this->vx_input);
-			this->vy_input = element_in->GetInput(VyEnum);				_assert_(this->vy_input);
+			this->vx_input = element_in->GetInput(VxEnum);	_assert_(this->vx_input);
+			this->vy_input = element_in->GetInput(VyEnum);	_assert_(this->vy_input);
 			this->vz_input = NULL;
 			break;
       case Domain3DEnum:           
 			this->apply_dim = 3.;
-			this->vx_input = element_in->GetInput(VxEnum);				_assert_(this->vx_input);
-			this->vy_input = element_in->GetInput(VyEnum);				_assert_(this->vy_input);
-			this->vz_input = element_in->GetInput(VzEnum);				_assert_(this->vz_input);
+			this->vx_input = element_in->GetInput(VxEnum);	_assert_(this->vx_input);
+			this->vy_input = element_in->GetInput(VyEnum);	_assert_(this->vy_input);
+			this->vz_input = element_in->GetInput(VzEnum);	_assert_(this->vz_input);
 			break;
       default: _error_("mesh "<<EnumToStringx(domaintype)<<" not supported yet");
+	}
+
+	if(this->law==1 || this->law==2){
+		element_in->FindParam(&linearization_type,FrictionLinearizeEnum);
+		if(linearization_type==0){
+			/*Don't do anything*/
+		}
+		else if(linearization_type==1){
+			int numvertices = this->element->GetNumberOfVertices();
+			this->alpha2_list            = xNew<IssmDouble>(numvertices);
+			this->alpha2_complement_list = xNew<IssmDouble>(numvertices);
+			Gauss* gauss=this->element->NewGauss();
+			for(int iv=0;iv<numvertices;iv++){
+				gauss->GaussVertex(iv);
+				this->GetAlpha2(&this->alpha2_list[iv], gauss);
+				this->GetAlphaComplement(&this->alpha2_complement_list[iv], gauss);
+			}
+			this->linearize = linearization_type; /*Change back, we are now all set!*/
+			delete gauss;
+		}
+		else if(linearization_type==2){
+			this->alpha2_list            = xNew<IssmDouble>(1);
+			this->alpha2_complement_list = xNew<IssmDouble>(1);
+			Gauss* gauss=element->NewGauss(1); gauss->GaussPoint(0);
+			this->GetAlpha2(&this->alpha2_list[0], gauss);
+			this->GetAlphaComplement(&this->alpha2_complement_list[0], gauss);
+			this->linearize = linearization_type; /*Change back, we are now all set!*/
+			delete gauss;
+		}
+		else{
+			_error_("not supported yet");
+		}
 	}
 }
@@ -68,7 +109,10 @@
 /*}}}*/
 Friction::~Friction(){/*{{{*/
+	if(this->linearize){
+		xDelete<IssmDouble>(this->alpha2_list);
+		xDelete<IssmDouble>(this->alpha2_complement_list);
+	}
 }
 /*}}}*/
-
 
 /*methods: */
@@ -80,22 +124,39 @@
 void Friction::GetAlphaComplement(IssmDouble* palpha_complement, Gauss* gauss){/*{{{*/
 
-	switch(this->law){
-		case 1:
-			GetAlphaViscousComplement(palpha_complement,gauss);
-			break;
-		case 2:
-			GetAlphaWeertmanComplement(palpha_complement, gauss);
-			break;
-		case 3:
-			GetAlphaHydroComplement(palpha_complement,gauss);
-			break;
-		case 4:
-			GetAlphaTempComplement(palpha_complement,gauss);
-			break;
-		case 11:
-			GetAlphaSchoofComplement(palpha_complement,gauss);
-			break;
-	  default:
-			_error_("not supported");
+	if(this->linearize==0){
+		switch(this->law){
+			case 1:
+				GetAlphaViscousComplement(palpha_complement,gauss);
+				break;
+			case 2:
+				GetAlphaWeertmanComplement(palpha_complement, gauss);
+				break;
+			case 3:
+				GetAlphaHydroComplement(palpha_complement,gauss);
+				break;
+			case 4:
+				GetAlphaTempComplement(palpha_complement,gauss);
+				break;
+			case 11:
+				GetAlphaSchoofComplement(palpha_complement,gauss);
+				break;
+			case 13:
+				GetAlphaCoulomb2Complement(palpha_complement,gauss);
+				break;
+			case 14:
+				GetAlphaRegCoulombComplement(palpha_complement,gauss);
+				break;
+			default:
+				_error_("not supported");
+		}
+	}
+	else if(this->linearize==1){
+		this->element->ValueP1OnGauss(palpha_complement, this->alpha2_complement_list, gauss);
+	}
+	else if(this->linearize==2){
+		*palpha_complement = this->alpha2_complement_list[0];
+	}
+	else{
+		_error_("not supported yet");
 	}
 
@@ -266,48 +327,127 @@
 	*palpha_complement=alpha_complement;
 }/*}}}*/
+void Friction::GetAlphaRegCoulombComplement(IssmDouble* palpha_complement, Gauss* gauss){/*{{{*/
+
+	/* Compute the complement of regularised Coulombs law for inversion
+	 * d alpha2                       
+	 * -------- = |u_b|^(1/m-1) * (|u_b|/u_0 + 1)^(-1/m)
+	 *  dC                           
+	 */
+
+	/*diverse: */
+	IssmDouble  m, u0;
+	IssmDouble  alpha_complement;
+
+	/*Recover parameters: */
+	element->GetInputValue(&m,gauss,FrictionMEnum);
+	element->parameters->FindParam(&u0,FrictionU0Enum);
+
+	/*Get velocity magnitude*/
+	IssmDouble ub = VelMag(gauss);
+
+	/*Check to prevent dividing by zero if vmag==0*/
+	if(ub==0.) {
+		alpha_complement=0.;
+	}
+	else {
+		/*Compute friction complement*/
+		alpha_complement= (pow(ub,1./m-1.)) / pow(ub/u0 + 1.,1./m);	
+	}
+
+	/*Assign output pointers:*/
+	*palpha_complement=alpha_complement;
+}/*}}}*/
+void Friction::GetAlphaCoulomb2Complement(IssmDouble* palpha_complement, Gauss* gauss){/*{{{*/
+
+	/* Compute the complement of Cornford's friction law for inversion
+	 * d alpha2                       
+	 * ------ = (C*N*v^m)/(C^(2/m)*v + (N/2)^(1/m))^m - (C^(2/m - 1)*C^2*N*v*v^m)/(C^(2/m)*v + (N/2)^(1/m))^(m + 1)
+	 *  dC                           
+	 */
+
+	/*diverse: */
+	IssmDouble  m, C;
+	IssmDouble  alpha_complement;
+
+	/*Recover parameters: */
+	element->GetInputValue(&C,gauss,FrictionCEnum);
+	element->GetInputValue(&m,gauss,FrictionMEnum);
+
+	/*Get effective pressure and velocity magnitude*/
+	IssmDouble N = EffectivePressure(gauss);
+	IssmDouble v = VelMag(gauss);
+
+	/*Compute alpha*/
+	if(v<1e-10){
+		alpha_complement = 0.;
+	}
+	else{
+		alpha_complement= pow(0.5*N,1./m+1)* pow(v,m-1.) * pow(v*pow(C,1./m) +pow(0.5*N,1./m) ,-m-1.);
+	}
+
+	/*Assign output pointers:*/
+	*palpha_complement=alpha_complement/2.;
+}/*}}}*/
 void Friction::GetAlpha2(IssmDouble* palpha2, Gauss* gauss){/*{{{*/
 
-	switch(this->law){
-		case 1:
-			GetAlpha2Viscous(palpha2,gauss);
-			break;
-		case 2:
-			GetAlpha2Weertman(palpha2,gauss);
-			break;
-		case 3:
-			GetAlpha2Hydro(palpha2,gauss);
-			break;
-		case 4:
-			GetAlpha2Temp(palpha2,gauss);
-			break;
-		case 5:
-			GetAlpha2WaterLayer(palpha2,gauss);
-			break;
-		case 6:
-			GetAlpha2WeertmanTemp(palpha2,gauss);
-			break;
-		case 7:
-			GetAlpha2Coulomb(palpha2,gauss);
-			break;
-		case 8:
-			GetAlpha2Shakti(palpha2,gauss);
-			break;
-		case 9:
-			GetAlpha2Josh(palpha2,gauss);
-			break;
-		case 10:
-			GetAlpha2PISM(palpha2,gauss);
-			break;
-		case 11:
-			GetAlpha2Schoof(palpha2,gauss);
-			break;
-		case 12:
-			GetAlpha2Tsai(palpha2,gauss);
-			break;
-		case 13:
-			GetAlpha2Coulomb2(palpha2,gauss);
-			break;
-	  default:
-			_error_("Friction law "<< this->law <<" not supported");
+	if(this->linearize==0){
+		switch(this->law){
+			case 1:
+				GetAlpha2Viscous(palpha2,gauss);
+				break;
+			case 2:
+				GetAlpha2Weertman(palpha2,gauss);
+				break;
+			case 3:
+				GetAlpha2Hydro(palpha2,gauss);
+				break;
+			case 4:
+				GetAlpha2Temp(palpha2,gauss);
+				break;
+			case 5:
+				GetAlpha2WaterLayer(palpha2,gauss);
+				break;
+			case 6:
+				GetAlpha2WeertmanTemp(palpha2,gauss);
+				break;
+			case 7:
+				GetAlpha2Coulomb(palpha2,gauss);
+				break;
+			case 8:
+				GetAlpha2Shakti(palpha2,gauss);
+				break;
+			case 9:
+				GetAlpha2Josh(palpha2,gauss);
+				break;
+			case 10:
+				GetAlpha2PISM(palpha2,gauss);
+				break;
+			case 11:
+				GetAlpha2Schoof(palpha2,gauss);
+				break;
+			case 12:
+				GetAlpha2Tsai(palpha2,gauss);
+				break;
+			case 13:
+				GetAlpha2Coulomb2(palpha2,gauss);
+				break;
+			case 14:
+				GetAlpha2RegCoulomb(palpha2,gauss);
+				break;
+			case 15:
+				GetAlpha2RegCoulomb2(palpha2,gauss);
+				break;
+			default:
+				_error_("Friction law "<< this->law <<" not supported");
+		}
+	}
+	else if(this->linearize==1){
+		this->element->ValueP1OnGauss(palpha2, this->alpha2_list, gauss);
+	}
+	else if(this->linearize==2){
+		*palpha2 = this->alpha2_list[0];
+	}
+	else{
+		_error_("not supported yet");
 	}
 
@@ -344,19 +484,20 @@
 
 	/*Get effective pressure*/
-	bool ispwStochastic;
-	IssmDouble Neff;
-	element->parameters->FindParam(&ispwStochastic,StochasticForcingIsWaterPressureEnum);
-	if(ispwStochastic){
-		/*Retrieve stochastic water pressure and compute ice pressure*/
-		IssmDouble p_ice,p_water,Neff_limit;
-		element->GetInputValue(&p_water,gauss,FrictionCoulombWaterPressureEnum);
-		element->parameters->FindParam(&Neff_limit,FrictionEffectivePressureLimitEnum);
-		p_ice = IcePressure(gauss);
-		Neff  = max(Neff_limit*p_ice, p_ice - p_water);
-	}	
-	else{
-		/*Compute effective pressure directly*/
-		Neff = EffectivePressure(gauss);
-	}
+	bool ispwHydro,ispwStochastic;
+   IssmDouble Neff;
+   element->parameters->FindParam(&ispwStochastic,StochasticForcingIsWaterPressureEnum);
+   element->parameters->FindParam(&ispwHydro,HydrologyIsWaterPressureArmaEnum);
+   if(ispwStochastic || ispwHydro){
+      /*Retrieve pre-computed water pressure and compute ice pressure*/
+      IssmDouble p_ice,p_water,Neff_limit;
+      element->GetInputValue(&p_water,gauss,FrictionWaterPressureEnum);
+      element->parameters->FindParam(&Neff_limit,FrictionEffectivePressureLimitEnum);
+      p_ice = IcePressure(gauss);
+      Neff  = max(Neff_limit*p_ice, p_ice - p_water);
+   }
+   else{
+      /*Compute effective pressure directly*/
+      Neff = EffectivePressure(gauss);
+   }
 	
 	/*Get velocity magnitude*/
@@ -559,4 +700,221 @@
 	IssmDouble vmag = VelMag(gauss);
 
+	bool ispwHydro,ispwStochastic;
+   IssmDouble Neff;
+   element->parameters->FindParam(&ispwStochastic,StochasticForcingIsWaterPressureEnum);
+   element->parameters->FindParam(&ispwHydro,HydrologyIsWaterPressureArmaEnum);
+   if(ispwStochastic || ispwHydro){
+      /*Retrieve pre-computed water pressure and compute ice pressure*/
+      IssmDouble p_ice,p_water,Neff_limit;
+      element->GetInputValue(&p_water,gauss,FrictionWaterPressureEnum);
+		element->parameters->FindParam(&Neff_limit,FrictionEffectivePressureLimitEnum);
+      p_ice = IcePressure(gauss);
+      Neff  = max(Neff_limit*p_ice, p_ice - p_water);
+   }	
+	else{
+		/*Compute effective pressure directly*/
+		Neff = EffectivePressure(gauss);
+	}
+
+	/*Check to prevent dividing by zero if vmag==0*/
+	if(s==1.){
+		/*This is to make AD happy and avoid 0^0*/
+		alpha2=drag_coefficient*drag_coefficient*pow(Neff,r);
+	}
+	else{
+		if(vmag==0. && (s-1.)<0.) alpha2=0.;
+		else alpha2=drag_coefficient*drag_coefficient*pow(Neff,r)*pow(vmag,(s-1.));
+	}
+
+	/*Assign output pointers:*/
+	*palpha2=alpha2;
+}/*}}}*/
+void Friction::GetAlpha2WaterLayer(IssmDouble* palpha2, Gauss* gauss){/*{{{*/
+
+	/*This routine calculates the basal friction coefficient
+	  alpha2= drag^2 * Neff ^r * | vel | ^(s-1), with Neff=rho_ice*g*thickness+rho_ice*g*base, r=q/p and s=1/p**/
+
+	/*diverse: */
+	IssmDouble  r,s;
+	IssmDouble  drag_p, drag_q;
+	IssmDouble  Neff,F;
+	IssmDouble  thickness,base,sealevel;
+	IssmDouble  drag_coefficient,water_layer;
+	IssmDouble  alpha2;
+
+	/*Recover parameters: */
+	element->parameters->FindParam(&F,FrictionFEnum);
+	element->GetInputValue(&drag_p,gauss,FrictionPEnum);
+	element->GetInputValue(&drag_q,gauss,FrictionQEnum);
+	element->GetInputValue(&thickness, gauss,ThicknessEnum);
+	element->GetInputValue(&base, gauss,BaseEnum);
+	element->GetInputValue(&sealevel, gauss,SealevelEnum);
+	element->GetInputValue(&drag_coefficient, gauss,FrictionCoefficientEnum);
+	element->GetInputValue(&water_layer, gauss,FrictionWaterLayerEnum);
+	IssmDouble rho_water   = element->FindParam(MaterialsRhoSeawaterEnum);
+	IssmDouble rho_ice     = element->FindParam(MaterialsRhoIceEnum);
+	IssmDouble gravity     = element->FindParam(ConstantsGEnum);
+
+	//compute r and q coefficients: */
+	r=drag_q/drag_p;
+	s=1./drag_p;
+
+	//From base and thickness, compute effective pressure when drag is viscous:
+	if(base>0) base=0;
+	if(water_layer==0) Neff=gravity*rho_ice*thickness+gravity*rho_water*(base-sealevel);
+	else if(water_layer>0) Neff=gravity*rho_ice*thickness*F;
+	else _error_("negative water layer thickness");
+	if(Neff<0) Neff=0;
+
+	IssmDouble vmag = VelMag(gauss);
+
+	if(s==1.){
+		/*This is to make AD happy and avoid 0^0*/
+		alpha2=drag_coefficient*drag_coefficient*pow(Neff,r);
+	}
+	else{
+		/*Check to prevent dividing by zero if vmag==0*/
+		if(vmag==0. && (s-1.)<0.) alpha2=0.;
+		else alpha2=drag_coefficient*drag_coefficient*pow(Neff,r)*pow(vmag,(s-1.));
+	}
+
+	/*Assign output pointers:*/
+	*palpha2=alpha2;
+}/*}}}*/
+void Friction::GetAlpha2Weertman(IssmDouble* palpha2, Gauss* gauss){/*{{{*/
+
+	/*This routine calculates the basal friction coefficient alpha2= C^2 |v|^(1/m-1) */
+
+	/*diverse: */
+	IssmDouble  C,m;
+	IssmDouble  alpha2;
+
+	/*Recover parameters: */
+	element->GetInputValue(&C,gauss,FrictionCEnum);
+	element->GetInputValue(&m,gauss,FrictionMEnum);
+
+	/*Get velocity magnitude*/
+	IssmDouble vmag = VelMag(gauss);
+
+	/*Check to prevent dividing by zero if vmag==0*/
+	if(vmag==0. && (1./m-1.)<0.) alpha2=0.;
+	else alpha2=C*C*pow(vmag,(1./m-1.));
+
+	/*Assign output pointers:*/
+	*palpha2=alpha2;
+}/*}}}*/
+void Friction::GetAlpha2WeertmanTemp(IssmDouble* palpha2, Gauss* gauss){/*{{{*/
+	/*Here, we want to parameterize the friction as a function of temperature
+	 *
+	 * alpha2 = alpha2_weertman * 1/f(T)
+	 *
+	 * where f(T) = exp((T-Tpmp)/gamma)
+	 */
+
+	/*Intermediaries: */
+	IssmDouble  f,T,pressure,Tpmp,gamma;
+	IssmDouble  alpha2;
+
+	/*Get viscous part*/
+	this->GetAlpha2Weertman(&alpha2,gauss);
+
+	/*Get pressure melting point (Tpmp) for local pressure and get current temperature*/
+	element->GetInputValue(&T,gauss,TemperatureEnum);
+	element->GetInputValue(&pressure,gauss,PressureEnum);
+	Tpmp = element->TMeltingPoint(pressure);
+
+	/*Compute scaling parameter*/
+	element->parameters->FindParam(&gamma,FrictionGammaEnum);
+	alpha2 = alpha2 / exp((T-Tpmp)/gamma);
+
+	/*Assign output pointers:*/
+	*palpha2=alpha2;
+}/*}}}*/
+void Friction::GetAlpha2PISM(IssmDouble* palpha2, Gauss* gauss){/*{{{*/
+	/*Here, we want to parameterize the friction using a pseudoplastic friction law,
+	 * computing the basal shear stress as
+	 *
+	 * alpha2 = tau_c (u_b/(abs(u_b)^(1-q)*u_0^q))
+	 *
+	 * The yield stress tau_c is a function of the effective pressure N
+	 * using a Mohr-Coloumb criterion, so that
+	 * tau_c = tan(phi)*N,
+	 * where phi is the till friction angle, representing sediment strength
+	 *
+	 * The effective pressure is given by Eq. (5) in Aschwanden et al. 2016:
+	 *
+	 * N = delta * P0 * 10^((e_0/Cc)(1-(W/Wmax)))
+	 *
+	 * W is calculated by a non-conserving hydrology model in HydrologyPismAnalysis.cpp
+	 *
+	 * see Aschwanden et al. 2016 and Bueler and Brown, 2009 for more details
+	 */
+
+	/*compute ice overburden pressure P0*/
+	IssmDouble thickness,base,P0;
+	element->GetInputValue(&thickness, gauss,ThicknessEnum);
+	element->GetInputValue(&base, gauss,BaseEnum);
+	//element->GetInputValue(&sealevel, gauss,SealevelEnum);
+	IssmDouble rho_ice   = element->FindParam(MaterialsRhoIceEnum);
+	IssmDouble gravity   = element->FindParam(ConstantsGEnum);
+	P0 = gravity*rho_ice*thickness;
+
+	/*Compute effective pressure*/
+	IssmDouble  N,delta,W,Wmax,e0,Cc;
+	element->parameters->FindParam(&delta,FrictionDeltaEnum);
+	element->parameters->FindParam(&e0,FrictionVoidRatioEnum);
+	element->GetInputValue(&Cc,gauss,FrictionSedimentCompressibilityCoefficientEnum);
+	element->GetInputValue(&W,gauss,WatercolumnEnum);
+	element->GetInputValue(&Wmax,gauss,HydrologyWatercolumnMaxEnum);
+
+ 	/*Check that water column height is within 0 and upper bound, correct if needed*/
+ 	if(W>Wmax) W=Wmax;
+ 	if(W<0)    W=0.;
+
+	N = delta*P0*pow(10.,(e0/Cc)*(1.-W/Wmax));
+
+	/*Get till friction angles, defined by user [deg]*/
+	IssmDouble phi;
+	element->GetInputValue(&phi,gauss,FrictionTillFrictionAngleEnum);
+
+	/*Convert till friction angle from user-defined deg to rad, which Matlab uses*/
+	phi = phi*PI/180.;
+
+	/*Compute yield stress following a Mohr-Colomb criterion*/
+	IssmDouble tau_c = N*tan(phi);
+
+	/*Compute basal speed*/
+	IssmDouble ub;
+	element->GetInputValue(&ub,gauss,VelEnum);
+
+	/*now compute alpha^2*/
+	IssmDouble u0,q;
+	element->parameters->FindParam(&u0,FrictionThresholdSpeedEnum);
+	element->parameters->FindParam(&q,FrictionPseudoplasticityExponentEnum);
+	IssmDouble alpha2 = tau_c/(pow(ub+1.e-10,1.-q)*pow(u0,q));
+
+	/*Assign output pointers:*/
+	*palpha2=alpha2;
+}/*}}}*/
+void Friction::GetAlpha2Schoof(IssmDouble* palpha2, Gauss* gauss){/*{{{*/
+
+	/*This routine calculates the basal friction coefficient
+	 *
+	 *               C^2 |u_b|^(m-1)
+	 * alpha2= __________________________
+	 *          (1+(C^2/(Cmax Neff))^1/m |u_b| )^m
+	 *
+	 * */
+
+	/*diverse: */
+	IssmDouble  C,Cmax,m,alpha2;
+
+	/*Recover parameters: */
+	element->GetInputValue(&Cmax,gauss,FrictionCmaxEnum);
+	element->GetInputValue(&C,gauss,FrictionCEnum);
+	element->GetInputValue(&m,gauss,FrictionMEnum);
+
+
+	/*Get effective pressure*/
 	bool ispwStochastic;
 	IssmDouble Neff;
@@ -575,222 +933,4 @@
 	}
 
-	/*Check to prevent dividing by zero if vmag==0*/
-	if(s==1.){
-		/*This is to make AD happy and avoid 0^0*/
-		alpha2=drag_coefficient*drag_coefficient*pow(Neff,r);
-	}
-	else{
-		if(vmag==0. && (s-1.)<0.) alpha2=0.;
-		else alpha2=drag_coefficient*drag_coefficient*pow(Neff,r)*pow(vmag,(s-1.));
-	}
-
-	/*Assign output pointers:*/
-	*palpha2=alpha2;
-}/*}}}*/
-void Friction::GetAlpha2WaterLayer(IssmDouble* palpha2, Gauss* gauss){/*{{{*/
-
-	/*This routine calculates the basal friction coefficient
-	  alpha2= drag^2 * Neff ^r * | vel | ^(s-1), with Neff=rho_ice*g*thickness+rho_ice*g*base, r=q/p and s=1/p**/
-
-	/*diverse: */
-	IssmDouble  r,s;
-	IssmDouble  drag_p, drag_q;
-	IssmDouble  Neff,F;
-	IssmDouble  thickness,base,sealevel;
-	IssmDouble  drag_coefficient,water_layer;
-	IssmDouble  alpha2;
-
-	/*Recover parameters: */
-	element->parameters->FindParam(&F,FrictionFEnum);
-	element->GetInputValue(&drag_p,gauss,FrictionPEnum);
-	element->GetInputValue(&drag_q,gauss,FrictionQEnum);
-	element->GetInputValue(&thickness, gauss,ThicknessEnum);
-	element->GetInputValue(&base, gauss,BaseEnum);
-	element->GetInputValue(&sealevel, gauss,SealevelEnum);
-	element->GetInputValue(&drag_coefficient, gauss,FrictionCoefficientEnum);
-	element->GetInputValue(&water_layer, gauss,FrictionWaterLayerEnum);
-	IssmDouble rho_water   = element->FindParam(MaterialsRhoSeawaterEnum);
-	IssmDouble rho_ice     = element->FindParam(MaterialsRhoIceEnum);
-	IssmDouble gravity     = element->FindParam(ConstantsGEnum);
-
-	//compute r and q coefficients: */
-	r=drag_q/drag_p;
-	s=1./drag_p;
-
-	//From base and thickness, compute effective pressure when drag is viscous:
-	if(base>0) base=0;
-	if(water_layer==0) Neff=gravity*rho_ice*thickness+gravity*rho_water*(base-sealevel);
-	else if(water_layer>0) Neff=gravity*rho_ice*thickness*F;
-	else _error_("negative water layer thickness");
-	if(Neff<0) Neff=0;
-
-	IssmDouble vmag = VelMag(gauss);
-
-	if(s==1.){
-		/*This is to make AD happy and avoid 0^0*/
-		alpha2=drag_coefficient*drag_coefficient*pow(Neff,r);
-	}
-	else{
-		/*Check to prevent dividing by zero if vmag==0*/
-		if(vmag==0. && (s-1.)<0.) alpha2=0.;
-		else alpha2=drag_coefficient*drag_coefficient*pow(Neff,r)*pow(vmag,(s-1.));
-	}
-
-	/*Assign output pointers:*/
-	*palpha2=alpha2;
-}/*}}}*/
-void Friction::GetAlpha2Weertman(IssmDouble* palpha2, Gauss* gauss){/*{{{*/
-
-	/*This routine calculates the basal friction coefficient alpha2= C^2 |v|^(1/m-1) */
-
-	/*diverse: */
-	IssmDouble  C,m;
-	IssmDouble  alpha2;
-
-	/*Recover parameters: */
-	element->GetInputValue(&C,gauss,FrictionCEnum);
-	element->GetInputValue(&m,gauss,FrictionMEnum);
-
-	/*Get velocity magnitude*/
-	IssmDouble vmag = VelMag(gauss);
-
-	/*Check to prevent dividing by zero if vmag==0*/
-	if(vmag==0. && (1./m-1.)<0.) alpha2=0.;
-	else alpha2=C*C*pow(vmag,(1./m-1.));
-
-	/*Assign output pointers:*/
-	*palpha2=alpha2;
-}/*}}}*/
-void Friction::GetAlpha2WeertmanTemp(IssmDouble* palpha2, Gauss* gauss){/*{{{*/
-	/*Here, we want to parameterize the friction as a function of temperature
-	 *
-	 * alpha2 = alpha2_weertman * 1/f(T)
-	 *
-	 * where f(T) = exp((T-Tpmp)/gamma)
-	 */
-
-	/*Intermediaries: */
-	IssmDouble  f,T,pressure,Tpmp,gamma;
-	IssmDouble  alpha2;
-
-	/*Get viscous part*/
-	this->GetAlpha2Weertman(&alpha2,gauss);
-
-	/*Get pressure melting point (Tpmp) for local pressure and get current temperature*/
-	element->GetInputValue(&T,gauss,TemperatureEnum);
-	element->GetInputValue(&pressure,gauss,PressureEnum);
-	Tpmp = element->TMeltingPoint(pressure);
-
-	/*Compute scaling parameter*/
-	element->parameters->FindParam(&gamma,FrictionGammaEnum);
-	alpha2 = alpha2 / exp((T-Tpmp)/gamma);
-
-	/*Assign output pointers:*/
-	*palpha2=alpha2;
-}/*}}}*/
-void Friction::GetAlpha2PISM(IssmDouble* palpha2, Gauss* gauss){/*{{{*/
-	/*Here, we want to parameterize the friction using a pseudoplastic friction law,
-	 * computing the basal shear stress as
-	 *
-	 * alpha2 = tau_c (u_b/(abs(u_b)^(1-q)*u_0^q))
-	 *
-	 * The yield stress tau_c is a function of the effective pressure N
-	 * using a Mohr-Coloumb criterion, so that
-	 * tau_c = tan(phi)*N,
-	 * where phi is the till friction angle, representing sediment strength
-	 *
-	 * The effective pressure is given by Eq. (5) in Aschwanden et al. 2016:
-	 *
-	 * N = delta * P0 * 10^((e_0/Cc)(1-(W/Wmax)))
-	 *
-	 * W is calculated by a non-conserving hydrology model in HydrologyPismAnalysis.cpp
-	 *
-	 * see Aschwanden et al. 2016 and Bueler and Brown, 2009 for more details
-	 */
-
-	/*compute ice overburden pressure P0*/
-	IssmDouble thickness,base,P0;
-	element->GetInputValue(&thickness, gauss,ThicknessEnum);
-	element->GetInputValue(&base, gauss,BaseEnum);
-	//element->GetInputValue(&sealevel, gauss,SealevelEnum);
-	IssmDouble rho_ice   = element->FindParam(MaterialsRhoIceEnum);
-	IssmDouble gravity   = element->FindParam(ConstantsGEnum);
-	P0 = gravity*rho_ice*thickness;
-
-	/*Compute effective pressure*/
-	IssmDouble  N,delta,W,Wmax,e0,Cc;
-	element->parameters->FindParam(&delta,FrictionDeltaEnum);
-	element->parameters->FindParam(&e0,FrictionVoidRatioEnum);
-	element->GetInputValue(&Cc,gauss,FrictionSedimentCompressibilityCoefficientEnum);
-	element->GetInputValue(&W,gauss,WatercolumnEnum);
-	element->GetInputValue(&Wmax,gauss,HydrologyWatercolumnMaxEnum);
-
- 	/*Check that water column height is within 0 and upper bound, correct if needed*/
- 	if(W>Wmax) W=Wmax;
- 	if(W<0)    W=0.;
-
-	N = delta*P0*pow(10.,(e0/Cc)*(1.-W/Wmax));
-
-	/*Get till friction angles, defined by user [deg]*/
-	IssmDouble phi;
-	element->GetInputValue(&phi,gauss,FrictionTillFrictionAngleEnum);
-
-	/*Convert till friction angle from user-defined deg to rad, which Matlab uses*/
-	phi = phi*PI/180.;
-
-	/*Compute yield stress following a Mohr-Colomb criterion*/
-	IssmDouble tau_c = N*tan(phi);
-
-	/*Compute basal speed*/
-	IssmDouble ub;
-	element->GetInputValue(&ub,gauss,VelEnum);
-
-	/*now compute alpha^2*/
-	IssmDouble u0,q;
-	element->parameters->FindParam(&u0,FrictionThresholdSpeedEnum);
-	element->parameters->FindParam(&q,FrictionPseudoplasticityExponentEnum);
-	IssmDouble alpha2 = tau_c/(pow(ub+1.e-10,1.-q)*pow(u0,q));
-
-	/*Assign output pointers:*/
-	*palpha2=alpha2;
-}/*}}}*/
-void Friction::GetAlpha2Schoof(IssmDouble* palpha2, Gauss* gauss){/*{{{*/
-
-	/*This routine calculates the basal friction coefficient
-	 *
-	 *               C |u_b|^(m-1)
-	 * alpha2= __________________________
-	 *          (1+(C/(Cmax Neff))^1/m |u_b| )^m
-	 *
-	 * */
-
-	/*diverse: */
-	IssmDouble  C,coeff,Cmax,m,alpha2;
-
-	/*Recover parameters: */
-	element->GetInputValue(&Cmax,gauss,FrictionCmaxEnum);
-	element->GetInputValue(&coeff,gauss,FrictionCEnum);
-	element->GetInputValue(&m,gauss,FrictionMEnum);
-
-	/* scale C for a better inversion */
-	C = coeff*coeff;
-
-	/*Get effective pressure*/
-	bool ispwStochastic;
-	IssmDouble Neff;
-	element->parameters->FindParam(&ispwStochastic,StochasticForcingIsWaterPressureEnum);
-	if(ispwStochastic){
-		/*Retrieve stochastic water pressure and compute ice pressure*/
-		IssmDouble p_ice,p_water,Neff_limit;
-		element->GetInputValue(&p_water,gauss,FrictionSchoofWaterPressureEnum);
-		element->parameters->FindParam(&Neff_limit,FrictionEffectivePressureLimitEnum);
-		p_ice = IcePressure(gauss);
-		Neff  = max(Neff_limit*p_ice, p_ice - p_water);
-	}	
-	else{
-		/*Compute effective pressure directly*/
-		Neff = EffectivePressure(gauss);
-	}
-
 	/*Get velocity magnitude*/
 	IssmDouble ub = VelMag(gauss);
@@ -801,5 +941,5 @@
 	}
 	else{
-		alpha2= (C*pow(ub,m-1.)) / pow(1.+  pow(C/(Cmax*Neff),1./m)*ub,m);
+		alpha2= (C*C*pow(ub,m-1.)) / pow(1.+  pow(C*C/(Cmax*Neff),1./m)*ub,m);
 	}
 
@@ -874,5 +1014,74 @@
 	*palpha2=alpha2;
 }/*}}}*/
-
+void Friction::GetAlpha2RegCoulomb(IssmDouble* palpha2, Gauss* gauss){/*{{{*/
+
+	/*This routine calculates the basal friction coefficient
+	 *
+	 *               C |u_b|^(1/m-1)
+	 * alpha2= __________________________
+	 *          (|u_b|/u0 + 1 )^(1/m)
+	 *
+	 * */
+
+	/*diverse: */
+	IssmDouble  C,coeff,u0,m,alpha2;
+
+	/*Recover parameters: */
+	element->GetInputValue(&coeff,gauss,FrictionCEnum);
+	element->GetInputValue(&m,gauss,FrictionMEnum);
+	element->parameters->FindParam(&u0,FrictionU0Enum);
+
+	/* scale C for a better inversion */
+	C = coeff*coeff;
+
+	/*Get velocity magnitude*/
+	IssmDouble ub = VelMag(gauss);
+
+	/*Check to prevent dividing by zero if vmag==0*/
+	if(ub==0.) {
+		alpha2=0.;
+	}
+	else {
+		/*Compute alpha^2*/
+		alpha2= (C*pow(ub,1./m-1.)) / pow(ub/u0 + 1.,1./m);
+	}
+
+	/*Assign output pointers:*/
+	*palpha2=alpha2;
+}/*}}}*/
+void Friction::GetAlpha2RegCoulomb2(IssmDouble* palpha2, Gauss* gauss){/*{{{*/
+
+	/*This routine calculates the basal friction coefficient
+	 *
+	 *               C N |u_b|^(1/m-1)
+	 * alpha2= __________________________
+	 *          (|u_b| + K N^m )^(1/m)
+	 *
+	 * */
+
+	/*diverse: */
+	IssmDouble  C,K,m,alpha2;
+
+	/*Recover parameters: */
+	element->GetInputValue(&C,gauss,FrictionCEnum);
+	element->GetInputValue(&m,gauss,FrictionMEnum);
+	element->GetInputValue(&K,gauss,FrictionKEnum);
+
+	/*Get velocity magnitude*/
+	IssmDouble ub = VelMag(gauss);
+	IssmDouble Neff = EffectivePressure(gauss);
+
+	/*Check to prevent dividing by zero if vmag==0*/
+	if(ub==0. && (m-1.)<0) {
+		alpha2=0.;
+	}
+	else {
+		/*Compute alpha^2*/
+		alpha2= (C*pow(ub,1./m-1.)) * Neff / pow((ub+pow(K*Neff,m)),1./m);
+	}
+
+	/*Assign output pointers:*/
+	*palpha2=alpha2;
+}/*}}}*/
 IssmDouble Friction::EffectivePressure(Gauss* gauss){/*{{{*/
 	/*Get effective pressure as a function of  flag */
@@ -1044,2 +1253,207 @@
 	_assert_(!xIsInf<IssmDouble>(*pvz));
 }/*}}}*/
+
+/*IO*/
+void FrictionUpdateInputs(Elements* elements,Inputs* inputs,IoModel* iomodel){/*{{{*/
+
+	/*Intermediaries*/
+	int    frictionlaw;
+	int    frictioncoupling;
+
+	/*Friction law variables*/
+	iomodel->FindConstant(&frictionlaw,"md.friction.law");
+	switch(frictionlaw){
+		case 1:
+			iomodel->FindConstant(&frictioncoupling,"md.friction.coupling");
+			iomodel->FetchDataToInput(inputs,elements,"md.friction.coefficient",FrictionCoefficientEnum);
+			iomodel->FetchDataToInput(inputs,elements,"md.friction.p",FrictionPEnum);
+			iomodel->FetchDataToInput(inputs,elements,"md.friction.q",FrictionQEnum);
+			if(frictioncoupling==3){
+				iomodel->FetchDataToInput(inputs,elements,"md.friction.effective_pressure",FrictionEffectivePressureEnum);}
+			else if(frictioncoupling==4){
+				iomodel->FetchDataToInput(inputs,elements,"md.friction.effective_pressure",EffectivePressureEnum);
+			}
+			break;
+		case 2:
+			iomodel->FetchDataToInput(inputs,elements,"md.friction.C",FrictionCEnum);
+			iomodel->FetchDataToInput(inputs,elements,"md.friction.m",FrictionMEnum);
+			break;
+		case 3:
+			iomodel->FindConstant(&frictioncoupling,"md.friction.coupling");
+			iomodel->FetchDataToInput(inputs,elements,"md.friction.C",FrictionCEnum);
+			iomodel->FetchDataToInput(inputs,elements,"md.friction.As",FrictionAsEnum);
+			iomodel->FetchDataToInput(inputs,elements,"md.friction.q",FrictionQEnum);
+			if(frictioncoupling==3){
+				iomodel->FetchDataToInput(inputs,elements,"md.friction.effective_pressure",FrictionEffectivePressureEnum);}
+			else if(frictioncoupling==4){
+				iomodel->FetchDataToInput(inputs,elements,"md.friction.effective_pressure",EffectivePressureEnum);
+			}
+			break;
+		case 4:
+			iomodel->FetchDataToInput(inputs,elements,"md.friction.coefficient",FrictionCoefficientEnum);
+			iomodel->FetchDataToInput(inputs,elements,"md.friction.p",FrictionPEnum);
+			iomodel->FetchDataToInput(inputs,elements,"md.friction.q",FrictionQEnum);
+			iomodel->FetchDataToInput(inputs,elements,"md.initialization.pressure",PressureEnum);
+			iomodel->FetchDataToInput(inputs,elements,"md.initialization.temperature",TemperatureEnum);
+			iomodel->FindConstant(&frictioncoupling,"md.friction.coupling");
+			break;
+		case 5:
+			iomodel->FetchDataToInput(inputs,elements,"md.friction.coefficient",FrictionCoefficientEnum);
+			iomodel->FetchDataToInput(inputs,elements,"md.friction.p",FrictionPEnum);
+			iomodel->FetchDataToInput(inputs,elements,"md.friction.q",FrictionQEnum);
+			iomodel->FetchDataToInput(inputs,elements,"md.friction.water_layer",FrictionWaterLayerEnum);
+			break;
+		case 6:
+			iomodel->FetchDataToInput(inputs,elements,"md.friction.C",FrictionCEnum);
+			iomodel->FetchDataToInput(inputs,elements,"md.friction.m",FrictionMEnum);
+			iomodel->FetchDataToInput(inputs,elements,"md.initialization.pressure",PressureEnum);
+			iomodel->FetchDataToInput(inputs,elements,"md.initialization.temperature",TemperatureEnum);
+			break;
+		case 7:
+			iomodel->FindConstant(&frictioncoupling,"md.friction.coupling");
+			iomodel->FetchDataToInput(inputs,elements,"md.friction.coefficient",FrictionCoefficientEnum);
+			iomodel->FetchDataToInput(inputs,elements,"md.friction.coefficientcoulomb",FrictionCoefficientcoulombEnum);
+			iomodel->FetchDataToInput(inputs,elements,"md.friction.p",FrictionPEnum);
+			iomodel->FetchDataToInput(inputs,elements,"md.friction.q",FrictionQEnum);
+			if(frictioncoupling==3){
+				iomodel->FetchDataToInput(inputs,elements,"md.friction.effective_pressure",FrictionEffectivePressureEnum);}
+			else if(frictioncoupling==4){
+				iomodel->FetchDataToInput(inputs,elements,"md.friction.effective_pressure",EffectivePressureEnum);
+
+			}
+			break;
+		case 8:
+			iomodel->FetchDataToInput(inputs,elements,"md.friction.coefficient",FrictionCoefficientEnum);
+			break;
+		case 9:
+			iomodel->FetchDataToInput(inputs,elements,"md.initialization.temperature",TemperatureEnum);
+			iomodel->FetchDataToInput(inputs,elements,"md.friction.coefficient",FrictionCoefficientEnum);
+			iomodel->FetchDataToInput(inputs,elements,"md.friction.pressure_adjusted_temperature",FrictionPressureAdjustedTemperatureEnum);
+			InputUpdateFromConstantx(inputs,elements,1.,FrictionPEnum);
+			InputUpdateFromConstantx(inputs,elements,1.,FrictionQEnum);
+			break;
+		case 10:
+			iomodel->FetchDataToInput(inputs,elements,"md.friction.till_friction_angle",FrictionTillFrictionAngleEnum);
+			iomodel->FetchDataToInput(inputs,elements,"md.friction.sediment_compressibility_coefficient",FrictionSedimentCompressibilityCoefficientEnum);
+			iomodel->FetchDataToInput(inputs,elements,"md.hydrology.watercolumn_max",HydrologyWatercolumnMaxEnum);
+			iomodel->FetchDataToInput(inputs,elements,"md.initialization.watercolumn",WatercolumnEnum,0.);
+			break;
+		case 11:
+			iomodel->FindConstant(&frictioncoupling,"md.friction.coupling");
+			iomodel->FetchDataToInput(inputs,elements,"md.friction.m",FrictionMEnum);
+			iomodel->FetchDataToInput(inputs,elements,"md.friction.C",FrictionCEnum);
+			iomodel->FetchDataToInput(inputs,elements,"md.friction.Cmax",FrictionCmaxEnum);
+			if(frictioncoupling==3){
+				iomodel->FetchDataToInput(inputs,elements,"md.friction.effective_pressure",FrictionEffectivePressureEnum);}
+			else if(frictioncoupling==4){
+				iomodel->FetchDataToInput(inputs,elements,"md.friction.effective_pressure",EffectivePressureEnum);
+			}
+			break;
+		case 12:
+			iomodel->FetchDataToInput(inputs,elements,"md.friction.m",FrictionMEnum);
+			iomodel->FetchDataToInput(inputs,elements,"md.friction.C",FrictionCEnum);
+			iomodel->FetchDataToInput(inputs,elements,"md.friction.f",FrictionfEnum);
+			break;
+		case 13:
+			iomodel->FindConstant(&frictioncoupling,"md.friction.coupling");
+			iomodel->FetchDataToInput(inputs,elements,"md.friction.C",FrictionCEnum);
+			iomodel->FetchDataToInput(inputs,elements,"md.friction.m",FrictionMEnum);
+			if(frictioncoupling==3){
+				iomodel->FetchDataToInput(inputs,elements,"md.friction.effective_pressure",FrictionEffectivePressureEnum);}
+			else if(frictioncoupling==4){
+				iomodel->FetchDataToInput(inputs,elements,"md.friction.effective_pressure",EffectivePressureEnum);
+			}
+			break;
+		case 14:
+			iomodel->FetchDataToInput(inputs,elements,"md.friction.C",FrictionCEnum);
+			iomodel->FetchDataToInput(inputs,elements,"md.friction.m",FrictionMEnum);
+			break;
+		case 15:
+			iomodel->FetchDataToInput(inputs,elements,"md.friction.C",FrictionCEnum);
+			iomodel->FetchDataToInput(inputs,elements,"md.friction.m",FrictionMEnum);
+			iomodel->FetchDataToInput(inputs,elements,"md.friction.K",FrictionKEnum);
+			break;
+		default:
+			_error_("friction law "<< frictionlaw <<" not supported");
+	}
+
+#ifdef _HAVE_ANDROID_
+	inputs->DuplicateInput(FrictionCoefficientEnum,AndroidFrictionCoefficientEnum);
+#endif
+
+}/*}}}*/
+void FrictionUpdateParameters(Parameters* parameters,IoModel* iomodel){/*{{{*/
+
+	parameters->AddObject(iomodel->CopyConstantObject("md.friction.law",FrictionLawEnum));
+
+	/*Set default linearize parameter to 0 for now*/
+	parameters->AddObject(new IntParam(FrictionLinearizeEnum,0));
+
+	int frictionlaw;
+	iomodel->FindConstant(&frictionlaw,"md.friction.law");
+	switch(frictionlaw){
+		case 1:
+			parameters->AddObject(iomodel->CopyConstantObject("md.friction.linearize",FrictionLinearizeEnum));
+			parameters->AddObject(iomodel->CopyConstantObject("md.friction.coupling",FrictionCouplingEnum));
+			parameters->AddObject(iomodel->CopyConstantObject("md.friction.effective_pressure_limit",FrictionEffectivePressureLimitEnum));
+			break;
+		case 2:
+			parameters->AddObject(iomodel->CopyConstantObject("md.friction.linearize",FrictionLinearizeEnum));
+			break;
+		case 3:
+			parameters->AddObject(iomodel->CopyConstantObject("md.friction.coupling",FrictionCouplingEnum));
+			parameters->AddObject(iomodel->CopyConstantObject("md.friction.effective_pressure_limit",FrictionEffectivePressureLimitEnum));
+			break;
+		case 4:
+			parameters->AddObject(iomodel->CopyConstantObject("md.friction.gamma",FrictionGammaEnum));
+			parameters->AddObject(iomodel->CopyConstantObject("md.friction.coupling",FrictionCouplingEnum));
+			parameters->AddObject(iomodel->CopyConstantObject("md.friction.effective_pressure_limit",FrictionEffectivePressureLimitEnum));
+			break;
+		case 5:
+			parameters->AddObject(iomodel->CopyConstantObject("md.friction.f",FrictionFEnum));
+			break;
+		case 6:
+			parameters->AddObject(iomodel->CopyConstantObject("md.friction.gamma",FrictionGammaEnum));
+			break;
+		case 7:
+			parameters->AddObject(iomodel->CopyConstantObject("md.friction.coupling",FrictionCouplingEnum));
+			parameters->AddObject(iomodel->CopyConstantObject("md.friction.effective_pressure_limit",FrictionEffectivePressureLimitEnum));
+			break;
+		case 8:
+			break;
+		case 9:
+			parameters->AddObject(iomodel->CopyConstantObject("md.friction.gamma",FrictionGammaEnum));
+			parameters->AddObject(iomodel->CopyConstantObject("md.friction.effective_pressure_limit",FrictionEffectivePressureLimitEnum));
+			parameters->AddObject(new IntParam(FrictionCouplingEnum,2));/*comment this line to use effective pressure from Beuler and Pelt (2015)*/
+			break;
+		case 10:
+			parameters->AddObject(new IntParam(FrictionCouplingEnum,2)); /*comment this line to use effective pressure from Beuler and Pelt (2015)*/
+			parameters->AddObject(new DoubleParam(FrictionEffectivePressureLimitEnum,0.));
+			parameters->AddObject(iomodel->CopyConstantObject("md.friction.pseudoplasticity_exponent",FrictionPseudoplasticityExponentEnum));
+			parameters->AddObject(iomodel->CopyConstantObject("md.friction.threshold_speed",FrictionThresholdSpeedEnum));
+			parameters->AddObject(iomodel->CopyConstantObject("md.friction.delta",FrictionDeltaEnum));
+			parameters->AddObject(iomodel->CopyConstantObject("md.friction.void_ratio",FrictionVoidRatioEnum));
+			break;
+		case 11:
+			parameters->AddObject(iomodel->CopyConstantObject("md.friction.coupling",FrictionCouplingEnum));
+			parameters->AddObject(iomodel->CopyConstantObject("md.friction.effective_pressure_limit",FrictionEffectivePressureLimitEnum));
+			break;
+		case 12:
+			parameters->AddObject(new IntParam(FrictionCouplingEnum,2));
+			parameters->AddObject(iomodel->CopyConstantObject("md.friction.effective_pressure_limit",FrictionEffectivePressureLimitEnum));
+			break;
+		case 13:
+			parameters->AddObject(iomodel->CopyConstantObject("md.friction.coupling",FrictionCouplingEnum));
+			parameters->AddObject(iomodel->CopyConstantObject("md.friction.effective_pressure_limit",FrictionEffectivePressureLimitEnum));
+			break;
+		case 14:
+			parameters->AddObject(iomodel->CopyConstantObject("md.friction.u0",FrictionU0Enum));
+			break;
+		case 15:
+			parameters->AddObject(new IntParam(FrictionCouplingEnum,2));
+			parameters->AddObject(iomodel->CopyConstantObject("md.friction.effective_pressure_limit",FrictionEffectivePressureLimitEnum));
+			break;
+		default: _error_("Friction law "<<frictionlaw<<" not implemented yet");
+	}
+
+}/*}}}*/
Index: /issm/trunk/src/c/classes/Loads/Friction.h
===================================================================
--- /issm/trunk/src/c/classes/Loads/Friction.h	(revision 28012)
+++ /issm/trunk/src/c/classes/Loads/Friction.h	(revision 28013)
@@ -8,4 +8,7 @@
 /*Headers:*/
 class Inputs;
+class Elements;
+class Parameters;
+class IoModel;
 class GaussPenta;
 class GaussTria;
@@ -17,8 +20,11 @@
 		int         law;
 		int         domaintype;
+		int         linearize;
 		IssmDouble  apply_dim;
 		Input      *vx_input;
 		Input      *vy_input;
 		Input      *vz_input;
+		IssmDouble *alpha2_list;
+		IssmDouble *alpha2_complement_list;
 
 		/*methods: */
@@ -35,4 +41,6 @@
 		void  GetAlphaViscousComplement(IssmDouble* alpha_complement,Gauss* gauss);
 		void  GetAlphaSchoofComplement(IssmDouble* alpha_complement,Gauss* gauss);
+		void  GetAlphaCoulomb2Complement(IssmDouble* alpha_complement,Gauss* gauss);
+		void  GetAlphaRegCoulombComplement(IssmDouble* alpha_complement,Gauss* gauss);
 		void  GetAlphaWeertmanComplement(IssmDouble* alpha_complement,Gauss* gauss);
 		void  GetAlpha2(IssmDouble* palpha2,Gauss* gauss);
@@ -49,4 +57,6 @@
 		void  GetAlpha2PISM(IssmDouble* palpha2,Gauss* gauss);
 		void  GetAlpha2Schoof(IssmDouble* palpha2,Gauss* gauss);
+		void  GetAlpha2RegCoulomb(IssmDouble* palpha2,Gauss* gauss);
+		void  GetAlpha2RegCoulomb2(IssmDouble* palpha2,Gauss* gauss);
 		void  GetAlpha2Tsai(IssmDouble* palpha2,Gauss* gauss);
 
@@ -60,3 +70,7 @@
 };
 
+/*Friction related IO*/
+void FrictionUpdateParameters(Parameters* parameters,IoModel* iomodel);
+void FrictionUpdateInputs(Elements* elements,Inputs* inputs,IoModel* iomodel);
+
 #endif  /* _FRICTION_H_ */
Index: /issm/trunk/src/c/classes/Loads/Moulin.cpp
===================================================================
--- /issm/trunk/src/c/classes/Loads/Moulin.cpp	(revision 28012)
+++ /issm/trunk/src/c/classes/Loads/Moulin.cpp	(revision 28013)
@@ -104,5 +104,13 @@
 /*}}}*/
 void    Moulin::Echo(void){/*{{{*/
-	this->DeepEcho();
+
+	_printf_("Moulin:\n");
+	_printf_("   id: " << id << "\n");
+	hnode->Echo();
+	hvertex->Echo();
+	helement->Echo();
+	_printf_("   parameters\n");
+	parameters->Echo();
+	//this->DeepEcho();
 }
 /*}}}*/
@@ -143,5 +151,5 @@
 void  Moulin::Configure(Elements* elementsin,Loads* loadsin,Nodes* nodesin,Vertices* verticesin,Materials* materialsin,Parameters* parametersin){/*{{{*/
 
-	/*Take care of hooking up all objects for this load, ie links the objects in the hooks to their respective 
+	/*Take care of hooking up all objects for this load, ie links the objects in the hooks to their respective
 	 * datasets, using internal ids and offsets hidden in hooks: */
 	hnode->configure(nodesin);
@@ -204,8 +212,8 @@
 			break;
 		case HydrologyDCInefficientAnalysisEnum:
-			pe = CreatePVectorHydrologyDCInefficient();
+			pe = this->CreatePVectorHydrologyDCInefficient();
 			break;
 		case HydrologyDCEfficientAnalysisEnum:
-			pe = CreatePVectorHydrologyDCEfficient();
+			pe = this->CreatePVectorHydrologyDCEfficient();
 			break;
 		default:
@@ -427,8 +435,7 @@
 	 * mesh), don't add the moulin input a second time*/
 	if(node->IsClone()) return NULL;
-	bool isefficientlayer;
+	bool isefficientlayer, active_element;
 	IssmDouble moulin_load,dt;
 	IssmDouble epl_active;
-
 	/*Initialize Element matrix*/
 	ElementVector* pe=new ElementVector(&node,1,this->parameters);
@@ -437,17 +444,25 @@
 	parameters->FindParam(&dt,TimesteppingTimeStepEnum);
 	parameters->FindParam(&isefficientlayer,HydrologydcIsefficientlayerEnum);
-	// Test version input in EPL when active
+
+	//Test version input in EPL when active
 	if(isefficientlayer){
-		this->element->GetInputValue(&epl_active,node,HydrologydcMaskEplactiveNodeEnum);
-		if(reCast<bool>(epl_active)){
-			pe->values[0]=moulin_load*0.0;
+		this->element->GetInputValue(&active_element,HydrologydcMaskEplactiveEltEnum);
+		if(!active_element){
+			/* this->element->GetInputValue(&epl_active,node,HydrologydcMaskEplactiveNodeEnum); */
+			/* if(reCast<bool>(epl_active))pe->values[0]=0.0; */
+			/* else { */
+			pe->values[0]=moulin_load*dt;
+			/* 	if (moulin_load>0)_printf_("MoulinInput in Sed is "<<pe->values[0]<<"\n"); */
+			/* 	if (moulin_load>0)pe->Echo(); */
+			/* } */
+			//if (node->Sid()==4)_printf_("MoulinInput in Sed is "<<moulin_load*dt<<"\n");
 		}
-		else{
-			pe->values[0]=moulin_load*dt;
-		}
-	}
-	else{
-		pe->values[0]=moulin_load*dt;
-	}
+		else pe->values[0]=0.0;
+	}
+	else pe->values[0]=moulin_load*dt;
+
+	//Test only input in sed
+	/* pe->values[0]=moulin_load*dt; */
+
 	/*Clean up and return*/
 	return pe;
@@ -458,14 +473,39 @@
 	/*If this node is not the master node (belongs to another partition of the
 	 * mesh), don't add the moulin input a second time*/
+
 	if(node->IsClone()) return NULL;
-	if(!this->node->IsActive()) return NULL;
-	IssmDouble moulin_load,dt;
 	ElementVector* pe=new ElementVector(&node,1,this->parameters);
 
-	this->element->GetInputValue(&moulin_load,node,HydrologydcBasalMoulinInputEnum);
-	parameters->FindParam(&dt,TimesteppingTimeStepEnum);
-
-	pe->values[0]=moulin_load*dt;
-	/*Clean up and return*/
+	//Test Input in epl if active
+	/* IssmDouble epl_active; */
+	/* this->element->GetInputValue(&epl_active,node,HydrologydcMaskEplactiveNodeEnum); */
+	/* //if(node->Sid()==4)_printf_("Activity is "<<epl_active<<" \n"); */
+	/* if(reCast<bool>(epl_active)){ */
+	/* 	IssmDouble moulin_load,dt; */
+	/* 	this->element->GetInputValue(&moulin_load,node,HydrologydcBasalMoulinInputEnum); */
+	/* 	parameters->FindParam(&dt,TimesteppingTimeStepEnum); */
+	/* 	pe->values[0]=moulin_load*dt; */
+	/* 	if (moulin_load>0)_printf_("MoulinInput in Epl is "<<pe->values[1]<<"\n"); */
+
+	/* } */
+	/* 	else{ */
+	/* 		pe->values[0]=0.0; */
+	/* } */
+	// Test element only test
+	bool active_element;
+	this->element->GetInputValue(&active_element,HydrologydcMaskEplactiveEltEnum);
+	if(active_element){
+		IssmDouble moulin_load,dt;
+		this->element->GetInputValue(&moulin_load,node,HydrologydcBasalMoulinInputEnum);
+		parameters->FindParam(&dt,TimesteppingTimeStepEnum);
+		pe->values[0]=moulin_load*dt;
+	}
+	else pe->values[0]=0.0;
+
+
+	//Test only input is sed
+	/* pe->values[0]=0.0; */
+
+	//Clean up and return
 	return pe;
 }
Index: /issm/trunk/src/c/classes/Loads/Neumannflux.cpp
===================================================================
--- /issm/trunk/src/c/classes/Loads/Neumannflux.cpp	(revision 28012)
+++ /issm/trunk/src/c/classes/Loads/Neumannflux.cpp	(revision 28013)
@@ -352,5 +352,11 @@
 
 	/*Initialize Load Vector and return if necessary*/
-	Tria*  tria=(Tria*)element;
+	Tria*  tria=NULL;
+	if(element->ObjectEnum()==TriaEnum){
+		tria = (Tria*)this->element;
+	}
+	else if(element->ObjectEnum()==PentaEnum){
+		tria = (Tria*)this->element->SpawnBasalElement();
+	}
 	_assert_(tria->FiniteElement()==P1Enum); 
 	if(!tria->IsIceInElement() || tria->IsAllFloating()) return NULL;
@@ -380,4 +386,5 @@
 	/*Clean up and return*/
 	delete gauss;
+	if(tria->IsSpawnedElement()){tria->DeleteMaterials(); delete tria;};
 	return pe;
 }
Index: /issm/trunk/src/c/classes/Materials/Matlitho.cpp
===================================================================
--- /issm/trunk/src/c/classes/Materials/Matlitho.cpp	(revision 28012)
+++ /issm/trunk/src/c/classes/Materials/Matlitho.cpp	(revision 28013)
@@ -14,25 +14,22 @@
 /*Matlitho constructors and destructor*/
 Matlitho::Matlitho(){/*{{{*/
-	this->numlayers=0;
-	this->radius=NULL;
-	this->viscosity=NULL;
-	this->lame_lambda=NULL;
-	this->lame_mu=NULL;
-	this->burgers_viscosity=NULL;
-	this->burgers_mu=NULL;
-	this->ebm_alpha=NULL;
-	this->ebm_delta=NULL;	
-	this->ebm_taul=NULL;
-	this->ebm_tauh=NULL;
-	this->density=NULL;
-	this->rheologymodel=NULL;
-	this->issolid=NULL;
-	return;
-}
-/*}}}*/
-Matlitho::Matlitho(int matlitho_mid, IoModel* iomodel){/*{{{*/
-
-	IssmDouble* rheologymodeld=NULL;
-	IssmDouble* issolidd=NULL;
+	this->numlayers         = 0;
+	this->radius            = NULL;
+	this->viscosity         = NULL;
+	this->lame_lambda       = NULL;
+	this->lame_mu           = NULL;
+	this->burgers_viscosity = NULL;
+	this->burgers_mu        = NULL;
+	this->ebm_alpha         = NULL;
+	this->ebm_delta         = NULL;
+	this->ebm_taul          = NULL;
+	this->ebm_tauh          = NULL;
+	this->density           = NULL;
+	this->rheologymodel     = NULL;
+	this->issolid           = NULL;
+	return;
+}
+/*}}}*/
+Matlitho::Matlitho(int matlitho_mid, IoModel* iomodel, bool* issolid_in, int* rheo_in){/*{{{*/
 
 	this->mid=matlitho_mid;
@@ -72,23 +69,9 @@
 	xMemCpy<IssmDouble>(this->density, iomodel->Data("md.materials.density"),this->numlayers);
 
-	this->rheologymodel=xNew<IssmDouble>(this->numlayers);
-	xMemCpy<IssmDouble>(this->rheologymodel, iomodel->Data("md.materials.rheologymodel"),this->numlayers);
-
-	this->issolid=xNew<IssmDouble>(this->numlayers);
-	xMemCpy<IssmDouble>(this->issolid, iomodel->Data("md.materials.issolid"),this->numlayers);
-
-	/*rheologymodeld= xNew<IssmDouble>(this->numlayers);
-	this->rheologymodel=xNew<bool>(this->numlayers);
-	xMemCpy<IssmDouble>(rheologymodeld, iomodel->Data("md.materials.rheologymodel"),this->numlayers);
-	for (int i=0;i<this->numlayers;i++)this->rheologymodel[i]=reCast<bool,IssmDouble>(rheologymodeld[i]);
-
-	issolidd= xNew<IssmDouble>(this->numlayers);
+	this->rheologymodel=xNew<int>(this->numlayers);
+	xMemCpy<int>(this->rheologymodel, rheo_in, this->numlayers);
+
 	this->issolid=xNew<bool>(this->numlayers);
-	xMemCpy<IssmDouble>(issolidd, iomodel->Data("md.materials.issolid"),this->numlayers);
-	for (int i=0;i<this->numlayers;i++)this->issolid[i]=reCast<bool,IssmDouble>(issolidd[i]);*/
-
-	/*Free resources: */
-	xDelete<IssmDouble>(rheologymodeld);
-	xDelete<IssmDouble>(issolidd);
+	xMemCpy<bool>(this->issolid, issolid_in, this->numlayers);
 }
 /*}}}*/
@@ -106,6 +89,6 @@
 	xDelete<IssmDouble>(ebm_tauh);
 	xDelete<IssmDouble>(density);
-	xDelete<IssmDouble>(rheologymodel);
-	xDelete<IssmDouble>(issolid);
+	xDelete<int>(rheologymodel);
+	xDelete<bool>(issolid);
 
 	return;
@@ -141,8 +124,6 @@
 		matlitho->ebm_tauh=xNew<IssmDouble>(this->numlayers); xMemCpy<IssmDouble>(matlitho->ebm_tauh, this->ebm_tauh,this->numlayers);
 		matlitho->density=xNew<IssmDouble>(this->numlayers); xMemCpy<IssmDouble>(matlitho->density, this->density,this->numlayers);
-		matlitho->rheologymodel=xNew<IssmDouble>(this->numlayers); xMemCpy<IssmDouble>(matlitho->rheologymodel, this->rheologymodel,this->numlayers);
-		matlitho->issolid=xNew<IssmDouble>(this->numlayers); xMemCpy<IssmDouble>(matlitho->issolid, this->issolid,this->numlayers);
-		/*matlitho->rheologymodel=xNew<bool>(this->numlayers); for(int i=0;i<this->numlayers;i++)matlitho->rheologymodel[i]=this->rheologymodel[i]; 
-		matlitho->issolid=xNew<bool>(this->numlayers); for(int i=0;i<this->numlayers;i++)matlitho->issolid[i]=this->issolid[i];*/ 
+		matlitho->rheologymodel=xNew<int>(this->numlayers); xMemCpy<int>(matlitho->rheologymodel, this->rheologymodel,this->numlayers);
+		matlitho->issolid=xNew<bool>(this->numlayers); xMemCpy<bool>(matlitho->issolid, this->issolid,this->numlayers);
 	}
 
Index: /issm/trunk/src/c/classes/Materials/Matlitho.h
===================================================================
--- /issm/trunk/src/c/classes/Materials/Matlitho.h	(revision 28012)
+++ /issm/trunk/src/c/classes/Materials/Matlitho.h	(revision 28013)
@@ -28,9 +28,9 @@
 		IssmDouble*  ebm_tauh;
 		IssmDouble*  density;
-		IssmDouble*  rheologymodel;
-		IssmDouble*  issolid;
+		int*         rheologymodel;
+		bool*        issolid;
 
 		Matlitho();
-		Matlitho(int matlitho_id, IoModel* iomodel);
+		Matlitho(int matlitho_id, IoModel* iomodel, bool* issolid_in, int* rheo_in);
 		~Matlitho();
 		void SetMid(int matlitho_mid);
Index: /issm/trunk/src/c/classes/Nodalvalue.cpp
===================================================================
--- /issm/trunk/src/c/classes/Nodalvalue.cpp	(revision 28012)
+++ /issm/trunk/src/c/classes/Nodalvalue.cpp	(revision 28013)
@@ -64,5 +64,12 @@
 /*}}}*/
 void Nodalvalue::Marshall(MarshallHandle* marshallhandle){/*{{{*/
-	_error_("not implemented yet!"); 
+
+	int object_enum=NodalvalueEnum;
+	marshallhandle->call(object_enum);
+
+	marshallhandle->call(this->definitionenum);
+	marshallhandle->call(this->model_enum);
+	marshallhandle->call(this->name);
+	marshallhandle->call(this->node);
 } 
 /*}}}*/
Index: /issm/trunk/src/c/classes/Nodalvalue.h
===================================================================
--- /issm/trunk/src/c/classes/Nodalvalue.h	(revision 28012)
+++ /issm/trunk/src/c/classes/Nodalvalue.h	(revision 28013)
@@ -30,10 +30,10 @@
 
 		/*Object virtual function resolutoin: */
-		Object* copy();
-		void DeepEcho(void);
-		void Echo(void);
-		int Id(void);
-		void Marshall(MarshallHandle* marshallhandle);
-		int ObjectEnum(void);
+		Object *copy();
+		void    DeepEcho(void);
+		void    Echo(void);
+		int     Id(void);
+		void    Marshall(MarshallHandle  *marshallhandle);
+		int     ObjectEnum(void);
 
 		/*Definition virtual function resolutoin: */
Index: /issm/trunk/src/c/classes/Node.cpp
===================================================================
--- /issm/trunk/src/c/classes/Node.cpp	(revision 28012)
+++ /issm/trunk/src/c/classes/Node.cpp	(revision 28013)
@@ -147,4 +147,6 @@
 				analysis_enum==HydrologyDCInefficientAnalysisEnum ||
 				analysis_enum==HydrologyDCEfficientAnalysisEnum ||
+				analysis_enum==HydrologyShaktiAnalysisEnum ||
+				analysis_enum==HydrologyGlaDSAnalysisEnum ||
 				analysis_enum==GLheightadvectionAnalysisEnum ||
 				analysis_enum==LevelsetAnalysisEnum
Index: /issm/trunk/src/c/classes/Nodes.cpp
===================================================================
--- /issm/trunk/src/c/classes/Nodes.cpp	(revision 28012)
+++ /issm/trunk/src/c/classes/Nodes.cpp	(revision 28013)
@@ -181,14 +181,19 @@
 	 * object that is not a clone, tell them to show their dofs, so that later on, they can get picked
 	 * up by their clones: */
-	int  maxdofspernode = this->MaxNumDofs(GsetEnum);
-	int* truedofs       = xNewZeroInit<int>(this->Size()*maxdofspernode); //only one alloc
+	int maxdofspernode  = this->MaxNumDofs(GsetEnum);
+	int **send_truedofs = xNewZeroInit<int*>(num_procs);
+	int  *recv_truedofs = xNewZeroInit<int>(this->Size()*maxdofspernode);
+	ISSM_MPI_Request  *send_requests = xNew<ISSM_MPI_Request>(num_procs);
+	for (int rank = 0;rank<num_procs;rank++) send_requests[rank] = ISSM_MPI_REQUEST_NULL;
+
 	for(int rank=0;rank<num_procs;rank++){
 		if(this->common_send[rank]){
 			int  numids = this->common_send[rank];
+			send_truedofs[rank] = xNew<int>(numids*maxdofspernode);
 			for(int i=0;i<numids;i++){
 				Node* node=xDynamicCast<Node*>(this->GetObjectByOffset(this->common_send_ids[rank][i]));
-				node->ShowMasterDofs(&truedofs[i*maxdofspernode+0],setenum);
-			}
-			ISSM_MPI_Send(truedofs,numids*maxdofspernode,ISSM_MPI_INT,rank,0,IssmComm::GetComm());
+				node->ShowMasterDofs(&send_truedofs[rank][i*maxdofspernode+0],setenum);
+			}
+			ISSM_MPI_Isend(send_truedofs[rank],numids*maxdofspernode,ISSM_MPI_INT,rank,0,IssmComm::GetComm(),&send_requests[rank]);
 		}
 	}
@@ -196,12 +201,18 @@
 		if(this->common_recv[rank]){
 			int  numids = this->common_recv[rank];
-			ISSM_MPI_Recv(truedofs,numids*maxdofspernode,ISSM_MPI_INT,rank,0,IssmComm::GetComm(),&status);
+			ISSM_MPI_Recv(recv_truedofs,numids*maxdofspernode,ISSM_MPI_INT,rank,0,IssmComm::GetComm(),&status);
 			for(int i=0;i<numids;i++){
 				Node* node=xDynamicCast<Node*>(this->GetObjectByOffset(this->common_recv_ids[rank][i]));
-				node->UpdateCloneDofs(&truedofs[i*maxdofspernode+0],setenum);
-			}
-		}
-	}
-	xDelete<int>(truedofs);
+				node->UpdateCloneDofs(&recv_truedofs[i*maxdofspernode+0],setenum);
+			}
+		}
+	}
+	xDelete<int>(recv_truedofs);
+	for(int rank=0;rank<num_procs;rank++){
+		if(this->common_send[rank]) ISSM_MPI_Wait(&send_requests[rank],&status);
+		xDelete<int>(send_truedofs[rank]);
+	}
+	xDelete<int*>(send_truedofs);
+	xDelete<ISSM_MPI_Request>(send_requests);
 
 	/*Update indexingupdateflag*/
@@ -261,13 +272,17 @@
 
 	/* Share pids of masters and update pids of clones*/
-	int* truepids = xNew<int>(this->Size()); //only one alloc
+	int **send_truepids = xNewZeroInit<int*>(num_procs);
+	int  *recv_truepids = xNewZeroInit<int>(this->Size());
+	ISSM_MPI_Request* send_requests = xNew<ISSM_MPI_Request>(num_procs);
+	for(int rank=0;rank<num_procs;rank++) send_requests[rank] = ISSM_MPI_REQUEST_NULL;
 	for(int rank=0;rank<num_procs;rank++){
 		if(this->common_send[rank]){
 			int  numids = this->common_send[rank];
+			send_truepids[rank] = xNew<int>(numids);
 			for(int i=0;i<numids;i++){
 				Node* node=xDynamicCast<Node*>(this->GetObjectByOffset(this->common_send_ids[rank][i]));
-				truepids[i] = node->pid;
-			}
-			ISSM_MPI_Send(truepids,numids,ISSM_MPI_INT,rank,0,IssmComm::GetComm());
+				send_truepids[rank][i] = node->pid;
+			}
+			ISSM_MPI_Isend(send_truepids[rank],numids,ISSM_MPI_INT,rank,0,IssmComm::GetComm(),&send_requests[rank]);
 		}
 	}
@@ -275,12 +290,18 @@
 		if(this->common_recv[rank]){
 			int  numids = this->common_recv[rank];
-			ISSM_MPI_Recv(truepids,numids,ISSM_MPI_INT,rank,0,IssmComm::GetComm(),&status);
+			ISSM_MPI_Recv(recv_truepids,numids,ISSM_MPI_INT,rank,0,IssmComm::GetComm(),&status);
 			for(int i=0;i<numids;i++){
 				Node* node=xDynamicCast<Node*>(this->GetObjectByOffset(this->common_recv_ids[rank][i]));
-				node->pid = truepids[i];
-			}
-		}
-	}
-	xDelete<int>(truepids);
+				node->pid = recv_truepids[i];
+			}
+		}
+	}
+	xDelete<int>(recv_truepids);
+	for(int rank=0;rank<num_procs;rank++){
+		if(this->common_send[rank]) ISSM_MPI_Wait(&send_requests[rank],&status);
+		xDelete<int>(send_truepids[rank]);
+	}
+	xDelete<int*>(send_truepids);
+	xDelete<ISSM_MPI_Request>(send_requests);
 
 	/*4. Distribute G dofs once for all*/
@@ -483,19 +504,20 @@
 
 	/*Now send and receive ug for nodes on partition edge*/
-	#ifdef _HAVE_AD_
-	IssmDouble* buffer = xNew<IssmDouble>(this->Size()*maxdofspernode,"t"); //only one alloc, "t" is required by adolc
-	#else
-	IssmDouble* buffer = xNew<IssmDouble>(this->Size()*maxdofspernode);
-	#endif
+	IssmDouble **send_buffers = xNewZeroInit<IssmDouble*>(num_procs);
+	IssmDouble  *recv_buffer  = xNewZeroInit<IssmDouble>(this->Size()*maxdofspernode,"t");
+	ISSM_MPI_Request  *send_requests = xNew<ISSM_MPI_Request>(num_procs);
+	for (int rank = 0;rank<num_procs;rank++) send_requests[rank] = ISSM_MPI_REQUEST_NULL;
+
 	for(int rank=0;rank<num_procs;rank++){
 		if(this->common_send[rank]){
 			int  numids = this->common_send[rank];
+			send_buffers[rank] = xNew<IssmDouble>(numids*maxdofspernode,"t"); //"t" is required by adolc
 			for(int i=0;i<numids;i++){
 				int   master_lid = this->common_send_ids[rank][i];
 				Node* node=xDynamicCast<Node*>(this->GetObjectByOffset(master_lid));
 				_assert_(!node->IsClone());
-				for(int j=0;j<node->gsize;j++) buffer[i*maxdofspernode+j]=local_ug[node->gdoflist_local[j]];
-			}
-			ISSM_MPI_Send(buffer,numids*maxdofspernode,ISSM_MPI_DOUBLE,rank,0,IssmComm::GetComm());
+				for(int j=0;j<node->gsize;j++) send_buffers[rank][i*maxdofspernode+j]=local_ug[node->gdoflist_local[j]];
+			}
+			ISSM_MPI_Isend(send_buffers[rank],numids*maxdofspernode,ISSM_MPI_DOUBLE,rank,0,IssmComm::GetComm(),&send_requests[rank]);
 		}
 	}
@@ -503,13 +525,20 @@
 		if(this->common_recv[rank]){
 			int  numids = this->common_recv[rank];
-			ISSM_MPI_Recv(buffer,numids*maxdofspernode,ISSM_MPI_DOUBLE,rank,0,IssmComm::GetComm(),&status);
+			ISSM_MPI_Recv(recv_buffer,numids*maxdofspernode,ISSM_MPI_DOUBLE,rank,0,IssmComm::GetComm(),&status);
 			for(int i=0;i<numids;i++){
 				int   master_lid = this->common_recv_ids[rank][i];
 				Node* node=xDynamicCast<Node*>(this->GetObjectByOffset(master_lid));
-				for(int j=0;j<node->gsize;j++) local_ug[node->gdoflist_local[j]] = buffer[i*maxdofspernode+j];
-			}
-		}
-	}
-	xDelete<IssmDouble>(buffer);
+				for(int j=0;j<node->gsize;j++) local_ug[node->gdoflist_local[j]] = recv_buffer[i*maxdofspernode+j];
+			}
+		}
+	}
+
+	xDelete<IssmDouble>(recv_buffer);
+	for(int rank=0;rank<num_procs;rank++){
+		if(this->common_send[rank]) ISSM_MPI_Wait(&send_requests[rank],&status);
+		xDelete<IssmDouble>(send_buffers[rank]);
+	}
+	xDelete<IssmDouble*>(send_buffers);
+	xDelete<ISSM_MPI_Request>(send_requests);
 
 	/*Assign output pointer*/
Index: /issm/trunk/src/c/classes/Params/BoolParam.h
===================================================================
--- /issm/trunk/src/c/classes/Params/BoolParam.h	(revision 28012)
+++ /issm/trunk/src/c/classes/Params/BoolParam.h	(revision 28013)
@@ -49,4 +49,5 @@
 		void  GetParameterValue(IssmDouble** pIssmDoublearray,int* pM){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a IssmDouble array");}
 		void  GetParameterValue(IssmDouble** pIssmDoublearray,int* pM, int* pN){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a IssmDouble array");}
+		void  GetParameterValue(IssmDouble** pIssmDoublearray,int* pM, const char* data){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a IssmDouble array");}
 		void  GetParameterValue(IssmDouble*** parray, int* pM,int** pmdims, int** pndims){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a matrix array");}
 		void  GetParameterValue(Vector<IssmDouble>** pvec){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a Vec");}
@@ -62,4 +63,5 @@
 		void  SetValue(char* string){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a string");}
 		void  SetValue(char** stringarray,int M){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a string array");}
+		void  SetValue(IssmDouble* IssmDoublearray){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a IssmDouble array");}
 		void  SetValue(IssmDouble* IssmDoublearray,int M){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a IssmDouble array");}
 		void  SetValue(IssmDouble* pIssmDoublearray,int M,int N){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a IssmDouble array");}
@@ -70,4 +72,5 @@
 		void  SetValue(FILE* fid){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a FILE");}
 		void  SetValue(IssmDouble** array, int M, int* mdim_array, int* ndim_array){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold an array of matrices");}
+		void  SetGradient(IssmDouble* poutput, int M, int N){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold an IssmDouble");};
 		/*}}}*/
 };
Index: /issm/trunk/src/c/classes/Params/ControlParam.cpp
===================================================================
--- /issm/trunk/src/c/classes/Params/ControlParam.cpp	(revision 28013)
+++ /issm/trunk/src/c/classes/Params/ControlParam.cpp	(revision 28013)
@@ -0,0 +1,252 @@
+/*!\file ControlParam.c
+ * \brief: implementation of the ControlParam object
+ */
+
+#ifdef HAVE_CONFIG_H
+	#include <config.h>
+#else
+#error "Cannot compile with HAVE_CONFIG_H symbol! run configure first!"
+#endif
+
+#include "../classes.h"
+#include "shared/shared.h"
+
+/*ControlParam constructors and destructor*/
+ControlParam::ControlParam(){/*{{{*/
+	return;
+}
+/*}}}*/
+ControlParam::ControlParam(IssmDouble* in_value, IssmDouble* in_minvalue, IssmDouble* in_maxvalue, int in_enum_type, int in_M,int in_N){/*{{{*/
+
+	this->enum_type=in_enum_type;
+	this->M=in_M;
+	this->N=in_N;
+
+	/*Sanity check, can't hurt*/
+	if(this->N<1) _error_("Parameter is empty");
+	if(this->M<1) _error_("Parameter is empty");
+	if(this->M>2) _error_("Cannot handle more than 2 rows (as a TransientParam)");
+
+	/*Assign value*/
+	this->value=xNew<IssmDouble>(M*N);
+	xMemCpy<IssmDouble>(value,in_value,M*N);
+
+	/*Assign other fields*/
+	this->minvalue=xNew<IssmDouble>(N);
+	xMemCpy<IssmDouble>(minvalue,in_minvalue,N);
+	this->maxvalue=xNew<IssmDouble>(N);
+	xMemCpy<IssmDouble>(maxvalue,in_maxvalue,N);
+	this->gradient=xNewZeroInit<IssmDouble>(N);
+}
+/*}}}*/
+ControlParam::~ControlParam(){/*{{{*/
+	xDelete<IssmDouble>(value);
+	xDelete<IssmDouble>(minvalue);
+	xDelete<IssmDouble>(maxvalue);
+	xDelete<IssmDouble>(gradient);
+	return;
+}
+/*}}}*/
+
+/*Object virtual functions definitions:*/
+Param* ControlParam::copy() {/*{{{*/
+
+	ControlParam* output = new ControlParam();
+	output->enum_type=this->enum_type;
+	output->M=this->M;
+	output->N=this->N;
+	if(value){
+		output->value=xNew<IssmDouble>(this->M*this->N);
+		xMemCpy<IssmDouble>(output->value,this->value,this->M*this->N);
+	}
+	if(minvalue){
+		output->minvalue=xNew<IssmDouble>(this->N);
+		xMemCpy<IssmDouble>(output->minvalue,this->minvalue,this->N);
+	}
+	if(maxvalue){
+		output->maxvalue=xNew<IssmDouble>(this->N);
+		xMemCpy<IssmDouble>(output->maxvalue,this->maxvalue,this->N);
+	}
+	if(gradient){
+		output->gradient=xNew<IssmDouble>(this->N);
+		xMemCpy<IssmDouble>(output->gradient,this->gradient,this->N);
+	}
+	return output;
+
+}
+/*}}}*/
+void ControlParam::DeepEcho(void){/*{{{*/
+
+	_printf_(setw(22)<<"   ControlParam "<<setw(35)<<left<<EnumToStringx(this->enum_type)<<"\n ");
+	if (value) _printf_("---value: ");
+	for(int i=0;i<this->M;i++) _printf_(" "<< this->value[i]);
+	_printf_("]\n");
+	if (minvalue) _printf_("---minvalue: ");
+	for(int i=0;i<this->M;i++) _printf_(" "<< this->minvalue[i]);
+	_printf_("]\n");
+	if (maxvalue) _printf_("---maxvalue: ");
+	for(int i=0;i<this->M;i++) _printf_(" "<< this->maxvalue[i]);
+	_printf_("]\n");
+	if (gradient) _printf_("---gradient: " << this->gradient << "\n");
+}
+/*}}}*/
+void ControlParam::Echo(void){/*{{{*/
+	this->DeepEcho();
+}
+/*}}}*/
+int  ControlParam::Id(void){ return -1; }/*{{{*/
+/*}}}*/
+void ControlParam::Marshall(MarshallHandle* marshallhandle){ /*{{{*/
+
+	int object_enum = ControlParamEnum;
+   marshallhandle->call(object_enum);
+	marshallhandle->call(this->enum_type);
+	marshallhandle->call(this->M);
+	marshallhandle->call(this->N);
+	marshallhandle->call(this->value,this->M*this->N);
+	marshallhandle->call(this->minvalue,this->N);
+	marshallhandle->call(this->maxvalue,this->N);
+	marshallhandle->call(this->gradient,this->N);
+
+}
+/*}}}*/
+int  ControlParam::ObjectEnum(void){/*{{{*/
+
+	return ControlParamEnum;
+
+}
+/*}}}*/
+
+void  ControlParam::GetParameterValue(IssmDouble** poutput,int* pN, const char* data){/*{{{*/
+
+	IssmDouble* output=xNew<IssmDouble>(N);
+	
+	if(strcmp(data,"value")==0){
+		xMemCpy<IssmDouble>(output,value,N);
+	}
+	else if (strcmp(data,"lowerbound")==0){
+		xMemCpy<IssmDouble>(output,minvalue,N);
+	}
+	else if (strcmp(data,"upperbound")==0){
+		xMemCpy<IssmDouble>(output,maxvalue,N);
+	}
+	else if (strcmp(data,"gradient")==0){
+		xMemCpy<IssmDouble>(output,gradient,N);
+	}
+	else{
+		_error_("Data " << data << " not supported yet");
+	}
+	
+	/*Assign output pointers:*/
+	if(pN) *pN=N;
+	*poutput=output;
+}
+/*}}}*/
+void  ControlParam::GetParameterValue(IssmDouble* poutput){/*{{{*/
+
+	/*Copy entire vector if M==1, or first row if M==2*/
+	if(M==1){
+		xMemCpy<IssmDouble>(poutput,value,N);
+		return;
+	}
+
+	_error_("STOP");
+
+}
+/*}}}*/
+void  ControlParam::GetParameterValue(IssmDouble* poutput, IssmDouble time){/*{{{*/
+
+	if(M==1){
+		*poutput = value[0];
+		return;
+	}
+
+	IssmDouble *timesteps = &this->value[1*this->N+0];
+	IssmDouble output;
+	bool       found;
+
+	/*Ok, we have the time, go through the timesteps, and figure out which interval we 
+	 *fall within. Then interpolate the values on this interval: */
+	if(time<timesteps[0]){
+		/*get values for the first time: */
+		output=this->value[0];
+		found=true;
+	}
+	else if(time>timesteps[this->N-1]){
+		/*get values for the last time: */
+		output=this->value[this->N-1];
+		found=true;
+	}
+	else{
+		/*Find which interval we fall within: */
+		for(int i=0;i<this->N;i++){
+			if(time==timesteps[i]){
+				/*We are right on one step time: */
+				output=this->value[i];
+				found=true;
+				break; //we are done with the time interpolation.
+			}
+			else{
+				if(timesteps[i]<time && time<timesteps[i+1]){
+					/*ok, we have the interval ]i:i+1[. Interpolate linearly for now: */
+					IssmDouble deltat=timesteps[i+1]-timesteps[i];
+					IssmDouble alpha=(time-timesteps[i])/deltat;
+					output=(1.0-alpha)*this->value[i] + alpha*this->value[i+1];
+					found=true;
+					break;
+				}
+				else continue; //keep looking on the next interval
+			}
+		}
+	}
+	if(!found)_error_("did not find time interval on which to interpolate values");
+	//_printf_("for time = "<<time/31536000.<<" yr, melt = "<<output*31536000.<<" m/yr\n");
+
+	*poutput=output;
+}
+/*}}}*/
+void  ControlParam::GetParameterValue(IssmDouble** poutput, int* pN){/*{{{*/
+
+	/*This method should be specific to VectorParams, only one tow required*/
+	_assert_(N>0);
+	_assert_(M==1);
+	IssmDouble* output=xNew<IssmDouble>(N);
+	xMemCpy<IssmDouble>(output,value,N);
+	
+	/*Assign output pointers:*/
+	if(pN) *pN=N;
+	*poutput=output;
+}
+/*}}}*/
+void  ControlParam::SetValue(IssmDouble* poutput,int in_M, int in_N){/*{{{*/
+
+	_assert_(in_N==this->N);
+	_assert_(in_M==1);
+	xMemCpy<IssmDouble>(this->value,poutput,in_N);
+}
+/*}}}*/
+void  ControlParam::SetGradient(IssmDouble* poutput,int in_M, int in_N){/*{{{*/
+
+	_assert_(in_M==1);
+	xMemCpy<IssmDouble>(this->gradient,poutput,in_N);
+}
+/*}}}*/
+void  ControlParam::GetVectorFromControl(Vector<IssmDouble>* vector,int control_index,int in_N,const char* data,int offset){/*{{{*/
+
+	/*Get list of ids for this element and this control*/
+	_assert_(in_N==this->N);
+	int* idlist = xNew<int>(this->N);
+	for(int i=0;i<this->N;i++) idlist[i] = offset+i;
+
+	/*Get data*/
+	IssmDouble* values = NULL;
+	GetParameterValue(&values, NULL, data);
+
+	/*Enter data in vector*/
+	vector->SetValues(this->N,idlist,values,INS_VAL);
+
+	/*Clean up*/
+	xDelete<int>(idlist);
+	xDelete<IssmDouble>(values);
+
+}/*}}}*/
Index: /issm/trunk/src/c/classes/Params/ControlParam.h
===================================================================
--- /issm/trunk/src/c/classes/Params/ControlParam.h	(revision 28013)
+++ /issm/trunk/src/c/classes/Params/ControlParam.h	(revision 28013)
@@ -0,0 +1,82 @@
+/*! \file ControlParam.h 
+ *  \brief: header file for ControlParam object
+ */
+
+#ifndef _CONTROLPARAM_H_
+#define _CONTROLPARAM_H_
+
+/*Headers:*/
+/*{{{*/
+#ifdef HAVE_CONFIG_H
+	#include <config.h>
+#else
+#error "Cannot compile with HAVE_CONFIG_H symbol! run configure first!"
+#endif
+
+#include "./Param.h"
+#include "../../shared/shared.h"
+/*}}}*/
+
+class ControlParam: public Param{
+
+	private: 
+		IssmDouble* value;   //Can either be a VecParam or a TransientParam
+		IssmDouble* minvalue;
+		IssmDouble* maxvalue;
+		IssmDouble* gradient; 
+		int         enum_type;
+		int         M,N;
+
+	public:
+		/*ControlParam constructors, destructors: {{{*/
+		ControlParam();
+		ControlParam(IssmDouble* in_value, IssmDouble* in_minvalue, IssmDouble* in_maxvalue, int in_enum_type, int in_M, int in_N);
+		~ControlParam();
+		/*}}}*/
+		/*Object virtual functions definitions:{{{ */
+		Param* copy();
+		void  DeepEcho();
+		void  Echo();
+		int   Id(); 
+		void Marshall(MarshallHandle* marshallhandle);
+		int   ObjectEnum();
+		/*}}}*/
+		/*Param virtual functions definitions: {{{*/
+		void  GetParameterValue(bool* pbool){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a bool");}
+		void  GetParameterValue(int* pinteger){_error_("Param "<< EnumToStringx(enum_type) << " cannot return an integer");}
+		void  GetParameterValue(int** pintarray,int* pM){_error_("Param "<< EnumToStringx(enum_type) << " cannot return an integer");}
+		void  GetParameterValue(int** pintarray,int* pM,int* pN){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a matrix");}
+		void  GetParameterValue(IssmDouble* pIssmDouble);
+		void  GetParameterValue(IssmDouble* pdouble,IssmDouble time);
+		void  GetParameterValue(char** pstring){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a string");}
+		void  GetParameterValue(char*** pstringarray,int* pM){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a string array");}
+		void  GetParameterValue(IssmDouble** pIssmDoublearray,int* pM);
+		void  GetParameterValue(IssmDouble** pIssmDoublearray,int* pM, const char* data);
+		void  GetParameterValue(IssmDouble** pIssmDoublearray,int* pM, int* pN){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a IssmDouble array");}
+		void  GetParameterValue(IssmDouble*** parray, int* pM,int** pmdims, int** pndims){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a matrix array");}
+		void  GetParameterValue(Vector<IssmDouble>** pvec){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a Vec");}
+		void  GetParameterValue(Matrix<IssmDouble>** pmat){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a Mat");}
+		void  GetParameterValue(FILE** pfid){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a FILE");}
+		void  GetParameterValue(DataSet** pdataset){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a DataSet");}
+		int   InstanceEnum(){return enum_type;}
+
+		void  SetEnum(int enum_in){this->enum_type = enum_in;};
+		void  SetValue(bool boolean){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a boolean");}
+		void  SetValue(int integer){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold an integer");}
+		void  SetValue(IssmDouble scalar){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a scalar");}
+		void  SetValue(char* string){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a string");}
+		void  SetValue(char** stringarray,int M){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a string array");}
+		void  SetValue(IssmDouble* IssmDoublearray){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a scalar");}
+		void  SetValue(IssmDouble* IssmDoublearray,int M){_error_("not implemented");}
+		void  SetValue(IssmDouble* pIssmDoublearray,int M,int N);
+		void  SetValue(int* intarray,int M){_error_("Param "<< EnumToStringx(enum_type) << " cannot return an integer");}
+		void  SetValue(int* pintarray,int M,int N){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a int mat array");}
+		void  SetValue(Vector<IssmDouble>* vec){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a Vec");}
+		void  SetValue(Matrix<IssmDouble>* mat){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a Mat");}
+		void  SetValue(FILE* fid){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a FILE");}
+		void  SetValue(IssmDouble** array, int M, int* mdim_array, int* ndim_array){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold an array of matrices");}
+		void  SetGradient(IssmDouble* poutput, int M, int N);
+		void  GetVectorFromControl(Vector<IssmDouble>* vector,int control_index,int N,const char* data,int offset);
+		/*}}}*/
+};
+#endif  /* _DOUBLEPARAM_H */
Index: /issm/trunk/src/c/classes/Params/DataSetParam.h
===================================================================
--- /issm/trunk/src/c/classes/Params/DataSetParam.h	(revision 28012)
+++ /issm/trunk/src/c/classes/Params/DataSetParam.h	(revision 28013)
@@ -51,4 +51,5 @@
 		void  GetParameterValue(IssmDouble** pIssmDoublearray,int* pM){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a IssmDouble array");}
 		void  GetParameterValue(IssmDouble** pIssmDoublearray,int* pM, int* pN){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a IssmDouble array");}
+		void  GetParameterValue(IssmDouble** pIssmDoublearray,int* pM, const char* data){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a IssmDouble array");}
 		void  GetParameterValue(IssmDouble*** parray, int* pM,int** pmdims, int** pndims){_error_("DataSet param of enum " << enum_type << " (" << EnumToStringx(enum_type) << ") cannot return a matrix array");}
 		void  GetParameterValue(Vector<IssmDouble>** pvec){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a Vec");}
@@ -64,4 +65,5 @@
 		void  SetValue(FILE* fid){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a file pointer");}
 		void  SetValue(char** stringarray,int M){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a string array");}
+		void  SetValue(IssmDouble* IssmDoublearray){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a IssmDouble array");}
 		void  SetValue(IssmDouble* IssmDoublearray,int M){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a IssmDouble array");}
 		void  SetValue(IssmDouble* pIssmDoublearray,int M,int N){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a IssmDouble array");}
@@ -72,4 +74,5 @@
 		void  SetValue(DataSet* dataset);
 		void  SetValue(IssmDouble** array, int M, int* mdim_array, int* ndim_array){_error_("DataSet param of enum " << enum_type << " (" << EnumToStringx(enum_type) << ") cannot hold an array of matrices");}
+		void  SetGradient(IssmDouble* poutput, int M, int N){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold an IssmDouble");};
 		/*}}}*/
 };
Index: /issm/trunk/src/c/classes/Params/DoubleMatArrayParam.h
===================================================================
--- /issm/trunk/src/c/classes/Params/DoubleMatArrayParam.h	(revision 28012)
+++ /issm/trunk/src/c/classes/Params/DoubleMatArrayParam.h	(revision 28013)
@@ -52,4 +52,5 @@
 		void  GetParameterValue(IssmDouble** pIssmDoublearray,int* pM){_error_("Param "<< EnumToStringx(enum_type) << "cannot return a IssmDouble array");}
 		void  GetParameterValue(IssmDouble** pIssmDoublearray,int* pM, int* pN){_error_("Param "<< EnumToStringx(enum_type) << "cannot return a IssmDouble array");}
+		void  GetParameterValue(IssmDouble** pIssmDoublearray,int* pM, const char* data){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a IssmDouble array");}
 		void  GetParameterValue(IssmDouble*** parray, int* pM,int** pmdims, int** pndims);
 		void  GetParameterValue(Vector<IssmDouble>** pvec){_error_("Param "<< EnumToStringx(enum_type) << "cannot return a Vec");}
@@ -65,4 +66,5 @@
 		void  SetValue(char* string){_error_("Param "<< EnumToStringx(enum_type) << "cannot hold a string");}
 		void  SetValue(char** stringarray,int M){_error_("Param "<< EnumToStringx(enum_type) << "cannot hold a string array");}
+		void  SetValue(IssmDouble* IssmDoublearray){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a IssmDouble array");}
 		void  SetValue(IssmDouble* IssmDoublearray,int M){_error_("Param "<< EnumToStringx(enum_type) << "cannot hold a IssmDouble vec array");}
 		void  SetValue(IssmDouble* IssmDoublearray,int M,int N){_error_("Param "<< EnumToStringx(enum_type) << "cannot hold a IssmDouble mat array");}
@@ -73,4 +75,5 @@
 		void  SetValue(FILE* fid){_error_("Bool param of enum " << enum_type << " (" << EnumToStringx(enum_type) << ") cannot hold a FILE");}
 		void  SetValue(IssmDouble** array, int M, int* mdim_array, int* ndim_array);
+		void  SetGradient(IssmDouble* poutput, int M, int N){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold an IssmDouble");};
 		/*}}}*/
 };
Index: /issm/trunk/src/c/classes/Params/DoubleMatParam.h
===================================================================
--- /issm/trunk/src/c/classes/Params/DoubleMatParam.h	(revision 28012)
+++ /issm/trunk/src/c/classes/Params/DoubleMatParam.h	(revision 28013)
@@ -52,4 +52,5 @@
 		void  GetParameterValue(IssmDouble** pIssmDoublearray,int* pM){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a IssmDouble array");}
 		void  GetParameterValue(IssmDouble** pIssmDoublearray,int* pM,int* pN);
+		void  GetParameterValue(IssmDouble** pIssmDoublearray,int* pM, const char* data){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a IssmDouble array");}
 		void  GetParameterValue(IssmDouble*** parray, int* pM,int** pmdims, int** pndims){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a matrix array");}
 		void  GetParameterValue(Vector<IssmDouble>** pvec){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a Vec");}
@@ -63,4 +64,5 @@
 		void  SetValue(char* string){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a string");}
 		void  SetValue(char** stringarray,int M){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a string array");}
+		void  SetValue(IssmDouble* IssmDoublearray){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a IssmDouble array");}
 		void  SetValue(IssmDouble* IssmDoublearray,int M){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a IssmDouble vec array");}
 		void  SetValue(IssmDouble* IssmDoublearray,int M,int N);
@@ -72,4 +74,5 @@
 		void  SetValue(IssmDouble** array, int M, int* mdim_array, int* ndim_array){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold an array of matrices");}
 		void  SetEnum(int enum_in){this->enum_type = enum_in;};
+		void  SetGradient(IssmDouble* poutput, int M, int N){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold an IssmDouble");};
 		/*}}}*/
 		/*DoubleMatParam specific routines:{{{*/
Index: /issm/trunk/src/c/classes/Params/DoubleParam.h
===================================================================
--- /issm/trunk/src/c/classes/Params/DoubleParam.h	(revision 28012)
+++ /issm/trunk/src/c/classes/Params/DoubleParam.h	(revision 28013)
@@ -50,4 +50,5 @@
 		void  GetParameterValue(IssmDouble** pIssmDoublearray,int* pM);
 		void  GetParameterValue(IssmDouble** pIssmDoublearray,int* pM, int* pN);
+		void  GetParameterValue(IssmDouble** pIssmDoublearray,int* pM, const char* data){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a IssmDouble array");}
 		void  GetParameterValue(IssmDouble*** parray, int* pM,int** pmdims, int** pndims){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a matrix array");}
 		void  GetParameterValue(Vector<IssmDouble>** pvec){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a Vec");}
@@ -63,4 +64,5 @@
 		void  SetValue(char* string){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a string");}
 		void  SetValue(char** stringarray,int M){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a string array");}
+		void  SetValue(IssmDouble* IssmDoublearray){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a IssmDouble array");}
 		void  SetValue(IssmDouble* IssmDoublearray,int M){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a IssmDouble array");}
 		void  SetValue(IssmDouble* pIssmDoublearray,int M,int N){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a IssmDouble array");}
@@ -71,4 +73,5 @@
 		void  SetValue(FILE* fid){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a FILE");}
 		void  SetValue(IssmDouble** array, int M, int* mdim_array, int* ndim_array){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold an array of matrices");}
+		void  SetGradient(IssmDouble* poutput, int M, int N){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold an IssmDouble");};
 		/*}}}*/
 };
Index: /issm/trunk/src/c/classes/Params/DoubleVecParam.h
===================================================================
--- /issm/trunk/src/c/classes/Params/DoubleVecParam.h	(revision 28012)
+++ /issm/trunk/src/c/classes/Params/DoubleVecParam.h	(revision 28013)
@@ -50,4 +50,5 @@
 		void  GetParameterValue(IssmDouble** pIssmDoublearray,int* pM);
 		void  GetParameterValue(IssmDouble** pIssmDoublearray,int* pM, int* pN);
+		void  GetParameterValue(IssmDouble** pIssmDoublearray,int* pM, const char* data){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a IssmDouble array");}
 		void  GetParameterValue(IssmDouble*** parray, int* pM,int** pmdims, int** pndims){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a matrix array");}
 		void  GetParameterValue(Vector<IssmDouble>** pvec){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a Vec");}
@@ -63,4 +64,5 @@
 		void  SetValue(char* string){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a string");}
 		void  SetValue(char** stringarray,int M){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a string array");}
+		void  SetValue(IssmDouble* IssmDoublearray){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a IssmDouble array");}
 		void  SetValue(IssmDouble* IssmDoublearray,int M);
 		void  SetValue(IssmDouble* pIssmDoublearray,int M,int N){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a IssmDouble mat array");}
@@ -71,4 +73,5 @@
 		void  SetValue(FILE* fid){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a FILE");}
 		void  SetValue(IssmDouble** array, int M, int* mdim_array, int* ndim_array){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold an array of matrices");}
+		void  SetGradient(IssmDouble* poutput, int M, int N){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold an IssmDouble");};
 		/*}}}*/
 		/*DoubleVecParam specific routines:{{{*/
Index: /issm/trunk/src/c/classes/Params/FileParam.h
===================================================================
--- /issm/trunk/src/c/classes/Params/FileParam.h	(revision 28012)
+++ /issm/trunk/src/c/classes/Params/FileParam.h	(revision 28013)
@@ -49,4 +49,5 @@
 		void  GetParameterValue(IssmDouble** pIssmDoublearray,int* pM){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a IssmDouble array");}
 		void  GetParameterValue(IssmDouble** pIssmDoublearray,int* pM, int* pN){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a IssmDouble array");}
+		void  GetParameterValue(IssmDouble** pIssmDoublearray,int* pM, const char* data){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a IssmDouble array");}
 		void  GetParameterValue(IssmDouble*** parray, int* pM,int** pmdims, int** pndims){_error_("File param of enum " << enum_type << " (" << EnumToStringx(enum_type) << ") cannot return a matrix array");}
 		void  GetParameterValue(Vector<IssmDouble>** pvec){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a Vec");}
@@ -62,4 +63,5 @@
 		void  SetValue(char* string){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a string");}
 		void  SetValue(char** stringarray,int M){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a string array");}
+		void  SetValue(IssmDouble* IssmDoublearray){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a IssmDouble array");}
 		void  SetValue(IssmDouble* IssmDoublearray,int M){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a IssmDouble array");}
 		void  SetValue(IssmDouble* pIssmDoublearray,int M,int N){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a IssmDouble array");}
@@ -70,4 +72,5 @@
 		void  SetValue(FILE* fid){_error_("File param of enum " << enum_type << " (" << EnumToStringx(enum_type) << ") cannot hold a FILE");}
 		void  SetValue(IssmDouble** array, int M, int* mdim_array, int* ndim_array){_error_("File param of enum " << enum_type << " (" << EnumToStringx(enum_type) << ") cannot hold an array of matrices");}
+		void  SetGradient(IssmDouble* poutput, int M, int N){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold an IssmDouble");};
 		/*}}}*/
 };
Index: /issm/trunk/src/c/classes/Params/GenericParam.h
===================================================================
--- /issm/trunk/src/c/classes/Params/GenericParam.h	(revision 28012)
+++ /issm/trunk/src/c/classes/Params/GenericParam.h	(revision 28013)
@@ -77,4 +77,5 @@
                 void  GetParameterValue(IssmDouble** pIssmDoublearray,int* pM){_error_("Param "<< EnumToStringx(myEnumVal) << " cannot return a IssmDouble array");}
                 void  GetParameterValue(IssmDouble** pIssmDoublearray,int* pM, int* pN){_error_("Param "<< EnumToStringx(myEnumVal) << " cannot return a IssmDouble array");}
+					 void  GetParameterValue(IssmDouble** pIssmDoublearray,int* pM, const char* data){_error_("Param "<< EnumToStringx(myEnumVal) << " cannot return a IssmDouble array");}
                 void  GetParameterValue(IssmDouble*** parray, int* pM,int** pmdims, int** pndims){_error_("Param "<< EnumToStringx(myEnumVal) << " cannot return a matrix array");}
                 void  GetParameterValue(Vector<IssmDouble>** pvec){_error_("Param "<< EnumToStringx(myEnumVal) << " cannot return a Vec");}
@@ -91,4 +92,5 @@
                 void  SetValue(char* string){_error_("Param "<< EnumToStringx(myEnumVal) << " cannot hold a string");}
                 void  SetValue(char** stringarray,int M){_error_("Param "<< EnumToStringx(myEnumVal) << " cannot hold a string array");}
+					 void  SetValue(IssmDouble* IssmDoublearray){_error_("Param "<< EnumToStringx(myEnumVal) << " cannot hold a IssmDouble array");}
                 void  SetValue(IssmDouble* IssmDoublearray,int M){_error_("Param "<< EnumToStringx(myEnumVal) << " cannot hold a IssmDouble array");}
                 void  SetValue(IssmDouble* pIssmDoublearray,int M,int N){_error_("Param "<< EnumToStringx(myEnumVal) << " cannot hold a IssmDouble array");}
@@ -97,4 +99,5 @@
                 void  SetValue(FILE* fid){_error_("Param "<< EnumToStringx(myEnumVal) << " cannot hold a FILE");}
                 void  SetValue(IssmDouble** array, int M, int* mdim_array, int* ndim_array){_error_("Param "<< EnumToStringx(myEnumVal) << " cannot hold an array of matrices");}
+					 void  SetGradient(IssmDouble* poutput, int M, int N){_error_("Param "<< EnumToStringx(myEnumVal) << " cannot hold an IssmDouble");};
 
                 /*}}}*/
Index: /issm/trunk/src/c/classes/Params/IntMatParam.h
===================================================================
--- /issm/trunk/src/c/classes/Params/IntMatParam.h	(revision 28012)
+++ /issm/trunk/src/c/classes/Params/IntMatParam.h	(revision 28013)
@@ -51,4 +51,5 @@
 		void  GetParameterValue(IssmDouble** pIssmDoublearray,int* pM){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a IssmDouble array");}
 		void  GetParameterValue(IssmDouble** pIssmDoublearray,int* pM,int* pN){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a matrix array");};
+		void  GetParameterValue(IssmDouble** pIssmDoublearray,int* pM, const char* data){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a IssmDouble array");}
 		void  GetParameterValue(IssmDouble*** parray, int* pM,int** pmdims, int** pndims){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a matrix array");}
 		void  GetParameterValue(Vector<IssmDouble>** pvec){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a Vec");}
@@ -64,4 +65,5 @@
 		void  SetValue(char* string){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a string");}
 		void  SetValue(char** stringarray,int M){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a string array");}
+		void  SetValue(IssmDouble* IssmDoublearray){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a IssmDouble array");}
 		void  SetValue(IssmDouble* IssmDoublearray,int M){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a IssmDouble vec array");}
 		void  SetValue(IssmDouble* IssmDoublearray,int M,int N){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a IssmDouble vec array");};
@@ -72,4 +74,5 @@
 		void  SetValue(FILE* fid){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a FILE");}
 		void  SetValue(IssmDouble** array, int M, int* mdim_array, int* ndim_array){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold an array of matrices");}
+		void  SetGradient(IssmDouble* poutput, int M, int N){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold an IssmDouble");};
 		/*}}}*/
 };
Index: /issm/trunk/src/c/classes/Params/IntParam.h
===================================================================
--- /issm/trunk/src/c/classes/Params/IntParam.h	(revision 28012)
+++ /issm/trunk/src/c/classes/Params/IntParam.h	(revision 28013)
@@ -50,4 +50,5 @@
 		void  GetParameterValue(IssmDouble** pIssmDoublearray,int* pM){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a IssmDouble array");}
 		void  GetParameterValue(IssmDouble** pIssmDoublearray,int* pM, int* pN){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a IssmDouble array");}
+		void  GetParameterValue(IssmDouble** pIssmDoublearray,int* pM, const char* data){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a IssmDouble array");}
 		void  GetParameterValue(IssmDouble*** parray, int* pM,int** pmdims, int** pndims){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a matrix array");}
 		void  GetParameterValue(Vector<IssmDouble>** pvec){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a Vec");}
@@ -65,4 +66,5 @@
 		void  SetValue(char* string){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a string");}
 		void  SetValue(char** stringarray,int M){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a string array");}
+		void  SetValue(IssmDouble* IssmDoublearray){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a IssmDouble array");}
 		void  SetValue(IssmDouble* IssmDoublearray,int M){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a IssmDouble array");}
 		void  SetValue(IssmDouble* pIssmDoublearray,int M,int N){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a IssmDouble array");}
@@ -71,4 +73,5 @@
 		void  SetValue(FILE* fid){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a FILE");}
 		void  SetValue(IssmDouble** array, int M, int* mdim_array, int* ndim_array){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold an array of matrices");}
+		void  SetGradient(IssmDouble* poutput, int M, int N){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold an IssmDouble");};
 		/*}}}*/
 };
Index: /issm/trunk/src/c/classes/Params/IntVecParam.h
===================================================================
--- /issm/trunk/src/c/classes/Params/IntVecParam.h	(revision 28012)
+++ /issm/trunk/src/c/classes/Params/IntVecParam.h	(revision 28013)
@@ -51,4 +51,5 @@
 		void  GetParameterValue(IssmDouble** pIssmDoublearray,int* pM){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a IssmDouble array (maybe in serial?)");}
 		void  GetParameterValue(IssmDouble** pIssmDoublearray,int* pM, int* pN){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a IssmDouble array");}
+		void  GetParameterValue(IssmDouble** pIssmDoublearray,int* pM, const char* data){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a IssmDouble array");}
 		void  GetParameterValue(IssmDouble*** parray, int* pM,int** pmdims, int** pndims){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a matrix array");}
 		void  GetParameterValue(Vector<IssmDouble>** pvec){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a Vec");}
@@ -64,4 +65,5 @@
 		void  SetValue(char* string){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a string");}
 		void  SetValue(char** stringarray,int M){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a string array");}
+		void  SetValue(IssmDouble* IssmDoublearray){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a IssmDouble array");}
 		void  SetValue(IssmDouble* IssmDoublearray,int M){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a IssmDouble mat array");}
 		void  SetValue(IssmDouble* pIssmDoublearray,int M,int N){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a IssmDouble mat array");}
@@ -72,4 +74,5 @@
 		void  SetValue(FILE* fid){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a FILE");}
 		void  SetValue(IssmDouble** array, int M, int* mdim_array, int* ndim_array){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold an array of matrices");}
+		void  SetGradient(IssmDouble* poutput, int M, int N){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold an IssmDouble");};
 		/*}}}*/
 };
Index: /issm/trunk/src/c/classes/Params/MatrixParam.h
===================================================================
--- /issm/trunk/src/c/classes/Params/MatrixParam.h	(revision 28012)
+++ /issm/trunk/src/c/classes/Params/MatrixParam.h	(revision 28013)
@@ -50,4 +50,5 @@
 		void  GetParameterValue(IssmDouble** pIssmDoublearray,int* pM){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a IssmDouble array");}
 		void  GetParameterValue(IssmDouble** pIssmDoublearray,int* pM, int* pN){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a IssmDouble array");}
+		void  GetParameterValue(IssmDouble** pIssmDoublearray,int* pM, const char* data){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a IssmDouble array");}
 		void  GetParameterValue(IssmDouble*** parray, int* pM,int** pmdims, int** pndims){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a matrix array");}
 		void  GetParameterValue(Vector<IssmDouble>** pvec){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a vec");}
@@ -63,4 +64,5 @@
 		void  SetValue(char* string){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a string");}
 		void  SetValue(char** stringarray,int M){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a string array");}
+		void  SetValue(IssmDouble* IssmDoublearray){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a IssmDouble array");}
 		void  SetValue(IssmDouble* IssmDoublearray,int M){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a IssmDouble array");}
 		void  SetValue(IssmDouble* pIssmDoublearray,int M,int N){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a IssmDouble array");}
@@ -71,4 +73,5 @@
 		void  SetValue(FILE* fid){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a FILE");}
 		void  SetValue(IssmDouble** array, int M, int* mdim_array, int* ndim_array){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold an array of matrices");}
+		void  SetGradient(IssmDouble* poutput, int M, int N){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold an IssmDouble");};
 		/*}}}*/
 };
Index: /issm/trunk/src/c/classes/Params/Param.h
===================================================================
--- /issm/trunk/src/c/classes/Params/Param.h	(revision 28012)
+++ /issm/trunk/src/c/classes/Params/Param.h	(revision 28013)
@@ -39,4 +39,5 @@
 		virtual void  GetParameterValue(IssmDouble** pIssmDoublearray,int* pM)=0;
 		virtual void  GetParameterValue(IssmDouble** pIssmDoublearray,int* pM,int* pN)=0;
+		virtual void  GetParameterValue(IssmDouble** pIssmDoublearray,int* pM, const char* data)=0;
 		virtual void  GetParameterValue(IssmDouble*** parray, int* pM,int** pmdims, int** pndims)=0;
 		virtual void  GetParameterValue(Vector<IssmDouble>** pvec)=0;
@@ -56,4 +57,5 @@
 		virtual void  SetValue(DataSet* dataset){_error_("not implemented yet");};
 		virtual void  SetValue(IssmDouble* IssmDoublearray,int M)=0;
+		virtual void  SetValue(IssmDouble* IssmDoublearray)=0;
 		virtual void  SetValue(IssmDouble* pIssmDoublearray,int M,int N)=0;
 		virtual void  SetValue(int* intarray,int M)=0;
@@ -63,4 +65,6 @@
 		virtual void  SetValue(FILE* fid)=0;
 		virtual void  SetValue(IssmDouble** array, int M, int* mdim_array, int* ndim_array)=0;
+		virtual void  SetGradient(IssmDouble* poutput, int M, int N)=0;
+		virtual void  GetVectorFromControl(Vector<IssmDouble>* vector,int control_index,int N,const char* data,int offset){_error_("not implemented yet");};
 };
 #endif
Index: /issm/trunk/src/c/classes/Params/Parameters.cpp
===================================================================
--- /issm/trunk/src/c/classes/Params/Parameters.cpp	(revision 28012)
+++ /issm/trunk/src/c/classes/Params/Parameters.cpp	(revision 28013)
@@ -20,4 +20,5 @@
 
 #include "./BoolParam.h"
+#include "./ControlParam.h"
 #include "./DoubleMatParam.h"
 #include "./DataSetParam.h"
@@ -232,4 +233,10 @@
 				this->AddObject(transarrayparam);
 			}
+			else if(obj_enum==ControlParamEnum){
+				ControlParam* controlparam=NULL;
+				controlparam=new ControlParam();
+				controlparam->Marshall(marshallhandle);
+				this->AddObject(controlparam);
+			}
 			else if(obj_enum==GenericParamEnum){
 				/*Skip for now (we don't want to Marhsall Comms)*/
@@ -417,4 +424,40 @@
 	if(pM)   *pM   = n;
 }/*}}}*/
+void Parameters::FindControlParam(IssmDouble** pvec,int* pM, int param_enum, const char* data){ _assert_(this);/*{{{*/
+
+	int index = EnumToIndex(param_enum);
+
+	/*Output*/
+	int         n;
+	IssmDouble* vector = NULL;
+
+	if(!this->params[index]) _error_("Parameter " << EnumToStringx(param_enum) <<" not set");
+	this->params[index]->GetParameterValue(pvec,pM,data);
+
+}/*}}}*/
+void Parameters::FindControlParamAndMakePassive(IssmPDouble** pvec,int* pM, int param_enum, const char* data){ _assert_(this);/*{{{*/
+
+	int index = EnumToIndex(param_enum);
+
+	/*Output*/
+	int         n;
+	IssmDouble* vector = NULL;
+
+	if(!this->params[index]) _error_("Parameter " << EnumToStringx(param_enum) <<" not set");
+	this->params[index]->GetParameterValue(&vector,&n,data);
+
+	/*Make output passive*/
+	#ifdef _HAVE_AD_
+	IssmPDouble* output = xNew<IssmPDouble>(n);
+	for(int i=0;i<n;i++) output[i] = reCast<IssmPDouble>(vector[i]);
+	xDelete<IssmDouble>(vector);
+	if(pvec) *pvec = output;
+	#else
+	if(pvec) *pvec = vector;
+	#endif
+
+	/*assign output pointers*/
+	if(pM)   *pM   = n;
+}/*}}}*/
 void Parameters::FindParamInDataset(IssmDouble** pIssmDoublearray,int* pM,int* pN,int dataset_type,int enum_type){/*{{{*/
 	_assert_(this);
@@ -524,4 +567,16 @@
 }
 /*}}}*/
+void   Parameters::SetParam(IssmDouble* IssmDoublearray, int enum_type){/*{{{*/
+
+	Param* param=NULL;
+
+	/*first, figure out if the param has already been created: */
+	param=xDynamicCast<Param*>(this->FindParamObject(enum_type));
+	if(param) param->SetValue(IssmDoublearray); //already exists, just set it.
+	else _error_("Param "<< EnumToStringx(enum_type) << " cannot setValue");
+
+	 //this->AddObject(new ControlParam(enum_type,IssmDoublearray,M,N)); //just add the new parameter.
+}
+/*}}}*/
 void   Parameters::SetParam(int* intarray,int M, int enum_type){/*{{{*/
 
@@ -594,4 +649,33 @@
 }
 /*}}}*/
+void   Parameters::SetControlFromVector(IssmDouble* vector, int enum_type, int M, int N, int offset){/*{{{*/
+
+	/*first, figure out if the param has already been created: */
+	Param* param=NULL;
+	param=xDynamicCast<Param*>(this->FindParamObject(enum_type));
+
+	if(param) param->SetValue(&vector[offset], M, N);
+	else _error_("Param "<< EnumToStringx(enum_type) << " cannot setValue");
+}
+/*}}}*/
+void   Parameters::SetGradientFromVector(IssmDouble* vector, int enum_type, int M, int N, int offset){/*{{{*/
+
+	/*first, figure out if the param has already been created: */
+	Param* param=NULL;
+	param=xDynamicCast<Param*>(this->FindParamObject(enum_type));
+
+	if(param) param->SetGradient(&vector[offset], M, N);
+	else _error_("Param "<< EnumToStringx(enum_type) << " cannot setValue");
+}
+/*}}}*/
+
+void  Parameters::GetVectorFromControl(Vector<IssmDouble>* vector,int control_enum,int control_index,int N,const char* data,int offset){/*{{{*/
+
+	/*first, figure out if the param has already been created: */
+	Param* param=xDynamicCast<Param*>(this->FindParamObject(control_enum));
+	if(!param) _error_("Parameter not found");
+
+	param->GetVectorFromControl(vector, control_index, N, data, offset);
+}/*}}}*/
 
 Param* Parameters::FindParamObject(int param_enum){/*{{{*/
Index: /issm/trunk/src/c/classes/Params/Parameters.h
===================================================================
--- /issm/trunk/src/c/classes/Params/Parameters.h	(revision 28012)
+++ /issm/trunk/src/c/classes/Params/Parameters.h	(revision 28013)
@@ -55,4 +55,6 @@
 		void  FindParamAndMakePassive(IssmPDouble* pscalar, int enum_type);
 		void  FindParamAndMakePassive(IssmPDouble** pvec,int* pM,int enum_type);
+		void  FindControlParam(IssmDouble** pvec,int* pM, int param_enum, const char* data);
+		void  FindControlParamAndMakePassive(IssmPDouble** pvec,int* pM, int param_enum, const char* data);
 		void  FindParamInDataset(IssmDouble** pIssmDoublearray,int* pM,int* pN,int dataset_type,int enum_type);
 		IssmDouble FindParam(int enum_type);
@@ -65,4 +67,5 @@
 		void  SetParam(IssmDouble* IssmDoublearray,int M,int enum_type);
 		void  SetParam(IssmDouble* IssmDoublearray,int M,int N,int enum_type);
+		void  SetParam(IssmDouble* IssmDoublearray, int enum_type);
 		void  SetParam(int* intarray,int M,int enum_type);
 		void  SetParam(int* intarray,int M,int N,int enum_type);
@@ -71,5 +74,7 @@
 		void  SetParam(FILE* fid,int enum_type);
 		void  SetParam(DataSet* dataset,int enum_type);
-
+		void  SetControlFromVector(IssmDouble* array, int enum_type, int M, int N, int offset);
+		void  SetGradientFromVector(IssmDouble* array, int enum_type, int M, int N, int offset);
+		void  GetVectorFromControl(Vector<IssmDouble>* vector,int control_enum,int control_index,int N,const char* data,int offset);
 		Param* FindParamObject(int enum_type);
 
Index: /issm/trunk/src/c/classes/Params/StringArrayParam.h
===================================================================
--- /issm/trunk/src/c/classes/Params/StringArrayParam.h	(revision 28012)
+++ /issm/trunk/src/c/classes/Params/StringArrayParam.h	(revision 28013)
@@ -50,4 +50,5 @@
 		void  GetParameterValue(IssmDouble** pIssmDoublearray,int* pM){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a IssmDouble array");}
 		void  GetParameterValue(IssmDouble** pIssmDoublearray,int* pM, int* pN){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a IssmDouble array");}
+		void  GetParameterValue(IssmDouble** pIssmDoublearray,int* pM, const char* data){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a IssmDouble array");}
 		void  GetParameterValue(IssmDouble*** parray, int* pM,int** pmdims, int** pndims){_error_("Vec param of enum " << enum_type << " (" << EnumToStringx(enum_type) << ") cannot return a matrix array");}
 		void  GetParameterValue(Vector<IssmDouble>** pvec){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a Vec");}
@@ -63,4 +64,5 @@
 		void  SetValue(char* string){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a string");}
 		void  SetValue(char** stringarray,int M);
+		void  SetValue(IssmDouble* IssmDoublearray){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a IssmDouble array");}
 		void  SetValue(IssmDouble* IssmDoublearray,int M){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a IssmDouble array");}
 		void  SetValue(IssmDouble* pIssmDoublearray,int M,int N){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a IssmDouble array");}
@@ -71,4 +73,5 @@
 		void  SetValue(FILE* fid){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a FILE");}
 		void  SetValue(IssmDouble** array, int M, int* mdim_array, int* ndim_array){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold an array of matrices");}
+		void  SetGradient(IssmDouble* poutput, int M, int N){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold an IssmDouble");};
 		/*}}}*/
 };
Index: /issm/trunk/src/c/classes/Params/StringParam.h
===================================================================
--- /issm/trunk/src/c/classes/Params/StringParam.h	(revision 28012)
+++ /issm/trunk/src/c/classes/Params/StringParam.h	(revision 28013)
@@ -50,4 +50,5 @@
 		void  GetParameterValue(IssmDouble** pIssmDoublearray,int* pM){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a IssmDouble array");}
 		void  GetParameterValue(IssmDouble** pIssmDoublearray,int* pM, int* pN){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a IssmDouble array");}
+		void  GetParameterValue(IssmDouble** pIssmDoublearray,int* pM, const char* data){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a IssmDouble array");}
 		void  GetParameterValue(IssmDouble*** parray, int* pM,int** pmdims, int** pndims){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a matrix array");}
 		void  GetParameterValue(Vector<IssmDouble>** pvec){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a Vec");}
@@ -63,4 +64,5 @@
 		void  SetValue(char* string);
 		void  SetValue(char** stringarray,int M){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a string array");}
+		void  SetValue(IssmDouble* IssmDoublearray){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a IssmDouble array");}
 		void  SetValue(IssmDouble* IssmDoublearray,int M){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a IssmDouble array");}
 		void  SetValue(IssmDouble* pIssmDoublearray,int M,int N){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a IssmDouble array");}
@@ -71,4 +73,5 @@
 		void  SetValue(FILE* fid){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a FILE");}
 		void  SetValue(IssmDouble** array, int M, int* mdim_array, int* ndim_array){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold an array of matrices");}
+		void  SetGradient(IssmDouble* poutput, int M, int N){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold an IssmDouble");};
 		/*}}}*/
 };
Index: /issm/trunk/src/c/classes/Params/TransientArrayParam.h
===================================================================
--- /issm/trunk/src/c/classes/Params/TransientArrayParam.h	(revision 28012)
+++ /issm/trunk/src/c/classes/Params/TransientArrayParam.h	(revision 28013)
@@ -55,4 +55,5 @@
 		void  GetParameterValue(IssmDouble** pIssmDoublearray,int* pM){_error_("Parameter " <<EnumToStringx(enum_type) << " cannot return a IssmDouble array");}
 		void  GetParameterValue(IssmDouble** pIssmDoublearray,int* pM,int* pN){_error_("Parameter " <<EnumToStringx(enum_type) << " cannot return a IssmDouble array");}
+		void  GetParameterValue(IssmDouble** pIssmDoublearray,int* pM, const char* data){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a IssmDouble array");}
 		void  GetParameterValue(IssmDouble*** parray, int* pM,int** pmdims, int** pndims){_error_("Parameter " <<EnumToStringx(enum_type) << " cannot return a matrix array");}
 		void  GetParameterValue(Vector<IssmDouble>** pvec){_error_("Parameter " <<EnumToStringx(enum_type) << " cannot return a Vec");}
@@ -68,4 +69,5 @@
 		void  SetValue(char* string){_error_("Parameter " <<EnumToStringx(enum_type) << " cannot hold a string");}
 		void  SetValue(char** stringarray,int M){_error_("Parameter " <<EnumToStringx(enum_type) << " cannot hold a string array");}
+		void  SetValue(IssmDouble* IssmDoublearray){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a IssmDouble array");}
 		void  SetValue(IssmDouble* IssmDoublearray,int M){_error_("Parameter " <<EnumToStringx(enum_type) << " cannot hold a IssmDouble vec array");}
 		void  SetValue(IssmDouble* IssmDoublearray,int M,int N){_error_("Parameter " <<EnumToStringx(enum_type) << " cannot hold a IssmDouble array");}
@@ -76,4 +78,5 @@
 		void  SetValue(FILE* fid){_error_("Parameter " <<EnumToStringx(enum_type) << " cannot hold a FILE");}
 		void  SetValue(IssmDouble** array, int M, int* mdim_array, int* ndim_array){_error_("Parameter " <<EnumToStringx(enum_type) << " cannot hold an array of matrices");}
+		void  SetGradient(IssmDouble* poutput, int M, int N){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold an IssmDouble");};
 		/*}}}*/
 };
Index: /issm/trunk/src/c/classes/Params/TransientParam.h
===================================================================
--- /issm/trunk/src/c/classes/Params/TransientParam.h	(revision 28012)
+++ /issm/trunk/src/c/classes/Params/TransientParam.h	(revision 28013)
@@ -53,4 +53,5 @@
 		void  GetParameterValue(IssmDouble** pIssmDoublearray,int* pM){_error_("Parameter " <<EnumToStringx(enum_type) << " cannot return a IssmDouble array");}
 		void  GetParameterValue(IssmDouble** pIssmDoublearray,int* pM,int* pN){_error_("Parameter " <<EnumToStringx(enum_type) << " cannot return a IssmDouble array");}
+		void  GetParameterValue(IssmDouble** pIssmDoublearray,int* pM, const char* data){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a IssmDouble array");}
 		void  GetParameterValue(IssmDouble*** parray, int* pM,int** pmdims, int** pndims){_error_("Parameter " <<EnumToStringx(enum_type) << " cannot return a matrix array");}
 		void  GetParameterValue(Vector<IssmDouble>** pvec){_error_("Parameter " <<EnumToStringx(enum_type) << " cannot return a Vec");}
@@ -66,4 +67,5 @@
 		void  SetValue(char* string){_error_("Parameter " <<EnumToStringx(enum_type) << " cannot hold a string");}
 		void  SetValue(char** stringarray,int M){_error_("Parameter " <<EnumToStringx(enum_type) << " cannot hold a string array");}
+		void  SetValue(IssmDouble* IssmDoublearray){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a IssmDouble array");}
 		void  SetValue(IssmDouble* IssmDoublearray,int M){_error_("Parameter " <<EnumToStringx(enum_type) << " cannot hold a IssmDouble vec array");}
 		void  SetValue(IssmDouble* IssmDoublearray,int M,int N){_error_("Parameter " <<EnumToStringx(enum_type) << " cannot hold a IssmDouble array");}
@@ -74,4 +76,5 @@
 		void  SetValue(FILE* fid){_error_("Parameter " <<EnumToStringx(enum_type) << " cannot hold a FILE");}
 		void  SetValue(IssmDouble** array, int M, int* mdim_array, int* ndim_array){_error_("Parameter " <<EnumToStringx(enum_type) << " cannot hold an array of matrices");}
+		void  SetGradient(IssmDouble* poutput, int M, int N){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold an IssmDouble");};
 		/*}}}*/
 };
Index: /issm/trunk/src/c/classes/Params/VectorParam.h
===================================================================
--- /issm/trunk/src/c/classes/Params/VectorParam.h	(revision 28012)
+++ /issm/trunk/src/c/classes/Params/VectorParam.h	(revision 28013)
@@ -50,4 +50,5 @@
 		void  GetParameterValue(IssmDouble** pIssmDoublearray,int* pM){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a IssmDouble array");}
 		void  GetParameterValue(IssmDouble** pIssmDoublearray,int* pM, int* pN){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a IssmDouble array");}
+		void  GetParameterValue(IssmDouble** pIssmDoublearray,int* pM, const char* data){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a IssmDouble array");}
 		void  GetParameterValue(IssmDouble*** parray, int* pM,int** pmdims, int** pndims){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a matrix array");}
 		void  GetParameterValue(Matrix<IssmDouble>** pmat){_error_("Param "<< EnumToStringx(enum_type) << " cannot return a Mat");}
@@ -63,4 +64,5 @@
 		void  SetValue(char* string){_error_("Vector of enum " << enum_type << " (" << EnumToStringx(enum_type) << ") cannot hold a string");}
 		void  SetValue(char** stringarray,int M){_error_("Vector of enum " << enum_type << " (" << EnumToStringx(enum_type) << ") cannot hold a string array");}
+		void  SetValue(IssmDouble* IssmDoublearray){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold a IssmDouble array");}
 		void  SetValue(IssmDouble* IssmDoublearray,int M){_error_("Vector of enum " << enum_type << " (" << EnumToStringx(enum_type) << ") cannot hold a IssmDouble array");}
 		void  SetValue(IssmDouble* pIssmDoublearray,int M,int N){_error_("Vector of enum " << enum_type << " (" << EnumToStringx(enum_type) << ") cannot hold a IssmDouble array");}
@@ -71,4 +73,5 @@
 		void  SetValue(FILE* fid){_error_("Vector of enum " << enum_type << " (" << EnumToStringx(enum_type) << ") cannot hold a FILE");}
 		void  SetValue(IssmDouble** array, int M, int* mdim_array, int* ndim_array){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold an array of matrices");}
+		void  SetGradient(IssmDouble* poutput, int M, int N){_error_("Param "<< EnumToStringx(enum_type) << " cannot hold an IssmDouble");};
 		/*}}}*/
 };
Index: /issm/trunk/src/c/classes/Regionaloutput.cpp
===================================================================
--- /issm/trunk/src/c/classes/Regionaloutput.cpp	(revision 28012)
+++ /issm/trunk/src/c/classes/Regionaloutput.cpp	(revision 28013)
@@ -143,4 +143,10 @@
 				val_t+=element->TotalSmb(this->mask,true);
 				break;
+			case TotalSmbMeltEnum:
+				val_t+=element->TotalSmbMelt(this->mask,true);
+				break;
+			case TotalSmbRefreezeEnum:
+				val_t+=element->TotalSmbRefreeze(this->mask,true);
+				break;
 			default:
 				_error_("Regional output type " << this->outputname << " not supported yet!");
Index: /issm/trunk/src/c/classes/Vertices.cpp
===================================================================
--- /issm/trunk/src/c/classes/Vertices.cpp	(revision 28012)
+++ /issm/trunk/src/c/classes/Vertices.cpp	(revision 28013)
@@ -175,4 +175,41 @@
 }
 /*}}}*/
+void Vertices::XYList(IssmDouble** pxcoords,IssmDouble** pycoords){/*{{{*/
+
+	/*output: */
+	IssmDouble* xyz_serial=NULL;
+
+	/*recover my_rank:*/
+	int my_rank=IssmComm::GetRank();
+
+	/*First, figure out number of vertices: */
+	int num_vertices=this->NumberOfVertices();
+
+	/*Now, allocate vectors*/
+	Vector<IssmDouble>* xlist = new Vector<IssmDouble>(num_vertices);
+	Vector<IssmDouble>* ylist = new Vector<IssmDouble>(num_vertices);
+
+	/*Go through vertices, and for each vertex, object, report it cpu: */
+	for(Object* & object : this->objects){
+      Vertex* vertex = xDynamicCast<Vertex*>(object);
+		xlist->SetValue(vertex->sid,vertex->GetX() ,INS_VAL);
+		ylist->SetValue(vertex->sid,vertex->GetY(),INS_VAL);
+	}
+
+	/*Assemble:*/
+	xlist->Assemble();
+	ylist->Assemble();
+
+	/*gather on cpu 0: */
+	IssmDouble* x_serial=xlist->ToMPISerial();
+	IssmDouble* y_serial=ylist->ToMPISerial();
+
+	/*Free resources: */
+	*pxcoords = x_serial;
+	*pycoords = y_serial;
+	delete xlist;
+	delete ylist;
+}
+/*}}}*/
 
 void Vertices::Finalize(IoModel* iomodel){/*{{{*/
@@ -235,13 +272,17 @@
 
 	/* Share pids of masters and update pids of clones*/
-	int* truepids = xNew<int>(this->Size()); //only one alloc
+	int **send_truepids = xNewZeroInit<int*>(num_procs);
+	int  *recv_truepids = xNewZeroInit<int>(this->Size());
+	ISSM_MPI_Request* send_requests = xNew<ISSM_MPI_Request>(num_procs);
+	for(int rank=0;rank<num_procs;rank++) send_requests[rank] = ISSM_MPI_REQUEST_NULL;
 	for(int rank=0;rank<num_procs;rank++){
 		if(this->common_send[rank]){
 			int  numids = this->common_send[rank];
+			send_truepids[rank] = xNew<int>(numids);
 			for(int i=0;i<numids;i++){
 				Vertex* vertex=xDynamicCast<Vertex*>(this->GetObjectByOffset(this->common_send_ids[rank][i]));
-				truepids[i] = vertex->pid;
+				send_truepids[rank][i] = vertex->pid;
 			}
-			ISSM_MPI_Send(truepids,numids,ISSM_MPI_INT,rank,0,IssmComm::GetComm());
+			ISSM_MPI_Isend(send_truepids[rank],numids,ISSM_MPI_INT,rank,0,IssmComm::GetComm(),&send_requests[rank]);
 		}
 	}
@@ -249,12 +290,18 @@
 		if(this->common_recv[rank]){
 			int  numids = this->common_recv[rank];
-			ISSM_MPI_Recv(truepids,numids,ISSM_MPI_INT,rank,0,IssmComm::GetComm(),&status);
+			ISSM_MPI_Recv(recv_truepids,numids,ISSM_MPI_INT,rank,0,IssmComm::GetComm(),&status);
 			for(int i=0;i<numids;i++){
 				Vertex* vertex=xDynamicCast<Vertex*>(this->GetObjectByOffset(this->common_recv_ids[rank][i]));
-				vertex->pid = truepids[i];
+				vertex->pid = recv_truepids[i];
 			}
 		}
 	}
-	xDelete<int>(truepids);
+	xDelete<int>(recv_truepids);
+	for(int rank=0;rank<num_procs;rank++){
+		if(this->common_send[rank]) ISSM_MPI_Wait(&send_requests[rank],&status);
+		xDelete<int>(send_truepids[rank]);
+	}
+	xDelete<int*>(send_truepids);
+	xDelete<ISSM_MPI_Request>(send_requests);
 }/*}}}*/
 int Vertices::NumberOfVertices(){/*{{{*/
Index: /issm/trunk/src/c/classes/Vertices.h
===================================================================
--- /issm/trunk/src/c/classes/Vertices.h	(revision 28012)
+++ /issm/trunk/src/c/classes/Vertices.h	(revision 28013)
@@ -39,4 +39,5 @@
 		int   NumberOfVerticesLocalAll(void);
 		void  LatLonList(IssmDouble** lat,IssmDouble** lon);
+		void  XYList(IssmDouble** pxcoords,IssmDouble** pycoords);
 };
 
Index: /issm/trunk/src/c/classes/classes.h
===================================================================
--- /issm/trunk/src/c/classes/classes.h	(revision 28012)
+++ /issm/trunk/src/c/classes/classes.h	(revision 28013)
@@ -24,5 +24,9 @@
 #include "./Numberedcostfunction.h"
 #include "./Cfsurfacesquare.h"
+#include "./Cfsurfacesquaretransient.h"
 #include "./Cfdragcoeffabsgrad.h"
+#include "./Cfdragcoeffabsgradtransient.h"
+#include "./Cfrheologybbarabsgrad.h"
+#include "./Cfrheologybbarabsgradtransient.h"
 #include "./Cfsurfacelogvel.h"
 #include "./Cflevelsetmisfit.h"
@@ -89,4 +93,5 @@
 #include "./Params/GenericParam.h"
 #include "./Params/BoolParam.h"
+#include "./Params/ControlParam.h"
 #include "./Params/DoubleMatParam.h"
 #include "./Params/DoubleTransientMatParam.h"
Index: /issm/trunk/src/c/cores/controladm1qn3_core.cpp
===================================================================
--- /issm/trunk/src/c/cores/controladm1qn3_core.cpp	(revision 28012)
+++ /issm/trunk/src/c/cores/controladm1qn3_core.cpp	(revision 28013)
@@ -208,6 +208,6 @@
 	femmodel->parameters->FindParam(&solution_type,SolutionTypeEnum);
 
-	/*In transient, we need to make sure we do not modify femmodel at each iteration, make a copy*/
-	if(solution_type == TransientSolutionEnum) femmodel = input_struct->femmodel->copy();
+	/*we need to make sure we do not modify femmodel at each iteration, make a copy*/
+	femmodel = input_struct->femmodel->copy();
 
 	IssmPDouble*  Jlist  = input_struct->Jlist;
@@ -313,5 +313,4 @@
 
 		/*Get Dependents*/
-		IssmDouble   output_value;
 		int          num_dependents;
 		IssmPDouble *dependents;
@@ -329,26 +328,26 @@
 			i++;
 			DependentObject* dep=xDynamicCast<DependentObject*>(object);
-			if(solution_type==TransientSolutionEnum) output_value = dep->GetValue();
-			if(solution_type!=TransientSolutionEnum) dep->Responsex(&output_value,femmodel);
-
-			#if defined(_HAVE_CODIPACK_)
-			tape_codi.registerOutput(output_value);
+
+			/*Get cost function for this dependent*/
+			dep->RecordResponsex(femmodel);
+			IssmDouble output_value = dep->GetValue();
 			dependents[i] = output_value.getValue();
-			#if _CODIPACK_MAJOR_==2
-			codi_global.output_indices.push_back(output_value.getIdentifier());
-			#elif _CODIPACK_MAJOR_==1
-			codi_global.output_indices.push_back(output_value.getGradientData());
-			#else
-			#error "_CODIPACK_MAJOR_ not supported"
+			#if defined(_HAVE_ADOLC_)
+			output_value>>=dependents[i];
 			#endif
 
-			#elif defined(_HAVE_ADOLC_)
-			output_value>>=dependents[i];
-
-			#else
-			_error_("not suppoted");
-			#endif
 			J+=output_value;
 		}
+
+		#if defined(_HAVE_CODIPACK_)
+		tape_codi.registerOutput(J);
+		#if _CODIPACK_MAJOR_==2
+		codi_global.output_indices.push_back(J.getIdentifier());
+		#elif _CODIPACK_MAJOR_==1
+		codi_global.output_indices.push_back(J.getGradientData());
+		#else
+		#error "_CODIPACK_MAJOR_ not supported"
+		#endif
+		#endif
 
 		/*Turning off trace tape*/
@@ -420,31 +419,11 @@
 		/*Get gradient for CoDiPack{{{*/
 		if(VerboseAutodiff())_printf0_("   CoDiPack fos_reverse\n");
-
-		/* call the fos_reverse in a loop on the index, from 0 to num_dependents, so
-		 * as to generate num_dependents gradients: */
-		for(int dep_index=0;dep_index<num_dependents_old;dep_index++){
-
-			/*initialize direction index in the weights vector: */
-			if(my_rank==0){
-				if(dep_index<0 || dep_index>=num_dependents || codi_global.output_indices.size() <= dep_index){
-					_error_("index value for dependent index should be in [0,num_dependents-1]");
-				}
-				tape_codi.setGradient(codi_global.output_indices[dep_index],1.0);
-			}
-			//feclearexcept(FE_ALL_EXCEPT);
-			//feenableexcept(FE_DIVBYZERO | FE_INVALID | FE_OVERFLOW);
-			tape_codi.evaluate();
-
-			/*Get gradient for this dependent */
-			weightVectorTimesJac=xNew<IssmPDouble>(num_independents);
-			auto in_size = codi_global.input_indices.size();
-			for(size_t i = 0; i < in_size; ++i){
-				_assert_(i<num_independents);
-				weightVectorTimesJac[i] = tape_codi.getGradient(codi_global.input_indices[i]);
-			}
-			if(my_rank==0) for(int i=0;i<num_independents;i++){
-				totalgradient[i]+=weightVectorTimesJac[i];
-			}
-			xDelete(weightVectorTimesJac);
+		if(my_rank==0) tape_codi.setGradient(codi_global.output_indices[0],1.0);
+		tape_codi.evaluate();
+
+		auto in_size = codi_global.input_indices.size();
+		for(size_t i = 0; i < in_size; ++i){
+			_assert_(i<num_independents);
+			totalgradient[i] = tape_codi.getGradient(codi_global.input_indices[i]);
 		}
 
@@ -473,10 +452,5 @@
 		if(*indic==0){
 			/*dry run, no gradient required*/
-
-			/*Retrieve objective functions independently*/
-			_printf0_("f(x) = "<<setw(12)<<setprecision(7)<<*pf<<"  |  ");
-			_printf0_("            N/A |\n");
-			for(int i=0;i<num_responses;i++) _printf0_(" "<<setw(12)<<setprecision(7)<<Jlist[(*Jlisti)*JlistN+i]);
-			_printf0_("\n");
+			InversionStatsIter( (*Jlisti)+1, *pf, NAN, &Jlist[(*Jlisti)*JlistN], num_responses);
 
 			*Jlisti = (*Jlisti) +1;
@@ -509,13 +483,11 @@
 	Gnorm = sqrt(Gnorm);
 	_assert_(!xIsNan(Gnorm));
+	_assert_(!xIsInf(Gnorm));
 
 	/*Print info*/
-	_printf0_("f(x) = "<<setw(12)<<setprecision(7)<<*pf<<"  |  ");
-	_printf0_("       "<<setw(12)<<setprecision(7)<<Gnorm<<" |");
-	for(int i=0;i<num_responses;i++) _printf0_(" "<<setw(12)<<setprecision(7)<<Jlist[(*Jlisti)*JlistN+i]);
-	_printf0_("\n");
+	InversionStatsIter( (*Jlisti)+1, *pf, reCast<double>(Gnorm), &Jlist[(*Jlisti)*JlistN], num_responses);
 
 	/*Clean-up and return*/
-	if(solution_type == TransientSolutionEnum) delete femmodel;
+	delete femmodel;
 	*Jlisti = (*Jlisti) +1;
 	xDelete<double>(XU);
@@ -531,5 +503,5 @@
 	/*Intermediaries*/
 	long    omode;
-	double  f,dxmin,gttol;
+	double  f,dxmin,dfmin_frac,gttol;
 	int     maxsteps,maxiter;
 	int     intn ,num_controls,num_cost_functions,solution_type;
@@ -548,4 +520,5 @@
 	femmodel->parameters->FindParam(&maxiter,InversionMaxiterEnum);
 	femmodel->parameters->FindParamAndMakePassive(&dxmin,InversionDxminEnum);
+	femmodel->parameters->FindParamAndMakePassive(&dfmin_frac,InversionDfminFracEnum);
 	femmodel->parameters->FindParamAndMakePassive(&gttol,InversionGttolEnum);
 	femmodel->parameters->FindParamAndMakePassive(&scaling_factors,NULL,InversionControlScalingFactorsEnum);
@@ -595,7 +568,5 @@
 	double*   dz  = xNew<double>(ndz);
 	if(VerboseControl())_printf0_("   Computing initial solution\n");
-	_printf0_("\n");
-	_printf0_("Cost function f(x)   | Gradient norm |g(x)| |  List of contributions\n");
-	_printf0_("____________________________________________________________________\n");
+	InversionStatsHeader(num_cost_functions);
 
 	/*Prepare structure for m1qn3*/
@@ -609,6 +580,8 @@
 	indic = 4; /*gradient required*/
 	simul_ad(&indic,&n,X,&f,G,izs,rzs,(void*)&mystruct);
+
 	/*Estimation of the expected decrease in f during the first iteration*/
-	double df1=f;
+	if(dfmin_frac==0.) dfmin_frac=1.;
+	double df1=dfmin_frac*f;
 
 	/*Call M1QN3 solver*/
@@ -617,4 +590,7 @@
 				&gttol,normtype,&impres,&io,imode,&omode,&niter,&nsim,iz,dz,&ndz,
 				&reverse,&indic,izs,rzs,(void*)&mystruct);
+
+	/*Print exit flag*/
+	InversionStatsFooter(num_cost_functions);
 	switch(int(omode)){
 		case 0:  _printf0_("   Stop requested (indic = 0)\n"); break;
@@ -654,8 +630,7 @@
 		aG[i] = reCast<IssmDouble>(G[i]);
 	}
-
+	
 	ControlInputSetGradientx(femmodel->elements,femmodel->nodes,femmodel->vertices,femmodel->loads,femmodel->materials,femmodel->parameters,aG);
 	SetControlInputsFromVectorx(femmodel,aX);
-
 	xDelete(aX);
 
@@ -684,8 +659,9 @@
 	}
 	else{
+		//FIXME: merge with code above?
 		femmodel->results->AddObject(new GenericExternalResult<IssmPDouble*>(femmodel->results->Size()+1,JEnum,mystruct.Jlist,(*mystruct.i),mystruct.N,0,0));
-
 		femmodel->OutputControlsx(&femmodel->results);
 	}
+	femmodel->results->AddObject(new GenericExternalResult<int>(femmodel->results->Size()+1,InversionStopFlagEnum,int(omode)));
 
 	xDelete(aG);
Index: /issm/trunk/src/c/cores/controlm1qn3_core.cpp
===================================================================
--- /issm/trunk/src/c/cores/controlm1qn3_core.cpp	(revision 28012)
+++ /issm/trunk/src/c/cores/controlm1qn3_core.cpp	(revision 28013)
@@ -40,5 +40,5 @@
 	/*Intermediaries*/
 	long    omode;
-	double  f,dxmin,gttol; 
+	double  f,dxmin,dfmin_frac,gttol; 
 	int     maxsteps,maxiter;
 	int     intn,num_controls,num_cost_functions,solution_type;
@@ -60,4 +60,5 @@
 	femmodel->parameters->FindParam(&maxiter,InversionMaxiterEnum);
 	femmodel->parameters->FindParamAndMakePassive(&dxmin,InversionDxminEnum);
+	femmodel->parameters->FindParamAndMakePassive(&dfmin_frac,InversionDfminFracEnum);
 	femmodel->parameters->FindParamAndMakePassive(&gttol,InversionGttolEnum);
 	femmodel->parameters->FindParamAndMakePassive(&scaling_factors,NULL,InversionControlScalingFactorsEnum);
@@ -105,7 +106,5 @@
 
 	if(VerboseControl())_printf0_("   Computing initial solution\n");
-	_printf0_("\n");
-	_printf0_("Cost function f(x)   | Gradient norm |g(x)| |  List of contributions\n");
-	_printf0_("____________________________________________________________________\n");
+	InversionStatsHeader(num_cost_functions);
 
 	/*Prepare structure for m1qn3*/
@@ -122,5 +121,6 @@
 
 	/*Estimation of the expected decrease in f during the first iteration*/
-	double df1=f;
+	if(dfmin_frac==0.) dfmin_frac=1.;
+	double df1=dfmin_frac*f;
 
 	/*Call M1QN3 solver*/
@@ -131,4 +131,5 @@
 
 	/*Print exit flag*/
+	InversionStatsFooter(num_cost_functions);
 	switch(int(omode)){
 		case 0:  _printf0_("   Stop requested (indic = 0)\n"); break;
@@ -179,4 +180,5 @@
 	femmodel->OutputControlsx(&femmodel->results);
 	femmodel->results->AddObject(new GenericExternalResult<double*>(femmodel->results->Size()+1,JEnum,mystruct.Jlist,(*mystruct.i),mystruct.N,0,0));
+	femmodel->results->AddObject(new GenericExternalResult<int>(femmodel->results->Size()+1,InversionStopFlagEnum,int(omode)));
 
 	/*Finalize*/
@@ -262,5 +264,4 @@
 	femmodel->CostFunctionx(&J,&Jtemp,NULL);
 	*pf = reCast<double>(J);
-	_printf0_("f(x) = "<<setw(12)<<setprecision(7)<<*pf<<"  |  ");
 
 	/*Record cost function values and delete Jtemp*/
@@ -271,9 +272,5 @@
 	if(*indic==0){
 		/*dry run, no gradient required*/
-
-		/*Retrieve objective functions independently*/
-		_printf0_("            N/A |\n");
-		for(int i=0;i<num_responses;i++) _printf0_(" "<<setw(12)<<setprecision(7)<<Jlist[(*Jlisti)*JlistN+i]);
-		_printf0_("\n");
+		InversionStatsIter( (*Jlisti)+1, *pf, NAN, &Jlist[(*Jlisti)*JlistN], num_responses);
 
 		*Jlisti = (*Jlisti) +1;
@@ -310,7 +307,5 @@
 
 	/*Print info*/
-	_printf0_("       "<<setw(12)<<setprecision(7)<<Gnorm<<" |");
-	for(int i=0;i<num_responses;i++) _printf0_(" "<<setw(12)<<setprecision(7)<<Jlist[(*Jlisti)*JlistN+i]);
-	_printf0_("\n");
+	InversionStatsIter( (*Jlisti)+1, *pf, reCast<double>(Gnorm), &Jlist[(*Jlisti)*JlistN], num_responses);
 
 	/*Clean-up and return*/
Index: /issm/trunk/src/c/cores/controltao_core.cpp
===================================================================
--- /issm/trunk/src/c/cores/controltao_core.cpp	(revision 28012)
+++ /issm/trunk/src/c/cores/controltao_core.cpp	(revision 28013)
@@ -98,6 +98,5 @@
 	TaoSetInitialVector(tao,X->pvector->vector);
 	#else
-	//TaoSetSolution(tao,X->pvector->vector);
-	_error_("not implemented yet");
+	TaoSetSolution(tao,X->pvector->vector);
 	#endif
 	TaoSetVariableBounds(tao,XL->pvector->vector,XU->pvector->vector);
@@ -107,4 +106,5 @@
 	user.J=xNewZeroInit<double>(maxiter+5);
 	user.femmodel=femmodel;
+	G=new Vector<IssmDouble>(0); VecFree(&G->pvector->vector);
 	#if PETSC_VERSION_LT(3,17,0)
 	TaoSetObjectiveAndGradientRoutine(tao,FormFunctionGradient,(void*)&user); 
@@ -124,10 +124,8 @@
 	TaoGetSolution(tao,&X->pvector->vector);
 	#endif
-	G=new Vector<IssmDouble>(0); VecFree(&G->pvector->vector);
 	#if PETSC_VERSION_LT(3,17,0)
 	TaoGetGradientVector(tao,&G->pvector->vector);
 	#else
-	//TaoGetGradient(tao,&G->pvector->vector);
-	_error_("not implemented yet");
+	TaoGetGradient(tao,&G->pvector->vector, NULL, NULL);
 	#endif
 	SetControlInputsFromVectorx(femmodel,X);
Index: /issm/trunk/src/c/cores/controlvalidation_core.cpp
===================================================================
--- /issm/trunk/src/c/cores/controlvalidation_core.cpp	(revision 28012)
+++ /issm/trunk/src/c/cores/controlvalidation_core.cpp	(revision 28013)
@@ -243,5 +243,4 @@
 
 	/*Get Dependents*/
-	IssmDouble  output_value;
 	int         num_dependents;
 	IssmPDouble *dependents;
@@ -257,10 +256,7 @@
 		DependentObject* dep=xDynamicCast<DependentObject*>(object);
 		i++;
-		if(solution_type==TransientSolutionEnum){
-			output_value = dep->GetValue();
-		}
-		else{
-			dep->Responsex(&output_value,femmodel);
-		}
+		dep->RecordResponsex(femmodel);
+		IssmDouble output_value = dep->GetValue();
+
 		_printf0_("=== output ="<<output_value<<" \n");
 		if(my_rank==0) {
@@ -339,10 +335,6 @@
 		for(Object* & object:dependent_objects->objects){
 			DependentObject* dep=xDynamicCast<DependentObject*>(object);
-			if(solution_type==TransientSolutionEnum){
-				output_value = dep->GetValue();
-			}
-			else{
-				dep->Responsex(&output_value,femmodel);
-			}
+			dep->RecordResponsex(femmodel);
+			IssmDouble output_value = dep->GetValue();
 			j+=output_value;
 		}
Index: /issm/trunk/src/c/cores/debris_core.cpp
===================================================================
--- /issm/trunk/src/c/cores/debris_core.cpp	(revision 28012)
+++ /issm/trunk/src/c/cores/debris_core.cpp	(revision 28013)
@@ -34,31 +34,33 @@
 	if(VerboseSolution()) _printf0_("   computing debris transport\n");
 
-	// We need surface and bed slopes for removal model
+	// We need surface slopes for removal model
 	surfaceslope_core(femmodel);
-	bedslope_core(femmodel);
 
 	/*Transport Debris*/
 	if(VerboseSolution()) _printf0_("   call computational core\n");
-	//InputDuplicatex(femmodel,VxEnum,VxDebrisEnum);
 	femmodel->inputs->DuplicateInput(VxEnum,VxDebrisEnum);
-	//InputDuplicatex(femmodel,VyEnum,VyDebrisEnum);
+	if(domaintype!=Domain2DverticalEnum){
+		femmodel->inputs->DuplicateInput(VyEnum,VyDebrisEnum);	
+	}
+	femmodel->parameters->SetParam(VxEnum,InputToDepthaverageInEnum);
+	femmodel->parameters->SetParam(VxAverageEnum,InputToDepthaverageOutEnum);
+	depthaverage_core(femmodel);
+	if(domaintype!=Domain2DverticalEnum){
+		femmodel->parameters->SetParam(VyEnum,InputToDepthaverageInEnum);
+		femmodel->parameters->SetParam(VyAverageEnum,InputToDepthaverageOutEnum);
+		depthaverage_core(femmodel);
+	}
+
 	debris_analysis = new DebrisAnalysis();
-	//debris_analysis->PreCore(femmodel);
-	//femmodel->parameters->SetParam(VxDebrisEnum,InputToExtrudeEnum);
-	//extrudefromtop_core(femmodel);	
-
 	debris_analysis->Core(femmodel);
-	delete debris_analysis;
+	delete debris_analysis;	
 
 	femmodel->parameters->SetParam(DebrisThicknessEnum,InputToExtrudeEnum);
 	extrudefromtop_core(femmodel);	
-	//femmodel->parameters->SetParam(VxDebrisEnum,InputToExtrudeEnum);
-	//extrudefromtop_core(femmodel);
 
 	if(save_results) femmodel->RequestedOutputsx(&femmodel->results,requested_outputs,numoutputs);
-
 	if(solution_type==DebrisSolutionEnum)femmodel->RequestedDependentsx();
 
-	/*Free ressources:*/
+	/*Free resources:*/
 	if(numoutputs){for(int i=0;i<numoutputs;i++){xDelete<char>(requested_outputs[i]);} xDelete<char*>(requested_outputs);}
 
Index: /issm/trunk/src/c/cores/hydrology_core.cpp
===================================================================
--- /issm/trunk/src/c/cores/hydrology_core.cpp	(revision 28012)
+++ /issm/trunk/src/c/cores/hydrology_core.cpp	(revision 28013)
@@ -65,7 +65,5 @@
 		/*solid earth considerations:*/
 		SolidEarthWaterUpdates(femmodel);
-
 		delete ug;
-
 	}
 
@@ -261,4 +259,13 @@
 		delete analysis;
 	}
+
+	/*Using the armaPw hydrology model*/
+   else if (hydrology_model==HydrologyarmapwEnum){
+      femmodel->SetCurrentConfiguration(HydrologyArmapwAnalysisEnum);
+      if(VerboseSolution()) _printf0_("   updating subglacial water pressure\n");
+      HydrologyArmapwAnalysis* analysis = new HydrologyArmapwAnalysis();
+      analysis->UpdateSubglacialWaterPressure(femmodel);
+      delete analysis;
+   }
 	else{
 		_error_("Hydrology model "<< EnumToStringx(hydrology_model) <<" not supported yet");
Index: /issm/trunk/src/c/cores/love_core.cpp
===================================================================
--- /issm/trunk/src/c/cores/love_core.cpp	(revision 28012)
+++ /issm/trunk/src/c/cores/love_core.cpp	(revision 28013)
@@ -433,5 +433,5 @@
 		m0= 0.5*(z[iz1+1]-z[iz1])*((hc-hb)/(z[iz1+1]-z[iz1]) + (hb-ha)/(z[iz1]-za));
 		//right derivative
-		m1= 0.5*(z[iz1+1]-z[iz1])*((hd-hc)/(zd-z[iz1+1]) + (hc-hb)/(z[iz1+1]-z[iz1]));
+		m1= 0.5*(z[iz1+1]-z[iz1])*((hd-hc)/(zd-z[iz1+1]) + (hc-hb)/reCast<doubletype>(z[iz1+1]-z[iz1]));
 
 		//interpolation abscissa
@@ -589,11 +589,11 @@
 	int rheo=matlitho->rheologymodel[layer_index];
 
-	if (vi!=0 && omega!=0.0){ //take into account viscosity in the rigidity if the material isn't a perfect fluid
+	if(vi!=0 && omega!=0.0){ //take into account viscosity in the rigidity if the material isn't a perfect fluid
 		doubletype ka=la0 + 2.0/3.0*mu0; //Bulk modulus
-		if (rheo==2){//EBM
+		if(rheo==2){//EBM
 			mu=muEBM<doubletype>(layer_index, omega, matlitho, femmodel);
 			la=ka-2.0/3.0*mu;
 		} 
-		else if (rheo==1){//Burgers
+		else if(rheo==1){//Burgers
 			doubletype vi2=matlitho->burgers_viscosity[layer_index];
 			doubletype mu2=matlitho->burgers_mu[layer_index];
@@ -674,5 +674,6 @@
 	doubletype frh,frhg0,fgr0,fgr,fn,rm0,rlm,flm;
 	doubletype xmin,xmax,x,dr;
-	doubletype g,ro, issolid;
+	doubletype g,ro;
+	bool       issolid;
 
 	if (pomega) { //frequency and degree dependent terms /*{{{*/
@@ -690,5 +691,5 @@
 			ro=matlitho->density[layer_index];
 			issolid=matlitho->issolid[layer_index];
-			if(issolid==1){
+			if(issolid){
 				//GetEarthRheology<doubletype>(&la, &mu,layer_index,omega,matlitho);   
 				mu=vars->mu[layer_index*vars->nfreq+vars->ifreq];
@@ -769,5 +770,5 @@
 				g=GetGravity<doubletype>(x*ra,layer_index,femmodel,matlitho,vars);
 
-				if(issolid==1){
+				if(issolid){
 					yi_prefactor[nindex+ 1*6+3]= fn/x;                  // in dy[1*6+3]
 					yi_prefactor[nindex+ 5*6+2]= -(fgr/g0*fn)/x;        // in dy[5*6+2]
@@ -802,5 +803,5 @@
 				g=GetGravity<doubletype>(x*ra,layer_index,femmodel,matlitho,vars);
 				nindex=nsteps*36+n*36;
-				if(issolid==1){
+				if(issolid){
 					yi_prefactor[nindex+ 1*6+5]= -frhg0;       // in dy[1*6+5]
 					yi_prefactor[nindex+ 2*6+0]= -1.0/x;       // in dy[2*6+0]
@@ -825,5 +826,5 @@
 	//computes yi derivatives at r=radius[layer_index]+ n/nstep*(radius[layer_index+1]-radius[layer_index])
 
-	int issolid=matlitho->issolid[layer_index];
+	bool issolid=matlitho->issolid[layer_index];
 	int iy,id,ny, nindex, nstep, nsteps;
 	//femmodel->parameters->FindParam(&nstep,LoveIntStepsPerLayerEnum);
@@ -842,5 +843,5 @@
 			   fn=(deg*(deg+1.0));
 
-			   if(issolid==1){
+			   if(issolid){
 			   ny = 6;
 
@@ -899,5 +900,5 @@
 	nindex=nsteps*36+n*36;
 
-	if(issolid==1){
+	if(issolid){
 		ny = 6;
 	} else {
@@ -1235,7 +1236,7 @@
 
 		// Boundary Condition matrix - solid regions
-		if (matlitho->issolid[i]){
+		if(matlitho->issolid[i]){
 			one = -1.0;
-			if (i>0) if (!matlitho->issolid[i-1]) one = 1.0;
+			if(i>0) if(!matlitho->issolid[i-1]) one = 1.0;
 			for (int j=0;j<6;j++){
 				yi[(j+6*ici)+ nyi*(j+6*ici+3)] = one;
Index: /issm/trunk/src/c/cores/transient_core.cpp
===================================================================
--- /issm/trunk/src/c/cores/transient_core.cpp	(revision 28012)
+++ /issm/trunk/src/c/cores/transient_core.cpp	(revision 28013)
@@ -118,7 +118,5 @@
 			for(Object* & object:dependent_objects->objects){
 				DependentObject* dep=(DependentObject*)object;
-				IssmDouble  output_value;
-				dep->Responsex(&output_value,femmodel);
-				dep->AddValue(output_value);
+				dep->RecordResponsex(femmodel);
 			}
 		}
@@ -131,7 +129,7 @@
 	/*parameters: */
 	bool isstressbalance,ismasstransport,isage,isoceantransport,issmb,isthermal,isgroundingline,isesa,issampling;
-	bool isslc,ismovingfront,isdamageevolution,ishydrology,isoceancoupling,isstochasticforcing,save_results;
+	bool isslc,ismovingfront,isdamageevolution,ishydrology,isstochasticforcing,save_results;
 	bool isdebris;
-	int  step,sb_coupling_frequency;
+	int  step,sb_coupling_frequency,isoceancoupling;
 	int  domaintype,numoutputs;
 
@@ -159,8 +157,4 @@
 	femmodel->parameters->FindParam(&isstochasticforcing,StochasticForcingIsStochasticForcingEnum);
 
-#if defined(_HAVE_OCEAN_)
-	if(isoceancoupling) OceanExchangeDatax(femmodel,false);
-#endif
-
 	if(isstochasticforcing) StochasticForcingx(femmodel);
 
@@ -200,4 +194,18 @@
 	if(isdebris) debris_core(femmodel);
 
+#if defined(_HAVE_OCEAN_)
+	if(isoceancoupling) {
+		/*First calculate thickness change without melt (dynamic thinning) to send to ocean
+		 * then receive ocean melt 
+		 * then go back to the previous geometry to continue the transient with the melt received*/
+		InputUpdateFromConstantx(femmodel,0.,BasalforcingsFloatingiceMeltingRateEnum,P1Enum);
+		masstransport_core(femmodel);
+		OceanExchangeDatax(femmodel,false);
+		InputDuplicatex(femmodel,ThicknessOldEnum,ThicknessEnum);
+		InputDuplicatex(femmodel,BaseOldEnum,BaseEnum);
+		InputDuplicatex(femmodel,SurfaceOldEnum,SurfaceEnum);
+	}
+#endif
+
 	/* from here on, prepare geometry for next time step*/
 
@@ -242,6 +250,6 @@
 void transient_precore(FemModel* femmodel){/*{{{*/
 
-	bool       isoceancoupling,isslc;
-	int        amr_frequency,amr_restart;
+	bool       isslc;
+	int        amr_frequency,amr_restart,isoceancoupling;
 
 	femmodel->parameters->FindParam(&isoceancoupling,TransientIsoceancouplingEnum);
@@ -269,9 +277,10 @@
 
 	/*parameters: */
-	IssmDouble output_value;
 	IssmDouble finaltime,dt,yts,time;
-	bool       isoceancoupling;
-	int        step,timestepping;
-	int        checkpoint_frequency,num_responses;
+	int       isoceancoupling;
+	int       step,timestepping;
+	int       checkpoint_frequency,num_responses;
+	int		 *M = NULL;
+	int		 *control_enum;
 
 	/*Get rank*/
@@ -286,4 +295,6 @@
 	femmodel->parameters->FindParam(&num_responses,InversionNumCostFunctionsEnum);
 	femmodel->parameters->FindParam(&checkpoint_frequency,SettingsCheckpointFrequencyEnum); _assert_(checkpoint_frequency>0);
+	femmodel->parameters->FindParam(&control_enum,NULL,InversionControlParametersEnum);
+	femmodel->parameters->FindParam(&M,NULL,ControlInputSizeMEnum);
 
 	std::vector<IssmDouble> time_all;
@@ -338,6 +349,5 @@
 		for(Object* & object:dependent_objects->objects){
 			DependentObject* dep=(DependentObject*)object;
-			dep->Responsex(&output_value,femmodel);
-			dep->AddValue(output_value);
+			dep->RecordResponsex(femmodel);
 		}
 
@@ -384,5 +394,5 @@
 	if(my_rank==0) for(int i=0; i < Xsize; i++) tape_codi.registerInput(X[i]);
 	SetControlInputsFromVectorx(femmodel,X);
-
+	
 	IssmDouble J     = 0.;
 	int        count = 0;
@@ -392,15 +402,5 @@
 		IssmDouble       output_value = dep->GetValue();
 
-
 		J += output_value;
-
-		tape_codi.registerOutput(J);
-#if _CODIPACK_MAJOR_==2
-		codi_global.output_indices.push_back(J.getIdentifier());
-#elif _CODIPACK_MAJOR_==1
-		codi_global.output_indices.push_back(J.getGradientData());
-#else
-#error "_CODIPACK_MAJOR_ not supported"
-#endif
 
 		/*Keep track of output for printing*/
@@ -411,11 +411,20 @@
 	_assert_(count == num_responses);
 
+	#if defined(_HAVE_CODIPACK_)
+	tape_codi.registerOutput(J);
+	#if _CODIPACK_MAJOR_==2
+	codi_global.output_indices.push_back(J.getIdentifier());
+	#elif _CODIPACK_MAJOR_==1
+	codi_global.output_indices.push_back(J.getGradientData());
+	#else
+	#error "_CODIPACK_MAJOR_ not supported"
+	#endif
+	#endif
+
 	tape_codi.setPassive();
 
 	if(VerboseAutodiff())_printf0_("   CoDiPack fos_reverse\n");
-	for(int i=0;i<num_responses;i++){
-		if(my_rank==0) tape_codi.setGradient(codi_global.output_indices[i],1.0);
-		tape_codi.evaluate();
-	}
+	if(my_rank==0) tape_codi.setGradient(codi_global.output_indices[0],1.0);
+	tape_codi.evaluate();
 
 	/*Initialize Xb and Yb*/
@@ -468,6 +477,5 @@
 			for(Object* & object:dependent_objects->objects){
 				DependentObject* dep=(DependentObject*)object;
-				dep->Responsex(&output_value,femmodel);
-				dep->AddValue(output_value);
+				dep->RecordResponsex(femmodel);
 			}
 
@@ -504,4 +512,10 @@
 	/*Broadcast gradient to other ranks (make sure to sum all gradients)*/
 	ISSM_MPI_Allreduce(Xb,G,Xsize,ISSM_MPI_PDOUBLE,ISSM_MPI_SUM,IssmComm::GetComm());
+	#ifdef _ISSM_DEBUG_
+	for(int i=0; i<Xsize; i++){
+		if(xIsNan(Xb[i])) _error_("Found NaN in gradient at position "<<i);
+		if(xIsInf(Xb[i])) _error_("Found Inf in gradient at position "<<i);
+	}
+	#endif
 
 	/*Cleanup and return misfit*/
@@ -510,4 +524,5 @@
 	xDelete<double>(Yb);
 	xDelete<int>(Yin);
+	xDelete<int>(control_enum);
 	return J.getValue();
 }/*}}}*/
Index: /issm/trunk/src/c/datastructures/DataSet.cpp
===================================================================
--- /issm/trunk/src/c/datastructures/DataSet.cpp	(revision 28012)
+++ /issm/trunk/src/c/datastructures/DataSet.cpp	(revision 28013)
@@ -256,4 +256,9 @@
 				this->AddObject(Cflevelset);
 			}
+			else if(obj_enum==CfsurfacesquaretransientEnum){
+				Cfsurfacesquaretransient* cfsurf=new Cfsurfacesquaretransient();
+				cfsurf->Marshall(marshallhandle);
+				this->AddObject(cfsurf);
+			}
 			else if(obj_enum==CfsurfacesquareEnum){
 				Cfsurfacesquare* cfsurf=new Cfsurfacesquare();
@@ -270,4 +275,19 @@
 				cfdragcoeff->Marshall(marshallhandle);
 				this->AddObject(cfdragcoeff);
+			}
+			else if(obj_enum==CfdragcoeffabsgradtransientEnum){
+				Cfdragcoeffabsgradtransient* cfdragcoeff=new Cfdragcoeffabsgradtransient();
+				cfdragcoeff->Marshall(marshallhandle);
+				this->AddObject(cfdragcoeff);
+			}
+			else if(obj_enum==CfrheologybbarabsgradEnum){
+				Cfrheologybbarabsgrad* cfrheologybbarabsgrad=new Cfrheologybbarabsgrad();
+				cfrheologybbarabsgrad->Marshall(marshallhandle);
+				this->AddObject(cfrheologybbarabsgrad);
+			}
+			else if(obj_enum==NodalvalueEnum){
+				Nodalvalue* nodalvalue=new Nodalvalue();
+				nodalvalue->Marshall(marshallhandle);
+				this->AddObject(nodalvalue);
 			}
 			else if(obj_enum==MassfluxatgateEnum){
Index: /issm/trunk/src/c/main/EnvironmentInit.cpp
===================================================================
--- /issm/trunk/src/c/main/EnvironmentInit.cpp	(revision 28012)
+++ /issm/trunk/src/c/main/EnvironmentInit.cpp	(revision 28013)
@@ -28,5 +28,5 @@
 	if(!my_rank) printf("\n");
 	if(!my_rank) printf("%s version  %s\n",PACKAGE_NAME,PACKAGE_VERSION);
-	if(!my_rank) printf("(website: %s contact: %s)\n",PACKAGE_URL,PACKAGE_BUGREPORT);
+	if(!my_rank) printf("(website: %s forum: %s)\n",PACKAGE_URL,PACKAGE_BUGREPORT);
 	if(!my_rank) printf("\n");
 
Index: /issm/trunk/src/c/modules/Bamgx/Bamgx.cpp
===================================================================
--- /issm/trunk/src/c/modules/Bamgx/Bamgx.cpp	(revision 28012)
+++ /issm/trunk/src/c/modules/Bamgx/Bamgx.cpp	(revision 28013)
@@ -162,5 +162,5 @@
 		Thr=&BTh,Thb=0;
 		Mesh & Th( *(0 ?  new Mesh(*Thr,&Thr->Gh,Thb,maxnbv) :  new Mesh(maxnbv,BTh,bamgopts,bamgopts->KeepVertices)));
-		if (Thr != &BTh) delete Thr;
+		//if (Thr!=&BTh) delete Thr;
 
 		//Split corners if requested
Index: /issm/trunk/src/c/modules/Calvingx/Calvingx.cpp
===================================================================
--- /issm/trunk/src/c/modules/Calvingx/Calvingx.cpp	(revision 28012)
+++ /issm/trunk/src/c/modules/Calvingx/Calvingx.cpp	(revision 28013)
@@ -39,4 +39,7 @@
 			femmodel->ElementOperationx(&Element::CalvingRateVonmises);
 			break;
+		case CalvingVonmisesADEnum:
+			femmodel->ElementOperationx(&Element::CalvingRateVonmisesAD);
+			break;
 		case CalvingTestEnum:
 			femmodel->ElementOperationx(&Element::CalvingRateTest);
@@ -48,4 +51,7 @@
 			femmodel->ElementOperationx(&Element::CalvingPollard);
 			break;
+		case CalvingCalvingMIPEnum:
+			femmodel->ElementOperationx(&Element::CalvingRateCalvingMIP);
+			break;
 		default:
 			_error_("Caving law "<<EnumToStringx(calvinglaw)<<" not supported yet");
Index: /issm/trunk/src/c/modules/ControlInputSetGradientx/ControlInputSetGradientx.cpp
===================================================================
--- /issm/trunk/src/c/modules/ControlInputSetGradientx/ControlInputSetGradientx.cpp	(revision 28012)
+++ /issm/trunk/src/c/modules/ControlInputSetGradientx/ControlInputSetGradientx.cpp	(revision 28013)
@@ -25,7 +25,16 @@
 	int offset = 0;
 	for(int i=0;i<num_controls;i++){
-		for(Object* & object : elements->objects){
-			Element* element=xDynamicCast<Element*>(object);
-			element->ControlInputSetGradient(gradient,control_type[i],i,offset,M_all[i],N_all[i],interp_all[i]);
+		/*Is the control a Param?*/
+		if(IsParamEnum(control_type[i])){
+			parameters->SetGradientFromVector(gradient, control_type[i], M_all[i], N_all[i], offset);
+		}
+		else if(IsInputEnum(control_type[i])){
+			for(Object* & object : elements->objects){
+				Element* element=xDynamicCast<Element*>(object);
+				element->ControlInputSetGradient(gradient,control_type[i],i,offset,M_all[i],N_all[i],interp_all[i]);
+			}
+		}
+		else{
+			_error_("not supported yet");
 		}
 		offset+=M_all[i]*N_all[i];
Index: /issm/trunk/src/c/modules/DragCoefficientAbsGradientx/DragCoefficientAbsGradientx.cpp
===================================================================
--- /issm/trunk/src/c/modules/DragCoefficientAbsGradientx/DragCoefficientAbsGradientx.cpp	(revision 28012)
+++ /issm/trunk/src/c/modules/DragCoefficientAbsGradientx/DragCoefficientAbsGradientx.cpp	(revision 28013)
@@ -63,10 +63,12 @@
 	DatasetInput* weights_input=basalelement->GetDatasetInput(InversionCostFunctionsCoefficientsEnum);   _assert_(weights_input);
 
-	/* get the friction law: if 11-Schoof, which has a different name of C */
+	/* get the friction law: if 2-Weertman, 11-Schoof or 14-RegularizedCoulomb, which has a different names of C */
 	element->FindParam(&frictionlaw, FrictionLawEnum);
-	Input* drag_input;
+	Input* drag_input = NULL;
 	switch(frictionlaw) {
 		case 2:
 		case 11:
+		case 13:
+		case 14:
 			drag_input = basalelement->GetInput(FrictionCEnum); _assert_(drag_input);
 			break;
Index: /issm/trunk/src/c/modules/FrontalForcingsx/FrontalForcingsx.cpp
===================================================================
--- /issm/trunk/src/c/modules/FrontalForcingsx/FrontalForcingsx.cpp	(revision 28012)
+++ /issm/trunk/src/c/modules/FrontalForcingsx/FrontalForcingsx.cpp	(revision 28013)
@@ -20,4 +20,7 @@
 		case FrontalForcingsRignotarmaEnum:
 			Thermalforcingarmax(femmodel);
+			bool isdischargearma;
+			femmodel->parameters->FindParam(&isdischargearma,FrontalForcingsIsDischargeARMAEnum);
+			if(isdischargearma==true) Subglacialdischargearmax(femmodel);
 			/*Do not break here, call IcefrontAreax(),RignotMeltParameterizationx()*/
 		case FrontalForcingsRignotEnum:
@@ -105,2 +108,70 @@
    xDelete<IssmDouble>(monthtrends);
 }/*}}}*/
+void Subglacialdischargearmax(FemModel* femmodel){/*{{{*/
+
+	/*Get time parameters*/
+   IssmDouble time,dt,starttime,tstep_arma;
+   femmodel->parameters->FindParam(&time,TimeEnum);
+   femmodel->parameters->FindParam(&dt,TimesteppingTimeStepEnum);
+   femmodel->parameters->FindParam(&starttime,TimesteppingStartTimeEnum);
+   femmodel->parameters->FindParam(&tstep_arma,FrontalForcingsSdARMATimestepEnum);
+
+   /*Determine if this is a time step for the ARMA model*/
+   bool isstepforarma = false;
+
+   #ifndef _HAVE_AD_
+   if((fmod(time,tstep_arma)<fmod((time-dt),tstep_arma)) || (time<=starttime+dt) || tstep_arma==dt) isstepforarma = true;
+   #else
+   _error_("not implemented yet");
+   #endif
+
+   /*Load parameters*/
+	bool isstochastic;
+   bool isdischargestochastic = false;
+	int M,N,arorder,maorder,numbasins,numparams,numbreaks,my_rank;
+   femmodel->parameters->FindParam(&numbasins,FrontalForcingsNumberofBasinsEnum);
+   femmodel->parameters->FindParam(&numparams,FrontalForcingsSdNumberofParamsEnum);
+   femmodel->parameters->FindParam(&numbreaks,FrontalForcingsSdNumberofBreaksEnum);
+   femmodel->parameters->FindParam(&arorder,FrontalForcingsSdarOrderEnum);
+   femmodel->parameters->FindParam(&maorder,FrontalForcingsSdmaOrderEnum);
+   IssmDouble* datebreaks        = NULL;
+   IssmDouble* arlagcoefs        = NULL;
+   IssmDouble* malagcoefs        = NULL;
+   IssmDouble* monthlyfrac       = NULL;
+	IssmDouble* polyparams        = NULL;
+
+   femmodel->parameters->FindParam(&datebreaks,&M,&N,FrontalForcingsSddatebreaksEnum);            _assert_(M==numbasins); _assert_(N==max(numbreaks,1));        
+   femmodel->parameters->FindParam(&polyparams,&M,&N,FrontalForcingsSdpolyparamsEnum);            _assert_(M==numbasins); _assert_(N==(numbreaks+1)*numparams);        
+   femmodel->parameters->FindParam(&arlagcoefs,&M,&N,FrontalForcingsSdarlagcoefsEnum);            _assert_(M==numbasins); _assert_(N==arorder);
+   femmodel->parameters->FindParam(&malagcoefs,&M,&N,FrontalForcingsSdmalagcoefsEnum);            _assert_(M==numbasins); _assert_(N==maorder);
+   femmodel->parameters->FindParam(&monthlyfrac,&M,&N,FrontalForcingsSdMonthlyFracEnum);          _assert_(M==numbasins); _assert_(N==12); 
+
+	femmodel->parameters->FindParam(&isstochastic,StochasticForcingIsStochasticForcingEnum);
+	if(isstochastic){
+		int  numstochasticfields;
+		int* stochasticfields;
+		femmodel->parameters->FindParam(&numstochasticfields,StochasticForcingNumFieldsEnum);
+		femmodel->parameters->FindParam(&stochasticfields,&N,StochasticForcingFieldsEnum); _assert_(N==numstochasticfields);
+		for(int i=0;i<numstochasticfields;i++){
+			if(stochasticfields[i]==FrontalForcingsSubglacialDischargearmaEnum) isdischargestochastic = true;
+		}
+		xDelete<int>(stochasticfields);
+	}
+
+   /*Loop over each element to compute Subglacial Discharge at vertices*/
+   for(Object* &object:femmodel->elements->objects){
+      Element* element = xDynamicCast<Element*>(object);
+		/*Compute ARMA*/
+      element->ArmaProcess(isstepforarma,arorder,maorder,numparams,numbreaks,tstep_arma,polyparams,arlagcoefs,malagcoefs,datebreaks,isdischargestochastic,FrontalForcingsSubglacialDischargearmaEnum);
+		/*Scale with monthly fractions*/
+		element->MonthlyFactorBasin(monthlyfrac,FrontalForcingsSubglacialDischargearmaEnum);
+	}
+
+   /*Cleanup*/
+   xDelete<IssmDouble>(arlagcoefs);
+   xDelete<IssmDouble>(malagcoefs);
+   xDelete<IssmDouble>(monthlyfrac);
+   xDelete<IssmDouble>(polyparams);
+   xDelete<IssmDouble>(datebreaks);
+}/*}}}*/
+
Index: /issm/trunk/src/c/modules/FrontalForcingsx/FrontalForcingsx.h
===================================================================
--- /issm/trunk/src/c/modules/FrontalForcingsx/FrontalForcingsx.h	(revision 28012)
+++ /issm/trunk/src/c/modules/FrontalForcingsx/FrontalForcingsx.h	(revision 28013)
@@ -7,4 +7,5 @@
 /* local prototypes: */
 void FrontalForcingsx(FemModel* femmodel);
+void Subglacialdischargearmax(FemModel* femmodel);
 void Thermalforcingarmax(FemModel* femmodel);
 
Index: /issm/trunk/src/c/modules/GetVectorFromControlInputsx/GetVectorFromControlInputsx.cpp
===================================================================
--- /issm/trunk/src/c/modules/GetVectorFromControlInputsx/GetVectorFromControlInputsx.cpp	(revision 28012)
+++ /issm/trunk/src/c/modules/GetVectorFromControlInputsx/GetVectorFromControlInputsx.cpp	(revision 28013)
@@ -30,7 +30,17 @@
 	int offset = 0;
 	for(int i=0;i<num_controls;i++){
-		for(Object* & object : elements->objects){
-			Element* element=xDynamicCast<Element*>(object);
-			element->GetVectorFromControlInputs(vector,control_type[i],i,N[i],data,offset);
+
+		/*Is the control a Param?*/
+		if(IsParamEnum(control_type[i])){
+			parameters->GetVectorFromControl(vector,control_type[i],i,N[i],data,offset);
+		}
+		else if(IsInputEnum(control_type[i])){
+			for(Object* & object : elements->objects){
+				Element* element=xDynamicCast<Element*>(object);
+				element->GetVectorFromControlInputs(vector,control_type[i],i,N[i],data,offset);
+			}
+		}
+		else{
+			_error_("not supported yet");
 		}
 		offset += M[i]*N[i];
Index: /issm/trunk/src/c/modules/GiaDeflectionCorex/GiaDeflectionCorex.cpp
===================================================================
--- /issm/trunk/src/c/modules/GiaDeflectionCorex/GiaDeflectionCorex.cpp	(revision 28012)
+++ /issm/trunk/src/c/modules/GiaDeflectionCorex/GiaDeflectionCorex.cpp	(revision 28013)
@@ -88,7 +88,7 @@
 	IssmPDouble* blockt_time=xNew<IssmPDouble>(Ntimp);
 	for(int i=0;i<Ntimp;i++){
-		blockt_time[i]=times[i]/1000.0/yts; 
-		if(i==numtimes-1) blockt_time[i]=reCast<IssmPDouble>(times[numtimes-1])/1000.0/yts; // final loading time, same as evaluation time
-		if(i==numtimes)   blockt_time[i]=reCast<IssmPDouble>(times[numtimes-1])/1000.0/yts;   // evaluation time
+		blockt_time[i]=reCast<IssmPDouble>(times[i]/1000.0/yts); 
+		if(i==numtimes-1) blockt_time[i]=reCast<IssmPDouble>(times[numtimes-1]/1000.0/yts); // final loading time, same as evaluation time
+		if(i==numtimes)   blockt_time[i]=reCast<IssmPDouble>(times[numtimes-1]/1000.0/yts);   // evaluation time
 	}
 
Index: /issm/trunk/src/c/modules/InputUpdateFromConstantx/InputUpdateFromConstantx.cpp
===================================================================
--- /issm/trunk/src/c/modules/InputUpdateFromConstantx/InputUpdateFromConstantx.cpp	(revision 28012)
+++ /issm/trunk/src/c/modules/InputUpdateFromConstantx/InputUpdateFromConstantx.cpp	(revision 28013)
@@ -26,4 +26,25 @@
 		element->InputUpdateFromConstant(constant,name);
 	}
+}
+void InputUpdateFromConstantx(FemModel* femmodel,int constant, int name, int type){
+
+	if(type==P0Enum) InputUpdateFromConstantx(femmodel, constant,name);
+	else if(type==P1Enum){
+
+		if(VerboseModule()) _printf0_("   Input updates from constant (P1 version)\n");
+
+		/*Elements and loads drive the update: */
+		if(IsInputEnum(name)){
+			for(Object* & object : femmodel->elements->objects){
+				Element* element = xDynamicCast<Element*>(object);
+				element->InputUpdateFromConstant(constant,name,P1Enum);
+			}
+		}
+		else{
+			_error_("not supported yet");
+		}
+	}
+	else _error_("InputUpdateFromConstantx error message: type not supported yet!");
+
 }
 
Index: /issm/trunk/src/c/modules/InputUpdateFromConstantx/InputUpdateFromConstantx.h
===================================================================
--- /issm/trunk/src/c/modules/InputUpdateFromConstantx/InputUpdateFromConstantx.h	(revision 28012)
+++ /issm/trunk/src/c/modules/InputUpdateFromConstantx/InputUpdateFromConstantx.h	(revision 28013)
@@ -12,4 +12,5 @@
 void InputUpdateFromConstantx(FemModel* femmodel,bool       constant,int name);
 void InputUpdateFromConstantx(FemModel* femmodel,int        constant,int name);
+void InputUpdateFromConstantx(FemModel* femmodel,int        constant,int name, int type);
 void InputUpdateFromConstantx(FemModel* femmodel,IssmDouble constant,int name);
 #ifdef _HAVE_AD_
Index: /issm/trunk/src/c/modules/InterpFromMesh2dx/InterpFromMesh2dxt.cpp
===================================================================
--- /issm/trunk/src/c/modules/InterpFromMesh2dx/InterpFromMesh2dxt.cpp	(revision 28012)
+++ /issm/trunk/src/c/modules/InterpFromMesh2dx/InterpFromMesh2dxt.cpp	(revision 28013)
@@ -76,5 +76,5 @@
 
 				/*is the current point in the current element?*/
-				if (area_1>=0 && area_2>=0 && area_3>=0){
+				if (area_1>=-1.e-8 && area_2>=-1.e-8 && area_3>=-1.e-8){
 
 					/*Yes ! compute the value on the point*/
Index: /issm/trunk/src/c/modules/ModelProcessorx/Autodiff/CreateParametersAutodiff.cpp
===================================================================
--- /issm/trunk/src/c/modules/ModelProcessorx/Autodiff/CreateParametersAutodiff.cpp	(revision 28012)
+++ /issm/trunk/src/c/modules/ModelProcessorx/Autodiff/CreateParametersAutodiff.cpp	(revision 28013)
@@ -10,4 +10,5 @@
 void CreateParametersAutodiff(Parameters* parameters,IoModel* iomodel){
 
+	#if defined(_HAVE_AD_) 
 	int         i;
 	bool        isautodiff;
@@ -31,73 +32,71 @@
 
 	#ifdef _HAVE_ADOLC_
-		/*initialize a placeholder to store solver pointers: {{{*/
-		GenericParam<Adolc_edf> *theAdolcEDF_p=new GenericParam<Adolc_edf>(AdolcParamEnum);
+	/*initialize a placeholder to store solver pointers*/
+	GenericParam<Adolc_edf> *theAdolcEDF_p=new GenericParam<Adolc_edf>(AdolcParamEnum);
 
-		/*Solver pointers depend on what type of solver we are implementing: */
-		options=OptionsFromAnalysis(&toolkit,parameters,DefaultAnalysisEnum);
-		ToolkitOptions::Init(toolkit,options);
-		xDelete<char>(toolkit);
+	/*Solver pointers depend on what type of solver we are implementing: */
+	options=OptionsFromAnalysis(&toolkit,parameters,DefaultAnalysisEnum);
+	ToolkitOptions::Init(toolkit,options);
+	xDelete<char>(toolkit);
 
-		switch(IssmSolverTypeFromToolkitOptions()){
-			case MumpsEnum:{
-				#ifdef _HAVE_MUMPS_
-				theAdolcEDF_p->GetParameterValue().myEDF_for_solverx_p=reg_ext_fct(mumpsSolveEDF);
-				#else
-				_error_("requesting mumps solver without MUMPS being compiled in!");
-				#endif
-				break;
+	switch(IssmSolverTypeFromToolkitOptions()){
+		case MumpsEnum:{
+								#ifdef _HAVE_MUMPS_
+								theAdolcEDF_p->GetParameterValue().myEDF_for_solverx_p=reg_ext_fct(mumpsSolveEDF);
+								#else
+								_error_("requesting mumps solver without MUMPS being compiled in!");
+								#endif
+								break;
 							}
-			case GslEnum: {
-				#ifdef _HAVE_GSL_
-				theAdolcEDF_p->GetParameterValue().myEDF_for_solverx_p=reg_ext_fct(EDF_for_solverx);
-				#else
-				_error_("requesting GSL solver without GSL being compiled in!");
-				#endif
-			    break;
+		case GslEnum: {
+							  #ifdef _HAVE_GSL_
+							  theAdolcEDF_p->GetParameterValue().myEDF_for_solverx_p=reg_ext_fct(EDF_for_solverx);
+							  #else
+							  _error_("requesting GSL solver without GSL being compiled in!");
+							  #endif
+							  break;
 						  }
-			default:
-				_error_("solver type not supported yet!");
-		}
+		default:
+						_error_("solver type not supported yet!");
+	}
 
-		// to save some space:
-		// we know we won't use adolc inside of  the solver:
-		theAdolcEDF_p->GetParameterValue().myEDF_for_solverx_p->nestedAdolc=false;
-		// the solution vector is just allocated and doesn't have a meaningful prior value
-		theAdolcEDF_p->GetParameterValue().myEDF_for_solverx_p->dp_y_priorRequired=false;
-		// the solver wrapper makes sure the matrix and the right hand side don't change
-		theAdolcEDF_p->GetParameterValue().myEDF_for_solverx_p->dp_x_changes=false;
-		parameters->AddObject(theAdolcEDF_p);
+	// to save some space:
+	// we know we won't use adolc inside of  the solver:
+	theAdolcEDF_p->GetParameterValue().myEDF_for_solverx_p->nestedAdolc=false;
+	// the solution vector is just allocated and doesn't have a meaningful prior value
+	theAdolcEDF_p->GetParameterValue().myEDF_for_solverx_p->dp_y_priorRequired=false;
+	// the solver wrapper makes sure the matrix and the right hand side don't change
+	theAdolcEDF_p->GetParameterValue().myEDF_for_solverx_p->dp_x_changes=false;
+	parameters->AddObject(theAdolcEDF_p);
 
-		/*Free resources: */
-		xDelete<char>(options);
-		/*}}}*/
-		#elif _HAVE_CODIPACK_
-		//fprintf(stderr, "*** Codipack CreateParametersAutodiff()\n");
-		/*initialize a placeholder to store solver pointers: {{{*/
-		/*Solver pointers depend on what type of solver we are implementing: */
-		options=OptionsFromAnalysis(&toolkit,parameters,DefaultAnalysisEnum);
-		ToolkitOptions::Init(toolkit,options);
-		xDelete<char>(toolkit);
+	/*Free resources: */
+	xDelete<char>(options);
 
-		switch(IssmSolverTypeFromToolkitOptions()){
-			case MumpsEnum:{
-				#ifndef _HAVE_MUMPS_
-				_error_("CoDiPack: requesting mumps solver without MUMPS being compiled in!");
-				#endif
-				break;
-				}
-			case GslEnum: {
-				#ifndef _HAVE_GSL_
-				_error_("CoDiPack: requesting GSL solver without GSL being compiled in!");
-				#endif
-				break;
-				}
-			default:
-							_error_("solver type not supported yet!");
-		}
-		/*Free resources: */
-		xDelete<char>(options);
-		#endif
-		#if defined(_HAVE_AD_) 
+	#elif _HAVE_CODIPACK_
+	/*initialize a placeholder to store solver pointers*/
+	/*Solver pointers depend on what type of solver we are implementing: */
+	options=OptionsFromAnalysis(&toolkit,parameters,DefaultAnalysisEnum);
+	ToolkitOptions::Init(toolkit,options);
+	xDelete<char>(toolkit);
+
+	switch(IssmSolverTypeFromToolkitOptions()){
+		case MumpsEnum:{
+								#ifndef _HAVE_MUMPS_
+								_error_("CoDiPack: requesting mumps solver without MUMPS being compiled in!");
+								#endif
+								break;
+							}
+		case GslEnum: {
+							  #ifndef _HAVE_GSL_
+							  _error_("CoDiPack: requesting GSL solver without GSL being compiled in!");
+							  #endif
+							  break;
+						  }
+		default:
+						_error_("solver type not supported yet!");
+	}
+	/*Free resources: */
+	xDelete<char>(options);
+	#endif
 
 	if(isautodiff){
@@ -118,5 +117,5 @@
 		#endif
 
-		/*retrieve driver: {{{*/
+		/*retrieve driver:*/
 		iomodel->FindConstant(&autodiff_driver,"md.autodiff.driver");
 		parameters->AddObject(iomodel->CopyConstantObject("md.autodiff.driver",AutodiffDriverEnum));
@@ -143,6 +142,7 @@
 		}
 		xDelete<char>(autodiff_driver);
-		/*}}}*/
-		/*Deal with dependents first: {{{*/
+
+		/*Deal with dependents first:*/
+
 		iomodel->FindConstant(&num_dependent_objects,"md.autodiff.num_dependent_objects");
 		dependent_objects=new DataSet();
@@ -151,8 +151,7 @@
 		if(num_dependent_objects){
 			iomodel->FindConstant(&names,&dummy,"md.autodiff.dependent_object_names");
-			iomodel->FetchData(&indices,&dummy,&dummy,"md.autodiff.dependent_object_indices");
 
 			for(i=0;i<num_dependent_objects;i++){
-				DependentObject* dep=new DependentObject(names[i],indices[i]);
+				DependentObject* dep=new DependentObject(names[i]);
 				dependent_objects->AddObject(dep);
 				num_dep++;
@@ -164,12 +163,10 @@
 			}
 			xDelete<char*>(names);
-			xDelete<int>(indices);
 		}
 		parameters->AddObject(new DataSetParam(AutodiffDependentObjectsEnum,dependent_objects));
 		parameters->AddObject(new IntParam(AutodiffNumDependentsEnum,num_dep));
+		delete dependent_objects;
 
-		delete dependent_objects;
-		/*}}}*/
-		/*Deal with independents: {{{*/
+		/*Deal with independents*/
 
 		/*Independents have already been recovered in iomodel->DeclareIndependents. Just do some more processing. 
@@ -184,5 +181,4 @@
 			xDelete<IssmDouble>(xp);
 		}
-		/*}}}*/
 	}
 	#endif
Index: /issm/trunk/src/c/modules/ModelProcessorx/Control/CreateParametersControl.cpp
===================================================================
--- /issm/trunk/src/c/modules/ModelProcessorx/Control/CreateParametersControl.cpp	(revision 28012)
+++ /issm/trunk/src/c/modules/ModelProcessorx/Control/CreateParametersControl.cpp	(revision 28013)
@@ -29,9 +29,9 @@
 
 		switch(inversiontype){
-			  {
 			case 0:/*Brent Search*/
 			case 1:/*TAO*/
 			case 2:/*M1QN3*/
 			case 3:/*Validation*/
+				  {
 				/*How many controls and how many responses?*/
 				parameters->AddObject(iomodel->CopyConstantObject("md.inversion.num_control_parameters",InversionNumControlParametersEnum));
@@ -63,49 +63,42 @@
 
 				break;
-		}
+				  }
 			case 4:/*AD M1QN3*/
-			{
-			/*Intermediaries*/
-			int            num_independent_objects,M;
-			char**         names                   = NULL;
+				  {
+			 /*Intermediaries*/
+			int     num_independent_objects,M;
+			char**  names = NULL;
 
-				/*this is done somewhere else*/
-				parameters->AddObject(iomodel->CopyConstantObject("md.autodiff.num_independent_objects",InversionNumControlParametersEnum));
-			   parameters->AddObject(iomodel->CopyConstantObject("md.autodiff.num_dependent_objects",InversionNumCostFunctionsEnum));
+			/*this is done somewhere else*/
+			parameters->AddObject(iomodel->CopyConstantObject("md.autodiff.num_independent_objects",InversionNumControlParametersEnum));
+			parameters->AddObject(iomodel->CopyConstantObject("md.autodiff.num_dependent_objects",InversionNumCostFunctionsEnum));
 
-				/*Step1: create controls (independents)*/
-				iomodel->FetchData(&num_independent_objects,"md.autodiff.num_independent_objects");
-				_assert_(num_independent_objects>0);
-				iomodel->FetchData(&names,&M,"md.autodiff.independent_object_names");
-				_assert_(M==num_independent_objects);
-				int* ind_enums=xNew<int>(num_independent_objects);
-				for(int i=0;i<num_independent_objects;i++){
-					ind_enums[i]=StringToEnumx(names[i]);
-					xDelete<char>(names[i]);
-				}
+			/*Step 1: create controls (independents)*/
+			iomodel->FetchData(&num_independent_objects,"md.autodiff.num_independent_objects");  _assert_(num_independent_objects>0);
+			iomodel->FetchMultipleData(&names,&M,"md.autodiff.independent_name");                _assert_(M==num_independent_objects);
+			iomodel->FetchMultipleData(&control_scaling_factors,&M,"md.autodiff.independent_scaling_factor"); _assert_(M==num_independent_objects);
+			int* ind_enums=xNew<int>(num_independent_objects);
+			for(int i=0;i<num_independent_objects;i++){
+				ind_enums[i]=StringToEnumx(names[i]);
+				xDelete<char>(names[i]);
+			}
+			xDelete<char*>(names);
+			parameters->AddObject(new DoubleVecParam(InversionControlScalingFactorsEnum,control_scaling_factors,num_independent_objects));
+			parameters->AddObject(new IntVecParam(InversionControlParametersEnum,ind_enums,num_independent_objects));
+			xDelete<int>(ind_enums);	
 
-				parameters->AddObject(new IntVecParam(InversionControlParametersEnum,ind_enums,num_independent_objects));
-				iomodel->FindConstant(&cm_responses,&num_costfunc,"md.autodiff.dependent_object_names");
-				      _assert_(num_costfunc>0);
-				if(num_costfunc<1) _error_ ("no cost functions found");
-				int* costfunc_enums=xNew<int>(num_costfunc);
-				for(int i=0;i<num_costfunc;i++){
-					costfunc_enums[i]=StringToEnumx(cm_responses[i]);
-					xDelete<char>(cm_responses[i]);
-				}
-				xDelete<char*>(cm_responses);
-				parameters->AddObject(new IntVecParam(InversionCostFunctionsEnum,costfunc_enums,num_costfunc));
+			/*Step 2: create cost functions (dependent)*/
+			iomodel->FindConstant(&cm_responses,&num_costfunc,"md.autodiff.dependent_object_names");
+			if(num_costfunc<1) _error_ ("no cost functions found");
+			int* costfunc_enums=xNew<int>(num_costfunc);
+			for(int i=0;i<num_costfunc;i++){
+				costfunc_enums[i]=StringToEnumx(cm_responses[i]);
+				xDelete<char>(cm_responses[i]);
+			}
+			xDelete<char*>(cm_responses);
+			parameters->AddObject(new IntVecParam(InversionCostFunctionsEnum,costfunc_enums,num_costfunc));
+			xDelete<int>(costfunc_enums);
 
-				iomodel->FetchData(&control_scaling_factors,NULL,NULL,"md.autodiff.independent_scaling_factors");
-				parameters->AddObject(new DoubleVecParam(InversionControlScalingFactorsEnum,control_scaling_factors,num_independent_objects));
-
-				/*cleanup*/
-				for(int i=0;i<num_independent_objects;i++){
-					xDelete<char>(names[i]);
-				}
-				xDelete<char*>(names);
-				xDelete<int>(ind_enums);	
-				xDelete<int>(costfunc_enums);
-				break;
+			break;
 			}
 			default:
@@ -137,4 +130,5 @@
 				parameters->AddObject(iomodel->CopyConstantObject("md.inversion.incomplete_adjoint",InversionIncompleteAdjointEnum));
 				parameters->AddObject(iomodel->CopyConstantObject("md.inversion.dxmin",InversionDxminEnum));
+				parameters->AddObject(iomodel->CopyConstantObject("md.inversion.dfmin_frac",InversionDfminFracEnum));
 				parameters->AddObject(iomodel->CopyConstantObject("md.inversion.gttol",InversionGttolEnum));
 				parameters->AddObject(iomodel->CopyConstantObject("md.inversion.maxsteps",InversionMaxstepsEnum));
@@ -150,4 +144,5 @@
 			case 4:/*AD M1QN3*/
 				parameters->AddObject(iomodel->CopyConstantObject("md.inversion.dxmin",InversionDxminEnum));
+				parameters->AddObject(iomodel->CopyConstantObject("md.inversion.dfmin_frac",InversionDfminFracEnum));
 				parameters->AddObject(iomodel->CopyConstantObject("md.inversion.gttol",InversionGttolEnum));
 				parameters->AddObject(iomodel->CopyConstantObject("md.inversion.maxsteps",InversionMaxstepsEnum));
Index: /issm/trunk/src/c/modules/ModelProcessorx/Control/UpdateElementsAndMaterialsControl.cpp
===================================================================
--- /issm/trunk/src/c/modules/ModelProcessorx/Control/UpdateElementsAndMaterialsControl.cpp	(revision 28012)
+++ /issm/trunk/src/c/modules/ModelProcessorx/Control/UpdateElementsAndMaterialsControl.cpp	(revision 28013)
@@ -85,5 +85,5 @@
 		for(Object* & object : elements->objects){
 			Element* element=xDynamicCast<Element*>(object);
-			element->DatasetInputAdd(InversionCostFunctionsCoefficientsEnum,&weights[i*iomodel->numberofvertices],inputs,iomodel,M,1,1,cost_function,7,cost_function);
+			element->DatasetInputAdd(InversionCostFunctionsCoefficientsEnum,&weights[i*iomodel->numberofvertices],inputs,iomodel,M,1,1,cost_function,cost_function);
 		}
 	}
@@ -117,4 +117,5 @@
 	for(int i=0;i<num_controls;i++){
 		control = control_enums[i];
+		if(!IsInputEnum(control)) _error_("Only inputs can be parameters except if you use AD");
 		scale   = 1.;
 
@@ -211,12 +212,12 @@
 	#if defined(_HAVE_AD_)
 	/*Intermediaries*/
-	int				num_independent_objects,M,N,M_par,N_par;
-	char**			names                   = NULL;
-	int*				types							= NULL;
-	int*				control_sizes				= NULL;
-	IssmDouble*		independent					= NULL;
-	IssmDouble*		independents_fullmin    = NULL;
-	IssmDouble*		independents_fullmax		= NULL;
-	bool				control_analysis			=false;
+	int          num_independent_objects,M,N;
+	char       **names                = NULL;
+	int         *types                = NULL;
+	int         *control_sizes        = NULL;
+	IssmDouble  *independent          = NULL;
+	IssmDouble **independents_fullmin = NULL;
+	IssmDouble **independents_fullmax = NULL;
+	bool         control_analysis     = false;
 
 	iomodel->FindConstant(&control_analysis,"md.inversion.iscontrol");
@@ -226,27 +227,17 @@
 
 	/*Step1: create controls (independents)*/
-	iomodel->FetchData(&num_independent_objects,"md.autodiff.num_independent_objects");
-	_assert_(num_independent_objects>0); 
-	iomodel->FetchData(&names,&M,"md.autodiff.independent_object_names");
-	_assert_(M==num_independent_objects);
-	iomodel->FetchData(&types,NULL,NULL,"md.autodiff.independent_object_types");
-
-	int* M_all = xNew<int>(num_independent_objects);
-	int* N_all = xNew<int>(num_independent_objects);
+	iomodel->FetchData(&num_independent_objects,"md.autodiff.num_independent_objects"); _assert_(num_independent_objects>0); 
+	iomodel->FetchMultipleData(&names,&M,"md.autodiff.independent_name"); _assert_(M==num_independent_objects);
+	iomodel->FetchMultipleData(&types,&M,"md.autodiff.independent_type"); _assert_(M==num_independent_objects);
+
+	int* M_all = NULL;
+	int* N_all = NULL;
 	int* Interp_all = xNew<int>(num_independent_objects);
 
 	/*create independent objects, and at the same time, fetch the corresponding independent variables, 
 	 *and declare them as such in ADOLC: */
-	iomodel->FetchData(&independents_fullmin,&M_par,&N_par,"md.autodiff.independent_min_parameters");
-	iomodel->FetchData(&independents_fullmax,&M_par,&N_par,"md.autodiff.independent_max_parameters");
-	iomodel->FetchData(&control_sizes,NULL,NULL,"md.autodiff.independent_control_sizes");
-
-	int* start_point = NULL;
-	start_point = xNew<int>(num_independent_objects);
-	int counter = 0;
-	for(int i=0;i<num_independent_objects;i++){
-		start_point[i]=counter; 
-		counter+=control_sizes[i];
-	}
+	iomodel->FetchMultipleData(&independents_fullmin,&M_all,&N_all,&M,"md.autodiff.independent_min_parameters"); _assert_(M==num_independent_objects);
+	iomodel->FetchMultipleData(&independents_fullmax,NULL  ,NULL  ,&M,"md.autodiff.independent_max_parameters"); _assert_(M==num_independent_objects);
+	iomodel->FetchMultipleData(&control_sizes,&M,"md.autodiff.independent_control_size");                        _assert_(M==num_independent_objects);
 
 	for(int i=0;i<num_independent_objects;i++){
@@ -257,13 +248,12 @@
 			char* iofieldname  = NULL;
 			int   input_enum;
-			IssmDouble*  	independents_min			= NULL;
-			IssmDouble*	   independents_max			= NULL;
-
+			IssmDouble* independents_min = NULL;
+			IssmDouble*	independents_max = NULL;
+
+			/*Fetch required data*/
 			FieldAndEnumFromCode(&input_enum,&iofieldname,names[i]);
-
-			/*Fetch required data*/
 			iomodel->FetchData(&independent,&M,&N,iofieldname);
-			_assert_(independent);
-			_assert_(N==control_sizes[i]);
+			_assert_(independent && N==control_sizes[i]);
+			xDelete<char>(iofieldname);
 
 			independents_min = NULL; independents_min = xNew<IssmDouble>(M*N);
@@ -271,30 +261,42 @@
 			for(int m=0;m<M;m++){
 				for(int n=0;n<N;n++){
-					independents_min[N*m+n]=independents_fullmin[N_par*m+start_point[i]+n];
-					independents_max[N*m+n]=independents_fullmax[N_par*m+start_point[i]+n];
+					independents_min[N*m+n]=independents_fullmin[i][N*m+n];
+					independents_max[N*m+n]=independents_fullmax[i][N*m+n];
 				}
 			}
-			if(N!=1) M_all[i]=M-1;
-			else M_all[i]=M;
-
-			if(M_all[i]==iomodel->numberofvertices){
-				Interp_all[i] = P1Enum;
+
+			if(IsInputEnum(input_enum)){
+
+				/*remove last row if time series*/
+				if(N!=1) M_all[i]=M-1;
+
+				if(M_all[i]==iomodel->numberofvertices){
+					Interp_all[i] = P1Enum;
+				}
+				else if(M_all[i]==iomodel->numberofelements){
+					Interp_all[i] = P0Enum;
+				}
+				else{
+					_error_("Control size not supported");
+				}
+
+				for(Object* & object : elements->objects){
+					Element* element=xDynamicCast<Element*>(object);
+					element->ControlInputCreate(independent,independents_min,independents_max,inputs,iomodel,M,N,1.,input_enum,i+1);
+				}
 			}
-			else if(M_all[i]==iomodel->numberofelements){
-				Interp_all[i] = P0Enum;
-			}
-			else{
-				_error_("Control size not supported");
-			}
-			N_all[i] = N;
-
-			for(Object* & object : elements->objects){
-				Element* element=xDynamicCast<Element*>(object);
-				element->ControlInputCreate(independent,independents_min,independents_max,inputs,iomodel,M,N,1.,input_enum,i+1);
+			else if(IsParamEnum(input_enum)){
+				//_error_("not supported yet");
+				Interp_all[i] = DummyEnum; //Placeholder
+				parameters->AddObject(new ControlParam(independent,independents_min,independents_max,input_enum,M_all[i],N_all[i]));
+
+				if(M!=1){
+					_assert_(M==2); //TransientParam
+					M_all[i]=M-1;
+				}
 			}
 			xDelete<IssmDouble>(independent);
 			xDelete<IssmDouble>(independents_min);
 			xDelete<IssmDouble>(independents_max);
-			xDelete<char>(iofieldname);
 
 		}
@@ -310,4 +312,6 @@
 	for(int i=0;i<num_independent_objects;i++){
 		xDelete<char>(names[i]);
+		xDelete<IssmDouble>(independents_fullmin[i]);
+		xDelete<IssmDouble>(independents_fullmax[i]);
 	}
 	xDelete<char*>(names);
@@ -316,9 +320,7 @@
 	xDelete<int>(N_all);
 	xDelete<int>(Interp_all);
-	xDelete<IssmDouble>(independents_fullmin);
-	xDelete<IssmDouble>(independents_fullmax);
-	xDelete<int>(start_point);
+	xDelete<IssmDouble*>(independents_fullmin);
+	xDelete<IssmDouble*>(independents_fullmax);
 	xDelete<int>(control_sizes);
-	/*Step2: create cost functions (dependents)*/
 
 	return;
Index: /issm/trunk/src/c/modules/ModelProcessorx/CreateElementsVerticesAndMaterials.cpp
===================================================================
--- /issm/trunk/src/c/modules/ModelProcessorx/CreateElementsVerticesAndMaterials.cpp	(revision 28012)
+++ /issm/trunk/src/c/modules/ModelProcessorx/CreateElementsVerticesAndMaterials.cpp	(revision 28013)
@@ -197,7 +197,13 @@
 						break;
 					case MatlithoEnum: { /*{{{*/
-							iomodel->FetchData(13,"md.materials.radius","md.materials.viscosity","md.materials.lame_lambda","md.materials.lame_mu","md.materials.burgers_viscosity","md.materials.burgers_mu","md.materials.ebm_alpha","md.materials.ebm_delta","md.materials.ebm_taul","md.materials.ebm_tauh","md.materials.rheologymodel","md.materials.issolid","md.materials.density");
-							materials->AddObject(new Matlitho(iomodel->numberofelements+1,iomodel));
-							iomodel->DeleteData(13,"md.materials.radius","md.materials.viscosity","md.materials.lame_lambda","md.materials.lame_mu","md.materials.burgers_viscosity","md.materials.burgers_mu","md.materials.ebm_alpha","md.materials.ebm_delta","md.materials.ebm_taul","md.materials.ebm_tauh","md.materials.rheologymodel","md.materials.issolid","md.materials.density");
+							bool* issolid = NULL;
+							int*  rheologymodel = NULL;
+							iomodel->FetchData(&issolid, NULL, NULL, "md.materials.issolid");
+							iomodel->FetchData(&rheologymodel, NULL, NULL, "md.materials.rheologymodel");
+							iomodel->FetchData(11,"md.materials.radius","md.materials.viscosity","md.materials.lame_lambda","md.materials.lame_mu","md.materials.burgers_viscosity","md.materials.burgers_mu","md.materials.ebm_alpha","md.materials.ebm_delta","md.materials.ebm_taul","md.materials.ebm_tauh","md.materials.density");
+							materials->AddObject(new Matlitho(iomodel->numberofelements+1, iomodel, issolid, rheologymodel));
+							iomodel->DeleteData(11,"md.materials.radius","md.materials.viscosity","md.materials.lame_lambda","md.materials.lame_mu","md.materials.burgers_viscosity","md.materials.burgers_mu","md.materials.ebm_alpha","md.materials.ebm_delta","md.materials.ebm_taul","md.materials.ebm_tauh","md.materials.density");
+							xDelete<bool>(issolid);
+							xDelete<int>(rheologymodel);
 						}
 						/*}}}*/
@@ -447,5 +453,5 @@
 	if(solution_type!=LoveSolutionEnum) CreateNumberNodeToElementConnectivity(iomodel);
 	if(!isamr){
-		bool isoceancoupling;
+		int isoceancoupling;
 		iomodel->FindConstant(&isoceancoupling,"md.transient.isoceancoupling");
 
Index: /issm/trunk/src/c/modules/ModelProcessorx/CreateNodes.cpp
===================================================================
--- /issm/trunk/src/c/modules/ModelProcessorx/CreateNodes.cpp	(revision 28012)
+++ /issm/trunk/src/c/modules/ModelProcessorx/CreateNodes.cpp	(revision 28013)
@@ -618,5 +618,11 @@
 		xDelete<int>(vertex_pairing);
 	}
-	if(!isamr && (analysis==MasstransportAnalysisEnum || analysis==FreeSurfaceBaseAnalysisEnum || analysis==FreeSurfaceTopAnalysisEnum || analysis==DebrisAnalysisEnum)){
+	if(!isamr && (analysis==MasstransportAnalysisEnum
+					|| analysis==FreeSurfaceBaseAnalysisEnum
+					|| analysis==FreeSurfaceTopAnalysisEnum
+					|| analysis==DebrisAnalysisEnum
+					|| analysis==ThermalAnalysisEnum
+					|| analysis==EnthalpyAnalysisEnum
+					)){
 		int *vertex_pairing = NULL;
 		int  numvertex_pairing;
Index: /issm/trunk/src/c/modules/ModelProcessorx/CreateOutputDefinitions.cpp
===================================================================
--- /issm/trunk/src/c/modules/ModelProcessorx/CreateOutputDefinitions.cpp	(revision 28012)
+++ /issm/trunk/src/c/modules/ModelProcessorx/CreateOutputDefinitions.cpp	(revision 28013)
@@ -205,11 +205,11 @@
 
 					/*First create a cfsurfacesquare object for that specific string (cfsurfacesquare_model_string_s[j]):*/
-					output_definitions->AddObject(new Cfsurfacesquare(cfsurfacesquare_name_s[j],StringToEnumx(cfsurfacesquare_definitionstring_s[j]),StringToEnumx(cfsurfacesquare_model_string_s[j]),StringToEnumx(cfsurfacesquare_observation_string_s[j]),StringToEnumx(cfsurfacesquare_weights_string_s[j]),cfsurfacesquare_datatime_s[j],false));
+					output_definitions->AddObject(new Cfsurfacesquare(cfsurfacesquare_name_s[j],StringToEnumx(cfsurfacesquare_definitionstring_s[j]),StringToEnumx(cfsurfacesquare_model_string_s[j]),cfsurfacesquare_datatime_s[j]));
 
 					/*Now, for this particular cfsurfacesquare object, make sure we plug into the elements: the observation, and the weights.*/
 					for(Object* & object : elements->objects){
 						Element* element=xDynamicCast<Element*>(object);
-						element->DatasetInputAdd(StringToEnumx(cfsurfacesquare_definitionstring_s[j]),cfsurfacesquare_observation_s[j],inputs,iomodel,cfsurfacesquare_observation_M_s[j],cfsurfacesquare_observation_N_s[j],obs_vector_type,StringToEnumx(cfsurfacesquare_observation_string_s[j]),7,SurfaceObservationEnum);
-						element->DatasetInputAdd(StringToEnumx(cfsurfacesquare_definitionstring_s[j]),cfsurfacesquare_weights_s[j],inputs,iomodel,cfsurfacesquare_weights_M_s[j],cfsurfacesquare_weights_N_s[j],weight_vector_type,StringToEnumx(cfsurfacesquare_weights_string_s[j]),7,WeightsSurfaceObservationEnum);
+						element->DatasetInputAdd(StringToEnumx(cfsurfacesquare_definitionstring_s[j]),cfsurfacesquare_observation_s[j],inputs,iomodel,cfsurfacesquare_observation_M_s[j],cfsurfacesquare_observation_N_s[j],obs_vector_type,StringToEnumx(cfsurfacesquare_observation_string_s[j]),SurfaceObservationEnum);
+						element->DatasetInputAdd(StringToEnumx(cfsurfacesquare_definitionstring_s[j]),cfsurfacesquare_weights_s[j],inputs,iomodel,cfsurfacesquare_weights_M_s[j],cfsurfacesquare_weights_N_s[j],weight_vector_type,StringToEnumx(cfsurfacesquare_weights_string_s[j]),WeightsSurfaceObservationEnum);
 
 					}
@@ -244,4 +244,71 @@
 				/*}}}*/
 			}
+			else if (output_definition_enums[i]==CfsurfacesquaretransientEnum){
+				/*Deal with cfsurfacesquaretransient: {{{*/
+
+				/*cfsurfacesquaretransient variables: */
+				int          num_cfsurfacesquaretransients,test;
+				char       **cfssqt_name_s                = NULL;
+				char       **cfssqt_definitionstring_s    = NULL;
+				char       **cfssqt_model_string_s        = NULL;
+				IssmDouble **cfssqt_observations_s        = NULL;
+				int         *cfssqt_observations_M_s      = NULL;
+				int         *cfssqt_observations_N_s      = NULL;
+				IssmDouble **cfssqt_weights_s             = NULL;
+				int         *cfssqt_weights_M_s           = NULL;
+				int         *cfssqt_weights_N_s           = NULL;
+
+				/*Fetch name, model_string, observation, observation_string, etc ... (see src/m/classes/cfsurfacesquaretransient.m): */
+				iomodel->FetchMultipleData(&cfssqt_name_s,&num_cfsurfacesquaretransients,"md.cfsurfacesquaretransient.name");
+				iomodel->FetchMultipleData(&cfssqt_definitionstring_s,&test,"md.cfsurfacesquaretransient.definitionstring"); _assert_(test==num_cfsurfacesquaretransients);
+				iomodel->FetchMultipleData(&cfssqt_model_string_s,&test,"md.cfsurfacesquaretransient.model_string"); _assert_(test==num_cfsurfacesquaretransients);
+				iomodel->FetchMultipleData(&cfssqt_observations_s,&cfssqt_observations_M_s,&cfssqt_observations_N_s,&test, "md.cfsurfacesquaretransient.observations"); _assert_(test==num_cfsurfacesquaretransients);
+				iomodel->FetchMultipleData(&cfssqt_weights_s,&cfssqt_weights_M_s,&cfssqt_weights_N_s, &test,"md.cfsurfacesquaretransient.weights"); _assert_(test==num_cfsurfacesquaretransients);
+
+				for(j=0;j<num_cfsurfacesquaretransients;j++){
+
+               /*Check that we can use P1 inputs*/
+					if (cfssqt_observations_M_s[j]!=(iomodel->numberofvertices+1)) _error_("observations should be a P1 time series");
+               if (cfssqt_weights_M_s[j]!=iomodel->numberofvertices+1)        _error_("weights should be a P1 time series");
+					_assert_(cfssqt_observations_N_s[j]>0);
+
+					/*extract data times from last row of observations*/
+					IssmDouble *datatimes = xNew<IssmDouble>(cfssqt_observations_N_s[j]);
+					for(int k=0;k<cfssqt_observations_N_s[j];k++) datatimes[k] = (cfssqt_observations_s[j])[cfssqt_observations_N_s[j]*(cfssqt_weights_M_s[j]-1)+k];
+
+					/*First create a cfsurfacesquaretransient object for that specific string (cfssqt_model_string_s[j]):*/
+					output_definitions->AddObject(new Cfsurfacesquaretransient(cfssqt_name_s[j], StringToEnumx(cfssqt_definitionstring_s[j]), StringToEnumx(cfssqt_model_string_s[j]), cfssqt_observations_N_s[j],datatimes ));
+					xDelete<IssmDouble>(datatimes);
+
+					/*Now, for this particular cfsurfacesquaretransient object, make sure we plug into the elements: the observation, and the weights.*/
+					for(Object* & object : elements->objects){
+						Element* element=xDynamicCast<Element*>(object);
+						element->DatasetInputAdd(StringToEnumx(cfssqt_definitionstring_s[j]),cfssqt_observations_s[j],inputs,iomodel,cfssqt_observations_M_s[j],cfssqt_observations_N_s[j],1,SurfaceObservationEnum,SurfaceObservationEnum);
+						element->DatasetInputAdd(StringToEnumx(cfssqt_definitionstring_s[j]),cfssqt_weights_s[j],inputs,iomodel,cfssqt_weights_M_s[j],cfssqt_weights_N_s[j],1,WeightsSurfaceObservationEnum,WeightsSurfaceObservationEnum);
+
+					}
+				}
+
+				/*Free resources:*/
+				for(j=0;j<num_cfsurfacesquaretransients;j++){
+					char* string=NULL;
+					IssmDouble* matrix = NULL;
+					string = cfssqt_definitionstring_s[j];		xDelete<char>(string);
+					string = cfssqt_model_string_s[j];			xDelete<char>(string);
+					string = cfssqt_name_s[j];    xDelete<char>(string);
+					matrix = cfssqt_observations_s[j]; xDelete<IssmDouble>(matrix);
+					matrix = cfssqt_weights_s[j]; xDelete<IssmDouble>(matrix);
+				}
+				xDelete<char*>(cfssqt_name_s);
+				xDelete<char*>(cfssqt_model_string_s);
+				xDelete<char*>(cfssqt_definitionstring_s);
+				xDelete<IssmDouble*>(cfssqt_observations_s);
+				xDelete<int>(cfssqt_observations_M_s);
+				xDelete<int>(cfssqt_observations_N_s);
+				xDelete<IssmDouble*>(cfssqt_weights_s);
+				xDelete<int>(cfssqt_weights_M_s);
+				xDelete<int>(cfssqt_weights_N_s);
+				/*}}}*/
+			}
 			else if (output_definition_enums[i]==CfdragcoeffabsgradEnum){
 				/*Deal with cfdragcoeffabsgrad: {{{*/
@@ -275,5 +342,5 @@
 
 					/*First create a cfdragcoeffabsgrad object for that specific string (cfdragcoeffabsgrad_model_string_s[j]):*/
-					output_definitions->AddObject(new Cfdragcoeffabsgrad(cfdragcoeffabsgrad_name_s[j],StringToEnumx(cfdragcoeffabsgrad_definitionstring_s[j]),StringToEnumx(cfdragcoeffabsgrad_weights_string_s[j]),false));
+					output_definitions->AddObject(new Cfdragcoeffabsgrad(cfdragcoeffabsgrad_name_s[j],StringToEnumx(cfdragcoeffabsgrad_definitionstring_s[j])));
 
 					/*Now, for this particular cfdragcoeffabsgrad object, make sure we plug into the elements: the observation, and the weights.*/
@@ -282,5 +349,5 @@
 						Element* element=xDynamicCast<Element*>(object);
 
-						element->DatasetInputAdd(StringToEnumx(cfdragcoeffabsgrad_definitionstring_s[j]),cfdragcoeffabsgrad_weights_s[j],inputs,iomodel,cfdragcoeffabsgrad_weights_M_s[j],cfdragcoeffabsgrad_weights_N_s[j],weight_vector_type,StringToEnumx(cfdragcoeffabsgrad_weights_string_s[j]),7,WeightsSurfaceObservationEnum);
+						element->DatasetInputAdd(StringToEnumx(cfdragcoeffabsgrad_definitionstring_s[j]),cfdragcoeffabsgrad_weights_s[j],inputs,iomodel,cfdragcoeffabsgrad_weights_M_s[j],cfdragcoeffabsgrad_weights_N_s[j],weight_vector_type,StringToEnumx(cfdragcoeffabsgrad_weights_string_s[j]),WeightsSurfaceObservationEnum);
 
 					}
@@ -306,4 +373,173 @@
 				/*}}}*/
 			}
+			else if (output_definition_enums[i]==CfdragcoeffabsgradtransientEnum){
+				/*Deal with cfdragcoeffabsgradtransient: {{{*/
+
+				/*cfdragcoeffabsgrad variables: */
+				int          num_cfdragcoeffabsgradtransients, test;
+				char**       cfdraggradt_name_s						= NULL;    
+				char**		 cfdraggradt_definitionstring_s		= NULL;    
+				IssmDouble** cfdraggradt_weights_s					= NULL;
+				int*         cfdraggradt_weights_M_s				= NULL;
+				int*         cfdraggradt_weights_N_s				= NULL;
+
+				/*Fetch name, model_string, observation, observation_string, etc ... (see src/m/classes/cfdragcoeffabsgradtransient.m): */
+				iomodel->FetchMultipleData(&cfdraggradt_name_s,&num_cfdragcoeffabsgradtransients,                                                        "md.cfdragcoeffabsgradtransient.name");
+				iomodel->FetchMultipleData(&cfdraggradt_definitionstring_s,&num_cfdragcoeffabsgradtransients,                                            "md.cfdragcoeffabsgradtransient.definitionstring");
+				iomodel->FetchMultipleData(&cfdraggradt_weights_s,&cfdraggradt_weights_M_s,&cfdraggradt_weights_N_s,&test,             "md.cfdragcoeffabsgradtransient.weights");
+					
+				for(j=0;j<num_cfdragcoeffabsgradtransients;j++){
+               
+					/*Check that we can use P1 inputs*/
+					if (cfdraggradt_weights_M_s[j]!=iomodel->numberofvertices+1)  _error_("weights should be a P1 time series");
+					
+					/*extract data times from last row of observations*/
+					IssmDouble *datatimes = xNew<IssmDouble>(cfdraggradt_weights_N_s[j]);
+					for(int k=0;k<cfdraggradt_weights_N_s[j];k++) datatimes[k] = (cfdraggradt_weights_s[j])[cfdraggradt_weights_N_s[j]*(cfdraggradt_weights_M_s[j]-1)+k];
+
+					 /*First create a cfdragcoeffabsgradtransient object for that specific string:*/
+					output_definitions->AddObject(new Cfdragcoeffabsgradtransient(cfdraggradt_name_s[j],StringToEnumx(cfdraggradt_definitionstring_s[j]), cfdraggradt_weights_N_s[j], datatimes));
+
+					/*Now, for this particular cfdragcoeffabsgrad object, make sure we plug into the elements: the observation, and the weights.*/
+					for(Object* & object : elements->objects){
+
+						Element* element=xDynamicCast<Element*>(object);
+
+						element->DatasetInputAdd(StringToEnumx(cfdraggradt_definitionstring_s[j]),cfdraggradt_weights_s[j],inputs,iomodel,cfdraggradt_weights_M_s[j],cfdraggradt_weights_N_s[j],1,WeightsSurfaceObservationEnum,WeightsSurfaceObservationEnum);
+
+					}
+				}
+
+				/*Free resources:*/
+				for(j=0;j<num_cfdragcoeffabsgradtransients;j++){
+					char* string=NULL;
+					IssmDouble* matrix = NULL;
+
+					string = cfdraggradt_definitionstring_s[j];		xDelete<char>(string);
+					string = cfdraggradt_name_s[j];    xDelete<char>(string);
+					matrix = cfdraggradt_weights_s[j]; xDelete<IssmDouble>(matrix);
+				}
+				xDelete<char*>(cfdraggradt_name_s);
+				xDelete<char*>(cfdraggradt_definitionstring_s);
+				xDelete<IssmDouble*>(cfdraggradt_weights_s);
+				xDelete<int>(cfdraggradt_weights_M_s);
+				xDelete<int>(cfdraggradt_weights_N_s);
+				/*}}}*/
+			}
+			else if (output_definition_enums[i]==CfrheologybbarabsgradEnum){
+				/*Deal with cfrheologybbarabsgrad: {{{*/
+
+				/*cfrheologybbarabsgrad variables: */
+				int          num_cfrheologybbarabsgrads;
+				char**       cfrheologybbarabsgrad_name_s                = NULL;
+				char**       cfrheologybbarabsgrad_definitionstring_s    = NULL;
+				IssmDouble** cfrheologybbarabsgrad_weights_s             = NULL;
+				int*         cfrheologybbarabsgrad_weights_M_s           = NULL;
+				int*         cfrheologybbarabsgrad_weights_N_s           = NULL;
+				char**       cfrheologybbarabsgrad_weights_string_s      = NULL;
+
+				/*Fetch name, model_string, observation, observation_string, etc ... (see src/m/classes/cfrheologybbarabsgrad.m): */
+				iomodel->FetchMultipleData(&cfrheologybbarabsgrad_name_s,&num_cfrheologybbarabsgrads,                                                        "md.cfrheologybbarabsgrad.name");
+				iomodel->FetchMultipleData(&cfrheologybbarabsgrad_definitionstring_s,&num_cfrheologybbarabsgrads,                                            "md.cfrheologybbarabsgrad.definitionstring");
+				iomodel->FetchMultipleData(&cfrheologybbarabsgrad_weights_s,&cfrheologybbarabsgrad_weights_M_s,&cfrheologybbarabsgrad_weights_N_s,&num_cfrheologybbarabsgrads,             "md.cfrheologybbarabsgrad.weights");
+				iomodel->FetchMultipleData(&cfrheologybbarabsgrad_weights_string_s,&num_cfrheologybbarabsgrads,                                              "md.cfrheologybbarabsgrad.weights_string");
+
+				for(j=0;j<num_cfrheologybbarabsgrads;j++){
+
+					int weight_vector_type=0;
+					if ((cfrheologybbarabsgrad_weights_M_s[j]==iomodel->numberofvertices) || (cfrheologybbarabsgrad_weights_M_s[j]==iomodel->numberofvertices+1)){
+						weight_vector_type=1;
+					}
+					else if ((cfrheologybbarabsgrad_weights_M_s[j]==iomodel->numberofelements) || (cfrheologybbarabsgrad_weights_M_s[j]==iomodel->numberofelements+1)){
+						weight_vector_type=2;
+					}
+					else
+					 _error_("cfrheologybbarabsgrad weight size not supported yet");
+
+					/*First create a cfrheologybbarabsgrad object for that specific string (cfrheologybbarabsgrad_model_string_s[j]):*/
+					output_definitions->AddObject(new Cfrheologybbarabsgrad(cfrheologybbarabsgrad_name_s[j],StringToEnumx(cfrheologybbarabsgrad_definitionstring_s[j]),StringToEnumx(cfrheologybbarabsgrad_weights_string_s[j])));
+
+					/*Now, for this particular cfrheologybbarabsgrad object, make sure we plug into the elements: the observation, and the weights.*/
+					for(Object* & object : elements->objects){
+
+						Element* element=xDynamicCast<Element*>(object);
+
+						element->DatasetInputAdd(StringToEnumx(cfrheologybbarabsgrad_definitionstring_s[j]),cfrheologybbarabsgrad_weights_s[j],inputs,iomodel,cfrheologybbarabsgrad_weights_M_s[j],cfrheologybbarabsgrad_weights_N_s[j],weight_vector_type,StringToEnumx(cfrheologybbarabsgrad_weights_string_s[j]),WeightsSurfaceObservationEnum);
+
+					}
+
+				}
+				    /*Free resources:*/
+            for(j=0;j<num_cfrheologybbarabsgrads;j++){
+               char* string=NULL;
+               IssmDouble* matrix = NULL;
+
+               string = cfrheologybbarabsgrad_definitionstring_s[j];    xDelete<char>(string);
+               string = cfrheologybbarabsgrad_weights_string_s[j];      xDelete<char>(string);
+               string = cfrheologybbarabsgrad_name_s[j];    xDelete<char>(string);
+               matrix = cfrheologybbarabsgrad_weights_s[j]; xDelete<IssmDouble>(matrix);
+            }
+            xDelete<char*>(cfrheologybbarabsgrad_name_s);
+            xDelete<char*>(cfrheologybbarabsgrad_definitionstring_s);
+            xDelete<IssmDouble*>(cfrheologybbarabsgrad_weights_s);
+            xDelete<int>(cfrheologybbarabsgrad_weights_M_s);
+            xDelete<int>(cfrheologybbarabsgrad_weights_N_s);
+            xDelete<char*>(cfrheologybbarabsgrad_weights_string_s);
+            /*}}}*/
+         }
+			else if (output_definition_enums[i]==CfrheologybbarabsgradtransientEnum){
+				/*Deal with cfrheologybbarabsgradtransient: {{{*/
+
+				/*cfrheologybbarabsgrad variables: */
+				int          num_cfrheologybbarabsgradtransients, test;
+				char**       cfrheogradt_name_s                = NULL;
+				char**       cfrheogradt_definitionstring_s    = NULL;
+				IssmDouble** cfrheogradt_weights_s             = NULL;
+				int*         cfrheogradt_weights_M_s           = NULL;
+				int*         cfrheogradt_weights_N_s           = NULL;
+				char**       cfrheogradt_weights_string_s      = NULL;
+
+				/*Fetch name, model_string, observation, observation_string, etc ... (see src/m/classes/cfrheologybbarabsgradtransient.m): */
+				iomodel->FetchMultipleData(&cfrheogradt_name_s,&num_cfrheologybbarabsgradtransients,                                                        "md.cfrheologybbarabsgradtransient.name");
+				iomodel->FetchMultipleData(&cfrheogradt_definitionstring_s,&num_cfrheologybbarabsgradtransients,                                            "md.cfrheologybbarabsgradtransient.definitionstring");
+				iomodel->FetchMultipleData(&cfrheogradt_weights_s,&cfrheogradt_weights_M_s,&cfrheogradt_weights_N_s,&test,             "md.cfrheologybbarabsgradtransient.weights");
+
+				for(j=0;j<num_cfrheologybbarabsgradtransients;j++){
+
+					if (cfrheogradt_weights_M_s[j]!=iomodel->numberofvertices+1) _error_("weights should be a P1 time series");
+					
+					/*extract data times from last row of observations*/
+					IssmDouble *datatimes = xNew<IssmDouble>(cfrheogradt_weights_N_s[j]);
+					for(int k=0;k<cfrheogradt_weights_N_s[j];k++) datatimes[k] = (cfrheogradt_weights_s[j])[cfrheogradt_weights_N_s[j]*(cfrheogradt_weights_M_s[j]-1)+k];
+
+					/*First create a cfrheologybbarabsgradtransient object for that specific string:*/
+					output_definitions->AddObject(new Cfrheologybbarabsgradtransient(cfrheogradt_name_s[j],StringToEnumx(cfrheogradt_definitionstring_s[j]), cfrheogradt_weights_N_s[j], datatimes));
+
+					/*Now, for this particular cfrheologybbarabsgrad object, make sure we plug into the elements: the observation, and the weights.*/
+					for(Object* & object : elements->objects){
+
+						Element* element=xDynamicCast<Element*>(object);
+
+						element->DatasetInputAdd(StringToEnumx(cfrheogradt_definitionstring_s[j]),cfrheogradt_weights_s[j],inputs,iomodel,cfrheogradt_weights_M_s[j],cfrheogradt_weights_N_s[j],1,WeightsSurfaceObservationEnum,WeightsSurfaceObservationEnum);
+
+					}
+				}
+				
+				/*Free resources:*/
+            for(j=0;j<num_cfrheologybbarabsgradtransients;j++){
+               char* string=NULL;
+               IssmDouble* matrix = NULL;
+
+               string = cfrheogradt_definitionstring_s[j];    xDelete<char>(string);
+               string = cfrheogradt_name_s[j];    xDelete<char>(string);
+               matrix = cfrheogradt_weights_s[j]; xDelete<IssmDouble>(matrix);
+            }
+            xDelete<char*>(cfrheogradt_name_s);
+            xDelete<char*>(cfrheogradt_definitionstring_s);
+            xDelete<IssmDouble*>(cfrheogradt_weights_s);
+            xDelete<int>(cfrheogradt_weights_M_s);
+            xDelete<int>(cfrheogradt_weights_N_s);
+            /*}}}*/
+         }
 			else if (output_definition_enums[i]==CfsurfacelogvelEnum){
 				/*Deal with cfsurfacelogvel: {{{*/
@@ -359,5 +595,5 @@
 
 					/*First create a cfsurfacelogvel object for that specific string (cfsurfacelogvel_modeltring[j]):*/
-					output_definitions->AddObject(new Cfsurfacelogvel(cfsurfacelogvel_name[j],StringToEnumx(cfsurfacelogvel_definitionstring[j]),cfsurfacelogvel_datatime[j],false));
+					output_definitions->AddObject(new Cfsurfacelogvel(cfsurfacelogvel_name[j],StringToEnumx(cfsurfacelogvel_definitionstring[j]),cfsurfacelogvel_datatime[j]));
 
 					/*Now, for this particular cfsurfacelogvel object, make sure we plug into the elements: the observation, and the weights.*/
@@ -366,7 +602,7 @@
 						Element* element=xDynamicCast<Element*>(object);
 
-						element->DatasetInputAdd(StringToEnumx(cfsurfacelogvel_definitionstring[j]),cfsurfacelogvel_vxobs[j],inputs,iomodel,cfsurfacelogvel_observation_M[j],cfsurfacelogvel_observation_N[j],obs_vector_type,StringToEnumx(cfsurfacelogvel_vxobs_string[j]),7,VxObsEnum);
-							element->DatasetInputAdd(StringToEnumx(cfsurfacelogvel_definitionstring[j]),cfsurfacelogvel_vyobs[j],inputs,iomodel,cfsurfacelogvel_observation_M[j],cfsurfacelogvel_observation_N[j],obs_vector_type,StringToEnumx(cfsurfacelogvel_vyobs_string[j]),7,VyObsEnum);
-						element->DatasetInputAdd(StringToEnumx(cfsurfacelogvel_definitionstring[j]),cfsurfacelogvel_weights[j],inputs,iomodel,cfsurfacelogvel_weights_M[j],cfsurfacelogvel_weights_N[j],weight_vector_type,StringToEnumx(cfsurfacelogvel_weightstring[j]),7,WeightsSurfaceObservationEnum);
+						element->DatasetInputAdd(StringToEnumx(cfsurfacelogvel_definitionstring[j]),cfsurfacelogvel_vxobs[j],inputs,iomodel,cfsurfacelogvel_observation_M[j],cfsurfacelogvel_observation_N[j],obs_vector_type,StringToEnumx(cfsurfacelogvel_vxobs_string[j]),VxObsEnum);
+							element->DatasetInputAdd(StringToEnumx(cfsurfacelogvel_definitionstring[j]),cfsurfacelogvel_vyobs[j],inputs,iomodel,cfsurfacelogvel_observation_M[j],cfsurfacelogvel_observation_N[j],obs_vector_type,StringToEnumx(cfsurfacelogvel_vyobs_string[j]),VyObsEnum);
+						element->DatasetInputAdd(StringToEnumx(cfsurfacelogvel_definitionstring[j]),cfsurfacelogvel_weights[j],inputs,iomodel,cfsurfacelogvel_weights_M[j],cfsurfacelogvel_weights_N[j],weight_vector_type,StringToEnumx(cfsurfacelogvel_weightstring[j]),WeightsSurfaceObservationEnum);
 
 					}
@@ -453,11 +689,11 @@
 
 					/*First create a cflevelsetmisfit object for that specific string (cflevelsetmisfit_model_string_s[j]):*/
-					output_definitions->AddObject(new Cflevelsetmisfit(cflevelsetmisfit_name_s[j],StringToEnumx(cflevelsetmisfit_definitionstring_s[j]),StringToEnumx(cflevelsetmisfit_model_string_s[j]),StringToEnumx(cflevelsetmisfit_observation_string_s[j]),StringToEnumx(cflevelsetmisfit_weights_string_s[j]),cflevelsetmisfit_datatime_s[j],false));
+					output_definitions->AddObject(new Cflevelsetmisfit(cflevelsetmisfit_name_s[j],StringToEnumx(cflevelsetmisfit_definitionstring_s[j]),StringToEnumx(cflevelsetmisfit_model_string_s[j]),cflevelsetmisfit_datatime_s[j]));
 
 					/*Now, for this particular cflevelsetmisfit object, make sure we plug into the elements: the observation, and the weights.*/
 					for(Object* & object : elements->objects){
 						Element* element=xDynamicCast<Element*>(object);
-						element->DatasetInputAdd(StringToEnumx(cflevelsetmisfit_definitionstring_s[j]),cflevelsetmisfit_observation_s[j],inputs,iomodel,cflevelsetmisfit_observation_M_s[j],cflevelsetmisfit_observation_N_s[j],obs_vector_type,StringToEnumx(cflevelsetmisfit_observation_string_s[j]),7,LevelsetObservationEnum);
-						element->DatasetInputAdd(StringToEnumx(cflevelsetmisfit_definitionstring_s[j]),cflevelsetmisfit_weights_s[j],inputs,iomodel,cflevelsetmisfit_weights_M_s[j],cflevelsetmisfit_weights_N_s[j],weight_vector_type,StringToEnumx(cflevelsetmisfit_weights_string_s[j]),7,WeightsLevelsetObservationEnum);
+						element->DatasetInputAdd(StringToEnumx(cflevelsetmisfit_definitionstring_s[j]),cflevelsetmisfit_observation_s[j],inputs,iomodel,cflevelsetmisfit_observation_M_s[j],cflevelsetmisfit_observation_N_s[j],obs_vector_type,StringToEnumx(cflevelsetmisfit_observation_string_s[j]),LevelsetObservationEnum);
+						element->DatasetInputAdd(StringToEnumx(cflevelsetmisfit_definitionstring_s[j]),cflevelsetmisfit_weights_s[j],inputs,iomodel,cflevelsetmisfit_weights_M_s[j],cflevelsetmisfit_weights_N_s[j],weight_vector_type,StringToEnumx(cflevelsetmisfit_weights_string_s[j]),WeightsLevelsetObservationEnum);
 					}
 				}
Index: /issm/trunk/src/c/modules/ModelProcessorx/CreateParameters.cpp
===================================================================
--- /issm/trunk/src/c/modules/ModelProcessorx/CreateParameters.cpp	(revision 28012)
+++ /issm/trunk/src/c/modules/ModelProcessorx/CreateParameters.cpp	(revision 28013)
@@ -487,7 +487,20 @@
 			parameters->AddObject(iomodel->CopyConstantObject("md.smb.rlaps",SmbRlapsEnum));
 			parameters->AddObject(iomodel->CopyConstantObject("md.smb.rdl",SmbRdlEnum));
-			break;
-		case SMBdebrisMLEnum:
-			break;
+			parameters->AddObject(iomodel->CopyConstantObject("md.smb.ismethod",SmbSemicMethodEnum));
+			break;
+		case SMBdebrisEvattEnum:
+                        parameters->AddObject(iomodel->CopyConstantObject("md.smb.qlaps",SmbDesfacEnum));
+                        parameters->AddObject(iomodel->CopyConstantObject("md.smb.rlaps",SmbRlapsEnum));
+                        parameters->AddObject(iomodel->CopyConstantObject("md.smb.dsgrad",SmbSWgradEnum));
+                        parameters->AddObject(iomodel->CopyConstantObject("md.smb.dlgrad",SmbLWgradEnum));
+                        parameters->AddObject(iomodel->CopyConstantObject("md.smb.windspeedgrad",SmbWindspeedgradEnum));
+                        parameters->AddObject(iomodel->CopyConstantObject("md.smb.humiditygrad",SmbHumiditygradEnum));
+                        parameters->AddObject(iomodel->CopyConstantObject("md.smb.icealbedo",SmbIcealbedoEnum));
+                        parameters->AddObject(iomodel->CopyConstantObject("md.smb.snowalbedo",SmbSnowalbedoEnum));
+                        parameters->AddObject(iomodel->CopyConstantObject("md.smb.debrisalbedo",SmbDebrisalbedoEnum));
+                        parameters->AddObject(iomodel->CopyConstantObject("md.smb.isAnderson",SmbDebrisIsAndersonEnum));
+                        parameters->AddObject(iomodel->CopyConstantObject("md.smb.iscryokarst",SmbDebrisIsCryokarstEnum));
+                        parameters->AddObject(iomodel->CopyConstantObject("md.smb.AndersonD0",SmbDebrisAndersonD0Enum));
+                        break;
 		default:
 			_error_("Surface mass balance model "<<EnumToStringx(smb_model)<<" not supported yet");
@@ -496,4 +509,5 @@
 	int hydrology_model;
 	iomodel->FindConstant(&hydrology_model,"md.hydrology.model");
+	parameters->AddObject(new BoolParam(HydrologyIsWaterPressureArmaEnum,false));
 	if(hydrology_model==HydrologydcEnum){
 		IssmDouble sedcomp, sedporo, watcomp, rhofresh, g;
@@ -533,4 +547,28 @@
 		/*Nothing to add*/
 	}
+	else if(hydrology_model==HydrologyarmapwEnum){
+		parameters->SetParam(true,HydrologyIsWaterPressureArmaEnum);
+      parameters->AddObject(iomodel->CopyConstantObject("md.hydrology.num_basins",HydrologyNumBasinsEnum));
+      parameters->AddObject(iomodel->CopyConstantObject("md.hydrology.num_breaks",HydrologyarmaNumBreaksEnum));
+      parameters->AddObject(iomodel->CopyConstantObject("md.hydrology.num_params",HydrologyarmaNumParamsEnum));
+      parameters->AddObject(iomodel->CopyConstantObject("md.hydrology.ar_order",HydrologyarmaarOrderEnum));
+      parameters->AddObject(iomodel->CopyConstantObject("md.hydrology.ma_order",HydrologyarmamaOrderEnum));
+      parameters->AddObject(iomodel->CopyConstantObject("md.hydrology.arma_timestep",HydrologyarmaTimestepEnum));
+      iomodel->FetchData(&transparam,&M,&N,"md.hydrology.datebreaks");
+      parameters->AddObject(new DoubleMatParam(HydrologyarmadatebreaksEnum,transparam,M,N));
+      xDelete<IssmDouble>(transparam);
+      iomodel->FetchData(&transparam,&M,&N,"md.hydrology.polynomialparams");
+      parameters->AddObject(new DoubleMatParam(HydrologyarmapolyparamsEnum,transparam,M,N));
+      xDelete<IssmDouble>(transparam);
+      iomodel->FetchData(&transparam,&M,&N,"md.hydrology.arlag_coefs");
+      parameters->AddObject(new DoubleMatParam(HydrologyarmaarlagcoefsEnum,transparam,M,N));
+      xDelete<IssmDouble>(transparam);
+      iomodel->FetchData(&transparam,&M,&N,"md.hydrology.malag_coefs");
+      parameters->AddObject(new DoubleMatParam(HydrologyarmamalagcoefsEnum,transparam,M,N));
+      xDelete<IssmDouble>(transparam);
+      iomodel->FetchData(&transparam,&M,&N,"md.hydrology.monthlyfactors");
+      parameters->AddObject(new DoubleMatParam(HydrologyarmaMonthlyFactorsEnum,transparam,M,N));
+      xDelete<IssmDouble>(transparam);
+   }
 	else{
 		_error_("Hydrology model "<<EnumToStringx(hydrology_model)<<" not supported yet");
@@ -549,9 +587,10 @@
 	parameters->AddObject(new BoolParam(StochasticForcingIsWaterPressureEnum,false));
    if(isstochasticforcing){
-      int num_fields,stochastic_dim;
+      int num_fields,num_tcov,stochastic_dim;
       char** fields;
       parameters->AddObject(iomodel->CopyConstantObject("md.stochasticforcing.num_fields",StochasticForcingNumFieldsEnum));
       parameters->AddObject(iomodel->CopyConstantObject("md.stochasticforcing.defaultdimension",StochasticForcingDefaultDimensionEnum));
       parameters->AddObject(iomodel->CopyConstantObject("md.stochasticforcing.stochastictimestep",StochasticForcingTimestepEnum));
+      parameters->AddObject(iomodel->CopyConstantObject("md.stochasticforcing.num_timescovariance",StochasticForcingNumTimesCovarianceEnum));
       iomodel->FindConstant(&fields,&num_fields,"md.stochasticforcing.fields");
       if(num_fields<1) _error_("no stochasticforcing fields found");
@@ -568,4 +607,7 @@
       parameters->AddObject(new IntVecParam(StochasticForcingDimensionsEnum,transparam,N));
       xDelete<IssmDouble>(transparam);
+      iomodel->FetchData(&transparam,&M,&N,"md.stochasticforcing.timecovariance");
+      parameters->AddObject(new DoubleVecParam(StochasticForcingTimeCovarianceEnum,transparam,N));
+      xDelete<IssmDouble>(transparam);
       iomodel->FetchData(&transparam,&M,&N,"md.stochasticforcing.covariance");
       parameters->AddObject(new DoubleMatParam(StochasticForcingCovarianceEnum,transparam,M,N));
Index: /issm/trunk/src/c/modules/OceanExchangeDatax/OceanExchangeDatax.cpp
===================================================================
--- /issm/trunk/src/c/modules/OceanExchangeDatax/OceanExchangeDatax.cpp	(revision 28012)
+++ /issm/trunk/src/c/modules/OceanExchangeDatax/OceanExchangeDatax.cpp	(revision 28013)
@@ -22,5 +22,5 @@
 	tomitgcmcomm=parcom->GetParameterValue();
 
-	int oceangridnxsize,oceangridnysize,ngrids_ocean,nels_ocean;
+	int oceangridnxsize,oceangridnysize,ngrids_ocean,nels_ocean,isoceancoupling;
 	IssmDouble  oceantime,coupling_time,time,yts;
 	IssmDouble rho_ice;
@@ -28,5 +28,5 @@
 	IssmDouble *oceangridx;
 	IssmDouble *oceangridy;
-	IssmDouble *icebase_oceangrid = NULL;
+	IssmDouble *icethickness_oceangrid = NULL;
 	IssmDouble *icemask_oceangrid = NULL;
 	IssmDouble* x_ice             = NULL;
@@ -34,5 +34,5 @@
 	IssmDouble* lat_ice           = NULL;
 	IssmDouble* lon_ice           = NULL;
-	IssmDouble* icebase           = NULL;
+	IssmDouble* icethickness      = NULL;
 	IssmDouble* icemask           = NULL;
 	IssmDouble* melt_mesh         = NULL;
@@ -45,4 +45,5 @@
 	femmodel->parameters->FindParam(&coupling_time,TimesteppingCouplingTimeEnum);
 	femmodel->parameters->FindParam(&time,TimeEnum);
+	femmodel->parameters->FindParam(&isoceancoupling,TransientIsoceancouplingEnum);
 
 	/*Exchange or recover mesh and inputs needed*/
@@ -57,5 +58,4 @@
 		ISSM_MPI_Bcast(&oceangridnysize,1,ISSM_MPI_INT,0,IssmComm::GetComm());
 		ISSM_MPI_Bcast(&ngrids_ocean,1,ISSM_MPI_INT,0,IssmComm::GetComm());
-		ISSM_MPI_Bcast(&oceantime,1,ISSM_MPI_DOUBLE,0,IssmComm::GetComm());
 		femmodel->parameters->SetParam(oceangridnxsize,OceanGridNxEnum);
 		femmodel->parameters->SetParam(oceangridnysize,OceanGridNyEnum);
@@ -65,8 +65,4 @@
 			ISSM_MPI_Recv(oceangridx,ngrids_ocean,ISSM_MPI_DOUBLE,0,10001005,tomitgcmcomm,&status);
 			ISSM_MPI_Recv(oceangridy,ngrids_ocean,ISSM_MPI_DOUBLE,0,10001006,tomitgcmcomm,&status);
-
-			/*Exchange varying parameters for the initialization*/
-			ISSM_MPI_Send(&time,1,ISSM_MPI_DOUBLE,0,10001001,tomitgcmcomm);
-			ISSM_MPI_Recv(&oceantime,1,ISSM_MPI_DOUBLE,0,10001002,tomitgcmcomm,&status);
 		}
 		
@@ -77,13 +73,20 @@
 	}
 	else{
+		/*Recoved ocean grid from parameters*/
 		femmodel->parameters->FindParam(&oceangridx,&ngrids_ocean,OceanGridXEnum);
 		femmodel->parameters->FindParam(&oceangridy,&ngrids_ocean,OceanGridYEnum);
 	}
 
-	/*Interpolate ice base and mask onto ocean grid*/
+
+	/*Interpolate ice thickness and mask onto ocean grid*/
 	femmodel->GetMesh(femmodel->vertices,femmodel->elements,&x_ice,&y_ice,&index_ice);
 	BamgTriangulatex(&index_ocean,&nels_ocean,oceangridx,oceangridy,ngrids_ocean);
-	femmodel->vertices->LatLonList(&lat_ice,&lon_ice);
-	GetVectorFromInputsx(&icebase,femmodel,BaseEnum,VertexSIdEnum);
+	if(isoceancoupling==2){
+		femmodel->vertices->LatLonList(&lat_ice,&lon_ice);
+	}
+	else{
+		femmodel->vertices->XYList(&lon_ice,&lat_ice);
+	}
+	GetVectorFromInputsx(&icethickness,femmodel,ThicknessEnum,VertexSIdEnum);
 	Options* options = new Options();
 	GenericOption<double> *odouble = new GenericOption<double>();
@@ -95,8 +98,8 @@
 	odouble->size[1]=1;
 	options->AddOption(odouble);
-	InterpFromMeshToMesh2dx(&icebase_oceangrid,index_ice,lon_ice,lat_ice,ngrids_ice,nels_ice,
-					icebase,ngrids_ice,1,oceangridx,oceangridy,ngrids_ocean,options);
+	InterpFromMeshToMesh2dx(&icethickness_oceangrid,index_ice,lon_ice,lat_ice,ngrids_ice,nels_ice,
+					icethickness,ngrids_ice,1,oceangridx,oceangridy,ngrids_ocean,options);
 	delete options;
-	xDelete<IssmDouble>(icebase);
+	xDelete<IssmDouble>(icethickness);
 
 	GetVectorFromInputsx(&icemask,femmodel,MaskIceLevelsetEnum,VertexSIdEnum);
@@ -116,16 +119,18 @@
 
 	/*Put +9999 for places where there is no ice!*/
-	for(int i=0;i<ngrids_ocean;i++) if(icemask_oceangrid[i]>0.) icebase_oceangrid[i]=+9999.;
+	femmodel->parameters->FindParam(&rho_ice,MaterialsRhoIceEnum);
+	for(int i=0;i<ngrids_ocean;i++) icethickness_oceangrid[i]=icethickness_oceangrid[i]*rho_ice; //ocean needs ice mass in kg/m^2
+	for(int i=0;i<ngrids_ocean;i++) if(icemask_oceangrid[i]>0.) icethickness_oceangrid[i]=+9999.;
 	xDelete<IssmDouble>(icemask_oceangrid);
 
-	if(init_stage==true){ //just send icebase
+	if(init_stage==true){ //just send icethickness
 		if(my_rank==0){
-			ISSM_MPI_Send(icebase_oceangrid,ngrids_ocean,ISSM_MPI_DOUBLE,0,10001008,tomitgcmcomm);
+			ISSM_MPI_Send(icethickness_oceangrid,ngrids_ocean,ISSM_MPI_DOUBLE,0,10001008,tomitgcmcomm);
 		}
 	}
 	else{ //send and receive exchanged data
-		femmodel->parameters->FindParam(&rho_ice,MaterialsRhoIceEnum);
 		femmodel->parameters->FindParam(&yts,ConstantsYtsEnum);
 		if(my_rank==0){
+			ISSM_MPI_Send(icethickness_oceangrid,ngrids_ocean,ISSM_MPI_DOUBLE,0,10001008,tomitgcmcomm);
 			ISSM_MPI_Send(&time,1,ISSM_MPI_DOUBLE,0,10001001,tomitgcmcomm);
 			ISSM_MPI_Recv(&oceantime,1,ISSM_MPI_DOUBLE,0,10001002,tomitgcmcomm,&status);
@@ -133,5 +138,4 @@
 			oceanmelt = xNew<IssmDouble>(ngrids_ocean);
 			ISSM_MPI_Recv(oceanmelt,ngrids_ocean,ISSM_MPI_DOUBLE,0,10001007,tomitgcmcomm,&status);
-			ISSM_MPI_Send(icebase_oceangrid,ngrids_ocean,ISSM_MPI_DOUBLE,0,10001008,tomitgcmcomm);
 		}
 		ISSM_MPI_Bcast(&oceantime,1,ISSM_MPI_DOUBLE,0,IssmComm::GetComm());
@@ -155,5 +159,5 @@
 	xDelete<IssmDouble>(x_ice);
 	xDelete<IssmDouble>(y_ice);
-	xDelete<IssmDouble>(icebase_oceangrid);
+	xDelete<IssmDouble>(icethickness_oceangrid);
 	xDelete<IssmDouble>(oceangridx);
 	xDelete<IssmDouble>(oceangridy);
Index: /issm/trunk/src/c/modules/QmuStatisticsx/QmuStatisticsx.cpp
===================================================================
--- /issm/trunk/src/c/modules/QmuStatisticsx/QmuStatisticsx.cpp	(revision 28012)
+++ /issm/trunk/src/c/modules/QmuStatisticsx/QmuStatisticsx.cpp	(revision 28013)
@@ -85,5 +85,5 @@
 		}
 	}
-	if(found==0)_error_("cound not find " << field << " at step " << step  << "\n");
+	if(found==0)_error_("could not find " << field << " at step " << step  << "\n");
 
 	/*assign output pointers:*/
@@ -170,5 +170,5 @@
 		_printf0_("    opening file: " << file << "\n");
 		FILE* fid=fopen(file,"rb");
-		if(fid==NULL)_error_("cound not open file: " << file << "\n");
+		if(fid==NULL)_error_("could not open file: " << file << "\n");
 
 		/*figure out size of file, and read the whole thing:*/
@@ -403,5 +403,5 @@
 		_printf0_("    opening file:\n");
 		FILE* fid=fopen(file,"rb");
-		if(fid==NULL)_error_("cound not open file: " << file << "\n");
+		if(fid==NULL)_error_("could not open file: " << file << "\n");
 
 		/*figure out size of file, and read the whole thing:*/
Index: /issm/trunk/src/c/modules/SetActiveNodesLSMx/SetActiveNodesLSMx.cpp
===================================================================
--- /issm/trunk/src/c/modules/SetActiveNodesLSMx/SetActiveNodesLSMx.cpp	(revision 28012)
+++ /issm/trunk/src/c/modules/SetActiveNodesLSMx/SetActiveNodesLSMx.cpp	(revision 28013)
@@ -10,5 +10,5 @@
 #include "../modules.h"
 
-void SetActiveNodesLSMx(FemModel* femmodel,bool ishydrology){/*{{{*/
+void SetActiveNodesLSMx(FemModel* femmodel,bool ishydrology,bool isdebris){/*{{{*/
 	/* activate/deactivate nodes for levelset method according to IceMaskNodeActivation */
 
@@ -16,4 +16,5 @@
 	int nodeactivationmask = IceMaskNodeActivationEnum;
 	if(ishydrology) nodeactivationmask = HydrologyMaskNodeActivationEnum;
+	if(isdebris) nodeactivationmask = DebrisMaskNodeActivationEnum;
 
 
@@ -59,9 +60,10 @@
 }/*}}}*/
 
-void GetMaskOfIceVerticesLSMx0(FemModel* femmodel,bool ishydrology){/*{{{*/
+void GetMaskOfIceVerticesLSMx0(FemModel* femmodel,bool ishydrology,bool isdebris){/*{{{*/
 
 	/*Determine which node activation to construct*/
 	int nodeactivationmask = IceMaskNodeActivationEnum;
 	if(ishydrology) nodeactivationmask = HydrologyMaskNodeActivationEnum;
+	if(isdebris) nodeactivationmask = DebrisMaskNodeActivationEnum;
 
 	/*Initialize vector with number of vertices*/
@@ -83,6 +85,15 @@
 			}
 		}
-	}
-	else{
+	}else if(isdebris){
+		for(Object* & object : femmodel->elements->objects){
+                        Element* element=xDynamicCast<Element*>(object);
+                        if(element->IsIceInElement() && !element->IsAllMinThicknessInElement()){
+                                int nbv = element->GetNumberOfVertices();
+                                for(int iv=0;iv<nbv;iv++){
+                                        vec_mask_ice->SetValue(element->vertices[iv]->Pid(),1.,INS_VAL);
+                                }
+                        }
+                }
+	}else{
 		for(Object* & object : femmodel->elements->objects){
 			Element* element=xDynamicCast<Element*>(object);
@@ -101,12 +112,23 @@
 	delete vec_mask_ice;
 }/*}}}*/
-void GetMaskOfIceVerticesLSMx(FemModel* femmodel,bool ishydrology){/*{{{*/
+void GetMaskOfIceVerticesLSMx(FemModel* femmodel,bool ishydrology,bool isdebris){/*{{{*/
 
 	/*Set configuration to levelset*/
 	if(ishydrology){
 		/*We may not be running with ismovingfront so we can't assume LevelsetAnalysis is active*/
-		femmodel->SetCurrentConfiguration(HydrologyGlaDSAnalysisEnum);
-	}
-	else{
+		int hydrology_model;
+		femmodel->parameters->FindParam(&hydrology_model,HydrologyModelEnum);
+		if(hydrology_model==HydrologyshaktiEnum){
+			femmodel->SetCurrentConfiguration(HydrologyShaktiAnalysisEnum);
+		}
+		else if(hydrology_model==HydrologyGlaDSEnum){
+			femmodel->SetCurrentConfiguration(HydrologyGlaDSAnalysisEnum);
+		}
+		else{
+			_error_("hydrology model not supported yet");
+		}
+	}else if(isdebris){
+		femmodel->SetCurrentConfiguration(DebrisAnalysisEnum);
+	}else{
 		femmodel->SetCurrentConfiguration(LevelsetAnalysisEnum);
 	}
@@ -115,4 +137,5 @@
 	int nodeactivationmask = IceMaskNodeActivationEnum;
 	if(ishydrology) nodeactivationmask = HydrologyMaskNodeActivationEnum;
+	if(isdebris) nodeactivationmask = DebrisMaskNodeActivationEnum;
 
 	/*Create vector on gset*/
@@ -137,6 +160,16 @@
 				xDelete<int>(glist_local);
 			}
-		}
-		else{
+		}else if(isdebris){
+			if(element->IsIceInElement() && !element->IsAllMinThicknessInElement()){
+                                int numnodes = element->GetNumberOfNodes();
+                                int  gsize_local=GetNumberOfDofs(element->nodes,numnodes,GsetEnum,NoneEnum);
+                                int* glist_local=GetGlobalDofList(element->nodes,numnodes,GsetEnum,NoneEnum);
+                                IssmDouble* ones = xNew<IssmDouble>(gsize_local);
+                                for(int n=0;n<gsize_local;n++) ones[n] = 1.;
+                                vec_mask_ice->SetValues(gsize_local,glist_local,ones,INS_VAL);
+                                xDelete<IssmDouble>(ones);
+                                xDelete<int>(glist_local);
+			}
+		}else{
 			if(element->IsIceInElement()){
 				int numnodes = element->GetNumberOfNodes();
Index: /issm/trunk/src/c/modules/SetActiveNodesLSMx/SetActiveNodesLSMx.h
===================================================================
--- /issm/trunk/src/c/modules/SetActiveNodesLSMx/SetActiveNodesLSMx.h	(revision 28012)
+++ /issm/trunk/src/c/modules/SetActiveNodesLSMx/SetActiveNodesLSMx.h	(revision 28013)
@@ -8,6 +8,6 @@
 #include "../../classes/classes.h"
 
-void SetActiveNodesLSMx(FemModel* femmodel,bool ishydrology=false);
-void GetMaskOfIceVerticesLSMx0(FemModel* femmodel,bool ishydrology=false);
-void GetMaskOfIceVerticesLSMx(FemModel* femmodel,bool ishydrology=false);
+void SetActiveNodesLSMx(FemModel* femmodel,bool ishydrology=false,bool isdebris=false);
+void GetMaskOfIceVerticesLSMx0(FemModel* femmodel,bool ishydrology=false,bool isdebris=false);
+void GetMaskOfIceVerticesLSMx(FemModel* femmodel,bool ishydrology=false,bool isdebris=false);
 #endif  /* _SETACTIVENODESLSMX_H*/
Index: /issm/trunk/src/c/modules/SetControlInputsFromVectorx/SetControlInputsFromVectorx.cpp
===================================================================
--- /issm/trunk/src/c/modules/SetControlInputsFromVectorx/SetControlInputsFromVectorx.cpp	(revision 28012)
+++ /issm/trunk/src/c/modules/SetControlInputsFromVectorx/SetControlInputsFromVectorx.cpp	(revision 28013)
@@ -22,7 +22,16 @@
 	int offset = 0;
 	for(int i=0;i<num_controls;i++){
-		for(Object* & object : femmodel->elements->objects){
-			Element* element=xDynamicCast<Element*>(object);
-			element->SetControlInputsFromVector(vector,control_type[i],i,offset,M[i],N[i]);
+		/*Is the control a Param?*/
+		if(IsParamEnum(control_type[i])){
+			femmodel->parameters->SetControlFromVector(vector,control_type[i],M[i],N[i],offset);
+		}
+		else if(IsInputEnum(control_type[i])){
+			for(Object* & object : femmodel->elements->objects){
+				Element* element=xDynamicCast<Element*>(object);
+				element->SetControlInputsFromVector(vector,control_type[i],i,offset,M[i],N[i]);
+			}
+		}
+		else{
+			_error_("not supported yet");
 		}
 		offset += M[i]*N[i]; 
Index: /issm/trunk/src/c/modules/StochasticForcingx/StochasticForcingx.cpp
===================================================================
--- /issm/trunk/src/c/modules/StochasticForcingx/StochasticForcingx.cpp	(revision 28012)
+++ /issm/trunk/src/c/modules/StochasticForcingx/StochasticForcingx.cpp	(revision 28013)
@@ -14,15 +14,18 @@
    /*Retrieve parameters*/
    bool randomflag;
-   int M,N,numfields,my_rank;
-   int* fields            = NULL;
-   int* dimensions        = NULL;
-   IssmDouble* covariance = NULL;
+   int M,N,numfields,numtcov,my_rank;
+   int* fields                = NULL;
+   int* dimensions            = NULL;
+   IssmDouble* timecovariance = NULL;
+   IssmDouble* covariance     = NULL;
    femmodel->parameters->FindParam(&randomflag,StochasticForcingRandomflagEnum);
    femmodel->parameters->FindParam(&numfields,StochasticForcingNumFieldsEnum);
+   femmodel->parameters->FindParam(&numtcov,StochasticForcingNumTimesCovarianceEnum);
    femmodel->parameters->FindParam(&fields,&N,StochasticForcingFieldsEnum);    _assert_(N==numfields);
    femmodel->parameters->FindParam(&dimensions,&N,StochasticForcingDimensionsEnum);    _assert_(N==numfields);
+   femmodel->parameters->FindParam(&timecovariance,&N,StochasticForcingTimeCovarianceEnum);    _assert_(N==numtcov);
    int dimtot=0;
    for(int i=0;i<numfields;i++) dimtot = dimtot+dimensions[i];
-   femmodel->parameters->FindParam(&covariance,&M,&N,StochasticForcingCovarianceEnum); _assert_(M==dimtot); _assert_(N==dimtot);
+   femmodel->parameters->FindParam(&covariance,&M,&N,StochasticForcingCovarianceEnum); _assert_(M==numtcov); _assert_(N==dimtot*dimtot);
 
 	/*Check if this is a timestep for new noiseterms computation*/
@@ -33,5 +36,9 @@
    femmodel->parameters->FindParam(&starttime,TimesteppingStartTimeEnum);
    femmodel->parameters->FindParam(&tstep_stoch,StochasticForcingTimestepEnum);
-		
+	
+	/*Check if we use HydroarmaPw*/
+	bool ispwHydro;
+	femmodel->parameters->FindParam(&ispwHydro,HydrologyIsWaterPressureArmaEnum);
+
 	#ifndef _HAVE_AD_
    if((fmod(time,tstep_stoch)<fmod((time-dt),tstep_stoch)) || (time<=starttime+dt) || tstep_stoch==dt) isstepforstoch = true;
@@ -41,6 +48,16 @@
 
    /*Compute noise terms*/
-	IssmDouble* noiseterms = xNew<IssmDouble>(dimtot);
+	IssmDouble* timestepcovariance = xNew<IssmDouble>(dimtot*dimtot);
+	IssmDouble* noiseterms         = xNew<IssmDouble>(dimtot);
    if(isstepforstoch){
+		/*Find covariance to be applied at current time step*/
+		int itime;
+		if(numtcov>1){
+			for(int i=0;i<numtcov;i++){
+				if(time>=timecovariance[i]) itime=i;
+			}
+		}
+		else itime=0;
+		for(int i=0;i<dimtot*dimtot;i++) timestepcovariance[i] = covariance[itime*dimtot*dimtot+i];
 		my_rank=IssmComm::GetRank();
    	if(my_rank==0){
@@ -51,5 +68,5 @@
 			/*multivariateNormal needs to be passed a NULL pointer to avoid memory leak issues*/
    	   IssmDouble* temparray = NULL;
-   	   multivariateNormal(&temparray,dimtot,0.0,covariance,fixedseed);
+   	   multivariateNormal(&temparray,dimtot,0.0,timestepcovariance,fixedseed);
    	   for(int i=0;i<dimtot;i++) noiseterms[i]=temparray[i];
 			xDelete<IssmDouble>(temparray);
@@ -76,5 +93,5 @@
 
 		/*Deal with the ARMA models*/
-		if(fields[j]==SMBarmaEnum || fields[j]==FrontalForcingsRignotarmaEnum || fields[j]==BasalforcingsDeepwaterMeltingRatearmaEnum){
+		if(fields[j]==SMBarmaEnum || fields[j]==FrontalForcingsRignotarmaEnum || fields[j]==BasalforcingsDeepwaterMeltingRatearmaEnum || fields[j]==FrontalForcingsSubglacialDischargearmaEnum || (fields[j]==FrictionWaterPressureEnum && ispwHydro)){
 			switch(fields[j]){
 				case SMBarmaEnum:
@@ -90,4 +107,12 @@
 					noiseenum_type = BasalforcingsDeepwaterMeltingRateNoiseEnum;
 					break;
+				case FrontalForcingsSubglacialDischargearmaEnum:
+					dimenum_type   = FrontalForcingsBasinIdEnum;
+					noiseenum_type = SubglacialdischargeARMANoiseEnum;
+					break;	
+				case FrictionWaterPressureEnum:
+					dimenum_type   = HydrologyBasinsIdEnum;
+					noiseenum_type = FrictionWaterPressureNoiseEnum;
+					break;	
 			}
 			for(Object* &object:femmodel->elements->objects){
@@ -106,4 +131,5 @@
 				case FrontalForcingsRignotarmaEnum:
 				case BasalforcingsDeepwaterMeltingRatearmaEnum:
+				case FrontalForcingsSubglacialDischargearmaEnum:
 					/*Already done above*/
 					break;
@@ -202,56 +228,8 @@
                   IssmDouble p_water[numvertices];
 						element->GetInputValue(&dimensionid,StochasticForcingDefaultIdEnum);
-						Gauss* gauss=element->NewGauss();
-						Friction* friction = new Friction(element);
-						for(int i=0;i<numvertices;i++){
-							gauss->GaussVertex(i);
-							p_water_deterministic[i] = friction->SubglacialWaterPressure(gauss);
-							p_water[i]               = p_water_deterministic[i] + noisefield[dimensionid]; 
-						}
-						element->AddInput(FrictionWaterPressureEnum,p_water,P1DGEnum);
-						delete gauss;
-						delete friction;
-					}
-					break;
-				case FrictionSchoofWaterPressureEnum:
-					/*Specify that WaterPressure is stochastic*/ 
-					femmodel->parameters->SetParam(true,StochasticForcingIsWaterPressureEnum);
-					for(Object* &object:femmodel->elements->objects){
-                  Element* element = xDynamicCast<Element*>(object);
-                  int numvertices  = element->GetNumberOfVertices();
-                  IssmDouble p_water_deterministic[numvertices];
-                  IssmDouble p_water[numvertices];
-						element->GetInputValue(&dimensionid,StochasticForcingDefaultIdEnum);
-						Gauss* gauss=element->NewGauss();
-						Friction* friction = new Friction(element);
-						for(int i=0;i<numvertices;i++){
-							gauss->GaussVertex(i);
-							p_water_deterministic[i] = friction->SubglacialWaterPressure(gauss);
-							p_water[i]               = p_water_deterministic[i] + noisefield[dimensionid]; 
-						}
-						element->AddInput(FrictionSchoofWaterPressureEnum,p_water,P1DGEnum);
-						delete gauss;
-						delete friction;
-					}
-					break;
-				case FrictionCoulombWaterPressureEnum:
-					/*Specify that WaterPressure is stochastic*/ 
-					femmodel->parameters->SetParam(true,StochasticForcingIsWaterPressureEnum);
-					for(Object* &object:femmodel->elements->objects){
-                  Element* element = xDynamicCast<Element*>(object);
-                  int numvertices  = element->GetNumberOfVertices();
-                  IssmDouble p_water_deterministic[numvertices];
-                  IssmDouble p_water[numvertices];
-						element->GetInputValue(&dimensionid,StochasticForcingDefaultIdEnum);
-						Gauss* gauss=element->NewGauss();
-						Friction* friction = new Friction(element);
-						for(int i=0;i<numvertices;i++){
-							gauss->GaussVertex(i);
-							p_water_deterministic[i] = friction->SubglacialWaterPressure(gauss);
-							p_water[i]               = p_water_deterministic[i] + noisefield[dimensionid]; 
-						}
-						element->AddInput(FrictionCoulombWaterPressureEnum,p_water,P1DGEnum);
-						delete gauss;
-						delete friction;
+						element->SubglacialWaterPressure(FrictionWaterPressureEnum);
+                  element->GetInputListOnVertices(&p_water_deterministic[0],FrictionWaterPressureEnum);
+                  for(int i=0;i<numvertices;i++) p_water[i] = p_water_deterministic[i] + noisefield[dimensionid];
+                  element->AddInput(FrictionWaterPressureEnum,p_water,P1DGEnum);
 					}
 					break;
@@ -268,4 +246,6 @@
    xDelete<int>(dimensions);
    xDelete<IssmDouble>(covariance);
+   xDelete<IssmDouble>(timecovariance);
+   xDelete<IssmDouble>(timestepcovariance);
    xDelete<IssmDouble>(noiseterms);
 }/*}}}*/
Index: /issm/trunk/src/c/modules/SurfaceMassBalancex/Gembx.cpp
===================================================================
--- /issm/trunk/src/c/modules/SurfaceMassBalancex/Gembx.cpp	(revision 28012)
+++ /issm/trunk/src/c/modules/SurfaceMassBalancex/Gembx.cpp	(revision 28013)
@@ -240,5 +240,5 @@
 
 		// change in pure snow albedo due to soot loading
-		IssmDouble dac2 = max(0.04 - as2, pow(-c2,0.55)/(0.16 + 0.6*pow(S1,0.5) + (1.8*pow(c2,0.6))*pow(S2,-0.25)));
+		IssmDouble dac2 = max(0.04 - as2, pow(-c2,0.55)/(0.16 + 0.6*pow(S2,0.5) + (1.8*pow(c2,0.6))*pow(S2,-0.25)));
 
 		// determine the effective change due to finite depth and soot loading
Index: /issm/trunk/src/c/modules/SurfaceMassBalancex/SurfaceMassBalancex.cpp
===================================================================
--- /issm/trunk/src/c/modules/SurfaceMassBalancex/SurfaceMassBalancex.cpp	(revision 28012)
+++ /issm/trunk/src/c/modules/SurfaceMassBalancex/SurfaceMassBalancex.cpp	(revision 28013)
@@ -67,5 +67,4 @@
 			}
 
-			smb[v]=smb[v]/1000*rho_water/rho_ice;      // SMB in m/y ice
 		}  //end of the loop over the vertices
 
@@ -187,6 +186,6 @@
 	femmodel->parameters->FindParam(&arlagcoefs,&M,&N,SmbARMAarlagcoefsEnum);             _assert_(M==numbasins); _assert_(N==arorder);
    femmodel->parameters->FindParam(&malagcoefs,&M,&N,SmbARMAmalagcoefsEnum);             _assert_(M==numbasins); _assert_(N==maorder);
-   femmodel->parameters->FindParam(&lapserates,&M,&N,SmbLapseRatesEnum);                 _assert_(M==numbasins); _assert_(N==numelevbins);
-   femmodel->parameters->FindParam(&elevbins,&M,&N,SmbElevationBinsEnum);                _assert_(M==numbasins); _assert_(N==numelevbins-1);
+   femmodel->parameters->FindParam(&lapserates,&M,&N,SmbLapseRatesEnum);                 _assert_(M==numbasins); _assert_(N==numelevbins*12);
+   femmodel->parameters->FindParam(&elevbins,&M,&N,SmbElevationBinsEnum);                _assert_(M==numbasins); _assert_(N==(numelevbins-1)*12);
    femmodel->parameters->FindParam(&refelevation,&M,SmbRefElevationEnum);                _assert_(M==numbasins);
 
@@ -504,131 +503,9 @@
 
 }/*}}}*/
-void SmbDebrisMLx(FemModel* femmodel){/*{{{*/
-
-	//      The function is based on:
-	//      Evatt GW, Abrahams ID, Heil M, Mayer C, Kingslake J, Mitchell SL, et al. Glacial melt under a porous debris layer. Journal of Glaciology 61 (2015) 825–836, doi:10.3189/2
-	//      Constants/Values are taken from Mayer, Licciulli (2021): https://www.frontiersin.org/articles/10.3389/feart.2021.710276/full#B7
-	//      function taken from https://github.com/carlolic/DebrisExp/blob/main/USFs/USF_DebrisCoverage.f90
-
-	/*Intermediaries*/
-	// altitude gradients of the crucial parameters (radiation from Marty et al., TaAClimat; 2002)
-	IssmDouble LW=2.9;          // W/m^2 /100m                       2.9
-	IssmDouble SW=1.3;          // W/m^2 /100m                       1.3
-	IssmDouble HumidityG=0;     // % /100m         rough estimate
-	IssmDouble AirTemp=0.7;     // C /100m
-	IssmDouble WindSpeed=0.02;  // m/s /100m       rough estimate    0.2
-
-	// accumulation follows a linear increase above the ELA up to a plateau
-	IssmDouble AccG=0.1;                    // m w.e. /100m
-	IssmDouble AccMax=1.;                    // m w.e.
-	IssmDouble ReferenceElevation=2200.;     // m M&L
-	IssmDouble AblationDays=120.;            //
-
-	IssmDouble In=100.;                 // Wm^-2        incoming long wave
-	IssmDouble Q=500.;                  // Wm^-2        incoming short wave
-	IssmDouble K=0.585;                // Wm^-1K^-1    thermal conductivity          0.585
-	IssmDouble Qm=0.0012;              // kg m^-3      measured humiditiy level
-	IssmDouble Qh=0.006 ;              // kg m^-3      saturated humidity level
-	IssmDouble Tm=2.;                   // C            air temperature
-	IssmDouble Rhoaa=1.22;             // kgm^-3       air densitiy
-	IssmDouble Um=1.5;                 // ms^-1        measured wind speed
-	IssmDouble Xm=1.5;                 // ms^-1        measurement height
-        IssmDouble Xr=0.01;                // ms^-1        surface roughness             0.01
-        IssmDouble Alphad=0.07;            //              debris albedo                 0.07
-        IssmDouble Alphai=0.4;             //              ice ablbedo
-        IssmDouble Ustar=0.16;             // ms^-1        friction velocity             0.16
-        IssmDouble Ca=1000.;                // jkg^-1K^-1   specific heat capacity of air
-        IssmDouble Lm;//=3.34E+05;            // jkg^-1K^-1   latent heat of ice melt
-        IssmDouble Lv=2.50E+06;            // jkg^-1K^-1   latent heat of evaporation
-        IssmDouble Tf=273.;                 // K            water freeezing temperature
-        IssmDouble Eps=0.95;               //              thermal emissivity
-        IssmDouble Rhoi=900.;               // kgm^-3       ice density
-        IssmDouble Sigma=5.67E-08;         // Wm^-2K^-4    Stefan Boltzmann constant
-        IssmDouble Kstar=0.4;              //              von kármán constant
-        IssmDouble Gamma=180.;              // m^-1         wind speed attenuation        234
-	IssmDouble PhiD;//=0.005;              //              debris packing fraction       0.01
-	IssmDouble Humidity=0.2;           //              relative humidity
-
-	IssmDouble smb,yts,z,debris;
-	IssmDouble MassBalanceCmDayDebris,MassBalanceMYearDebris;
-
-	/*Get material parameters and constants */
-	//femmodel->parameters->FindParam(&Rhoi,MaterialsRhoIceEnum); // Note Carlo's model used as  benchmark was run with different densities for debris and FS
-	femmodel->parameters->FindParam(&Lm,MaterialsLatentheatEnum);
-	femmodel->parameters->FindParam(&yts,ConstantsYtsEnum); 
-	PhiD=0.;
-	//if(isdebris) femmodel->parameters->FindParam(&PhiD,DebrisPackingFractionEnum);
-
-	/* Loop over all the elements of this partition */
-	for(Object* & object : femmodel->elements->objects){
-		Element* element=xDynamicCast<Element*>(object);
-
-		/* Allocate all arrays */
-		int         numvertices=element->GetNumberOfVertices();
-		IssmDouble* surfacelist=xNew<IssmDouble>(numvertices);
-		IssmDouble* smb=xNew<IssmDouble>(numvertices);
-		IssmDouble* debriscover=xNew<IssmDouble>(numvertices);
-		element->GetInputListOnVertices(surfacelist,SurfaceEnum);
-
-		/* Get inputs */
-		element->GetInputListOnVertices(debriscover,DebrisThicknessEnum);
-
-		/*Loop over all vertices of element and calculate SMB as function of Debris Cover and z */
-		for(int v=0;v<numvertices;v++){
-
-                        /*Get vertex elevation */
-                        z=surfacelist[v];
-
-                        /* Get debris cover */
-                        debris=debriscover[v];
-
-                        /*IssmDouble dk=1e-5; // TODO make Alphad and Alphai a user input
-                        IssmDouble n=debris/dk;
-                        IssmDouble nmax=1000;
-                        IssmDouble Alphaeff;
-                        if(n>nmax){
-                                Alphaeff=Alphad;
-                        } else {
-                                Alphaeff=Alphai+n*(Alphad-Alphai)/nmax;
-                        }*/
-
-                        // M&L
-                       IssmDouble Alphaeff=Alphad;
-
-                        /* compute smb */
-                        for (int ismb=0;ismb<2;ismb++){
-                                if(ismb==0){
-                                        // calc a reference smb to identify accum and melt region; debris only develops in ablation area
-                                        debris=0.;
-                                }else{
-                                	// only in the meltregime debris develops
-                                        if(-MassBalanceCmDayDebris<0) debris=debriscover[v]; 
-                                }
-                                MassBalanceCmDayDebris=(((In-(z-ReferenceElevation)*LW/100.)-(Eps*Sigma*(Tf*Tf*Tf*Tf))+ 
-                                    (Q+(z-ReferenceElevation)*SW/100.)*(1.-Alphaeff)+ 
-                                    (Rhoaa*Ca*Ustar*Ustar)/((Um-(z-ReferenceElevation)* 
-                                    WindSpeed/100.)-Ustar*(2.-(exp(Gamma*Xr))))*(Tm-(z- 
-                                    ReferenceElevation)*AirTemp/100.))/((1-PhiD)*Rhoi*Lm)/(1.+ 
-                                    ((Rhoaa*Ca*Ustar*Ustar)/((Um-(z-ReferenceElevation)* 
-                                    WindSpeed/100.)-Ustar*(2.-(exp(Gamma*Xr))))+4.*Eps*Sigma*(Tf*Tf*Tf))/ 
-                                    K*debris)-(Lv*Ustar*Ustar*(Qh-(Qh*(Humidity-(z- 
-                                    ReferenceElevation)*HumidityG/100.)))*(exp(-Gamma*Xr)))/((1.-PhiD)* 
-                                    Rhoi*Lm*Ustar)/((((Um-(z-ReferenceElevation)*WindSpeed/100.) 
-                                    -2.*Ustar)*exp(-Gamma*Xr))/Ustar+exp(Gamma*debris)))*100.*24.*60.*60.;
-                        }
-
-                        /* account form ablation days, and convert to m/s */
-			MassBalanceMYearDebris=-MassBalanceCmDayDebris/100.*AblationDays/yts;
-
-			/*Update array accordingly*/
-			smb[v]=MassBalanceMYearDebris;
-		}
-
-		/*Add input to element and Free memory*/
-		element->AddInput(SmbMassBalanceEnum,smb,P1Enum);
-		xDelete<IssmDouble>(surfacelist);
-		xDelete<IssmDouble>(smb);
-		xDelete<IssmDouble>(debriscover);
-	}
+void SmbDebrisEvattx(FemModel* femmodel){/*{{{*/
+        for(Object* & object : femmodel->elements->objects){
+                Element* element=xDynamicCast<Element*>(object);
+                element->SmbDebrisEvatt();
+        }
 }/*}}}*/
 void SmbGradientsComponentsx(FemModel* femmodel){/*{{{*/
@@ -641,9 +518,10 @@
 }/*}}}*/
 #ifdef _HAVE_SEMIC_
-void SmbSemicx(FemModel* femmodel){/*{{{*/
-
-	for(Object* & object : femmodel->elements->objects){
-		Element* element=xDynamicCast<Element*>(object);
-		element->SmbSemic();
+void SmbSemicx(FemModel* femmodel,int ismethod){/*{{{*/
+
+	for(Object* & object : femmodel->elements->objects){
+		Element* element=xDynamicCast<Element*>(object);
+		if (ismethod == 1) element->SmbSemicTransient(); // Inwoo's version.
+		else element->SmbSemic(); // original SmbSEMIC
 	}
 
Index: /issm/trunk/src/c/modules/SurfaceMassBalancex/SurfaceMassBalancex.h
===================================================================
--- /issm/trunk/src/c/modules/SurfaceMassBalancex/SurfaceMassBalancex.h	(revision 28012)
+++ /issm/trunk/src/c/modules/SurfaceMassBalancex/SurfaceMassBalancex.h	(revision 28013)
@@ -23,7 +23,7 @@
 void SmbMeltComponentsx(FemModel* femmodel);
 void SmbGradientsComponentsx(FemModel* femmodel);
-void SmbDebrisMLx(FemModel* femmodel);
+void SmbDebrisEvattx(FemModel* femmodel);
 /* SEMIC: */
-void SmbSemicx(FemModel* femmodel);
+void SmbSemicx(FemModel* femmodel, int ismethod);
 /*GEMB: */
 void       Gembx(FemModel* femmodel);
Index: /issm/trunk/src/c/modules/SurfaceMassBalancex/run_semic_transient.f90
===================================================================
--- /issm/trunk/src/c/modules/SurfaceMassBalancex/run_semic_transient.f90	(revision 28013)
+++ /issm/trunk/src/c/modules/SurfaceMassBalancex/run_semic_transient.f90	(revision 28013)
@@ -0,0 +1,248 @@
+subroutine run_semic_transient(nx, ntime, nloop, sf_in, rf_in, swd_in, lwd_in, wind_in, &
+      sp_in, rhoa_in, qq_in, tt_in, tsurf_in, qmr_in, &
+      tstic, &
+      hcrit, rcrit, &
+      mask, hice, hsnow, &
+      albedo, albedo_snow, &
+      alb_scheme, alb_smax, alb_smin, albi, albl, &
+      Tamp, &
+      tmin, tmax, tmid, mcrit, w_crit, tau_a, tau_f, afac, verbose, &
+      tsurf_out, smb_out, smbi_out, smbs_out, saccu_out, smelt_out,  refr_out, alb_out, & 
+      alb_snow_out,hsnow_out,hice_out,qmr_out) !{{{
+
+   use utils
+   use surface_physics
+   implicit none
+
+   ! declare surface physics class
+   type(surface_physics_class) :: surface
+   ! declare forcing class
+   !type(forc_class) :: forc
+   ! declare validation class
+   !type(vali_class) :: vali	! validation not needed here
+
+   integer, parameter:: dp=kind(0.d0)  !< define precision (machine specific)
+   integer :: i, k, n
+   integer :: nnx, nny
+   integer :: year=0
+   integer :: day =1 !< not used value.
+   integer, intent(in) :: nx, ntime, nloop   ! number of grid / number of times
+   logical, intent(in) :: verbose            ! verbosity
+   logical :: debug=.false.
+
+   ! forcing data    
+   ! input argument format array size (nx, ntime)...
+   double precision, intent(in), dimension(nx):: sf_in    ! snow fall rate [m/s]
+   double precision, intent(in), dimension(nx):: rf_in    ! rain fall rate [m/s]
+   double precision, intent(in), dimension(nx):: swd_in   ! downwelling shortwave radiation [W/m2]
+   double precision, intent(in), dimension(nx):: lwd_in   ! downwelling longwave radiation [W/m2]
+   double precision, intent(in), dimension(nx):: wind_in  ! surface wind speed [m/s]
+   double precision, intent(in), dimension(nx):: sp_in    ! surface pressure [Pa]
+   double precision, intent(in), dimension(nx):: rhoa_in  ! air density [kg/m3]
+   double precision, intent(in), dimension(nx):: qq_in    ! air specific humidity [kg/kg]
+   double precision, intent(in), dimension(nx):: tt_in    ! air temperature [K]
+
+   ! input data
+   double precision, intent(in) :: tstic  ! time step from ISSM [sec].
+
+   ! output data
+   ! Ice surface Temperature [K]
+   double precision, intent(out), dimension(nx):: tsurf_out    
+   ! surface mass balance=(Accu-Melt) [m/s]
+   double precision, intent(out), dimension(nx):: smb_out     
+   double precision, intent(out), dimension(nx):: smbi_out     ! SMB ice  [water equivalent m/s]
+   double precision, intent(out), dimension(nx):: smbs_out     ! SMB snow [water equivalent m/s]
+   double precision, intent(out), dimension(nx):: saccu_out    ! accumulation [m/s]
+   double precision, intent(out), dimension(nx):: smelt_out    ! ablation [m/s]
+   double precision, intent(out), dimension(nx):: refr_out     ! freezing [m/s]
+   double precision, intent(out), dimension(nx):: alb_out      ! grid-averaged albedo [no unit] 
+   double precision, intent(out), dimension(nx):: alb_snow_out 
+   double precision, intent(out), dimension(nx):: hice_out    
+   double precision, intent(out), dimension(nx):: hsnow_out   
+   double precision, intent(out), dimension(nx):: qmr_out     
+
+   double precision :: total_time, start, finish
+
+   ! set parameters
+   !character (len=256) :: name         ! not used(?)
+   !character (len=256) :: boundary(30) ! not used(?)
+   !character (len=256), intent(in) :: alb_scheme  !< name of albedo scheme
+   integer, intent(in)          :: alb_scheme
+   !integer :: n_ksub    
+   !double precision             :: ceff         !< surface heat heat capacity of snow/ice [J/W m2]
+   double precision, intent(in), dimension(nx):: albedo
+   double precision, intent(in), dimension(nx):: albedo_snow !< spatial..
+   double precision, intent(in), dimension(nx):: hsnow
+   double precision, intent(in), dimension(nx):: hice
+   double precision, intent(in), dimension(nx):: tsurf_in    !< input temperature [K]
+   double precision, intent(in), dimension(nx):: qmr_in 
+   double precision, intent(in), dimension(nx):: mask
+
+   double precision, intent(in) :: albi
+   double precision, intent(in) :: albl
+   double precision, intent(in) :: alb_smax
+   double precision, intent(in) :: alb_smin
+   double precision, intent(in) :: hcrit !< critical snow height for which grid cell is 50% snow covered [m]
+   double precision, intent(in) :: rcrit !< critical snow height fro which refreezing fraction is 50% [m]
+   double precision, intent(in) :: Tamp
+   !double precision    :: csh
+   !double precision    :: clh
+   double precision, intent(in) :: tmin
+   double precision, intent(in) :: tmax
+   !double precision    :: tsticsub
+   ! parameters for isba albedo scheme.
+   double precision, intent(in) :: tau_a  !< critical liquide water concent for "isba" albedo scheme [kg/m2]
+   double precision, intent(in) :: tau_f
+   double precision, intent(in) :: w_crit
+   double precision, intent(in) :: mcrit
+   double precision, intent(in) :: afac !< param
+   double precision, intent(in) :: tmid !< param for "alex" albedo parameterization [K]
+
+   if (debug) then
+      print*,'   ntime: ', ntime
+      print*,'   nx   : ', nx
+   end if
+
+   ! set vector length
+   surface%par%nx = nx
+
+   ! FIXME should be user input
+   !boundary = "" "" ""
+   if (debug) then
+      print*, "run_semic_transient: initialize parameters."
+   end if
+   surface%par%tstic = tstic      !< time step [s]
+   surface%par%ceff= 2.0e6_dp     !< surface heat capacity of snow/ice [J/K/m2]
+   surface%par%csh = 2.0e-3_dp    !< turbulent heat exchange coefficient 
+   surface%par%clh = 5.0e-4_dp    !< turbulent heat exchange coefficient [no unit]
+   surface%par%alb_smax = alb_smax !0.79_dp !< max snow albedo
+   surface%par%alb_smin = alb_smin !0.6_dp  !< min snow albedo
+   surface%par%albi = albi ! 0.41_dp     !< albedo for ice
+   surface%par%albl = albl ! 0.07_dp     !< albedo for land
+   surface%par%tmin = tmin ! -999_dp
+   surface%par%tmax = tmax ! 273.15_dp
+   surface%par%hcrit = hcrit !0.028_dp   !< critical snow height for which grid cell is 50 % snow colvered [m]
+   surface%par%rcrit = rcrit !0.85_dp    !< refreezing fraction is 50% [m]
+   surface%par%amp   = Tamp !3.0_dp   !< amplitude of diurnal cycle [K]
+   if (alb_scheme == 0) then
+      surface%par%alb_scheme="none"
+   else if (alb_scheme == 1) then
+      surface%par%alb_scheme = "slater"
+   else if (alb_scheme == 2) then
+      surface%par%alb_scheme = "denby"
+   else if (alb_scheme == 3) then
+      surface%par%alb_scheme = "isba"
+   else
+      print*, "ERROR: current albedo scheme is not available."
+      call exit(1)
+   end if 
+   surface%par%tau_a  = tau_a  !0.008_dp
+   surface%par%tau_f  = tau_f  !0.24_dp
+   surface%par%w_crit = w_crit !15.0_dp ! default value
+   surface%par%mcrit  = mcrit  !6.0e-8_dp
+   surface%par%n_ksub = 3      ! sub ...
+   ! snow albedo of alex
+   surface%par%afac   = afac
+   surface%par%tmid   = tmid
+   
+   ! initialize sub-daily time step tsticsub
+   surface%par%tsticsub = surface%par%tstic / dble(surface%par%n_ksub)
+
+   ! allocate necessary arrays for surface_physics module
+   call surface_alloc(surface%now,surface%par%nx)
+
+   ! initialise prognostic variables
+   if (debug) then
+      print*,"run_semic_transient: initialize variables."
+   end if
+   ! these values will be updated through "surface_energy_and_mass_balance" function.
+   surface%now%mask    (:) = mask       (:) ! 2.0_dp  !loi_mask(:nx)
+   if (debug) then
+      print*,"run_semic_transient: initialize variables: mask"
+   end if
+   surface%now%hsnow   (:) = hsnow      (:) ! initial snow height...
+   surface%now%hice    (:) = hice       (:) ! initial ice height..
+   surface%now%tsurf   (:) = tsurf_in   (:) !< initial ice surface temperature
+   surface%now%alb     (:) = albedo     (:) !< initial albedo for energy balance.
+   surface%now%alb_snow(:) = albedo_snow(:) !< initial albedo for ISBA albedo method.
+   if (debug) then
+      print*,"run_semic_transient: initialize variables. DONE."
+   end if
+
+   if (debug) then
+      !print*, "====== global variable =========="
+      !print*, "nloop          :", nloop
+      !print*, "nx             :", surface%par%nx
+      !print*, "======  parameters ======"
+      !print*, "csh            :", surface%par%csh
+      !print*, "clh            :", surface%par%clh
+      !print*, "albeo scheme   :", surface%par%alb_scheme
+      !print*, "albeo ice      :", surface%par%albi
+      !print*, "tstic          :", surface%par%tstic
+      !print*, "tsticsub       :", surface%par%tsticsub
+      !print*, "n_ksub         :", surface%par%n_ksub
+      print*, "====== inputs ========="
+      print*, "hsnow          :", hsnow
+      print*, "======  state variables ======"
+      print*, "hsnow          :", surface%now%hsnow
+      print*, "hice           :", surface%now%hice
+      print*, "albeo          :", surface%now%alb
+      print*, "albeo snow     :", surface%now%alb_snow
+      print*, "mask           :", surface%now%mask
+      print*, "tsurf          :", surface%now%tsurf
+      print*, "sf             :", sf_in
+   end if
+
+   ! define boundary conditions (not used, here!)
+   call surface_boundary_define(surface%bnd,surface%par%boundary)
+   !call print_boundary_opt(surface%bnd)
+
+   ! input with single value
+   do k =1,nloop
+      do i =1,ntime
+         if (debug) then
+            print*,"run_semic_transient: forcing data: ntime = ", i
+         end if
+         surface%now%sf   = sf_in  !(:,i)
+         surface%now%rf   = rf_in  !(:,i)
+         surface%now%sp   = sp_in  !(:,i)
+         surface%now%lwd  = lwd_in !(:,i)
+         surface%now%swd  = swd_in !(:,i)
+         surface%now%wind = wind_in!(:,i)
+         surface%now%rhoa = rhoa_in!(:,i)
+         surface%now%t2m  = tt_in  !(:,i)
+         surface%now%qq   = qq_in  !(:,i)
+         ! qmr_res is used to "energy_balance" in semic.
+         surface%now%qmr_res = qmr_in
+
+         ! calculate prognostic and diagnsotic variables
+         call surface_energy_and_mass_balance(surface%now,surface%par,surface%bnd,i,year)
+         
+         if (debug) then
+            print*,"done..."
+         end if
+         if (k == nloop) then
+            tsurf_out         = surface%now%tsurf
+            ! melt - potential surface melt [m/s]
+            ! smb = SMB_ice + SMB_snow
+            ! smbi  - SMB_ice  (water equivalent m/sec)
+            ! smbs  - SMB_snow (water equivalent m/sec)
+            smb_out           =surface%now%smb      ! smb = smb_snow + smb_ice
+            smbi_out          =surface%now%smb_ice  ! Csi (snow>ice) - melted_ice + refrezon_rain.
+            smbs_out          =surface%now%smb_snow ! smb_snow = snowfall - sublimiation - melted_snow + refrozen_snow
+            saccu_out         =surface%now%acc      ! acc      = snowfall - sublimiation - refreezing 
+            smelt_out         =surface%now%melt     ! potential surface melt = melt_ice + melt_snow
+            refr_out          =surface%now%refr     ! refreezing values. [m/sec]
+            alb_out           =surface%now%alb
+            alb_snow_out      =surface%now%alb_snow
+            hsnow_out         =surface%now%hsnow
+            hice_out          =surface%now%hice
+            qmr_out           =surface%now%qmr_res
+         end if
+      end do
+   end do
+
+   ! de-allocate surface_physics arrays
+   call surface_dealloc(surface%now)
+
+end subroutine run_semic_transient ! }}}
Index: /issm/trunk/src/c/shared/Elements/PrintArrays.cpp
===================================================================
--- /issm/trunk/src/c/shared/Elements/PrintArrays.cpp	(revision 28012)
+++ /issm/trunk/src/c/shared/Elements/PrintArrays.cpp	(revision 28013)
@@ -84,2 +84,24 @@
 	}
 }
+
+void InversionStatsHeader(int NJ){
+	//https://cboard.cprogramming.com/c-programming/151930-ascii-table-border.html
+	int width = max(10*NJ,24);
+	_printf0_("\n");
+	_printf0_("┌────┬─────────────────┬────────────┬"); for(int i=0;i<width;i++){_printf0_("─");} _printf0_("┐\n");
+	_printf0_("│Iter│  Cost function  │ Grad. norm │  List of contributions "); for(int i=0;i<width-24;i++){_printf0_(" ");} _printf0_("│\n");
+	_printf0_("├────┼─────────────────┼────────────┼"); for(int i=0;i<width;i++){_printf0_("─");} _printf0_("┤\n");
+}
+void InversionStatsIter(int iter,double J, double Gnorm, double* Jlist, int N){
+	int width = max(10*N,24);
+	_printf0_("│"<<setw(3)<<iter<<" ");
+	_printf0_("│ f(x)="<<setw(10)<<setprecision(5)<<J<<" ");
+	_printf0_("│   "<<setw(8)<<setprecision(3)<<Gnorm<<" │");
+	for(int i=0;i<N;i++) _printf0_(" "<<setw(9)<<setprecision(4)<<Jlist[i]);
+	for(int i=0;i<width-10*N;i++){_printf0_(" ");}
+	_printf0_("│\n");
+}
+void InversionStatsFooter(int NJ){
+	int width = max(10*NJ,24);
+	_printf0_("└────┴─────────────────┴────────────┴");for(int i=0;i<width;i++){_printf0_("─");} _printf0_("┘\n");
+}
Index: /issm/trunk/src/c/shared/Elements/elements.h
===================================================================
--- /issm/trunk/src/c/shared/Elements/elements.h	(revision 28012)
+++ /issm/trunk/src/c/shared/Elements/elements.h	(revision 28013)
@@ -59,3 +59,6 @@
 void printsparsity(IssmPDouble* array,int lines,int cols=1);
 void printbinary(int n);
+void InversionStatsHeader(int NJ);
+void InversionStatsIter(int iter,double J, double Gnorm, double* Jlist, int N);
+void InversionStatsFooter(int NJ);
 #endif //ifndef _SHARED_ELEMENTS_H_
Index: /issm/trunk/src/c/shared/Enum/Enum.vim
===================================================================
--- /issm/trunk/src/c/shared/Enum/Enum.vim	(revision 28012)
+++ /issm/trunk/src/c/shared/Enum/Enum.vim	(revision 28013)
@@ -114,4 +114,6 @@
 syn keyword cConstant BasalforcingsUpperwaterElevationEnum
 syn keyword cConstant BasalforcingsUpperwaterMeltingRateEnum
+syn keyword cConstant CalvingADStressThresholdFloatingiceEnum
+syn keyword cConstant CalvingADStressThresholdGroundediceEnum
 syn keyword cConstant CalvingCrevasseDepthEnum
 syn keyword cConstant CalvingCrevasseThresholdEnum
@@ -131,4 +133,5 @@
 syn keyword cConstant CalvingVelUpperboundEnum
 syn keyword cConstant CalvingRcEnum
+syn keyword cConstant CalvingNumberofBasinsEnum
 syn keyword cConstant ConfigurationTypeEnum
 syn keyword cConstant ConstantsGEnum
@@ -209,5 +212,7 @@
 syn keyword cConstant FrictionGammaEnum
 syn keyword cConstant FrictionLawEnum
+syn keyword cConstant FrictionLinearizeEnum
 syn keyword cConstant FrictionPseudoplasticityExponentEnum
+syn keyword cConstant FrictionU0Enum
 syn keyword cConstant FrictionThresholdSpeedEnum
 syn keyword cConstant FrictionVoidRatioEnum
@@ -221,4 +226,5 @@
 syn keyword cConstant FrontalForcingsARMAmonthtrendsEnum
 syn keyword cConstant FrontalForcingsARMApolyparamsEnum
+syn keyword cConstant FrontalForcingsIsDischargeARMAEnum
 syn keyword cConstant FrontalForcingsNumberofBasinsEnum
 syn keyword cConstant FrontalForcingsNumberofBreaksEnum
@@ -228,4 +234,14 @@
 syn keyword cConstant FrontalForcingsARMAarlagcoefsEnum
 syn keyword cConstant FrontalForcingsARMAmalagcoefsEnum
+syn keyword cConstant FrontalForcingsSdarlagcoefsEnum
+syn keyword cConstant FrontalForcingsSdARMATimestepEnum
+syn keyword cConstant FrontalForcingsSdarOrderEnum
+syn keyword cConstant FrontalForcingsSddatebreaksEnum
+syn keyword cConstant FrontalForcingsSdmalagcoefsEnum
+syn keyword cConstant FrontalForcingsSdmaOrderEnum
+syn keyword cConstant FrontalForcingsSdMonthlyFracEnum
+syn keyword cConstant FrontalForcingsSdNumberofBreaksEnum
+syn keyword cConstant FrontalForcingsSdNumberofParamsEnum
+syn keyword cConstant FrontalForcingsSdpolyparamsEnum
 syn keyword cConstant GrdModelEnum
 syn keyword cConstant GroundinglineFrictionInterpolationEnum
@@ -234,17 +250,34 @@
 syn keyword cConstant GroundinglineNumRequestedOutputsEnum
 syn keyword cConstant GroundinglineRequestedOutputsEnum
+syn keyword cConstant HydrologyarmaarOrderEnum
+syn keyword cConstant HydrologyarmaarlagcoefsEnum
+syn keyword cConstant HydrologyarmadatebreaksEnum
+syn keyword cConstant HydrologyarmamalagcoefsEnum
+syn keyword cConstant HydrologyarmamaOrderEnum
+syn keyword cConstant HydrologyarmaMonthlyFactorsEnum
+syn keyword cConstant HydrologyarmaNumBreaksEnum
+syn keyword cConstant HydrologyarmaNumParamsEnum
+syn keyword cConstant HydrologyarmapolyparamsEnum
+syn keyword cConstant HydrologyarmaTimestepEnum
 syn keyword cConstant HydrologyAveragingEnum
+syn keyword cConstant HydrologyChannelAlphaEnum
+syn keyword cConstant HydrologyChannelBetaEnum
 syn keyword cConstant HydrologyCavitySpacingEnum
-syn keyword cConstant HydrologyChannelConductivityEnum
 syn keyword cConstant HydrologyChannelSheetWidthEnum
 syn keyword cConstant HydrologyEnglacialVoidRatioEnum
 syn keyword cConstant HydrologyIschannelsEnum
+syn keyword cConstant HydrologyIsTransitionEnum
+syn keyword cConstant HydrologyIsWaterPressureArmaEnum
 syn keyword cConstant HydrologyMeltFlagEnum
 syn keyword cConstant HydrologyModelEnum
+syn keyword cConstant HydrologyNumBasinsEnum
 syn keyword cConstant HydrologyNumRequestedOutputsEnum
+syn keyword cConstant HydrologyOmegaEnum
 syn keyword cConstant HydrologyPressureMeltCoefficientEnum
 syn keyword cConstant HydrologyRelaxationEnum
 syn keyword cConstant HydrologyRequestedOutputsEnum
 syn keyword cConstant HydrologySedimentKmaxEnum
+syn keyword cConstant HydrologySheetAlphaEnum
+syn keyword cConstant HydrologySheetBetaEnum
 syn keyword cConstant HydrologyStepsPerStepEnum
 syn keyword cConstant HydrologyStorageEnum
@@ -288,4 +321,5 @@
 syn keyword cConstant InversionCostFunctionsEnum
 syn keyword cConstant InversionDxminEnum
+syn keyword cConstant InversionDfminFracEnum
 syn keyword cConstant InversionGatolEnum
 syn keyword cConstant InversionGradientScalingEnum
@@ -301,4 +335,5 @@
 syn keyword cConstant InversionNumCostFunctionsEnum
 syn keyword cConstant InversionStepThresholdEnum
+syn keyword cConstant InversionStopFlagEnum
 syn keyword cConstant InversionTypeEnum
 syn keyword cConstant IvinsEnum
@@ -478,5 +513,7 @@
 syn keyword cConstant StochasticForcingNoisetermsEnum
 syn keyword cConstant StochasticForcingNumFieldsEnum
+syn keyword cConstant StochasticForcingNumTimesCovarianceEnum
 syn keyword cConstant StochasticForcingRandomflagEnum
+syn keyword cConstant StochasticForcingTimeCovarianceEnum
 syn keyword cConstant StochasticForcingTimestepEnum
 syn keyword cConstant SolidearthSettingsReltolEnum
@@ -499,9 +536,25 @@
 syn keyword cConstant SmbAccurefEnum
 syn keyword cConstant SmbAdThreshEnum
+syn keyword cConstant SmbAlbedoSchemeEnum
+syn keyword cConstant SmbAlbedoSnowMaxEnum
+syn keyword cConstant SmbAlbedoSnowMinEnum
+syn keyword cConstant SmbAlbedoIceEnum
+syn keyword cConstant SmbAlbedoLandEnum
 syn keyword cConstant SmbARMATimestepEnum
 syn keyword cConstant SmbARMAarOrderEnum
+syn keyword cConstant SmbARMAarlagcoefsEnum
+syn keyword cConstant SmbARMAdatebreaksEnum
 syn keyword cConstant SmbARMAmaOrderEnum
+syn keyword cConstant SmbARMAmalagcoefsEnum
+syn keyword cConstant SmbARMApolyparamsEnum
 syn keyword cConstant SmbAveragingEnum
+syn keyword cConstant SmbDebrisalbedoEnum
+syn keyword cConstant SmbIcealbedoEnum
+syn keyword cConstant SmbSnowalbedoEnum
+syn keyword cConstant SmbDebrisIsAndersonEnum
+syn keyword cConstant SmbDebrisIsCryokarstEnum
+syn keyword cConstant SmbDebrisAndersonD0Enum
 syn keyword cConstant SmbDesfacEnum
+syn keyword cConstant SmbDesfacElevEnum
 syn keyword cConstant SmbDpermilEnum
 syn keyword cConstant SmbDsnowIdxEnum
@@ -515,4 +568,5 @@
 syn keyword cConstant SmbEIdxEnum
 syn keyword cConstant SmbFEnum
+syn keyword cConstant SmbHumiditygradEnum
 syn keyword cConstant SmbInitDensityScalingEnum
 syn keyword cConstant SmbIsaccumulationEnum
@@ -535,4 +589,5 @@
 syn keyword cConstant SmbKEnum
 syn keyword cConstant SmbLapseRatesEnum
+syn keyword cConstant SmbLWgradEnum
 syn keyword cConstant SmbNumBasinsEnum
 syn keyword cConstant SmbNumBreaksEnum
@@ -541,8 +596,4 @@
 syn keyword cConstant SmbNumRequestedOutputsEnum
 syn keyword cConstant SmbPfacEnum
-syn keyword cConstant SmbARMAarlagcoefsEnum
-syn keyword cConstant SmbARMAdatebreaksEnum
-syn keyword cConstant SmbARMAmalagcoefsEnum
-syn keyword cConstant SmbARMApolyparamsEnum
 syn keyword cConstant SmbRdlEnum
 syn keyword cConstant SmbRefElevationEnum
@@ -554,6 +605,18 @@
 syn keyword cConstant SmbRunoffrefEnum
 syn keyword cConstant SmbSealevEnum
+syn keyword cConstant SmbSemicMethodEnum
+syn keyword cConstant SmbSemicHcritEnum
+syn keyword cConstant SmbSemicRcritEnum
+syn keyword cConstant SmbSemicWcritEnum
+syn keyword cConstant SmbSemicMcritEnum
+syn keyword cConstant SmbSemicAfacEnum
+syn keyword cConstant SmbSemicTauAEnum
+syn keyword cConstant SmbSemicTauFEnum
+syn keyword cConstant SmbSemicTminEnum
+syn keyword cConstant SmbSemicTmidEnum
+syn keyword cConstant SmbSemicTmaxEnum
 syn keyword cConstant SmbStepsPerStepEnum
 syn keyword cConstant SmbSwIdxEnum
+syn keyword cConstant SmbSWgradEnum
 syn keyword cConstant SmbT0dryEnum
 syn keyword cConstant SmbT0wetEnum
@@ -564,4 +627,5 @@
 syn keyword cConstant SmbTemperaturesReconstructedYearsEnum
 syn keyword cConstant SmbPrecipitationsReconstructedYearsEnum
+syn keyword cConstant SmbWindspeedgradEnum
 syn keyword cConstant SmoothThicknessMultiplierEnum
 syn keyword cConstant SolutionTypeEnum
@@ -666,6 +730,8 @@
 syn keyword cConstant BasalforcingsDeepwaterMeltingRateValuesMovingaverageEnum
 syn keyword cConstant BasalforcingsFloatingiceMeltingRateEnum
+syn keyword cConstant BasalforcingsFloatingiceMeltingRateObsEnum
 syn keyword cConstant BasalforcingsGeothermalfluxEnum
 syn keyword cConstant BasalforcingsGroundediceMeltingRateEnum
+syn keyword cConstant BasalforcingsGroundediceMeltingRateObsEnum
 syn keyword cConstant BasalforcingsLinearBasinIdEnum
 syn keyword cConstant BasalforcingsPerturbationMeltingRateEnum
@@ -709,4 +775,5 @@
 syn keyword cConstant BottomPressureEnum
 syn keyword cConstant BottomPressureOldEnum
+syn keyword cConstant CalvingBasinIdEnum
 syn keyword cConstant CalvingCalvingrateEnum
 syn keyword cConstant CalvingHabFractionEnum
@@ -779,4 +846,5 @@
 syn keyword cConstant EtaDiffEnum
 syn keyword cConstant FlowequationBorderFSEnum
+syn keyword cConstant FrictionAlpha2Enum
 syn keyword cConstant FrictionAsEnum
 syn keyword cConstant FrictionCEnum
@@ -784,6 +852,6 @@
 syn keyword cConstant FrictionCoefficientEnum
 syn keyword cConstant FrictionCoefficientcoulombEnum
-syn keyword cConstant FrictionCoulombWaterPressureEnum
 syn keyword cConstant FrictionEffectivePressureEnum
+syn keyword cConstant FrictionKEnum
 syn keyword cConstant FrictionMEnum
 syn keyword cConstant FrictionPEnum
@@ -792,9 +860,10 @@
 syn keyword cConstant FrictionSedimentCompressibilityCoefficientEnum
 syn keyword cConstant FrictionTillFrictionAngleEnum
-syn keyword cConstant FrictionSchoofWaterPressureEnum
 syn keyword cConstant FrictionWaterLayerEnum
 syn keyword cConstant FrictionWaterPressureEnum
+syn keyword cConstant FrictionWaterPressureNoiseEnum
 syn keyword cConstant FrictionfEnum
 syn keyword cConstant FrontalForcingsBasinIdEnum
+syn keyword cConstant FrontalForcingsSubglacialDischargearmaEnum
 syn keyword cConstant FrontalForcingsSubglacialDischargeEnum
 syn keyword cConstant GeometryHydrostaticRatioEnum
@@ -808,6 +877,8 @@
 syn keyword cConstant HydraulicPotentialOldEnum
 syn keyword cConstant HydrologyBasalFluxEnum
+syn keyword cConstant HydrologyBasinsIdEnum
 syn keyword cConstant HydrologyBumpHeightEnum
 syn keyword cConstant HydrologyBumpSpacingEnum
+syn keyword cConstant HydrologyChannelConductivityEnum
 syn keyword cConstant HydrologydcBasalMoulinInputEnum
 syn keyword cConstant HydrologydcEplThicknessEnum
@@ -842,4 +913,5 @@
 syn keyword cConstant HydrologyWaterVyEnum
 syn keyword cConstant HydrologyMaskNodeActivationEnum
+syn keyword cConstant DebrisMaskNodeActivationEnum
 syn keyword cConstant IceEnum
 syn keyword cConstant IceMaskNodeActivationEnum
@@ -982,4 +1054,8 @@
 syn keyword cConstant SmbAccumulatedRefreezeEnum
 syn keyword cConstant SmbAccumulatedRunoffEnum
+syn keyword cConstant SmbAlbedoEnum
+syn keyword cConstant SmbAlbedoInitEnum
+syn keyword cConstant SmbAlbedoSnowEnum
+syn keyword cConstant SmbAlbedoSnowInitEnum
 syn keyword cConstant SmbAEnum
 syn keyword cConstant SmbAdiffEnum
@@ -1029,10 +1105,18 @@
 syn keyword cConstant SmbGspEnum
 syn keyword cConstant SmbGspiniEnum
+syn keyword cConstant SmbHIceEnum
+syn keyword cConstant SmbHIceInitEnum
+syn keyword cConstant SmbHSnowEnum
+syn keyword cConstant SmbHSnowInitEnum
 syn keyword cConstant SmbHrefEnum
 syn keyword cConstant SmbIsInitializedEnum
 syn keyword cConstant SmbMAddEnum
 syn keyword cConstant SmbMassBalanceEnum
+syn keyword cConstant SmbMassBalanceSnowEnum
+syn keyword cConstant SmbMassBalanceIceEnum
+syn keyword cConstant SmbMassBalanceSemicEnum
 syn keyword cConstant SmbMassBalanceSubstepEnum
 syn keyword cConstant SmbMassBalanceTransientEnum
+syn keyword cConstant SmbMaskEnum
 syn keyword cConstant SmbMeanLHFEnum
 syn keyword cConstant SmbMeanSHFEnum
@@ -1040,4 +1124,8 @@
 syn keyword cConstant SmbMeltEnum
 syn keyword cConstant SmbMonthlytemperaturesEnum
+syn keyword cConstant SmbMonthlydsradiationEnum
+syn keyword cConstant SmbMonthlydlradiationEnum
+syn keyword cConstant SmbMonthlywindspeedEnum
+syn keyword cConstant SmbMonthlyairhumidityEnum
 syn keyword cConstant SmbMSurfEnum
 syn keyword cConstant SmbNetLWEnum
@@ -1049,4 +1137,8 @@
 syn keyword cConstant SmbPrecipitationEnum
 syn keyword cConstant SmbPrecipitationsAnomalyEnum
+syn keyword cConstant SmbDsradiationAnomalyEnum
+syn keyword cConstant SmbDlradiationAnomalyEnum
+syn keyword cConstant SmbWindspeedAnomalyEnum
+syn keyword cConstant SmbAirhumidityAnomalyEnum
 syn keyword cConstant SmbPrecipitationsLgmEnum
 syn keyword cConstant SmbPrecipitationsPresentdayEnum
@@ -1062,10 +1154,16 @@
 syn keyword cConstant SmbS0pEnum
 syn keyword cConstant SmbS0tEnum
+syn keyword cConstant SmbSemicQmrEnum
+syn keyword cConstant SmbSemicQmrInitEnum
 syn keyword cConstant SmbSizeiniEnum
 syn keyword cConstant SmbSmbCorrEnum
 syn keyword cConstant SmbSmbrefEnum
 syn keyword cConstant SmbSzaValueEnum
+syn keyword cConstant SmbSummerMeltEnum
+syn keyword cConstant SmbSummerAlbedoEnum
+syn keyword cConstant SmbSnowheightEnum
 syn keyword cConstant SmbTEnum
 syn keyword cConstant SmbTaEnum
+syn keyword cConstant SmbTampEnum
 syn keyword cConstant SmbTeValueEnum
 syn keyword cConstant SmbTemperaturesAnomalyEnum
@@ -1109,4 +1207,7 @@
 syn keyword cConstant StressTensoryzEnum
 syn keyword cConstant StressTensorzzEnum
+syn keyword cConstant SubglacialdischargeARMANoiseEnum
+syn keyword cConstant SubglacialdischargeValuesAutoregressionEnum
+syn keyword cConstant SubglacialdischargeValuesMovingaverageEnum
 syn keyword cConstant SurfaceAbsMisfitEnum
 syn keyword cConstant SurfaceAbsVelMisfitEnum
@@ -1170,5 +1271,11 @@
 syn keyword cConstant WaterfractionEnum
 syn keyword cConstant WaterheightEnum
+syn keyword cConstant WaterPressureArmaPerturbationEnum
+syn keyword cConstant WaterPressureValuesAutoregressionEnum
+syn keyword cConstant WaterPressureValuesMovingaverageEnum
 syn keyword cConstant WeightsLevelsetObservationEnum
+syn keyword cConstant WeightsMeltObservationEnum
+syn keyword cConstant WeightsVxObservationEnum
+syn keyword cConstant WeightsVyObservationEnum
 syn keyword cConstant WeightsSurfaceObservationEnum
 syn keyword cConstant OldAccumulatedDeltaBottomPressureEnum
@@ -1313,9 +1420,15 @@
 syn keyword cConstant CalvingTestEnum
 syn keyword cConstant CalvingParameterizationEnum
+syn keyword cConstant CalvingCalvingMIPEnum
 syn keyword cConstant CalvingVonmisesEnum
+syn keyword cConstant CalvingVonmisesADEnum
 syn keyword cConstant CalvingPollardEnum
 syn keyword cConstant CfdragcoeffabsgradEnum
+syn keyword cConstant CfdragcoeffabsgradtransientEnum
+syn keyword cConstant CfrheologybbarabsgradEnum
+syn keyword cConstant CfrheologybbarabsgradtransientEnum
 syn keyword cConstant CfsurfacelogvelEnum
 syn keyword cConstant CfsurfacesquareEnum
+syn keyword cConstant CfsurfacesquaretransientEnum
 syn keyword cConstant CflevelsetmisfitEnum
 syn keyword cConstant ChannelEnum
@@ -1334,4 +1447,5 @@
 syn keyword cConstant ControlInputMinsEnum
 syn keyword cConstant ControlInputValuesEnum
+syn keyword cConstant ControlParamEnum
 syn keyword cConstant CrouzeixRaviartEnum
 syn keyword cConstant CuffeyEnum
@@ -1411,4 +1525,6 @@
 syn keyword cConstant HOFSApproximationEnum
 syn keyword cConstant HookEnum
+syn keyword cConstant HydrologyArmapwAnalysisEnum
+syn keyword cConstant HydrologyarmapwEnum
 syn keyword cConstant HydrologyDCEfficientAnalysisEnum
 syn keyword cConstant HydrologyDCInefficientAnalysisEnum
@@ -1568,5 +1684,5 @@
 syn keyword cConstant SMBarmaEnum
 syn keyword cConstant SMBcomponentsEnum
-syn keyword cConstant SMBdebrisMLEnum
+syn keyword cConstant SMBdebrisEvattEnum
 syn keyword cConstant SMBd18opddEnum
 syn keyword cConstant SMBforcingEnum
@@ -1640,4 +1756,6 @@
 syn keyword cConstant TotalSmbEnum
 syn keyword cConstant TotalSmbScaledEnum
+syn keyword cConstant TotalSmbRefreezeEnum
+syn keyword cConstant TotalSmbMeltEnum
 syn keyword cConstant TransientArrayParamEnum
 syn keyword cConstant TransientInputEnum
@@ -1686,7 +1804,11 @@
 syn keyword cType BoolParam
 syn keyword cType Cfdragcoeffabsgrad
+syn keyword cType Cfdragcoeffabsgradtransient
 syn keyword cType Cflevelsetmisfit
+syn keyword cType Cfrheologybbarabsgrad
+syn keyword cType Cfrheologybbarabsgradtransient
 syn keyword cType Cfsurfacelogvel
 syn keyword cType Cfsurfacesquare
+syn keyword cType Cfsurfacesquaretransient
 syn keyword cType Channel
 syn keyword cType classes
@@ -1696,4 +1818,5 @@
 syn keyword cType Contours
 syn keyword cType ControlInput
+syn keyword cType ControlParam
 syn keyword cType Covertree
 syn keyword cType DatasetInput
@@ -1826,4 +1949,5 @@
 syn keyword cType FreeSurfaceTopAnalysis
 syn keyword cType GLheightadvectionAnalysis
+syn keyword cType HydrologyArmapwAnalysis
 syn keyword cType HydrologyDCEfficientAnalysis
 syn keyword cType HydrologyDCInefficientAnalysis
Index: /issm/trunk/src/c/shared/Enum/EnumDefinitions.h
===================================================================
--- /issm/trunk/src/c/shared/Enum/EnumDefinitions.h	(revision 28012)
+++ /issm/trunk/src/c/shared/Enum/EnumDefinitions.h	(revision 28013)
@@ -108,4 +108,6 @@
 	BasalforcingsUpperwaterElevationEnum,
 	BasalforcingsUpperwaterMeltingRateEnum,
+	CalvingADStressThresholdFloatingiceEnum,
+	CalvingADStressThresholdGroundediceEnum,
 	CalvingCrevasseDepthEnum,
 	CalvingCrevasseThresholdEnum,
@@ -125,4 +127,5 @@
 	CalvingVelUpperboundEnum,
 	CalvingRcEnum,
+	CalvingNumberofBasinsEnum,
 	ConfigurationTypeEnum,
 	ConstantsGEnum,
@@ -203,5 +206,7 @@
 	FrictionGammaEnum,
 	FrictionLawEnum,
+	FrictionLinearizeEnum,
 	FrictionPseudoplasticityExponentEnum,
+	FrictionU0Enum,
 	FrictionThresholdSpeedEnum,
 	FrictionVoidRatioEnum,
@@ -215,4 +220,5 @@
    FrontalForcingsARMAmonthtrendsEnum,
    FrontalForcingsARMApolyparamsEnum,
+	FrontalForcingsIsDischargeARMAEnum,
 	FrontalForcingsNumberofBasinsEnum,
 	FrontalForcingsNumberofBreaksEnum,
@@ -222,4 +228,14 @@
    FrontalForcingsARMAarlagcoefsEnum,
    FrontalForcingsARMAmalagcoefsEnum,
+   FrontalForcingsSdarlagcoefsEnum,
+	FrontalForcingsSdARMATimestepEnum,
+   FrontalForcingsSdarOrderEnum,
+   FrontalForcingsSddatebreaksEnum,
+   FrontalForcingsSdmalagcoefsEnum,
+   FrontalForcingsSdmaOrderEnum,
+   FrontalForcingsSdMonthlyFracEnum,
+	FrontalForcingsSdNumberofBreaksEnum,
+	FrontalForcingsSdNumberofParamsEnum,
+   FrontalForcingsSdpolyparamsEnum,
 	GrdModelEnum,
 	GroundinglineFrictionInterpolationEnum,
@@ -228,17 +244,34 @@
 	GroundinglineNumRequestedOutputsEnum,
 	GroundinglineRequestedOutputsEnum,
+	HydrologyarmaarOrderEnum,
+   HydrologyarmaarlagcoefsEnum,
+   HydrologyarmadatebreaksEnum,
+   HydrologyarmamalagcoefsEnum,
+   HydrologyarmamaOrderEnum,
+   HydrologyarmaMonthlyFactorsEnum,
+   HydrologyarmaNumBreaksEnum,
+   HydrologyarmaNumParamsEnum,
+   HydrologyarmapolyparamsEnum,
+   HydrologyarmaTimestepEnum,
 	HydrologyAveragingEnum,
-	HydrologyCavitySpacingEnum,
-	HydrologyChannelConductivityEnum,
+	HydrologyChannelAlphaEnum,
+	HydrologyChannelBetaEnum,
+	HydrologyCavitySpacingEnum,	
 	HydrologyChannelSheetWidthEnum,
 	HydrologyEnglacialVoidRatioEnum,
 	HydrologyIschannelsEnum,
+	HydrologyIsTransitionEnum,
+	HydrologyIsWaterPressureArmaEnum,
 	HydrologyMeltFlagEnum,
 	HydrologyModelEnum,
+	HydrologyNumBasinsEnum,
 	HydrologyNumRequestedOutputsEnum,
+	HydrologyOmegaEnum,
 	HydrologyPressureMeltCoefficientEnum,
 	HydrologyRelaxationEnum,
 	HydrologyRequestedOutputsEnum,
 	HydrologySedimentKmaxEnum,
+	HydrologySheetAlphaEnum,
+	HydrologySheetBetaEnum,
 	HydrologyStepsPerStepEnum,
 	HydrologyStorageEnum,
@@ -282,4 +315,5 @@
 	InversionCostFunctionsEnum,
 	InversionDxminEnum,
+	InversionDfminFracEnum,
 	InversionGatolEnum,
 	InversionGradientScalingEnum,
@@ -295,4 +329,5 @@
 	InversionNumCostFunctionsEnum,
 	InversionStepThresholdEnum,
+	InversionStopFlagEnum,
 	InversionTypeEnum,
 	IvinsEnum,
@@ -472,5 +507,7 @@
 	StochasticForcingNoisetermsEnum,
 	StochasticForcingNumFieldsEnum,
+	StochasticForcingNumTimesCovarianceEnum,
 	StochasticForcingRandomflagEnum,
+	StochasticForcingTimeCovarianceEnum,
 	StochasticForcingTimestepEnum,
 	SolidearthSettingsReltolEnum,
@@ -493,9 +530,25 @@
 	SmbAccurefEnum,
 	SmbAdThreshEnum,
+	SmbAlbedoSchemeEnum,
+	SmbAlbedoSnowMaxEnum,
+	SmbAlbedoSnowMinEnum,
+	SmbAlbedoIceEnum,
+	SmbAlbedoLandEnum,
 	SmbARMATimestepEnum,
    SmbARMAarOrderEnum,
+	SmbARMAarlagcoefsEnum,
+	SmbARMAdatebreaksEnum,
    SmbARMAmaOrderEnum,
-	SmbAveragingEnum,
+	SmbARMAmalagcoefsEnum,
+	SmbARMApolyparamsEnum,
+   SmbAveragingEnum,
+	SmbDebrisalbedoEnum,
+	SmbIcealbedoEnum,
+	SmbSnowalbedoEnum,
+        SmbDebrisIsAndersonEnum,
+        SmbDebrisIsCryokarstEnum,
+        SmbDebrisAndersonD0Enum,
 	SmbDesfacEnum,
+	SmbDesfacElevEnum,
 	SmbDpermilEnum,
 	SmbDsnowIdxEnum,
@@ -509,4 +562,5 @@
 	SmbEIdxEnum,
 	SmbFEnum,
+	SmbHumiditygradEnum,
 	SmbInitDensityScalingEnum,
 	SmbIsaccumulationEnum,
@@ -529,4 +583,5 @@
 	SmbKEnum,
    SmbLapseRatesEnum,
+   	SmbLWgradEnum,
 	SmbNumBasinsEnum,
 	SmbNumBreaksEnum,
@@ -535,8 +590,4 @@
 	SmbNumRequestedOutputsEnum,
 	SmbPfacEnum,
-	SmbARMAarlagcoefsEnum,
-	SmbARMAdatebreaksEnum,
-	SmbARMAmalagcoefsEnum,
-	SmbARMApolyparamsEnum,
 	SmbRdlEnum,
 	SmbRefElevationEnum,
@@ -548,6 +599,18 @@
 	SmbRunoffrefEnum,
 	SmbSealevEnum,
+	SmbSemicMethodEnum,
+	SmbSemicHcritEnum,
+	SmbSemicRcritEnum,
+	SmbSemicWcritEnum,
+	SmbSemicMcritEnum,
+	SmbSemicAfacEnum,
+	SmbSemicTauAEnum,
+	SmbSemicTauFEnum,
+	SmbSemicTminEnum,
+	SmbSemicTmidEnum,
+	SmbSemicTmaxEnum,
 	SmbStepsPerStepEnum,
 	SmbSwIdxEnum,
+	SmbSWgradEnum,
 	SmbT0dryEnum,
 	SmbT0wetEnum,
@@ -558,4 +621,5 @@
 	SmbTemperaturesReconstructedYearsEnum,
 	SmbPrecipitationsReconstructedYearsEnum,
+	SmbWindspeedgradEnum,
 	SmoothThicknessMultiplierEnum,
 	SolutionTypeEnum,
@@ -662,6 +726,8 @@
 	BasalforcingsDeepwaterMeltingRateValuesMovingaverageEnum,
 	BasalforcingsFloatingiceMeltingRateEnum,
+	BasalforcingsFloatingiceMeltingRateObsEnum,
 	BasalforcingsGeothermalfluxEnum,
 	BasalforcingsGroundediceMeltingRateEnum,
+	BasalforcingsGroundediceMeltingRateObsEnum,
 	BasalforcingsLinearBasinIdEnum,
 	BasalforcingsPerturbationMeltingRateEnum,
@@ -705,4 +771,5 @@
 	BottomPressureEnum,
 	BottomPressureOldEnum,
+	CalvingBasinIdEnum,
 	CalvingCalvingrateEnum,
 	CalvingHabFractionEnum,
@@ -775,4 +842,5 @@
 	EtaDiffEnum,
 	FlowequationBorderFSEnum,
+	FrictionAlpha2Enum,
 	FrictionAsEnum,
 	FrictionCEnum,
@@ -780,6 +848,6 @@
 	FrictionCoefficientEnum,
 	FrictionCoefficientcoulombEnum,
-	FrictionCoulombWaterPressureEnum,
 	FrictionEffectivePressureEnum,
+	FrictionKEnum,
 	FrictionMEnum,
 	FrictionPEnum,
@@ -788,9 +856,10 @@
 	FrictionSedimentCompressibilityCoefficientEnum,
 	FrictionTillFrictionAngleEnum,
-	FrictionSchoofWaterPressureEnum,
 	FrictionWaterLayerEnum,
 	FrictionWaterPressureEnum,
+	FrictionWaterPressureNoiseEnum,
 	FrictionfEnum,
 	FrontalForcingsBasinIdEnum,
+	FrontalForcingsSubglacialDischargearmaEnum,
 	FrontalForcingsSubglacialDischargeEnum,
 	GeometryHydrostaticRatioEnum,
@@ -804,6 +873,8 @@
 	HydraulicPotentialOldEnum,
 	HydrologyBasalFluxEnum,
+	HydrologyBasinsIdEnum,
 	HydrologyBumpHeightEnum,
 	HydrologyBumpSpacingEnum,
+	HydrologyChannelConductivityEnum,
 	HydrologydcBasalMoulinInputEnum,
 	HydrologydcEplThicknessEnum,
@@ -838,4 +909,5 @@
 	HydrologyWaterVyEnum,
 	HydrologyMaskNodeActivationEnum,
+	DebrisMaskNodeActivationEnum,
 	IceEnum,
 	IceMaskNodeActivationEnum,
@@ -978,4 +1050,8 @@
 	SmbAccumulatedRefreezeEnum,
 	SmbAccumulatedRunoffEnum,
+	SmbAlbedoEnum,
+	SmbAlbedoInitEnum,
+	SmbAlbedoSnowEnum,
+	SmbAlbedoSnowInitEnum,
 	SmbAEnum,
 	SmbAdiffEnum,
@@ -1025,10 +1101,18 @@
 	SmbGspEnum,
 	SmbGspiniEnum,
+	SmbHIceEnum,
+	SmbHIceInitEnum,
+	SmbHSnowEnum,
+	SmbHSnowInitEnum,
 	SmbHrefEnum,
 	SmbIsInitializedEnum,
 	SmbMAddEnum,
 	SmbMassBalanceEnum,
+	SmbMassBalanceSnowEnum,
+	SmbMassBalanceIceEnum,
+	SmbMassBalanceSemicEnum,
    SmbMassBalanceSubstepEnum,
    SmbMassBalanceTransientEnum,
+	SmbMaskEnum,
 	SmbMeanLHFEnum,
 	SmbMeanSHFEnum,
@@ -1037,4 +1121,8 @@
 	SmbMInitnum,
 	SmbMonthlytemperaturesEnum,
+	SmbMonthlydsradiationEnum,
+        SmbMonthlydlradiationEnum,
+        SmbMonthlywindspeedEnum,
+        SmbMonthlyairhumidityEnum,
 	SmbMSurfEnum,
 	SmbNetLWEnum,
@@ -1046,4 +1134,8 @@
 	SmbPrecipitationEnum,
 	SmbPrecipitationsAnomalyEnum,
+	SmbDsradiationAnomalyEnum,
+        SmbDlradiationAnomalyEnum,
+        SmbWindspeedAnomalyEnum,
+        SmbAirhumidityAnomalyEnum,
 	SmbPrecipitationsLgmEnum,
 	SmbPrecipitationsPresentdayEnum,
@@ -1059,10 +1151,16 @@
 	SmbS0pEnum,
 	SmbS0tEnum,
+	SmbSemicQmrEnum,
+	SmbSemicQmrInitEnum,
 	SmbSizeiniEnum,
 	SmbSmbCorrEnum,
 	SmbSmbrefEnum,
 	SmbSzaValueEnum,
+	SmbSummerMeltEnum,
+        SmbSummerAlbedoEnum,
+        SmbSnowheightEnum,
 	SmbTEnum,
 	SmbTaEnum,
+	SmbTampEnum,
 	SmbTeValueEnum,
 	SmbTemperaturesAnomalyEnum,
@@ -1106,4 +1204,7 @@
 	StressTensoryzEnum,
 	StressTensorzzEnum,
+	SubglacialdischargeARMANoiseEnum,
+	SubglacialdischargeValuesAutoregressionEnum,
+	SubglacialdischargeValuesMovingaverageEnum,
 	SurfaceAbsMisfitEnum,
 	SurfaceAbsVelMisfitEnum,
@@ -1167,5 +1268,11 @@
 	WaterfractionEnum,
 	WaterheightEnum,
+	WaterPressureArmaPerturbationEnum,
+   WaterPressureValuesAutoregressionEnum,
+   WaterPressureValuesMovingaverageEnum,
 	WeightsLevelsetObservationEnum,
+	WeightsMeltObservationEnum,
+	WeightsVxObservationEnum,
+	WeightsVyObservationEnum,
 	WeightsSurfaceObservationEnum,
 	OldAccumulatedDeltaBottomPressureEnum,
@@ -1312,9 +1419,15 @@
 	CalvingTestEnum,
 	CalvingParameterizationEnum,
+	CalvingCalvingMIPEnum,
 	CalvingVonmisesEnum,
+	CalvingVonmisesADEnum,
 	CalvingPollardEnum,
 	CfdragcoeffabsgradEnum,
+	CfdragcoeffabsgradtransientEnum,
+	CfrheologybbarabsgradEnum,
+	CfrheologybbarabsgradtransientEnum,
 	CfsurfacelogvelEnum,
 	CfsurfacesquareEnum,
+	CfsurfacesquaretransientEnum,
 	CflevelsetmisfitEnum,
 	ChannelEnum,
@@ -1333,4 +1446,5 @@
 	ControlInputMinsEnum,
 	ControlInputValuesEnum,
+	ControlParamEnum,
 	CrouzeixRaviartEnum,
 	CuffeyEnum,
@@ -1410,4 +1524,6 @@
 	HOFSApproximationEnum,
 	HookEnum,
+	HydrologyArmapwAnalysisEnum,
+   HydrologyarmapwEnum,
 	HydrologyDCEfficientAnalysisEnum,
 	HydrologyDCInefficientAnalysisEnum,
@@ -1567,5 +1683,5 @@
 	SMBarmaEnum,
 	SMBcomponentsEnum,
-	SMBdebrisMLEnum,
+	SMBdebrisEvattEnum,
 	SMBd18opddEnum,
 	SMBforcingEnum,
@@ -1639,4 +1755,6 @@
 	TotalSmbEnum,
 	TotalSmbScaledEnum,
+	TotalSmbRefreezeEnum,
+	TotalSmbMeltEnum,
 	TransientArrayParamEnum,
 	TransientInputEnum,
Index: /issm/trunk/src/c/shared/Enum/EnumToStringx.cpp
===================================================================
--- /issm/trunk/src/c/shared/Enum/EnumToStringx.cpp	(revision 28012)
+++ /issm/trunk/src/c/shared/Enum/EnumToStringx.cpp	(revision 28013)
@@ -116,4 +116,6 @@
 		case BasalforcingsUpperwaterElevationEnum : return "BasalforcingsUpperwaterElevation";
 		case BasalforcingsUpperwaterMeltingRateEnum : return "BasalforcingsUpperwaterMeltingRate";
+		case CalvingADStressThresholdFloatingiceEnum : return "CalvingADStressThresholdFloatingice";
+		case CalvingADStressThresholdGroundediceEnum : return "CalvingADStressThresholdGroundedice";
 		case CalvingCrevasseDepthEnum : return "CalvingCrevasseDepth";
 		case CalvingCrevasseThresholdEnum : return "CalvingCrevasseThreshold";
@@ -133,4 +135,5 @@
 		case CalvingVelUpperboundEnum : return "CalvingVelUpperbound";
 		case CalvingRcEnum : return "CalvingRc";
+		case CalvingNumberofBasinsEnum : return "CalvingNumberofBasins";
 		case ConfigurationTypeEnum : return "ConfigurationType";
 		case ConstantsGEnum : return "ConstantsG";
@@ -211,5 +214,7 @@
 		case FrictionGammaEnum : return "FrictionGamma";
 		case FrictionLawEnum : return "FrictionLaw";
+		case FrictionLinearizeEnum : return "FrictionLinearize";
 		case FrictionPseudoplasticityExponentEnum : return "FrictionPseudoplasticityExponent";
+		case FrictionU0Enum : return "FrictionU0";
 		case FrictionThresholdSpeedEnum : return "FrictionThresholdSpeed";
 		case FrictionVoidRatioEnum : return "FrictionVoidRatio";
@@ -223,4 +228,5 @@
 		case FrontalForcingsARMAmonthtrendsEnum : return "FrontalForcingsARMAmonthtrends";
 		case FrontalForcingsARMApolyparamsEnum : return "FrontalForcingsARMApolyparams";
+		case FrontalForcingsIsDischargeARMAEnum : return "FrontalForcingsIsDischargeARMA";
 		case FrontalForcingsNumberofBasinsEnum : return "FrontalForcingsNumberofBasins";
 		case FrontalForcingsNumberofBreaksEnum : return "FrontalForcingsNumberofBreaks";
@@ -230,4 +236,14 @@
 		case FrontalForcingsARMAarlagcoefsEnum : return "FrontalForcingsARMAarlagcoefs";
 		case FrontalForcingsARMAmalagcoefsEnum : return "FrontalForcingsARMAmalagcoefs";
+		case FrontalForcingsSdarlagcoefsEnum : return "FrontalForcingsSdarlagcoefs";
+		case FrontalForcingsSdARMATimestepEnum : return "FrontalForcingsSdARMATimestep";
+		case FrontalForcingsSdarOrderEnum : return "FrontalForcingsSdarOrder";
+		case FrontalForcingsSddatebreaksEnum : return "FrontalForcingsSddatebreaks";
+		case FrontalForcingsSdmalagcoefsEnum : return "FrontalForcingsSdmalagcoefs";
+		case FrontalForcingsSdmaOrderEnum : return "FrontalForcingsSdmaOrder";
+		case FrontalForcingsSdMonthlyFracEnum : return "FrontalForcingsSdMonthlyFrac";
+		case FrontalForcingsSdNumberofBreaksEnum : return "FrontalForcingsSdNumberofBreaks";
+		case FrontalForcingsSdNumberofParamsEnum : return "FrontalForcingsSdNumberofParams";
+		case FrontalForcingsSdpolyparamsEnum : return "FrontalForcingsSdpolyparams";
 		case GrdModelEnum : return "GrdModel";
 		case GroundinglineFrictionInterpolationEnum : return "GroundinglineFrictionInterpolation";
@@ -236,17 +252,34 @@
 		case GroundinglineNumRequestedOutputsEnum : return "GroundinglineNumRequestedOutputs";
 		case GroundinglineRequestedOutputsEnum : return "GroundinglineRequestedOutputs";
+		case HydrologyarmaarOrderEnum : return "HydrologyarmaarOrder";
+		case HydrologyarmaarlagcoefsEnum : return "Hydrologyarmaarlagcoefs";
+		case HydrologyarmadatebreaksEnum : return "Hydrologyarmadatebreaks";
+		case HydrologyarmamalagcoefsEnum : return "Hydrologyarmamalagcoefs";
+		case HydrologyarmamaOrderEnum : return "HydrologyarmamaOrder";
+		case HydrologyarmaMonthlyFactorsEnum : return "HydrologyarmaMonthlyFactors";
+		case HydrologyarmaNumBreaksEnum : return "HydrologyarmaNumBreaks";
+		case HydrologyarmaNumParamsEnum : return "HydrologyarmaNumParams";
+		case HydrologyarmapolyparamsEnum : return "Hydrologyarmapolyparams";
+		case HydrologyarmaTimestepEnum : return "HydrologyarmaTimestep";
 		case HydrologyAveragingEnum : return "HydrologyAveraging";
+		case HydrologyChannelAlphaEnum : return "HydrologyChannelAlpha";
+		case HydrologyChannelBetaEnum : return "HydrologyChannelBeta";
 		case HydrologyCavitySpacingEnum : return "HydrologyCavitySpacing";
-		case HydrologyChannelConductivityEnum : return "HydrologyChannelConductivity";
 		case HydrologyChannelSheetWidthEnum : return "HydrologyChannelSheetWidth";
 		case HydrologyEnglacialVoidRatioEnum : return "HydrologyEnglacialVoidRatio";
 		case HydrologyIschannelsEnum : return "HydrologyIschannels";
+		case HydrologyIsTransitionEnum : return "HydrologyIsTransition";
+		case HydrologyIsWaterPressureArmaEnum : return "HydrologyIsWaterPressureArma";
 		case HydrologyMeltFlagEnum : return "HydrologyMeltFlag";
 		case HydrologyModelEnum : return "HydrologyModel";
+		case HydrologyNumBasinsEnum : return "HydrologyNumBasins";
 		case HydrologyNumRequestedOutputsEnum : return "HydrologyNumRequestedOutputs";
+		case HydrologyOmegaEnum : return "HydrologyOmega";
 		case HydrologyPressureMeltCoefficientEnum : return "HydrologyPressureMeltCoefficient";
 		case HydrologyRelaxationEnum : return "HydrologyRelaxation";
 		case HydrologyRequestedOutputsEnum : return "HydrologyRequestedOutputs";
 		case HydrologySedimentKmaxEnum : return "HydrologySedimentKmax";
+		case HydrologySheetAlphaEnum : return "HydrologySheetAlpha";
+		case HydrologySheetBetaEnum : return "HydrologySheetBeta";
 		case HydrologyStepsPerStepEnum : return "HydrologyStepsPerStep";
 		case HydrologyStorageEnum : return "HydrologyStorage";
@@ -290,4 +323,5 @@
 		case InversionCostFunctionsEnum : return "InversionCostFunctions";
 		case InversionDxminEnum : return "InversionDxmin";
+		case InversionDfminFracEnum : return "InversionDfminFrac";
 		case InversionGatolEnum : return "InversionGatol";
 		case InversionGradientScalingEnum : return "InversionGradientScaling";
@@ -303,4 +337,5 @@
 		case InversionNumCostFunctionsEnum : return "InversionNumCostFunctions";
 		case InversionStepThresholdEnum : return "InversionStepThreshold";
+		case InversionStopFlagEnum : return "InversionStopFlag";
 		case InversionTypeEnum : return "InversionType";
 		case IvinsEnum : return "Ivins";
@@ -480,5 +515,7 @@
 		case StochasticForcingNoisetermsEnum : return "StochasticForcingNoiseterms";
 		case StochasticForcingNumFieldsEnum : return "StochasticForcingNumFields";
+		case StochasticForcingNumTimesCovarianceEnum : return "StochasticForcingNumTimesCovariance";
 		case StochasticForcingRandomflagEnum : return "StochasticForcingRandomflag";
+		case StochasticForcingTimeCovarianceEnum : return "StochasticForcingTimeCovariance";
 		case StochasticForcingTimestepEnum : return "StochasticForcingTimestep";
 		case SolidearthSettingsReltolEnum : return "SolidearthSettingsReltol";
@@ -501,9 +538,25 @@
 		case SmbAccurefEnum : return "SmbAccuref";
 		case SmbAdThreshEnum : return "SmbAdThresh";
+		case SmbAlbedoSchemeEnum : return "SmbAlbedoScheme";
+		case SmbAlbedoSnowMaxEnum : return "SmbAlbedoSnowMax";
+		case SmbAlbedoSnowMinEnum : return "SmbAlbedoSnowMin";
+		case SmbAlbedoIceEnum : return "SmbAlbedoIce";
+		case SmbAlbedoLandEnum : return "SmbAlbedoLand";
 		case SmbARMATimestepEnum : return "SmbARMATimestep";
 		case SmbARMAarOrderEnum : return "SmbARMAarOrder";
+		case SmbARMAarlagcoefsEnum : return "SmbARMAarlagcoefs";
+		case SmbARMAdatebreaksEnum : return "SmbARMAdatebreaks";
 		case SmbARMAmaOrderEnum : return "SmbARMAmaOrder";
+		case SmbARMAmalagcoefsEnum : return "SmbARMAmalagcoefs";
+		case SmbARMApolyparamsEnum : return "SmbARMApolyparams";
 		case SmbAveragingEnum : return "SmbAveraging";
+		case SmbDebrisalbedoEnum : return "SmbDebrisalbedo";
+		case SmbIcealbedoEnum : return "SmbIcealbedo";
+		case SmbSnowalbedoEnum : return "SmbSnowalbedo";
+		case SmbDebrisIsAndersonEnum : return "SmbDebrisIsAnderson";
+		case SmbDebrisIsCryokarstEnum : return "SmbDebrisIsCryokarst";
+		case SmbDebrisAndersonD0Enum : return "SmbDebrisAndersonD0";
 		case SmbDesfacEnum : return "SmbDesfac";
+		case SmbDesfacElevEnum : return "SmbDesfacElev";
 		case SmbDpermilEnum : return "SmbDpermil";
 		case SmbDsnowIdxEnum : return "SmbDsnowIdx";
@@ -517,4 +570,5 @@
 		case SmbEIdxEnum : return "SmbEIdx";
 		case SmbFEnum : return "SmbF";
+		case SmbHumiditygradEnum : return "SmbHumiditygrad";
 		case SmbInitDensityScalingEnum : return "SmbInitDensityScaling";
 		case SmbIsaccumulationEnum : return "SmbIsaccumulation";
@@ -537,4 +591,5 @@
 		case SmbKEnum : return "SmbK";
 		case SmbLapseRatesEnum : return "SmbLapseRates";
+		case SmbLWgradEnum : return "SmbLWgrad";
 		case SmbNumBasinsEnum : return "SmbNumBasins";
 		case SmbNumBreaksEnum : return "SmbNumBreaks";
@@ -543,8 +598,4 @@
 		case SmbNumRequestedOutputsEnum : return "SmbNumRequestedOutputs";
 		case SmbPfacEnum : return "SmbPfac";
-		case SmbARMAarlagcoefsEnum : return "SmbARMAarlagcoefs";
-		case SmbARMAdatebreaksEnum : return "SmbARMAdatebreaks";
-		case SmbARMAmalagcoefsEnum : return "SmbARMAmalagcoefs";
-		case SmbARMApolyparamsEnum : return "SmbARMApolyparams";
 		case SmbRdlEnum : return "SmbRdl";
 		case SmbRefElevationEnum : return "SmbRefElevation";
@@ -556,6 +607,18 @@
 		case SmbRunoffrefEnum : return "SmbRunoffref";
 		case SmbSealevEnum : return "SmbSealev";
+		case SmbSemicMethodEnum : return "SmbSemicMethod";
+		case SmbSemicHcritEnum : return "SmbSemicHcrit";
+		case SmbSemicRcritEnum : return "SmbSemicRcrit";
+		case SmbSemicWcritEnum : return "SmbSemicWcrit";
+		case SmbSemicMcritEnum : return "SmbSemicMcrit";
+		case SmbSemicAfacEnum : return "SmbSemicAfac";
+		case SmbSemicTauAEnum : return "SmbSemicTauA";
+		case SmbSemicTauFEnum : return "SmbSemicTauF";
+		case SmbSemicTminEnum : return "SmbSemicTmin";
+		case SmbSemicTmidEnum : return "SmbSemicTmid";
+		case SmbSemicTmaxEnum : return "SmbSemicTmax";
 		case SmbStepsPerStepEnum : return "SmbStepsPerStep";
 		case SmbSwIdxEnum : return "SmbSwIdx";
+		case SmbSWgradEnum : return "SmbSWgrad";
 		case SmbT0dryEnum : return "SmbT0dry";
 		case SmbT0wetEnum : return "SmbT0wet";
@@ -566,4 +629,5 @@
 		case SmbTemperaturesReconstructedYearsEnum : return "SmbTemperaturesReconstructedYears";
 		case SmbPrecipitationsReconstructedYearsEnum : return "SmbPrecipitationsReconstructedYears";
+		case SmbWindspeedgradEnum : return "SmbWindspeedgrad";
 		case SmoothThicknessMultiplierEnum : return "SmoothThicknessMultiplier";
 		case SolutionTypeEnum : return "SolutionType";
@@ -668,6 +732,8 @@
 		case BasalforcingsDeepwaterMeltingRateValuesMovingaverageEnum : return "BasalforcingsDeepwaterMeltingRateValuesMovingaverage";
 		case BasalforcingsFloatingiceMeltingRateEnum : return "BasalforcingsFloatingiceMeltingRate";
+		case BasalforcingsFloatingiceMeltingRateObsEnum : return "BasalforcingsFloatingiceMeltingRateObs";
 		case BasalforcingsGeothermalfluxEnum : return "BasalforcingsGeothermalflux";
 		case BasalforcingsGroundediceMeltingRateEnum : return "BasalforcingsGroundediceMeltingRate";
+		case BasalforcingsGroundediceMeltingRateObsEnum : return "BasalforcingsGroundediceMeltingRateObs";
 		case BasalforcingsLinearBasinIdEnum : return "BasalforcingsLinearBasinId";
 		case BasalforcingsPerturbationMeltingRateEnum : return "BasalforcingsPerturbationMeltingRate";
@@ -711,4 +777,5 @@
 		case BottomPressureEnum : return "BottomPressure";
 		case BottomPressureOldEnum : return "BottomPressureOld";
+		case CalvingBasinIdEnum : return "CalvingBasinId";
 		case CalvingCalvingrateEnum : return "CalvingCalvingrate";
 		case CalvingHabFractionEnum : return "CalvingHabFraction";
@@ -781,4 +848,5 @@
 		case EtaDiffEnum : return "EtaDiff";
 		case FlowequationBorderFSEnum : return "FlowequationBorderFS";
+		case FrictionAlpha2Enum : return "FrictionAlpha2";
 		case FrictionAsEnum : return "FrictionAs";
 		case FrictionCEnum : return "FrictionC";
@@ -786,6 +854,6 @@
 		case FrictionCoefficientEnum : return "FrictionCoefficient";
 		case FrictionCoefficientcoulombEnum : return "FrictionCoefficientcoulomb";
-		case FrictionCoulombWaterPressureEnum : return "FrictionCoulombWaterPressure";
 		case FrictionEffectivePressureEnum : return "FrictionEffectivePressure";
+		case FrictionKEnum : return "FrictionK";
 		case FrictionMEnum : return "FrictionM";
 		case FrictionPEnum : return "FrictionP";
@@ -794,9 +862,10 @@
 		case FrictionSedimentCompressibilityCoefficientEnum : return "FrictionSedimentCompressibilityCoefficient";
 		case FrictionTillFrictionAngleEnum : return "FrictionTillFrictionAngle";
-		case FrictionSchoofWaterPressureEnum : return "FrictionSchoofWaterPressure";
 		case FrictionWaterLayerEnum : return "FrictionWaterLayer";
 		case FrictionWaterPressureEnum : return "FrictionWaterPressure";
+		case FrictionWaterPressureNoiseEnum : return "FrictionWaterPressureNoise";
 		case FrictionfEnum : return "Frictionf";
 		case FrontalForcingsBasinIdEnum : return "FrontalForcingsBasinId";
+		case FrontalForcingsSubglacialDischargearmaEnum : return "FrontalForcingsSubglacialDischargearma";
 		case FrontalForcingsSubglacialDischargeEnum : return "FrontalForcingsSubglacialDischarge";
 		case GeometryHydrostaticRatioEnum : return "GeometryHydrostaticRatio";
@@ -810,6 +879,8 @@
 		case HydraulicPotentialOldEnum : return "HydraulicPotentialOld";
 		case HydrologyBasalFluxEnum : return "HydrologyBasalFlux";
+		case HydrologyBasinsIdEnum : return "HydrologyBasinsId";
 		case HydrologyBumpHeightEnum : return "HydrologyBumpHeight";
 		case HydrologyBumpSpacingEnum : return "HydrologyBumpSpacing";
+		case HydrologyChannelConductivityEnum : return "HydrologyChannelConductivity";
 		case HydrologydcBasalMoulinInputEnum : return "HydrologydcBasalMoulinInput";
 		case HydrologydcEplThicknessEnum : return "HydrologydcEplThickness";
@@ -844,4 +915,5 @@
 		case HydrologyWaterVyEnum : return "HydrologyWaterVy";
 		case HydrologyMaskNodeActivationEnum : return "HydrologyMaskNodeActivation";
+		case DebrisMaskNodeActivationEnum : return "DebrisMaskNodeActivation";
 		case IceEnum : return "Ice";
 		case IceMaskNodeActivationEnum : return "IceMaskNodeActivation";
@@ -984,4 +1056,8 @@
 		case SmbAccumulatedRefreezeEnum : return "SmbAccumulatedRefreeze";
 		case SmbAccumulatedRunoffEnum : return "SmbAccumulatedRunoff";
+		case SmbAlbedoEnum : return "SmbAlbedo";
+		case SmbAlbedoInitEnum : return "SmbAlbedoInit";
+		case SmbAlbedoSnowEnum : return "SmbAlbedoSnow";
+		case SmbAlbedoSnowInitEnum : return "SmbAlbedoSnowInit";
 		case SmbAEnum : return "SmbA";
 		case SmbAdiffEnum : return "SmbAdiff";
@@ -1031,10 +1107,18 @@
 		case SmbGspEnum : return "SmbGsp";
 		case SmbGspiniEnum : return "SmbGspini";
+		case SmbHIceEnum : return "SmbHIce";
+		case SmbHIceInitEnum : return "SmbHIceInit";
+		case SmbHSnowEnum : return "SmbHSnow";
+		case SmbHSnowInitEnum : return "SmbHSnowInit";
 		case SmbHrefEnum : return "SmbHref";
 		case SmbIsInitializedEnum : return "SmbIsInitialized";
 		case SmbMAddEnum : return "SmbMAdd";
 		case SmbMassBalanceEnum : return "SmbMassBalance";
+		case SmbMassBalanceSnowEnum : return "SmbMassBalanceSnow";
+		case SmbMassBalanceIceEnum : return "SmbMassBalanceIce";
+		case SmbMassBalanceSemicEnum : return "SmbMassBalanceSemic";
 		case SmbMassBalanceSubstepEnum : return "SmbMassBalanceSubstep";
 		case SmbMassBalanceTransientEnum : return "SmbMassBalanceTransient";
+		case SmbMaskEnum : return "SmbMask";
 		case SmbMeanLHFEnum : return "SmbMeanLHF";
 		case SmbMeanSHFEnum : return "SmbMeanSHF";
@@ -1042,4 +1126,8 @@
 		case SmbMeltEnum : return "SmbMelt";
 		case SmbMonthlytemperaturesEnum : return "SmbMonthlytemperatures";
+		case SmbMonthlydsradiationEnum : return "SmbMonthlydsradiation";
+		case SmbMonthlydlradiationEnum : return "SmbMonthlydlradiation";
+		case SmbMonthlywindspeedEnum : return "SmbMonthlywindspeed";
+		case SmbMonthlyairhumidityEnum : return "SmbMonthlyairhumidity";
 		case SmbMSurfEnum : return "SmbMSurf";
 		case SmbNetLWEnum : return "SmbNetLW";
@@ -1051,4 +1139,8 @@
 		case SmbPrecipitationEnum : return "SmbPrecipitation";
 		case SmbPrecipitationsAnomalyEnum : return "SmbPrecipitationsAnomaly";
+		case SmbDsradiationAnomalyEnum : return "SmbDsradiationAnomaly";
+		case SmbDlradiationAnomalyEnum : return "SmbDlradiationAnomaly";
+		case SmbWindspeedAnomalyEnum : return "SmbWindspeedAnomaly";
+		case SmbAirhumidityAnomalyEnum : return "SmbAirhumidityAnomaly";
 		case SmbPrecipitationsLgmEnum : return "SmbPrecipitationsLgm";
 		case SmbPrecipitationsPresentdayEnum : return "SmbPrecipitationsPresentday";
@@ -1064,10 +1156,16 @@
 		case SmbS0pEnum : return "SmbS0p";
 		case SmbS0tEnum : return "SmbS0t";
+		case SmbSemicQmrEnum : return "SmbSemicQmr";
+		case SmbSemicQmrInitEnum : return "SmbSemicQmrInit";
 		case SmbSizeiniEnum : return "SmbSizeini";
 		case SmbSmbCorrEnum : return "SmbSmbCorr";
 		case SmbSmbrefEnum : return "SmbSmbref";
 		case SmbSzaValueEnum : return "SmbSzaValue";
+		case SmbSummerMeltEnum : return "SmbSummerMelt";
+		case SmbSummerAlbedoEnum : return "SmbSummerAlbedo";
+		case SmbSnowheightEnum : return "SmbSnowheight";
 		case SmbTEnum : return "SmbT";
 		case SmbTaEnum : return "SmbTa";
+		case SmbTampEnum : return "SmbTamp";
 		case SmbTeValueEnum : return "SmbTeValue";
 		case SmbTemperaturesAnomalyEnum : return "SmbTemperaturesAnomaly";
@@ -1111,4 +1209,7 @@
 		case StressTensoryzEnum : return "StressTensoryz";
 		case StressTensorzzEnum : return "StressTensorzz";
+		case SubglacialdischargeARMANoiseEnum : return "SubglacialdischargeARMANoise";
+		case SubglacialdischargeValuesAutoregressionEnum : return "SubglacialdischargeValuesAutoregression";
+		case SubglacialdischargeValuesMovingaverageEnum : return "SubglacialdischargeValuesMovingaverage";
 		case SurfaceAbsMisfitEnum : return "SurfaceAbsMisfit";
 		case SurfaceAbsVelMisfitEnum : return "SurfaceAbsVelMisfit";
@@ -1172,5 +1273,11 @@
 		case WaterfractionEnum : return "Waterfraction";
 		case WaterheightEnum : return "Waterheight";
+		case WaterPressureArmaPerturbationEnum : return "WaterPressureArmaPerturbation";
+		case WaterPressureValuesAutoregressionEnum : return "WaterPressureValuesAutoregression";
+		case WaterPressureValuesMovingaverageEnum : return "WaterPressureValuesMovingaverage";
 		case WeightsLevelsetObservationEnum : return "WeightsLevelsetObservation";
+		case WeightsMeltObservationEnum : return "WeightsMeltObservation";
+		case WeightsVxObservationEnum : return "WeightsVxObservation";
+		case WeightsVyObservationEnum : return "WeightsVyObservation";
 		case WeightsSurfaceObservationEnum : return "WeightsSurfaceObservation";
 		case OldAccumulatedDeltaBottomPressureEnum : return "OldAccumulatedDeltaBottomPressure";
@@ -1315,9 +1422,15 @@
 		case CalvingTestEnum : return "CalvingTest";
 		case CalvingParameterizationEnum : return "CalvingParameterization";
+		case CalvingCalvingMIPEnum : return "CalvingCalvingMIP";
 		case CalvingVonmisesEnum : return "CalvingVonmises";
+		case CalvingVonmisesADEnum : return "CalvingVonmisesAD";
 		case CalvingPollardEnum : return "CalvingPollard";
 		case CfdragcoeffabsgradEnum : return "Cfdragcoeffabsgrad";
+		case CfdragcoeffabsgradtransientEnum : return "Cfdragcoeffabsgradtransient";
+		case CfrheologybbarabsgradEnum : return "Cfrheologybbarabsgrad";
+		case CfrheologybbarabsgradtransientEnum : return "Cfrheologybbarabsgradtransient";
 		case CfsurfacelogvelEnum : return "Cfsurfacelogvel";
 		case CfsurfacesquareEnum : return "Cfsurfacesquare";
+		case CfsurfacesquaretransientEnum : return "Cfsurfacesquaretransient";
 		case CflevelsetmisfitEnum : return "Cflevelsetmisfit";
 		case ChannelEnum : return "Channel";
@@ -1336,4 +1449,5 @@
 		case ControlInputMinsEnum : return "ControlInputMins";
 		case ControlInputValuesEnum : return "ControlInputValues";
+		case ControlParamEnum : return "ControlParam";
 		case CrouzeixRaviartEnum : return "CrouzeixRaviart";
 		case CuffeyEnum : return "Cuffey";
@@ -1413,4 +1527,6 @@
 		case HOFSApproximationEnum : return "HOFSApproximation";
 		case HookEnum : return "Hook";
+		case HydrologyArmapwAnalysisEnum : return "HydrologyArmapwAnalysis";
+		case HydrologyarmapwEnum : return "Hydrologyarmapw";
 		case HydrologyDCEfficientAnalysisEnum : return "HydrologyDCEfficientAnalysis";
 		case HydrologyDCInefficientAnalysisEnum : return "HydrologyDCInefficientAnalysis";
@@ -1570,5 +1686,5 @@
 		case SMBarmaEnum : return "SMBarma";
 		case SMBcomponentsEnum : return "SMBcomponents";
-		case SMBdebrisMLEnum : return "SMBdebrisML";
+		case SMBdebrisEvattEnum : return "SMBdebrisEvatt";
 		case SMBd18opddEnum : return "SMBd18opdd";
 		case SMBforcingEnum : return "SMBforcing";
@@ -1642,4 +1758,6 @@
 		case TotalSmbEnum : return "TotalSmb";
 		case TotalSmbScaledEnum : return "TotalSmbScaled";
+		case TotalSmbRefreezeEnum : return "TotalSmbRefreeze";
+		case TotalSmbMeltEnum : return "TotalSmbMelt";
 		case TransientArrayParamEnum : return "TransientArrayParam";
 		case TransientInputEnum : return "TransientInput";
Index: /issm/trunk/src/c/shared/Enum/Enumjl.vim
===================================================================
--- /issm/trunk/src/c/shared/Enum/Enumjl.vim	(revision 28012)
+++ /issm/trunk/src/c/shared/Enum/Enumjl.vim	(revision 28013)
@@ -107,4 +107,6 @@
 syn keyword juliaConstC BasalforcingsUpperwaterElevationEnum
 syn keyword juliaConstC BasalforcingsUpperwaterMeltingRateEnum
+syn keyword juliaConstC CalvingADStressThresholdFloatingiceEnum
+syn keyword juliaConstC CalvingADStressThresholdGroundediceEnum
 syn keyword juliaConstC CalvingCrevasseDepthEnum
 syn keyword juliaConstC CalvingCrevasseThresholdEnum
@@ -124,4 +126,5 @@
 syn keyword juliaConstC CalvingVelUpperboundEnum
 syn keyword juliaConstC CalvingRcEnum
+syn keyword juliaConstC CalvingNumberofBasinsEnum
 syn keyword juliaConstC ConfigurationTypeEnum
 syn keyword juliaConstC ConstantsGEnum
@@ -202,5 +205,7 @@
 syn keyword juliaConstC FrictionGammaEnum
 syn keyword juliaConstC FrictionLawEnum
+syn keyword juliaConstC FrictionLinearizeEnum
 syn keyword juliaConstC FrictionPseudoplasticityExponentEnum
+syn keyword juliaConstC FrictionU0Enum
 syn keyword juliaConstC FrictionThresholdSpeedEnum
 syn keyword juliaConstC FrictionVoidRatioEnum
@@ -214,4 +219,5 @@
 syn keyword juliaConstC FrontalForcingsARMAmonthtrendsEnum
 syn keyword juliaConstC FrontalForcingsARMApolyparamsEnum
+syn keyword juliaConstC FrontalForcingsIsDischargeARMAEnum
 syn keyword juliaConstC FrontalForcingsNumberofBasinsEnum
 syn keyword juliaConstC FrontalForcingsNumberofBreaksEnum
@@ -221,4 +227,14 @@
 syn keyword juliaConstC FrontalForcingsARMAarlagcoefsEnum
 syn keyword juliaConstC FrontalForcingsARMAmalagcoefsEnum
+syn keyword juliaConstC FrontalForcingsSdarlagcoefsEnum
+syn keyword juliaConstC FrontalForcingsSdARMATimestepEnum
+syn keyword juliaConstC FrontalForcingsSdarOrderEnum
+syn keyword juliaConstC FrontalForcingsSddatebreaksEnum
+syn keyword juliaConstC FrontalForcingsSdmalagcoefsEnum
+syn keyword juliaConstC FrontalForcingsSdmaOrderEnum
+syn keyword juliaConstC FrontalForcingsSdMonthlyFracEnum
+syn keyword juliaConstC FrontalForcingsSdNumberofBreaksEnum
+syn keyword juliaConstC FrontalForcingsSdNumberofParamsEnum
+syn keyword juliaConstC FrontalForcingsSdpolyparamsEnum
 syn keyword juliaConstC GrdModelEnum
 syn keyword juliaConstC GroundinglineFrictionInterpolationEnum
@@ -227,17 +243,34 @@
 syn keyword juliaConstC GroundinglineNumRequestedOutputsEnum
 syn keyword juliaConstC GroundinglineRequestedOutputsEnum
+syn keyword juliaConstC HydrologyarmaarOrderEnum
+syn keyword juliaConstC HydrologyarmaarlagcoefsEnum
+syn keyword juliaConstC HydrologyarmadatebreaksEnum
+syn keyword juliaConstC HydrologyarmamalagcoefsEnum
+syn keyword juliaConstC HydrologyarmamaOrderEnum
+syn keyword juliaConstC HydrologyarmaMonthlyFactorsEnum
+syn keyword juliaConstC HydrologyarmaNumBreaksEnum
+syn keyword juliaConstC HydrologyarmaNumParamsEnum
+syn keyword juliaConstC HydrologyarmapolyparamsEnum
+syn keyword juliaConstC HydrologyarmaTimestepEnum
 syn keyword juliaConstC HydrologyAveragingEnum
+syn keyword juliaConstC HydrologyChannelAlphaEnum
+syn keyword juliaConstC HydrologyChannelBetaEnum
 syn keyword juliaConstC HydrologyCavitySpacingEnum
-syn keyword juliaConstC HydrologyChannelConductivityEnum
 syn keyword juliaConstC HydrologyChannelSheetWidthEnum
 syn keyword juliaConstC HydrologyEnglacialVoidRatioEnum
 syn keyword juliaConstC HydrologyIschannelsEnum
+syn keyword juliaConstC HydrologyIsTransitionEnum
+syn keyword juliaConstC HydrologyIsWaterPressureArmaEnum
 syn keyword juliaConstC HydrologyMeltFlagEnum
 syn keyword juliaConstC HydrologyModelEnum
+syn keyword juliaConstC HydrologyNumBasinsEnum
 syn keyword juliaConstC HydrologyNumRequestedOutputsEnum
+syn keyword juliaConstC HydrologyOmegaEnum
 syn keyword juliaConstC HydrologyPressureMeltCoefficientEnum
 syn keyword juliaConstC HydrologyRelaxationEnum
 syn keyword juliaConstC HydrologyRequestedOutputsEnum
 syn keyword juliaConstC HydrologySedimentKmaxEnum
+syn keyword juliaConstC HydrologySheetAlphaEnum
+syn keyword juliaConstC HydrologySheetBetaEnum
 syn keyword juliaConstC HydrologyStepsPerStepEnum
 syn keyword juliaConstC HydrologyStorageEnum
@@ -281,4 +314,5 @@
 syn keyword juliaConstC InversionCostFunctionsEnum
 syn keyword juliaConstC InversionDxminEnum
+syn keyword juliaConstC InversionDfminFracEnum
 syn keyword juliaConstC InversionGatolEnum
 syn keyword juliaConstC InversionGradientScalingEnum
@@ -294,4 +328,5 @@
 syn keyword juliaConstC InversionNumCostFunctionsEnum
 syn keyword juliaConstC InversionStepThresholdEnum
+syn keyword juliaConstC InversionStopFlagEnum
 syn keyword juliaConstC InversionTypeEnum
 syn keyword juliaConstC IvinsEnum
@@ -471,5 +506,7 @@
 syn keyword juliaConstC StochasticForcingNoisetermsEnum
 syn keyword juliaConstC StochasticForcingNumFieldsEnum
+syn keyword juliaConstC StochasticForcingNumTimesCovarianceEnum
 syn keyword juliaConstC StochasticForcingRandomflagEnum
+syn keyword juliaConstC StochasticForcingTimeCovarianceEnum
 syn keyword juliaConstC StochasticForcingTimestepEnum
 syn keyword juliaConstC SolidearthSettingsReltolEnum
@@ -492,9 +529,25 @@
 syn keyword juliaConstC SmbAccurefEnum
 syn keyword juliaConstC SmbAdThreshEnum
+syn keyword juliaConstC SmbAlbedoSchemeEnum
+syn keyword juliaConstC SmbAlbedoSnowMaxEnum
+syn keyword juliaConstC SmbAlbedoSnowMinEnum
+syn keyword juliaConstC SmbAlbedoIceEnum
+syn keyword juliaConstC SmbAlbedoLandEnum
 syn keyword juliaConstC SmbARMATimestepEnum
 syn keyword juliaConstC SmbARMAarOrderEnum
+syn keyword juliaConstC SmbARMAarlagcoefsEnum
+syn keyword juliaConstC SmbARMAdatebreaksEnum
 syn keyword juliaConstC SmbARMAmaOrderEnum
+syn keyword juliaConstC SmbARMAmalagcoefsEnum
+syn keyword juliaConstC SmbARMApolyparamsEnum
 syn keyword juliaConstC SmbAveragingEnum
+syn keyword juliaConstC SmbDebrisalbedoEnum
+syn keyword juliaConstC SmbIcealbedoEnum
+syn keyword juliaConstC SmbSnowalbedoEnum
+syn keyword juliaConstC SmbDebrisIsAndersonEnum
+syn keyword juliaConstC SmbDebrisIsCryokarstEnum
+syn keyword juliaConstC SmbDebrisAndersonD0Enum
 syn keyword juliaConstC SmbDesfacEnum
+syn keyword juliaConstC SmbDesfacElevEnum
 syn keyword juliaConstC SmbDpermilEnum
 syn keyword juliaConstC SmbDsnowIdxEnum
@@ -508,4 +561,5 @@
 syn keyword juliaConstC SmbEIdxEnum
 syn keyword juliaConstC SmbFEnum
+syn keyword juliaConstC SmbHumiditygradEnum
 syn keyword juliaConstC SmbInitDensityScalingEnum
 syn keyword juliaConstC SmbIsaccumulationEnum
@@ -528,4 +582,5 @@
 syn keyword juliaConstC SmbKEnum
 syn keyword juliaConstC SmbLapseRatesEnum
+syn keyword juliaConstC SmbLWgradEnum
 syn keyword juliaConstC SmbNumBasinsEnum
 syn keyword juliaConstC SmbNumBreaksEnum
@@ -534,8 +589,4 @@
 syn keyword juliaConstC SmbNumRequestedOutputsEnum
 syn keyword juliaConstC SmbPfacEnum
-syn keyword juliaConstC SmbARMAarlagcoefsEnum
-syn keyword juliaConstC SmbARMAdatebreaksEnum
-syn keyword juliaConstC SmbARMAmalagcoefsEnum
-syn keyword juliaConstC SmbARMApolyparamsEnum
 syn keyword juliaConstC SmbRdlEnum
 syn keyword juliaConstC SmbRefElevationEnum
@@ -547,6 +598,18 @@
 syn keyword juliaConstC SmbRunoffrefEnum
 syn keyword juliaConstC SmbSealevEnum
+syn keyword juliaConstC SmbSemicMethodEnum
+syn keyword juliaConstC SmbSemicHcritEnum
+syn keyword juliaConstC SmbSemicRcritEnum
+syn keyword juliaConstC SmbSemicWcritEnum
+syn keyword juliaConstC SmbSemicMcritEnum
+syn keyword juliaConstC SmbSemicAfacEnum
+syn keyword juliaConstC SmbSemicTauAEnum
+syn keyword juliaConstC SmbSemicTauFEnum
+syn keyword juliaConstC SmbSemicTminEnum
+syn keyword juliaConstC SmbSemicTmidEnum
+syn keyword juliaConstC SmbSemicTmaxEnum
 syn keyword juliaConstC SmbStepsPerStepEnum
 syn keyword juliaConstC SmbSwIdxEnum
+syn keyword juliaConstC SmbSWgradEnum
 syn keyword juliaConstC SmbT0dryEnum
 syn keyword juliaConstC SmbT0wetEnum
@@ -557,4 +620,5 @@
 syn keyword juliaConstC SmbTemperaturesReconstructedYearsEnum
 syn keyword juliaConstC SmbPrecipitationsReconstructedYearsEnum
+syn keyword juliaConstC SmbWindspeedgradEnum
 syn keyword juliaConstC SmoothThicknessMultiplierEnum
 syn keyword juliaConstC SolutionTypeEnum
@@ -659,6 +723,8 @@
 syn keyword juliaConstC BasalforcingsDeepwaterMeltingRateValuesMovingaverageEnum
 syn keyword juliaConstC BasalforcingsFloatingiceMeltingRateEnum
+syn keyword juliaConstC BasalforcingsFloatingiceMeltingRateObsEnum
 syn keyword juliaConstC BasalforcingsGeothermalfluxEnum
 syn keyword juliaConstC BasalforcingsGroundediceMeltingRateEnum
+syn keyword juliaConstC BasalforcingsGroundediceMeltingRateObsEnum
 syn keyword juliaConstC BasalforcingsLinearBasinIdEnum
 syn keyword juliaConstC BasalforcingsPerturbationMeltingRateEnum
@@ -702,4 +768,5 @@
 syn keyword juliaConstC BottomPressureEnum
 syn keyword juliaConstC BottomPressureOldEnum
+syn keyword juliaConstC CalvingBasinIdEnum
 syn keyword juliaConstC CalvingCalvingrateEnum
 syn keyword juliaConstC CalvingHabFractionEnum
@@ -772,4 +839,5 @@
 syn keyword juliaConstC EtaDiffEnum
 syn keyword juliaConstC FlowequationBorderFSEnum
+syn keyword juliaConstC FrictionAlpha2Enum
 syn keyword juliaConstC FrictionAsEnum
 syn keyword juliaConstC FrictionCEnum
@@ -777,6 +845,6 @@
 syn keyword juliaConstC FrictionCoefficientEnum
 syn keyword juliaConstC FrictionCoefficientcoulombEnum
-syn keyword juliaConstC FrictionCoulombWaterPressureEnum
 syn keyword juliaConstC FrictionEffectivePressureEnum
+syn keyword juliaConstC FrictionKEnum
 syn keyword juliaConstC FrictionMEnum
 syn keyword juliaConstC FrictionPEnum
@@ -785,9 +853,10 @@
 syn keyword juliaConstC FrictionSedimentCompressibilityCoefficientEnum
 syn keyword juliaConstC FrictionTillFrictionAngleEnum
-syn keyword juliaConstC FrictionSchoofWaterPressureEnum
 syn keyword juliaConstC FrictionWaterLayerEnum
 syn keyword juliaConstC FrictionWaterPressureEnum
+syn keyword juliaConstC FrictionWaterPressureNoiseEnum
 syn keyword juliaConstC FrictionfEnum
 syn keyword juliaConstC FrontalForcingsBasinIdEnum
+syn keyword juliaConstC FrontalForcingsSubglacialDischargearmaEnum
 syn keyword juliaConstC FrontalForcingsSubglacialDischargeEnum
 syn keyword juliaConstC GeometryHydrostaticRatioEnum
@@ -801,6 +870,8 @@
 syn keyword juliaConstC HydraulicPotentialOldEnum
 syn keyword juliaConstC HydrologyBasalFluxEnum
+syn keyword juliaConstC HydrologyBasinsIdEnum
 syn keyword juliaConstC HydrologyBumpHeightEnum
 syn keyword juliaConstC HydrologyBumpSpacingEnum
+syn keyword juliaConstC HydrologyChannelConductivityEnum
 syn keyword juliaConstC HydrologydcBasalMoulinInputEnum
 syn keyword juliaConstC HydrologydcEplThicknessEnum
@@ -835,4 +906,5 @@
 syn keyword juliaConstC HydrologyWaterVyEnum
 syn keyword juliaConstC HydrologyMaskNodeActivationEnum
+syn keyword juliaConstC DebrisMaskNodeActivationEnum
 syn keyword juliaConstC IceEnum
 syn keyword juliaConstC IceMaskNodeActivationEnum
@@ -975,4 +1047,8 @@
 syn keyword juliaConstC SmbAccumulatedRefreezeEnum
 syn keyword juliaConstC SmbAccumulatedRunoffEnum
+syn keyword juliaConstC SmbAlbedoEnum
+syn keyword juliaConstC SmbAlbedoInitEnum
+syn keyword juliaConstC SmbAlbedoSnowEnum
+syn keyword juliaConstC SmbAlbedoSnowInitEnum
 syn keyword juliaConstC SmbAEnum
 syn keyword juliaConstC SmbAdiffEnum
@@ -1022,10 +1098,18 @@
 syn keyword juliaConstC SmbGspEnum
 syn keyword juliaConstC SmbGspiniEnum
+syn keyword juliaConstC SmbHIceEnum
+syn keyword juliaConstC SmbHIceInitEnum
+syn keyword juliaConstC SmbHSnowEnum
+syn keyword juliaConstC SmbHSnowInitEnum
 syn keyword juliaConstC SmbHrefEnum
 syn keyword juliaConstC SmbIsInitializedEnum
 syn keyword juliaConstC SmbMAddEnum
 syn keyword juliaConstC SmbMassBalanceEnum
+syn keyword juliaConstC SmbMassBalanceSnowEnum
+syn keyword juliaConstC SmbMassBalanceIceEnum
+syn keyword juliaConstC SmbMassBalanceSemicEnum
 syn keyword juliaConstC SmbMassBalanceSubstepEnum
 syn keyword juliaConstC SmbMassBalanceTransientEnum
+syn keyword juliaConstC SmbMaskEnum
 syn keyword juliaConstC SmbMeanLHFEnum
 syn keyword juliaConstC SmbMeanSHFEnum
@@ -1033,4 +1117,8 @@
 syn keyword juliaConstC SmbMeltEnum
 syn keyword juliaConstC SmbMonthlytemperaturesEnum
+syn keyword juliaConstC SmbMonthlydsradiationEnum
+syn keyword juliaConstC SmbMonthlydlradiationEnum
+syn keyword juliaConstC SmbMonthlywindspeedEnum
+syn keyword juliaConstC SmbMonthlyairhumidityEnum
 syn keyword juliaConstC SmbMSurfEnum
 syn keyword juliaConstC SmbNetLWEnum
@@ -1042,4 +1130,8 @@
 syn keyword juliaConstC SmbPrecipitationEnum
 syn keyword juliaConstC SmbPrecipitationsAnomalyEnum
+syn keyword juliaConstC SmbDsradiationAnomalyEnum
+syn keyword juliaConstC SmbDlradiationAnomalyEnum
+syn keyword juliaConstC SmbWindspeedAnomalyEnum
+syn keyword juliaConstC SmbAirhumidityAnomalyEnum
 syn keyword juliaConstC SmbPrecipitationsLgmEnum
 syn keyword juliaConstC SmbPrecipitationsPresentdayEnum
@@ -1055,10 +1147,16 @@
 syn keyword juliaConstC SmbS0pEnum
 syn keyword juliaConstC SmbS0tEnum
+syn keyword juliaConstC SmbSemicQmrEnum
+syn keyword juliaConstC SmbSemicQmrInitEnum
 syn keyword juliaConstC SmbSizeiniEnum
 syn keyword juliaConstC SmbSmbCorrEnum
 syn keyword juliaConstC SmbSmbrefEnum
 syn keyword juliaConstC SmbSzaValueEnum
+syn keyword juliaConstC SmbSummerMeltEnum
+syn keyword juliaConstC SmbSummerAlbedoEnum
+syn keyword juliaConstC SmbSnowheightEnum
 syn keyword juliaConstC SmbTEnum
 syn keyword juliaConstC SmbTaEnum
+syn keyword juliaConstC SmbTampEnum
 syn keyword juliaConstC SmbTeValueEnum
 syn keyword juliaConstC SmbTemperaturesAnomalyEnum
@@ -1102,4 +1200,7 @@
 syn keyword juliaConstC StressTensoryzEnum
 syn keyword juliaConstC StressTensorzzEnum
+syn keyword juliaConstC SubglacialdischargeARMANoiseEnum
+syn keyword juliaConstC SubglacialdischargeValuesAutoregressionEnum
+syn keyword juliaConstC SubglacialdischargeValuesMovingaverageEnum
 syn keyword juliaConstC SurfaceAbsMisfitEnum
 syn keyword juliaConstC SurfaceAbsVelMisfitEnum
@@ -1163,5 +1264,11 @@
 syn keyword juliaConstC WaterfractionEnum
 syn keyword juliaConstC WaterheightEnum
+syn keyword juliaConstC WaterPressureArmaPerturbationEnum
+syn keyword juliaConstC WaterPressureValuesAutoregressionEnum
+syn keyword juliaConstC WaterPressureValuesMovingaverageEnum
 syn keyword juliaConstC WeightsLevelsetObservationEnum
+syn keyword juliaConstC WeightsMeltObservationEnum
+syn keyword juliaConstC WeightsVxObservationEnum
+syn keyword juliaConstC WeightsVyObservationEnum
 syn keyword juliaConstC WeightsSurfaceObservationEnum
 syn keyword juliaConstC OldAccumulatedDeltaBottomPressureEnum
@@ -1306,9 +1413,15 @@
 syn keyword juliaConstC CalvingTestEnum
 syn keyword juliaConstC CalvingParameterizationEnum
+syn keyword juliaConstC CalvingCalvingMIPEnum
 syn keyword juliaConstC CalvingVonmisesEnum
+syn keyword juliaConstC CalvingVonmisesADEnum
 syn keyword juliaConstC CalvingPollardEnum
 syn keyword juliaConstC CfdragcoeffabsgradEnum
+syn keyword juliaConstC CfdragcoeffabsgradtransientEnum
+syn keyword juliaConstC CfrheologybbarabsgradEnum
+syn keyword juliaConstC CfrheologybbarabsgradtransientEnum
 syn keyword juliaConstC CfsurfacelogvelEnum
 syn keyword juliaConstC CfsurfacesquareEnum
+syn keyword juliaConstC CfsurfacesquaretransientEnum
 syn keyword juliaConstC CflevelsetmisfitEnum
 syn keyword juliaConstC ChannelEnum
@@ -1327,4 +1440,5 @@
 syn keyword juliaConstC ControlInputMinsEnum
 syn keyword juliaConstC ControlInputValuesEnum
+syn keyword juliaConstC ControlParamEnum
 syn keyword juliaConstC CrouzeixRaviartEnum
 syn keyword juliaConstC CuffeyEnum
@@ -1404,4 +1518,6 @@
 syn keyword juliaConstC HOFSApproximationEnum
 syn keyword juliaConstC HookEnum
+syn keyword juliaConstC HydrologyArmapwAnalysisEnum
+syn keyword juliaConstC HydrologyarmapwEnum
 syn keyword juliaConstC HydrologyDCEfficientAnalysisEnum
 syn keyword juliaConstC HydrologyDCInefficientAnalysisEnum
@@ -1561,5 +1677,5 @@
 syn keyword juliaConstC SMBarmaEnum
 syn keyword juliaConstC SMBcomponentsEnum
-syn keyword juliaConstC SMBdebrisMLEnum
+syn keyword juliaConstC SMBdebrisEvattEnum
 syn keyword juliaConstC SMBd18opddEnum
 syn keyword juliaConstC SMBforcingEnum
@@ -1633,4 +1749,6 @@
 syn keyword juliaConstC TotalSmbEnum
 syn keyword juliaConstC TotalSmbScaledEnum
+syn keyword juliaConstC TotalSmbRefreezeEnum
+syn keyword juliaConstC TotalSmbMeltEnum
 syn keyword juliaConstC TransientArrayParamEnum
 syn keyword juliaConstC TransientInputEnum
Index: /issm/trunk/src/c/shared/Enum/StringToEnumx.cpp
===================================================================
--- /issm/trunk/src/c/shared/Enum/StringToEnumx.cpp	(revision 28012)
+++ /issm/trunk/src/c/shared/Enum/StringToEnumx.cpp	(revision 28013)
@@ -116,4 +116,6 @@
 	      else if (strcmp(name,"BasalforcingsUpperwaterElevation")==0) return BasalforcingsUpperwaterElevationEnum;
 	      else if (strcmp(name,"BasalforcingsUpperwaterMeltingRate")==0) return BasalforcingsUpperwaterMeltingRateEnum;
+	      else if (strcmp(name,"CalvingADStressThresholdFloatingice")==0) return CalvingADStressThresholdFloatingiceEnum;
+	      else if (strcmp(name,"CalvingADStressThresholdGroundedice")==0) return CalvingADStressThresholdGroundediceEnum;
 	      else if (strcmp(name,"CalvingCrevasseDepth")==0) return CalvingCrevasseDepthEnum;
 	      else if (strcmp(name,"CalvingCrevasseThreshold")==0) return CalvingCrevasseThresholdEnum;
@@ -133,12 +135,13 @@
 	      else if (strcmp(name,"CalvingVelUpperbound")==0) return CalvingVelUpperboundEnum;
 	      else if (strcmp(name,"CalvingRc")==0) return CalvingRcEnum;
+	      else if (strcmp(name,"CalvingNumberofBasins")==0) return CalvingNumberofBasinsEnum;
 	      else if (strcmp(name,"ConfigurationType")==0) return ConfigurationTypeEnum;
-	      else if (strcmp(name,"ConstantsG")==0) return ConstantsGEnum;
-	      else if (strcmp(name,"ConstantsNewtonGravity")==0) return ConstantsNewtonGravityEnum;
-	      else if (strcmp(name,"ConstantsReferencetemperature")==0) return ConstantsReferencetemperatureEnum;
          else stage=2;
    }
    if(stage==2){
-	      if (strcmp(name,"ConstantsYts")==0) return ConstantsYtsEnum;
+	      if (strcmp(name,"ConstantsG")==0) return ConstantsGEnum;
+	      else if (strcmp(name,"ConstantsNewtonGravity")==0) return ConstantsNewtonGravityEnum;
+	      else if (strcmp(name,"ConstantsReferencetemperature")==0) return ConstantsReferencetemperatureEnum;
+	      else if (strcmp(name,"ConstantsYts")==0) return ConstantsYtsEnum;
 	      else if (strcmp(name,"ControlInputSizeM")==0) return ControlInputSizeMEnum;
 	      else if (strcmp(name,"ControlInputSizeN")==0) return ControlInputSizeNEnum;
@@ -214,5 +217,7 @@
 	      else if (strcmp(name,"FrictionGamma")==0) return FrictionGammaEnum;
 	      else if (strcmp(name,"FrictionLaw")==0) return FrictionLawEnum;
+	      else if (strcmp(name,"FrictionLinearize")==0) return FrictionLinearizeEnum;
 	      else if (strcmp(name,"FrictionPseudoplasticityExponent")==0) return FrictionPseudoplasticityExponentEnum;
+	      else if (strcmp(name,"FrictionU0")==0) return FrictionU0Enum;
 	      else if (strcmp(name,"FrictionThresholdSpeed")==0) return FrictionThresholdSpeedEnum;
 	      else if (strcmp(name,"FrictionVoidRatio")==0) return FrictionVoidRatioEnum;
@@ -226,4 +231,5 @@
 	      else if (strcmp(name,"FrontalForcingsARMAmonthtrends")==0) return FrontalForcingsARMAmonthtrendsEnum;
 	      else if (strcmp(name,"FrontalForcingsARMApolyparams")==0) return FrontalForcingsARMApolyparamsEnum;
+	      else if (strcmp(name,"FrontalForcingsIsDischargeARMA")==0) return FrontalForcingsIsDischargeARMAEnum;
 	      else if (strcmp(name,"FrontalForcingsNumberofBasins")==0) return FrontalForcingsNumberofBasinsEnum;
 	      else if (strcmp(name,"FrontalForcingsNumberofBreaks")==0) return FrontalForcingsNumberofBreaksEnum;
@@ -233,4 +239,14 @@
 	      else if (strcmp(name,"FrontalForcingsARMAarlagcoefs")==0) return FrontalForcingsARMAarlagcoefsEnum;
 	      else if (strcmp(name,"FrontalForcingsARMAmalagcoefs")==0) return FrontalForcingsARMAmalagcoefsEnum;
+	      else if (strcmp(name,"FrontalForcingsSdarlagcoefs")==0) return FrontalForcingsSdarlagcoefsEnum;
+	      else if (strcmp(name,"FrontalForcingsSdARMATimestep")==0) return FrontalForcingsSdARMATimestepEnum;
+	      else if (strcmp(name,"FrontalForcingsSdarOrder")==0) return FrontalForcingsSdarOrderEnum;
+	      else if (strcmp(name,"FrontalForcingsSddatebreaks")==0) return FrontalForcingsSddatebreaksEnum;
+	      else if (strcmp(name,"FrontalForcingsSdmalagcoefs")==0) return FrontalForcingsSdmalagcoefsEnum;
+	      else if (strcmp(name,"FrontalForcingsSdmaOrder")==0) return FrontalForcingsSdmaOrderEnum;
+	      else if (strcmp(name,"FrontalForcingsSdMonthlyFrac")==0) return FrontalForcingsSdMonthlyFracEnum;
+	      else if (strcmp(name,"FrontalForcingsSdNumberofBreaks")==0) return FrontalForcingsSdNumberofBreaksEnum;
+	      else if (strcmp(name,"FrontalForcingsSdNumberofParams")==0) return FrontalForcingsSdNumberofParamsEnum;
+	      else if (strcmp(name,"FrontalForcingsSdpolyparams")==0) return FrontalForcingsSdpolyparamsEnum;
 	      else if (strcmp(name,"GrdModel")==0) return GrdModelEnum;
 	      else if (strcmp(name,"GroundinglineFrictionInterpolation")==0) return GroundinglineFrictionInterpolationEnum;
@@ -239,17 +255,37 @@
 	      else if (strcmp(name,"GroundinglineNumRequestedOutputs")==0) return GroundinglineNumRequestedOutputsEnum;
 	      else if (strcmp(name,"GroundinglineRequestedOutputs")==0) return GroundinglineRequestedOutputsEnum;
+	      else if (strcmp(name,"HydrologyarmaarOrder")==0) return HydrologyarmaarOrderEnum;
+	      else if (strcmp(name,"Hydrologyarmaarlagcoefs")==0) return HydrologyarmaarlagcoefsEnum;
+	      else if (strcmp(name,"Hydrologyarmadatebreaks")==0) return HydrologyarmadatebreaksEnum;
+	      else if (strcmp(name,"Hydrologyarmamalagcoefs")==0) return HydrologyarmamalagcoefsEnum;
+	      else if (strcmp(name,"HydrologyarmamaOrder")==0) return HydrologyarmamaOrderEnum;
+         else stage=3;
+   }
+   if(stage==3){
+	      if (strcmp(name,"HydrologyarmaMonthlyFactors")==0) return HydrologyarmaMonthlyFactorsEnum;
+	      else if (strcmp(name,"HydrologyarmaNumBreaks")==0) return HydrologyarmaNumBreaksEnum;
+	      else if (strcmp(name,"HydrologyarmaNumParams")==0) return HydrologyarmaNumParamsEnum;
+	      else if (strcmp(name,"Hydrologyarmapolyparams")==0) return HydrologyarmapolyparamsEnum;
+	      else if (strcmp(name,"HydrologyarmaTimestep")==0) return HydrologyarmaTimestepEnum;
 	      else if (strcmp(name,"HydrologyAveraging")==0) return HydrologyAveragingEnum;
+	      else if (strcmp(name,"HydrologyChannelAlpha")==0) return HydrologyChannelAlphaEnum;
+	      else if (strcmp(name,"HydrologyChannelBeta")==0) return HydrologyChannelBetaEnum;
 	      else if (strcmp(name,"HydrologyCavitySpacing")==0) return HydrologyCavitySpacingEnum;
-	      else if (strcmp(name,"HydrologyChannelConductivity")==0) return HydrologyChannelConductivityEnum;
 	      else if (strcmp(name,"HydrologyChannelSheetWidth")==0) return HydrologyChannelSheetWidthEnum;
 	      else if (strcmp(name,"HydrologyEnglacialVoidRatio")==0) return HydrologyEnglacialVoidRatioEnum;
 	      else if (strcmp(name,"HydrologyIschannels")==0) return HydrologyIschannelsEnum;
+	      else if (strcmp(name,"HydrologyIsTransition")==0) return HydrologyIsTransitionEnum;
+	      else if (strcmp(name,"HydrologyIsWaterPressureArma")==0) return HydrologyIsWaterPressureArmaEnum;
 	      else if (strcmp(name,"HydrologyMeltFlag")==0) return HydrologyMeltFlagEnum;
 	      else if (strcmp(name,"HydrologyModel")==0) return HydrologyModelEnum;
+	      else if (strcmp(name,"HydrologyNumBasins")==0) return HydrologyNumBasinsEnum;
 	      else if (strcmp(name,"HydrologyNumRequestedOutputs")==0) return HydrologyNumRequestedOutputsEnum;
+	      else if (strcmp(name,"HydrologyOmega")==0) return HydrologyOmegaEnum;
 	      else if (strcmp(name,"HydrologyPressureMeltCoefficient")==0) return HydrologyPressureMeltCoefficientEnum;
 	      else if (strcmp(name,"HydrologyRelaxation")==0) return HydrologyRelaxationEnum;
 	      else if (strcmp(name,"HydrologyRequestedOutputs")==0) return HydrologyRequestedOutputsEnum;
 	      else if (strcmp(name,"HydrologySedimentKmax")==0) return HydrologySedimentKmaxEnum;
+	      else if (strcmp(name,"HydrologySheetAlpha")==0) return HydrologySheetAlphaEnum;
+	      else if (strcmp(name,"HydrologySheetBeta")==0) return HydrologySheetBetaEnum;
 	      else if (strcmp(name,"HydrologyStepsPerStep")==0) return HydrologyStepsPerStepEnum;
 	      else if (strcmp(name,"HydrologyStorage")==0) return HydrologyStorageEnum;
@@ -260,8 +296,5 @@
 	      else if (strcmp(name,"HydrologydcEplMaxThickness")==0) return HydrologydcEplMaxThicknessEnum;
 	      else if (strcmp(name,"HydrologydcEplPoreWaterMass")==0) return HydrologydcEplPoreWaterMassEnum;
-         else stage=3;
-   }
-   if(stage==3){
-	      if (strcmp(name,"HydrologydcEplThickComp")==0) return HydrologydcEplThickCompEnum;
+	      else if (strcmp(name,"HydrologydcEplThickComp")==0) return HydrologydcEplThickCompEnum;
 	      else if (strcmp(name,"HydrologydcEplflipLock")==0) return HydrologydcEplflipLockEnum;
 	      else if (strcmp(name,"HydrologydcIsefficientlayer")==0) return HydrologydcIsefficientlayerEnum;
@@ -296,4 +329,5 @@
 	      else if (strcmp(name,"InversionCostFunctions")==0) return InversionCostFunctionsEnum;
 	      else if (strcmp(name,"InversionDxmin")==0) return InversionDxminEnum;
+	      else if (strcmp(name,"InversionDfminFrac")==0) return InversionDfminFracEnum;
 	      else if (strcmp(name,"InversionGatol")==0) return InversionGatolEnum;
 	      else if (strcmp(name,"InversionGradientScaling")==0) return InversionGradientScalingEnum;
@@ -309,4 +343,5 @@
 	      else if (strcmp(name,"InversionNumCostFunctions")==0) return InversionNumCostFunctionsEnum;
 	      else if (strcmp(name,"InversionStepThreshold")==0) return InversionStepThresholdEnum;
+	      else if (strcmp(name,"InversionStopFlag")==0) return InversionStopFlagEnum;
 	      else if (strcmp(name,"InversionType")==0) return InversionTypeEnum;
 	      else if (strcmp(name,"Ivins")==0) return IvinsEnum;
@@ -348,5 +383,8 @@
 	      else if (strcmp(name,"MassFluxSegments")==0) return MassFluxSegmentsEnum;
 	      else if (strcmp(name,"MassFluxSegmentsPresent")==0) return MassFluxSegmentsPresentEnum;
-	      else if (strcmp(name,"MasstransportHydrostaticAdjustment")==0) return MasstransportHydrostaticAdjustmentEnum;
+         else stage=4;
+   }
+   if(stage==4){
+	      if (strcmp(name,"MasstransportHydrostaticAdjustment")==0) return MasstransportHydrostaticAdjustmentEnum;
 	      else if (strcmp(name,"MasstransportIsfreesurface")==0) return MasstransportIsfreesurfaceEnum;
 	      else if (strcmp(name,"MasstransportMinThickness")==0) return MasstransportMinThicknessEnum;
@@ -383,8 +421,5 @@
 	      else if (strcmp(name,"OceanGridNx")==0) return OceanGridNxEnum;
 	      else if (strcmp(name,"OceanGridNy")==0) return OceanGridNyEnum;
-         else stage=4;
-   }
-   if(stage==4){
-	      if (strcmp(name,"OceanGridX")==0) return OceanGridXEnum;
+	      else if (strcmp(name,"OceanGridX")==0) return OceanGridXEnum;
 	      else if (strcmp(name,"OceanGridY")==0) return OceanGridYEnum;
 	      else if (strcmp(name,"OutputBufferPointer")==0) return OutputBufferPointerEnum;
@@ -471,5 +506,8 @@
 	      else if (strcmp(name,"SealevelchangeTidalK2")==0) return SealevelchangeTidalK2Enum;
 	      else if (strcmp(name,"SealevelchangeTidalH2")==0) return SealevelchangeTidalH2Enum;
-	      else if (strcmp(name,"SealevelchangeTidalL2")==0) return SealevelchangeTidalL2Enum;
+         else stage=5;
+   }
+   if(stage==5){
+	      if (strcmp(name,"SealevelchangeTidalL2")==0) return SealevelchangeTidalL2Enum;
 	      else if (strcmp(name,"SolidearthSettingsSealevelLoading")==0) return SolidearthSettingsSealevelLoadingEnum;
 	      else if (strcmp(name,"SolidearthSettingsGRD")==0) return SolidearthSettingsGRDEnum;
@@ -489,5 +527,7 @@
 	      else if (strcmp(name,"StochasticForcingNoiseterms")==0) return StochasticForcingNoisetermsEnum;
 	      else if (strcmp(name,"StochasticForcingNumFields")==0) return StochasticForcingNumFieldsEnum;
+	      else if (strcmp(name,"StochasticForcingNumTimesCovariance")==0) return StochasticForcingNumTimesCovarianceEnum;
 	      else if (strcmp(name,"StochasticForcingRandomflag")==0) return StochasticForcingRandomflagEnum;
+	      else if (strcmp(name,"StochasticForcingTimeCovariance")==0) return StochasticForcingTimeCovarianceEnum;
 	      else if (strcmp(name,"StochasticForcingTimestep")==0) return StochasticForcingTimestepEnum;
 	      else if (strcmp(name,"SolidearthSettingsReltol")==0) return SolidearthSettingsReltolEnum;
@@ -506,16 +546,29 @@
 	      else if (strcmp(name,"SmbAIdx")==0) return SmbAIdxEnum;
 	      else if (strcmp(name,"SmbASnow")==0) return SmbASnowEnum;
-         else stage=5;
-   }
-   if(stage==5){
-	      if (strcmp(name,"SmbAccualti")==0) return SmbAccualtiEnum;
+	      else if (strcmp(name,"SmbAccualti")==0) return SmbAccualtiEnum;
 	      else if (strcmp(name,"SmbAccugrad")==0) return SmbAccugradEnum;
 	      else if (strcmp(name,"SmbAccuref")==0) return SmbAccurefEnum;
 	      else if (strcmp(name,"SmbAdThresh")==0) return SmbAdThreshEnum;
+	      else if (strcmp(name,"SmbAlbedoScheme")==0) return SmbAlbedoSchemeEnum;
+	      else if (strcmp(name,"SmbAlbedoSnowMax")==0) return SmbAlbedoSnowMaxEnum;
+	      else if (strcmp(name,"SmbAlbedoSnowMin")==0) return SmbAlbedoSnowMinEnum;
+	      else if (strcmp(name,"SmbAlbedoIce")==0) return SmbAlbedoIceEnum;
+	      else if (strcmp(name,"SmbAlbedoLand")==0) return SmbAlbedoLandEnum;
 	      else if (strcmp(name,"SmbARMATimestep")==0) return SmbARMATimestepEnum;
 	      else if (strcmp(name,"SmbARMAarOrder")==0) return SmbARMAarOrderEnum;
+	      else if (strcmp(name,"SmbARMAarlagcoefs")==0) return SmbARMAarlagcoefsEnum;
+	      else if (strcmp(name,"SmbARMAdatebreaks")==0) return SmbARMAdatebreaksEnum;
 	      else if (strcmp(name,"SmbARMAmaOrder")==0) return SmbARMAmaOrderEnum;
+	      else if (strcmp(name,"SmbARMAmalagcoefs")==0) return SmbARMAmalagcoefsEnum;
+	      else if (strcmp(name,"SmbARMApolyparams")==0) return SmbARMApolyparamsEnum;
 	      else if (strcmp(name,"SmbAveraging")==0) return SmbAveragingEnum;
+	      else if (strcmp(name,"SmbDebrisalbedo")==0) return SmbDebrisalbedoEnum;
+	      else if (strcmp(name,"SmbIcealbedo")==0) return SmbIcealbedoEnum;
+	      else if (strcmp(name,"SmbSnowalbedo")==0) return SmbSnowalbedoEnum;
+	      else if (strcmp(name,"SmbDebrisIsAnderson")==0) return SmbDebrisIsAndersonEnum;
+	      else if (strcmp(name,"SmbDebrisIsCryokarst")==0) return SmbDebrisIsCryokarstEnum;
+	      else if (strcmp(name,"SmbDebrisAndersonD0")==0) return SmbDebrisAndersonD0Enum;
 	      else if (strcmp(name,"SmbDesfac")==0) return SmbDesfacEnum;
+	      else if (strcmp(name,"SmbDesfacElev")==0) return SmbDesfacElevEnum;
 	      else if (strcmp(name,"SmbDpermil")==0) return SmbDpermilEnum;
 	      else if (strcmp(name,"SmbDsnowIdx")==0) return SmbDsnowIdxEnum;
@@ -529,4 +582,5 @@
 	      else if (strcmp(name,"SmbEIdx")==0) return SmbEIdxEnum;
 	      else if (strcmp(name,"SmbF")==0) return SmbFEnum;
+	      else if (strcmp(name,"SmbHumiditygrad")==0) return SmbHumiditygradEnum;
 	      else if (strcmp(name,"SmbInitDensityScaling")==0) return SmbInitDensityScalingEnum;
 	      else if (strcmp(name,"SmbIsaccumulation")==0) return SmbIsaccumulationEnum;
@@ -549,4 +603,5 @@
 	      else if (strcmp(name,"SmbK")==0) return SmbKEnum;
 	      else if (strcmp(name,"SmbLapseRates")==0) return SmbLapseRatesEnum;
+	      else if (strcmp(name,"SmbLWgrad")==0) return SmbLWgradEnum;
 	      else if (strcmp(name,"SmbNumBasins")==0) return SmbNumBasinsEnum;
 	      else if (strcmp(name,"SmbNumBreaks")==0) return SmbNumBreaksEnum;
@@ -555,8 +610,4 @@
 	      else if (strcmp(name,"SmbNumRequestedOutputs")==0) return SmbNumRequestedOutputsEnum;
 	      else if (strcmp(name,"SmbPfac")==0) return SmbPfacEnum;
-	      else if (strcmp(name,"SmbARMAarlagcoefs")==0) return SmbARMAarlagcoefsEnum;
-	      else if (strcmp(name,"SmbARMAdatebreaks")==0) return SmbARMAdatebreaksEnum;
-	      else if (strcmp(name,"SmbARMAmalagcoefs")==0) return SmbARMAmalagcoefsEnum;
-	      else if (strcmp(name,"SmbARMApolyparams")==0) return SmbARMApolyparamsEnum;
 	      else if (strcmp(name,"SmbRdl")==0) return SmbRdlEnum;
 	      else if (strcmp(name,"SmbRefElevation")==0) return SmbRefElevationEnum;
@@ -568,6 +619,21 @@
 	      else if (strcmp(name,"SmbRunoffref")==0) return SmbRunoffrefEnum;
 	      else if (strcmp(name,"SmbSealev")==0) return SmbSealevEnum;
+	      else if (strcmp(name,"SmbSemicMethod")==0) return SmbSemicMethodEnum;
+	      else if (strcmp(name,"SmbSemicHcrit")==0) return SmbSemicHcritEnum;
+	      else if (strcmp(name,"SmbSemicRcrit")==0) return SmbSemicRcritEnum;
+	      else if (strcmp(name,"SmbSemicWcrit")==0) return SmbSemicWcritEnum;
+	      else if (strcmp(name,"SmbSemicMcrit")==0) return SmbSemicMcritEnum;
+	      else if (strcmp(name,"SmbSemicAfac")==0) return SmbSemicAfacEnum;
+	      else if (strcmp(name,"SmbSemicTauA")==0) return SmbSemicTauAEnum;
+	      else if (strcmp(name,"SmbSemicTauF")==0) return SmbSemicTauFEnum;
+	      else if (strcmp(name,"SmbSemicTmin")==0) return SmbSemicTminEnum;
+	      else if (strcmp(name,"SmbSemicTmid")==0) return SmbSemicTmidEnum;
+         else stage=6;
+   }
+   if(stage==6){
+	      if (strcmp(name,"SmbSemicTmax")==0) return SmbSemicTmaxEnum;
 	      else if (strcmp(name,"SmbStepsPerStep")==0) return SmbStepsPerStepEnum;
 	      else if (strcmp(name,"SmbSwIdx")==0) return SmbSwIdxEnum;
+	      else if (strcmp(name,"SmbSWgrad")==0) return SmbSWgradEnum;
 	      else if (strcmp(name,"SmbT0dry")==0) return SmbT0dryEnum;
 	      else if (strcmp(name,"SmbT0wet")==0) return SmbT0wetEnum;
@@ -578,4 +644,5 @@
 	      else if (strcmp(name,"SmbTemperaturesReconstructedYears")==0) return SmbTemperaturesReconstructedYearsEnum;
 	      else if (strcmp(name,"SmbPrecipitationsReconstructedYears")==0) return SmbPrecipitationsReconstructedYearsEnum;
+	      else if (strcmp(name,"SmbWindspeedgrad")==0) return SmbWindspeedgradEnum;
 	      else if (strcmp(name,"SmoothThicknessMultiplier")==0) return SmoothThicknessMultiplierEnum;
 	      else if (strcmp(name,"SolutionType")==0) return SolutionTypeEnum;
@@ -629,8 +696,5 @@
 	      else if (strcmp(name,"TransientAmrFrequency")==0) return TransientAmrFrequencyEnum;
 	      else if (strcmp(name,"TransientIsage")==0) return TransientIsageEnum;
-         else stage=6;
-   }
-   if(stage==6){
-	      if (strcmp(name,"TransientIsdamageevolution")==0) return TransientIsdamageevolutionEnum;
+	      else if (strcmp(name,"TransientIsdamageevolution")==0) return TransientIsdamageevolutionEnum;
 	      else if (strcmp(name,"TransientIsdebris")==0) return TransientIsdebrisEnum;
 	      else if (strcmp(name,"TransientIsesa")==0) return TransientIsesaEnum;
@@ -683,8 +747,13 @@
 	      else if (strcmp(name,"BasalforcingsDeepwaterMeltingRateValuesMovingaverage")==0) return BasalforcingsDeepwaterMeltingRateValuesMovingaverageEnum;
 	      else if (strcmp(name,"BasalforcingsFloatingiceMeltingRate")==0) return BasalforcingsFloatingiceMeltingRateEnum;
+	      else if (strcmp(name,"BasalforcingsFloatingiceMeltingRateObs")==0) return BasalforcingsFloatingiceMeltingRateObsEnum;
 	      else if (strcmp(name,"BasalforcingsGeothermalflux")==0) return BasalforcingsGeothermalfluxEnum;
 	      else if (strcmp(name,"BasalforcingsGroundediceMeltingRate")==0) return BasalforcingsGroundediceMeltingRateEnum;
+	      else if (strcmp(name,"BasalforcingsGroundediceMeltingRateObs")==0) return BasalforcingsGroundediceMeltingRateObsEnum;
 	      else if (strcmp(name,"BasalforcingsLinearBasinId")==0) return BasalforcingsLinearBasinIdEnum;
-	      else if (strcmp(name,"BasalforcingsPerturbationMeltingRate")==0) return BasalforcingsPerturbationMeltingRateEnum;
+         else stage=7;
+   }
+   if(stage==7){
+	      if (strcmp(name,"BasalforcingsPerturbationMeltingRate")==0) return BasalforcingsPerturbationMeltingRateEnum;
 	      else if (strcmp(name,"BasalforcingsSpatialDeepwaterElevation")==0) return BasalforcingsSpatialDeepwaterElevationEnum;
 	      else if (strcmp(name,"BasalforcingsSpatialDeepwaterMeltingRate")==0) return BasalforcingsSpatialDeepwaterMeltingRateEnum;
@@ -726,4 +795,5 @@
 	      else if (strcmp(name,"BottomPressure")==0) return BottomPressureEnum;
 	      else if (strcmp(name,"BottomPressureOld")==0) return BottomPressureOldEnum;
+	      else if (strcmp(name,"CalvingBasinId")==0) return CalvingBasinIdEnum;
 	      else if (strcmp(name,"CalvingCalvingrate")==0) return CalvingCalvingrateEnum;
 	      else if (strcmp(name,"CalvingHabFraction")==0) return CalvingHabFractionEnum;
@@ -752,8 +822,5 @@
 	      else if (strcmp(name,"DeltaDsl")==0) return DeltaDslEnum;
 	      else if (strcmp(name,"DslOld")==0) return DslOldEnum;
-         else stage=7;
-   }
-   if(stage==7){
-	      if (strcmp(name,"Dsl")==0) return DslEnum;
+	      else if (strcmp(name,"Dsl")==0) return DslEnum;
 	      else if (strcmp(name,"DeltaStr")==0) return DeltaStrEnum;
 	      else if (strcmp(name,"StrOld")==0) return StrOldEnum;
@@ -799,4 +866,5 @@
 	      else if (strcmp(name,"EtaDiff")==0) return EtaDiffEnum;
 	      else if (strcmp(name,"FlowequationBorderFS")==0) return FlowequationBorderFSEnum;
+	      else if (strcmp(name,"FrictionAlpha2")==0) return FrictionAlpha2Enum;
 	      else if (strcmp(name,"FrictionAs")==0) return FrictionAsEnum;
 	      else if (strcmp(name,"FrictionC")==0) return FrictionCEnum;
@@ -804,17 +872,21 @@
 	      else if (strcmp(name,"FrictionCoefficient")==0) return FrictionCoefficientEnum;
 	      else if (strcmp(name,"FrictionCoefficientcoulomb")==0) return FrictionCoefficientcoulombEnum;
-	      else if (strcmp(name,"FrictionCoulombWaterPressure")==0) return FrictionCoulombWaterPressureEnum;
 	      else if (strcmp(name,"FrictionEffectivePressure")==0) return FrictionEffectivePressureEnum;
+	      else if (strcmp(name,"FrictionK")==0) return FrictionKEnum;
 	      else if (strcmp(name,"FrictionM")==0) return FrictionMEnum;
-	      else if (strcmp(name,"FrictionP")==0) return FrictionPEnum;
+         else stage=8;
+   }
+   if(stage==8){
+	      if (strcmp(name,"FrictionP")==0) return FrictionPEnum;
 	      else if (strcmp(name,"FrictionPressureAdjustedTemperature")==0) return FrictionPressureAdjustedTemperatureEnum;
 	      else if (strcmp(name,"FrictionQ")==0) return FrictionQEnum;
 	      else if (strcmp(name,"FrictionSedimentCompressibilityCoefficient")==0) return FrictionSedimentCompressibilityCoefficientEnum;
 	      else if (strcmp(name,"FrictionTillFrictionAngle")==0) return FrictionTillFrictionAngleEnum;
-	      else if (strcmp(name,"FrictionSchoofWaterPressure")==0) return FrictionSchoofWaterPressureEnum;
 	      else if (strcmp(name,"FrictionWaterLayer")==0) return FrictionWaterLayerEnum;
 	      else if (strcmp(name,"FrictionWaterPressure")==0) return FrictionWaterPressureEnum;
+	      else if (strcmp(name,"FrictionWaterPressureNoise")==0) return FrictionWaterPressureNoiseEnum;
 	      else if (strcmp(name,"Frictionf")==0) return FrictionfEnum;
 	      else if (strcmp(name,"FrontalForcingsBasinId")==0) return FrontalForcingsBasinIdEnum;
+	      else if (strcmp(name,"FrontalForcingsSubglacialDischargearma")==0) return FrontalForcingsSubglacialDischargearmaEnum;
 	      else if (strcmp(name,"FrontalForcingsSubglacialDischarge")==0) return FrontalForcingsSubglacialDischargeEnum;
 	      else if (strcmp(name,"GeometryHydrostaticRatio")==0) return GeometryHydrostaticRatioEnum;
@@ -828,6 +900,8 @@
 	      else if (strcmp(name,"HydraulicPotentialOld")==0) return HydraulicPotentialOldEnum;
 	      else if (strcmp(name,"HydrologyBasalFlux")==0) return HydrologyBasalFluxEnum;
+	      else if (strcmp(name,"HydrologyBasinsId")==0) return HydrologyBasinsIdEnum;
 	      else if (strcmp(name,"HydrologyBumpHeight")==0) return HydrologyBumpHeightEnum;
 	      else if (strcmp(name,"HydrologyBumpSpacing")==0) return HydrologyBumpSpacingEnum;
+	      else if (strcmp(name,"HydrologyChannelConductivity")==0) return HydrologyChannelConductivityEnum;
 	      else if (strcmp(name,"HydrologydcBasalMoulinInput")==0) return HydrologydcBasalMoulinInputEnum;
 	      else if (strcmp(name,"HydrologydcEplThickness")==0) return HydrologydcEplThicknessEnum;
@@ -862,4 +936,5 @@
 	      else if (strcmp(name,"HydrologyWaterVy")==0) return HydrologyWaterVyEnum;
 	      else if (strcmp(name,"HydrologyMaskNodeActivation")==0) return HydrologyMaskNodeActivationEnum;
+	      else if (strcmp(name,"DebrisMaskNodeActivation")==0) return DebrisMaskNodeActivationEnum;
 	      else if (strcmp(name,"Ice")==0) return IceEnum;
 	      else if (strcmp(name,"IceMaskNodeActivation")==0) return IceMaskNodeActivationEnum;
@@ -875,8 +950,5 @@
 	      else if (strcmp(name,"LevelsetObservation")==0) return LevelsetObservationEnum;
 	      else if (strcmp(name,"LoadingforceX")==0) return LoadingforceXEnum;
-         else stage=8;
-   }
-   if(stage==8){
-	      if (strcmp(name,"LoadingforceY")==0) return LoadingforceYEnum;
+	      else if (strcmp(name,"LoadingforceY")==0) return LoadingforceYEnum;
 	      else if (strcmp(name,"LoadingforceZ")==0) return LoadingforceZEnum;
 	      else if (strcmp(name,"MaskOceanLevelset")==0) return MaskOceanLevelsetEnum;
@@ -926,5 +998,8 @@
 	      else if (strcmp(name,"SamplingBeta")==0) return SamplingBetaEnum;
 	      else if (strcmp(name,"SamplingKappa")==0) return SamplingKappaEnum;
-	      else if (strcmp(name,"SamplingPhi")==0) return SamplingPhiEnum;
+         else stage=9;
+   }
+   if(stage==9){
+	      if (strcmp(name,"SamplingPhi")==0) return SamplingPhiEnum;
 	      else if (strcmp(name,"SamplingTau")==0) return SamplingTauEnum;
 	      else if (strcmp(name,"Sealevel")==0) return SealevelEnum;
@@ -998,8 +1073,5 @@
 	      else if (strcmp(name,"SigmaNN")==0) return SigmaNNEnum;
 	      else if (strcmp(name,"SigmaVM")==0) return SigmaVMEnum;
-         else stage=9;
-   }
-   if(stage==9){
-	      if (strcmp(name,"SmbAccumulatedEC")==0) return SmbAccumulatedECEnum;
+	      else if (strcmp(name,"SmbAccumulatedEC")==0) return SmbAccumulatedECEnum;
 	      else if (strcmp(name,"SmbAccumulatedMassBalance")==0) return SmbAccumulatedMassBalanceEnum;
 	      else if (strcmp(name,"SmbAccumulatedMelt")==0) return SmbAccumulatedMeltEnum;
@@ -1008,4 +1080,8 @@
 	      else if (strcmp(name,"SmbAccumulatedRefreeze")==0) return SmbAccumulatedRefreezeEnum;
 	      else if (strcmp(name,"SmbAccumulatedRunoff")==0) return SmbAccumulatedRunoffEnum;
+	      else if (strcmp(name,"SmbAlbedo")==0) return SmbAlbedoEnum;
+	      else if (strcmp(name,"SmbAlbedoInit")==0) return SmbAlbedoInitEnum;
+	      else if (strcmp(name,"SmbAlbedoSnow")==0) return SmbAlbedoSnowEnum;
+	      else if (strcmp(name,"SmbAlbedoSnowInit")==0) return SmbAlbedoSnowInitEnum;
 	      else if (strcmp(name,"SmbA")==0) return SmbAEnum;
 	      else if (strcmp(name,"SmbAdiff")==0) return SmbAdiffEnum;
@@ -1045,5 +1121,8 @@
 	      else if (strcmp(name,"SmbDzini")==0) return SmbDziniEnum;
 	      else if (strcmp(name,"SmbEAir")==0) return SmbEAirEnum;
-	      else if (strcmp(name,"SmbEC")==0) return SmbECEnum;
+         else stage=10;
+   }
+   if(stage==10){
+	      if (strcmp(name,"SmbEC")==0) return SmbECEnum;
 	      else if (strcmp(name,"SmbECDt")==0) return SmbECDtEnum;
 	      else if (strcmp(name,"SmbECini")==0) return SmbECiniEnum;
@@ -1055,10 +1134,18 @@
 	      else if (strcmp(name,"SmbGsp")==0) return SmbGspEnum;
 	      else if (strcmp(name,"SmbGspini")==0) return SmbGspiniEnum;
+	      else if (strcmp(name,"SmbHIce")==0) return SmbHIceEnum;
+	      else if (strcmp(name,"SmbHIceInit")==0) return SmbHIceInitEnum;
+	      else if (strcmp(name,"SmbHSnow")==0) return SmbHSnowEnum;
+	      else if (strcmp(name,"SmbHSnowInit")==0) return SmbHSnowInitEnum;
 	      else if (strcmp(name,"SmbHref")==0) return SmbHrefEnum;
 	      else if (strcmp(name,"SmbIsInitialized")==0) return SmbIsInitializedEnum;
 	      else if (strcmp(name,"SmbMAdd")==0) return SmbMAddEnum;
 	      else if (strcmp(name,"SmbMassBalance")==0) return SmbMassBalanceEnum;
+	      else if (strcmp(name,"SmbMassBalanceSnow")==0) return SmbMassBalanceSnowEnum;
+	      else if (strcmp(name,"SmbMassBalanceIce")==0) return SmbMassBalanceIceEnum;
+	      else if (strcmp(name,"SmbMassBalanceSemic")==0) return SmbMassBalanceSemicEnum;
 	      else if (strcmp(name,"SmbMassBalanceSubstep")==0) return SmbMassBalanceSubstepEnum;
 	      else if (strcmp(name,"SmbMassBalanceTransient")==0) return SmbMassBalanceTransientEnum;
+	      else if (strcmp(name,"SmbMask")==0) return SmbMaskEnum;
 	      else if (strcmp(name,"SmbMeanLHF")==0) return SmbMeanLHFEnum;
 	      else if (strcmp(name,"SmbMeanSHF")==0) return SmbMeanSHFEnum;
@@ -1066,4 +1153,8 @@
 	      else if (strcmp(name,"SmbMelt")==0) return SmbMeltEnum;
 	      else if (strcmp(name,"SmbMonthlytemperatures")==0) return SmbMonthlytemperaturesEnum;
+	      else if (strcmp(name,"SmbMonthlydsradiation")==0) return SmbMonthlydsradiationEnum;
+	      else if (strcmp(name,"SmbMonthlydlradiation")==0) return SmbMonthlydlradiationEnum;
+	      else if (strcmp(name,"SmbMonthlywindspeed")==0) return SmbMonthlywindspeedEnum;
+	      else if (strcmp(name,"SmbMonthlyairhumidity")==0) return SmbMonthlyairhumidityEnum;
 	      else if (strcmp(name,"SmbMSurf")==0) return SmbMSurfEnum;
 	      else if (strcmp(name,"SmbNetLW")==0) return SmbNetLWEnum;
@@ -1075,4 +1166,8 @@
 	      else if (strcmp(name,"SmbPrecipitation")==0) return SmbPrecipitationEnum;
 	      else if (strcmp(name,"SmbPrecipitationsAnomaly")==0) return SmbPrecipitationsAnomalyEnum;
+	      else if (strcmp(name,"SmbDsradiationAnomaly")==0) return SmbDsradiationAnomalyEnum;
+	      else if (strcmp(name,"SmbDlradiationAnomaly")==0) return SmbDlradiationAnomalyEnum;
+	      else if (strcmp(name,"SmbWindspeedAnomaly")==0) return SmbWindspeedAnomalyEnum;
+	      else if (strcmp(name,"SmbAirhumidityAnomaly")==0) return SmbAirhumidityAnomalyEnum;
 	      else if (strcmp(name,"SmbPrecipitationsLgm")==0) return SmbPrecipitationsLgmEnum;
 	      else if (strcmp(name,"SmbPrecipitationsPresentday")==0) return SmbPrecipitationsPresentdayEnum;
@@ -1088,10 +1183,16 @@
 	      else if (strcmp(name,"SmbS0p")==0) return SmbS0pEnum;
 	      else if (strcmp(name,"SmbS0t")==0) return SmbS0tEnum;
+	      else if (strcmp(name,"SmbSemicQmr")==0) return SmbSemicQmrEnum;
+	      else if (strcmp(name,"SmbSemicQmrInit")==0) return SmbSemicQmrInitEnum;
 	      else if (strcmp(name,"SmbSizeini")==0) return SmbSizeiniEnum;
 	      else if (strcmp(name,"SmbSmbCorr")==0) return SmbSmbCorrEnum;
 	      else if (strcmp(name,"SmbSmbref")==0) return SmbSmbrefEnum;
 	      else if (strcmp(name,"SmbSzaValue")==0) return SmbSzaValueEnum;
+	      else if (strcmp(name,"SmbSummerMelt")==0) return SmbSummerMeltEnum;
+	      else if (strcmp(name,"SmbSummerAlbedo")==0) return SmbSummerAlbedoEnum;
+	      else if (strcmp(name,"SmbSnowheight")==0) return SmbSnowheightEnum;
 	      else if (strcmp(name,"SmbT")==0) return SmbTEnum;
 	      else if (strcmp(name,"SmbTa")==0) return SmbTaEnum;
+	      else if (strcmp(name,"SmbTamp")==0) return SmbTampEnum;
 	      else if (strcmp(name,"SmbTeValue")==0) return SmbTeValueEnum;
 	      else if (strcmp(name,"SmbTemperaturesAnomaly")==0) return SmbTemperaturesAnomalyEnum;
@@ -1121,8 +1222,5 @@
 	      else if (strcmp(name,"StrainRateeffective")==0) return StrainRateeffectiveEnum;
 	      else if (strcmp(name,"StrainRateparallel")==0) return StrainRateparallelEnum;
-         else stage=10;
-   }
-   if(stage==10){
-	      if (strcmp(name,"StrainRateperpendicular")==0) return StrainRateperpendicularEnum;
+	      else if (strcmp(name,"StrainRateperpendicular")==0) return StrainRateperpendicularEnum;
 	      else if (strcmp(name,"StrainRatexx")==0) return StrainRatexxEnum;
 	      else if (strcmp(name,"StrainRatexy")==0) return StrainRatexyEnum;
@@ -1138,4 +1236,7 @@
 	      else if (strcmp(name,"StressTensoryz")==0) return StressTensoryzEnum;
 	      else if (strcmp(name,"StressTensorzz")==0) return StressTensorzzEnum;
+	      else if (strcmp(name,"SubglacialdischargeARMANoise")==0) return SubglacialdischargeARMANoiseEnum;
+	      else if (strcmp(name,"SubglacialdischargeValuesAutoregression")==0) return SubglacialdischargeValuesAutoregressionEnum;
+	      else if (strcmp(name,"SubglacialdischargeValuesMovingaverage")==0) return SubglacialdischargeValuesMovingaverageEnum;
 	      else if (strcmp(name,"SurfaceAbsMisfit")==0) return SurfaceAbsMisfitEnum;
 	      else if (strcmp(name,"SurfaceAbsVelMisfit")==0) return SurfaceAbsVelMisfitEnum;
@@ -1143,5 +1244,8 @@
 	      else if (strcmp(name,"SealevelArea")==0) return SealevelAreaEnum;
 	      else if (strcmp(name,"SurfaceArea")==0) return SurfaceAreaEnum;
-	      else if (strcmp(name,"SurfaceAverageVelMisfit")==0) return SurfaceAverageVelMisfitEnum;
+         else stage=11;
+   }
+   if(stage==11){
+	      if (strcmp(name,"SurfaceAverageVelMisfit")==0) return SurfaceAverageVelMisfitEnum;
 	      else if (strcmp(name,"SurfaceCrevasse")==0) return SurfaceCrevasseEnum;
 	      else if (strcmp(name,"Surface")==0) return SurfaceEnum;
@@ -1199,5 +1303,11 @@
 	      else if (strcmp(name,"Waterfraction")==0) return WaterfractionEnum;
 	      else if (strcmp(name,"Waterheight")==0) return WaterheightEnum;
+	      else if (strcmp(name,"WaterPressureArmaPerturbation")==0) return WaterPressureArmaPerturbationEnum;
+	      else if (strcmp(name,"WaterPressureValuesAutoregression")==0) return WaterPressureValuesAutoregressionEnum;
+	      else if (strcmp(name,"WaterPressureValuesMovingaverage")==0) return WaterPressureValuesMovingaverageEnum;
 	      else if (strcmp(name,"WeightsLevelsetObservation")==0) return WeightsLevelsetObservationEnum;
+	      else if (strcmp(name,"WeightsMeltObservation")==0) return WeightsMeltObservationEnum;
+	      else if (strcmp(name,"WeightsVxObservation")==0) return WeightsVxObservationEnum;
+	      else if (strcmp(name,"WeightsVyObservation")==0) return WeightsVyObservationEnum;
 	      else if (strcmp(name,"WeightsSurfaceObservation")==0) return WeightsSurfaceObservationEnum;
 	      else if (strcmp(name,"OldAccumulatedDeltaBottomPressure")==0) return OldAccumulatedDeltaBottomPressureEnum;
@@ -1244,8 +1354,5 @@
 	      else if (strcmp(name,"Outputdefinition45")==0) return Outputdefinition45Enum;
 	      else if (strcmp(name,"Outputdefinition46")==0) return Outputdefinition46Enum;
-         else stage=11;
-   }
-   if(stage==11){
-	      if (strcmp(name,"Outputdefinition47")==0) return Outputdefinition47Enum;
+	      else if (strcmp(name,"Outputdefinition47")==0) return Outputdefinition47Enum;
 	      else if (strcmp(name,"Outputdefinition48")==0) return Outputdefinition48Enum;
 	      else if (strcmp(name,"Outputdefinition49")==0) return Outputdefinition49Enum;
@@ -1260,5 +1367,8 @@
 	      else if (strcmp(name,"Outputdefinition57")==0) return Outputdefinition57Enum;
 	      else if (strcmp(name,"Outputdefinition58")==0) return Outputdefinition58Enum;
-	      else if (strcmp(name,"Outputdefinition59")==0) return Outputdefinition59Enum;
+         else stage=12;
+   }
+   if(stage==12){
+	      if (strcmp(name,"Outputdefinition59")==0) return Outputdefinition59Enum;
 	      else if (strcmp(name,"Outputdefinition5")==0) return Outputdefinition5Enum;
 	      else if (strcmp(name,"Outputdefinition60")==0) return Outputdefinition60Enum;
@@ -1345,9 +1455,15 @@
 	      else if (strcmp(name,"CalvingTest")==0) return CalvingTestEnum;
 	      else if (strcmp(name,"CalvingParameterization")==0) return CalvingParameterizationEnum;
+	      else if (strcmp(name,"CalvingCalvingMIP")==0) return CalvingCalvingMIPEnum;
 	      else if (strcmp(name,"CalvingVonmises")==0) return CalvingVonmisesEnum;
+	      else if (strcmp(name,"CalvingVonmisesAD")==0) return CalvingVonmisesADEnum;
 	      else if (strcmp(name,"CalvingPollard")==0) return CalvingPollardEnum;
 	      else if (strcmp(name,"Cfdragcoeffabsgrad")==0) return CfdragcoeffabsgradEnum;
+	      else if (strcmp(name,"Cfdragcoeffabsgradtransient")==0) return CfdragcoeffabsgradtransientEnum;
+	      else if (strcmp(name,"Cfrheologybbarabsgrad")==0) return CfrheologybbarabsgradEnum;
+	      else if (strcmp(name,"Cfrheologybbarabsgradtransient")==0) return CfrheologybbarabsgradtransientEnum;
 	      else if (strcmp(name,"Cfsurfacelogvel")==0) return CfsurfacelogvelEnum;
 	      else if (strcmp(name,"Cfsurfacesquare")==0) return CfsurfacesquareEnum;
+	      else if (strcmp(name,"Cfsurfacesquaretransient")==0) return CfsurfacesquaretransientEnum;
 	      else if (strcmp(name,"Cflevelsetmisfit")==0) return CflevelsetmisfitEnum;
 	      else if (strcmp(name,"Channel")==0) return ChannelEnum;
@@ -1366,9 +1482,7 @@
 	      else if (strcmp(name,"ControlInputMins")==0) return ControlInputMinsEnum;
 	      else if (strcmp(name,"ControlInputValues")==0) return ControlInputValuesEnum;
+	      else if (strcmp(name,"ControlParam")==0) return ControlParamEnum;
 	      else if (strcmp(name,"CrouzeixRaviart")==0) return CrouzeixRaviartEnum;
-         else stage=12;
-   }
-   if(stage==12){
-	      if (strcmp(name,"Cuffey")==0) return CuffeyEnum;
+	      else if (strcmp(name,"Cuffey")==0) return CuffeyEnum;
 	      else if (strcmp(name,"CuffeyTemperate")==0) return CuffeyTemperateEnum;
 	      else if (strcmp(name,"DamageEvolutionAnalysis")==0) return DamageEvolutionAnalysisEnum;
@@ -1376,5 +1490,8 @@
 	      else if (strcmp(name,"DataSet")==0) return DataSetEnum;
 	      else if (strcmp(name,"DataSetParam")==0) return DataSetParamEnum;
-	      else if (strcmp(name,"DatasetInput")==0) return DatasetInputEnum;
+         else stage=13;
+   }
+   if(stage==13){
+	      if (strcmp(name,"DatasetInput")==0) return DatasetInputEnum;
 	      else if (strcmp(name,"DebrisAnalysis")==0) return DebrisAnalysisEnum;
 	      else if (strcmp(name,"DebrisSolution")==0) return DebrisSolutionEnum;
@@ -1446,4 +1563,6 @@
 	      else if (strcmp(name,"HOFSApproximation")==0) return HOFSApproximationEnum;
 	      else if (strcmp(name,"Hook")==0) return HookEnum;
+	      else if (strcmp(name,"HydrologyArmapwAnalysis")==0) return HydrologyArmapwAnalysisEnum;
+	      else if (strcmp(name,"Hydrologyarmapw")==0) return HydrologyarmapwEnum;
 	      else if (strcmp(name,"HydrologyDCEfficientAnalysis")==0) return HydrologyDCEfficientAnalysisEnum;
 	      else if (strcmp(name,"HydrologyDCInefficientAnalysis")==0) return HydrologyDCInefficientAnalysisEnum;
@@ -1490,12 +1609,12 @@
 	      else if (strcmp(name,"LevelsetAnalysis")==0) return LevelsetAnalysisEnum;
 	      else if (strcmp(name,"LevelsetfunctionPicard")==0) return LevelsetfunctionPicardEnum;
-         else stage=13;
-   }
-   if(stage==13){
-	      if (strcmp(name,"LinearFloatingMeltRate")==0) return LinearFloatingMeltRateEnum;
+	      else if (strcmp(name,"LinearFloatingMeltRate")==0) return LinearFloatingMeltRateEnum;
 	      else if (strcmp(name,"LinearFloatingMeltRatearma")==0) return LinearFloatingMeltRatearmaEnum;
 	      else if (strcmp(name,"LliboutryDuval")==0) return LliboutryDuvalEnum;
 	      else if (strcmp(name,"Loads")==0) return LoadsEnum;
-	      else if (strcmp(name,"LoveAnalysis")==0) return LoveAnalysisEnum;
+         else stage=14;
+   }
+   if(stage==14){
+	      if (strcmp(name,"LoveAnalysis")==0) return LoveAnalysisEnum;
 	      else if (strcmp(name,"LoveHf")==0) return LoveHfEnum;
 	      else if (strcmp(name,"LoveHt")==0) return LoveHtEnum;
@@ -1606,5 +1725,5 @@
 	      else if (strcmp(name,"SMBarma")==0) return SMBarmaEnum;
 	      else if (strcmp(name,"SMBcomponents")==0) return SMBcomponentsEnum;
-	      else if (strcmp(name,"SMBdebrisML")==0) return SMBdebrisMLEnum;
+	      else if (strcmp(name,"SMBdebrisEvatt")==0) return SMBdebrisEvattEnum;
 	      else if (strcmp(name,"SMBd18opdd")==0) return SMBd18opddEnum;
 	      else if (strcmp(name,"SMBforcing")==0) return SMBforcingEnum;
@@ -1613,12 +1732,12 @@
 	      else if (strcmp(name,"SMBgradients")==0) return SMBgradientsEnum;
 	      else if (strcmp(name,"SMBgradientscomponents")==0) return SMBgradientscomponentsEnum;
-         else stage=14;
-   }
-   if(stage==14){
-	      if (strcmp(name,"SMBgradientsela")==0) return SMBgradientselaEnum;
+	      else if (strcmp(name,"SMBgradientsela")==0) return SMBgradientselaEnum;
 	      else if (strcmp(name,"SMBhenning")==0) return SMBhenningEnum;
 	      else if (strcmp(name,"SMBmeltcomponents")==0) return SMBmeltcomponentsEnum;
 	      else if (strcmp(name,"SMBpdd")==0) return SMBpddEnum;
-	      else if (strcmp(name,"SMBpddSicopolis")==0) return SMBpddSicopolisEnum;
+         else stage=15;
+   }
+   if(stage==15){
+	      if (strcmp(name,"SMBpddSicopolis")==0) return SMBpddSicopolisEnum;
 	      else if (strcmp(name,"SMBsemic")==0) return SMBsemicEnum;
 	      else if (strcmp(name,"SSAApproximation")==0) return SSAApproximationEnum;
@@ -1681,4 +1800,6 @@
 	      else if (strcmp(name,"TotalSmb")==0) return TotalSmbEnum;
 	      else if (strcmp(name,"TotalSmbScaled")==0) return TotalSmbScaledEnum;
+	      else if (strcmp(name,"TotalSmbRefreeze")==0) return TotalSmbRefreezeEnum;
+	      else if (strcmp(name,"TotalSmbMelt")==0) return TotalSmbMeltEnum;
 	      else if (strcmp(name,"TransientArrayParam")==0) return TransientArrayParamEnum;
 	      else if (strcmp(name,"TransientInput")==0) return TransientInputEnum;
@@ -1717,5 +1838,5 @@
 	      else if (strcmp(name,"TriangleInterp")==0) return TriangleInterpEnum;
 	      else if (strcmp(name,"MaximumNumberOfDefinitions")==0) return MaximumNumberOfDefinitionsEnum;
-         else stage=15;
+         else stage=16;
    }
 	/*If we reach this point, the string provided has not been found*/
Index: /issm/trunk/src/c/shared/Enum/Synchronize.sh
===================================================================
--- /issm/trunk/src/c/shared/Enum/Synchronize.sh	(revision 28012)
+++ /issm/trunk/src/c/shared/Enum/Synchronize.sh	(revision 28013)
@@ -213,4 +213,12 @@
 cat <<END >> $ISSM_DIR/src/c/shared/Enum/issmenums.jl
 end
+
+function StringToEnum(name::String)
+END
+cat temp |  awk '{print "\tif(name==\"" substr($1,1,length($1)-4) "\") return " $1 "  end"}' >> $ISSM_DIR/src/c/shared/Enum/issmenums.jl
+
+cat <<END >> $ISSM_DIR/src/c/shared/Enum/issmenums.jl
+	error("Enum ", name, " not found");
+end
 END
 #}}}
Index: /issm/trunk/src/c/shared/Exceptions/exceptions.h
===================================================================
--- /issm/trunk/src/c/shared/Exceptions/exceptions.h	(revision 28012)
+++ /issm/trunk/src/c/shared/Exceptions/exceptions.h	(revision 28013)
@@ -34,5 +34,5 @@
 #ifdef _ISSM_DEBUG_ 
 #define _assert_(statement)\
-  if (!(statement)) _error_("Assertion \""<<#statement<<"\" failed, please report bug to "<<PACKAGE_BUGREPORT)
+  if (!(statement)) _error_("Assertion \""<<#statement<<"\" failed, please report bug at "<<PACKAGE_BUGREPORT)
 #else
 #define _assert_(ignore)\
Index: /issm/trunk/src/c/shared/Exp/exp.h
===================================================================
--- /issm/trunk/src/c/shared/Exp/exp.h	(revision 28012)
+++ /issm/trunk/src/c/shared/Exp/exp.h	(revision 28013)
@@ -115,5 +115,5 @@
 	closed=xNew<bool>(nprof);
 
-	/*Reaset file pointer to beginning of file: */
+	/*Reset file pointer to beginning of file: */
 	fseek(fid,0,SEEK_SET);
 
Index: /issm/trunk/src/c/shared/MemOps/MemOps.h
===================================================================
--- /issm/trunk/src/c/shared/MemOps/MemOps.h	(revision 28012)
+++ /issm/trunk/src/c/shared/MemOps/MemOps.h	(revision 28013)
@@ -20,9 +20,5 @@
 
 /* AD (mostly ADOLC) is sensitive to calls to ensurecontiguous. These changes limit its use.*/
-#ifdef _HAVE_AD_
 template <class T> T* xNew(unsigned int size, const char* const contig = &DEFCONTIG){
-#else
-template <class T> T* xNew(unsigned int size){
-#endif
 #ifdef USE_CXX_MEMORY_MANAGMENT_FOR_NON_POD_TYPES
   T* aT_p=new T[size];
@@ -58,9 +54,5 @@
 }/*}}}*/
 // AD (mostly ADOLC) is sensitive to calls to ensurecontiguous. These changes limit its use.
-#ifdef _HAVE_AD_
 template <class T> T* xNewZeroInit(unsigned int size,const char* const contig = &DEFCONTIG){
-#else
-template <class T> T* xNewZeroInit(unsigned int size){
-#endif
 #ifdef USE_CXX_MEMORY_MANAGMENT_FOR_NON_POD_TYPES
 #ifdef _HAVE_AD_
Index: /issm/trunk/src/c/shared/Numerics/types.h
===================================================================
--- /issm/trunk/src/c/shared/Numerics/types.h	(revision 28012)
+++ /issm/trunk/src/c/shared/Numerics/types.h	(revision 28013)
@@ -34,4 +34,5 @@
 /*CoDiPack typedefs*/
 #include <codi.hpp>
+//typedef codi::RealReverseIndex          IssmDouble;
 typedef codi::RealReverse               IssmDouble;
 typedef std::complex<codi::RealReverse> IssmComplex;
Index: /issm/trunk/src/c/shared/io/Marshalling/IoCodeConversions.cpp
===================================================================
--- /issm/trunk/src/c/shared/io/Marshalling/IoCodeConversions.cpp	(revision 28012)
+++ /issm/trunk/src/c/shared/io/Marshalling/IoCodeConversions.cpp	(revision 28013)
@@ -26,4 +26,9 @@
 		const char* field = "md.materials.rheology_B";
 		input_enum        = MaterialsRheologyBEnum;
+		fieldname=xNew<char>((strlen(field)+1)); xMemCpy<char>(fieldname,field,(strlen(field)+1));
+	}
+	else if(strcmp(string_in,"MaterialsRheologyN")==0){
+		const char* field = "md.materials.rheology_n";
+		input_enum        = MaterialsRheologyNEnum;
 		fieldname=xNew<char>((strlen(field)+1)); xMemCpy<char>(fieldname,field,(strlen(field)+1));
 	}
@@ -163,4 +168,9 @@
 		fieldname=xNew<char>((strlen(field)+1)); xMemCpy<char>(fieldname,field,(strlen(field)+1));
 	}
+	else if(strcmp(string_in,"CalvingADStressThresholdGroundedice")==0){
+		const char* field = "md.calving.stress_threshold_groundedice";
+		input_enum        = CalvingADStressThresholdGroundediceEnum;
+		fieldname=xNew<char>((strlen(field)+1)); xMemCpy<char>(fieldname,field,(strlen(field)+1));
+	}
 	else if(strcmp(string_in,"DamageDbar")==0){
 		const char* field = "md.damage.D";
@@ -216,4 +226,14 @@
 		const char* field = "md.basalforcings.meltrate_factor";
 		input_enum        = BasalforcingsMeltrateFactorEnum;
+		fieldname=xNew<char>((strlen(field)+1)); xMemCpy<char>(fieldname,field,(strlen(field)+1));
+	}
+	else if(strcmp(string_in,"BasalforcingsSpatialDeepwaterMeltingRate")==0){
+		const char* field = "md.basalforcings.deepwater_melting_rate";
+		input_enum        = BasalforcingsSpatialDeepwaterMeltingRateEnum;
+		fieldname=xNew<char>((strlen(field)+1)); xMemCpy<char>(fieldname,field,(strlen(field)+1));
+	}
+	else if(strcmp(string_in,"BasalforcingsDeepwaterMeltingRate")==0){
+		const char* field = "md.basalforcings.deepwater_melting_rate";
+		input_enum        = BasalforcingsDeepwaterMeltingRateEnum;
 		fieldname=xNew<char>((strlen(field)+1)); xMemCpy<char>(fieldname,field,(strlen(field)+1));
 	}
@@ -242,5 +262,5 @@
 		case 12: return SMBsemicEnum;	 
 		case 13: return SMBarmaEnum;
-		case 14: return SMBdebrisMLEnum;
+		case 14: return SMBdebrisEvattEnum;
 		default: _error_("Marshalled SMB code \""<<enum_in<<"\" not supported yet");
 	}
@@ -272,4 +292,6 @@
 		case 9:  return CalvingParameterizationEnum;
 		case 10: return CalvingPollardEnum;
+		case 11: return CalvingVonmisesADEnum;
+		case 12:  return CalvingCalvingMIPEnum;
 		default: _error_("Marshalled Calving law code \""<<enum_in<<"\" not supported yet");
 	}
@@ -291,4 +313,5 @@
 		case 5: return HydrologyGlaDSEnum;
 		case 6: return HydrologyTwsEnum;
+		case 7: return HydrologyarmapwEnum;
 		default: _error_("Marshalled hydrology code \""<<enum_in<<"\" not supported yet");
 	}
Index: /issm/trunk/src/c/solutionsequences/solutionsequence_hydro_nonlinear.cpp
===================================================================
--- /issm/trunk/src/c/solutionsequences/solutionsequence_hydro_nonlinear.cpp	(revision 28012)
+++ /issm/trunk/src/c/solutionsequences/solutionsequence_hydro_nonlinear.cpp	(revision 28013)
@@ -65,4 +65,5 @@
 	GetBasalSolutionFromInputsx(&ug_sed,femmodel);
 	/*Initialize the IDS element mask to exclude frozen nodes*/
+	inefanalysis = new HydrologyDCInefficientAnalysis();
 	inefanalysis->ElementizeIdsMask(femmodel);
 
@@ -72,5 +73,4 @@
 
 	if(isefficientlayer) {
-		inefanalysis = new HydrologyDCInefficientAnalysis();
 		effanalysis = new HydrologyDCEfficientAnalysis();
 		femmodel->SetCurrentConfiguration(HydrologyDCEfficientAnalysisEnum);
@@ -303,5 +303,5 @@
 				_error_("   maximum number for hydrological global iterations (" << hydro_maxiter << ") exceeded");
 				delete ug_sed;delete uf_sed;delete effanalysis;
-				delete ug_epl;	delete uf_epl;	delete inefanalysis;
+				delete ug_epl;	delete uf_epl;
 			}
 		}
Index: /issm/trunk/src/c/solutionsequences/solutionsequence_newton.cpp
===================================================================
--- /issm/trunk/src/c/solutionsequences/solutionsequence_newton.cpp	(revision 28012)
+++ /issm/trunk/src/c/solutionsequences/solutionsequence_newton.cpp	(revision 28013)
@@ -40,5 +40,5 @@
 	femmodel->UpdateConstraintsx();
 
-	count=1;
+	count=0;
 	converged=false;
 
@@ -57,5 +57,5 @@
 
 		/*Solver forward model*/
-		if(count==1 || newton==2){
+		if(count==0 || newton==2){
 			SystemMatricesx(&Kff,&Kfs,&pf,&df,NULL,femmodel);
 			CreateNodalConstraintsx(&ys,femmodel->nodes);
@@ -87,20 +87,18 @@
 		Mergesolutionfromftogx(&ug,uf,ys,femmodel->nodes,femmodel->parameters);delete ys;
 		InputUpdateFromSolutionx(femmodel,ug);
+		count++;
 
 		/*Check convergence*/
 		convergence(&converged,Kff,pf,uf,old_uf,eps_res,eps_rel,eps_abs); 
 		delete Kff; delete pf;
-		if(converged==true){	
-			break;
-		}
+		if(converged==true) break;
 		if(count>=max_nonlinear_iterations){
 			_printf0_("   maximum number of Newton iterations (" << max_nonlinear_iterations << ") exceeded\n"); 
 			break;
 		}
-
-		count++;
 	}
 
-	if(VerboseConvergence()) _printf0_("\n   total number of iterations: " << count-1 << "\n");
+	if(VerboseConvergence()) _printf0_("\n   total number of iterations: " << count << "\n");
+	femmodel->results->AddResult(new GenericExternalResult<int>(femmodel->results->Size()+1,StressbalanceConvergenceNumStepsEnum,count));
 
 	/*clean-up*/
Index: /issm/trunk/src/c/solutionsequences/solutionsequence_schurcg.cpp
===================================================================
--- /issm/trunk/src/c/solutionsequences/solutionsequence_schurcg.cpp	(revision 28012)
+++ /issm/trunk/src/c/solutionsequences/solutionsequence_schurcg.cpp	(revision 28013)
@@ -58,5 +58,5 @@
 	PetscOptionsGetInt(PETSC_NULL,"-schur_pc",&precond,NULL);
 	PetscOptionsGetInt(PETSC_NULL,"-max_iter",&maxiter,NULL);
-	#else
+	#elif PETSC_VERSION_LT(3,19,0)
 	PetscOptionsGetString(NULL,PETSC_NULL,"-ksp_type",ksp_type,49,&flg);
 	PetscOptionsGetString(NULL,PETSC_NULL,"-pc_type",pc_type,49,&flg);
@@ -65,4 +65,11 @@
 	PetscOptionsGetInt(NULL,PETSC_NULL,"-schur_pc",&precond,NULL);
 	PetscOptionsGetInt(NULL,PETSC_NULL,"-max_iter",&maxiter,NULL);
+	#else
+	PetscOptionsGetString(NULL,PETSC_NULLPTR,"-ksp_type",ksp_type,49,&flg);
+	PetscOptionsGetString(NULL,PETSC_NULLPTR,"-pc_type",pc_type,49,&flg);
+	PetscOptionsGetReal(NULL,PETSC_NULLPTR,"-tol",&TOL,NULL);
+	PetscOptionsGetReal(NULL,PETSC_NULLPTR,"-elltol",&ELLTOL,NULL);
+	PetscOptionsGetInt(NULL,PETSC_NULLPTR,"-schur_pc",&precond,NULL);
+	PetscOptionsGetInt(NULL,PETSC_NULLPTR,"-max_iter",&maxiter,NULL);
 	#endif
 
@@ -689,6 +696,8 @@
 		#if PETSC_VERSION_LT(3,7,0)
 		PetscOptionsGetInt(PETSC_NULL,"-schur_pc",&precond,NULL);
+		#elif PETSC_VERSION_LT(3,19,0)
+		PetscOptionsGetInt(NULL,PETSC_NULL,"-schur_pc",&precond,NULL);
 		#else
-		PetscOptionsGetInt(NULL,PETSC_NULL,"-schur_pc",&precond,NULL);
+		PetscOptionsGetInt(NULL,PETSC_NULLPTR,"-schur_pc",&precond,NULL);
 		#endif
 
Index: /issm/trunk/src/c/toolkits/ToolkitOptions.cpp
===================================================================
--- /issm/trunk/src/c/toolkits/ToolkitOptions.cpp	(revision 28012)
+++ /issm/trunk/src/c/toolkits/ToolkitOptions.cpp	(revision 28013)
@@ -31,4 +31,10 @@
 	toolkittype    = NULL;
 	toolkitoptions = NULL;
+}/*}}}*/
+void  ToolkitOptions::Delete(){ /*{{{*/
+
+	xDelete<char>(toolkitoptions);
+	xDelete<char>(toolkittype);
+
 }/*}}}*/
 char* ToolkitOptions::GetToolkitType(){  /*{{{*/
Index: /issm/trunk/src/c/toolkits/ToolkitOptions.h
===================================================================
--- /issm/trunk/src/c/toolkits/ToolkitOptions.h	(revision 28012)
+++ /issm/trunk/src/c/toolkits/ToolkitOptions.h	(revision 28013)
@@ -17,4 +17,5 @@
 		static void  Init(const char* type_in,const char* options);
 		static void  Init(void);
+		static void  Delete(void);
 		static char* GetToolkitType(void);
 		static char* GetToolkitOptionValue(const char* option);
Index: /issm/trunk/src/c/toolkits/mpi/issmmpi.cpp
===================================================================
--- /issm/trunk/src/c/toolkits/mpi/issmmpi.cpp	(revision 28012)
+++ /issm/trunk/src/c/toolkits/mpi/issmmpi.cpp	(revision 28013)
@@ -509,4 +509,46 @@
   return rc;
 }/*}}}*/
+int ISSM_MPI_Isend(void *buf, int count, ISSM_MPI_Datatype datatype, int dest, int tag, ISSM_MPI_Comm comm, ISSM_MPI_Request* req){ /*{{{*/
+
+  int rc=0;
+#ifdef _HAVE_MPI_
+#if defined(_HAVE_AMPI_) &&  !defined(_WRAPPERS_)
+  rc=AMPI_Isend(buf,
+	       count,
+	       datatype,
+	       dest,
+	       tag,
+			 #if !defined(_HAVE_ADJOINTMPI_) && !defined(_HAVE_MEDIPACK_)
+	       AMPI_TO_RECV, // as long as there are no other variants
+			 #endif
+	       comm,
+		   req);
+# else
+  rc=MPI_Isend(buf,
+	      count,
+	      datatype,
+	      dest,
+	      tag,
+	      comm,
+	      req);
+# endif
+#else
+// nothing to be done here
+#endif
+  return rc;
+}/*}}}*/
+int ISSM_MPI_Wait(ISSM_MPI_Request *req, ISSM_MPI_Status *status){/*{{{*/
+	int rc=0;
+#ifdef _HAVE_MPI_
+#if defined(_HAVE_AMPI_) &&  !defined(_WRAPPERS_)
+	rc=AMPI_Wait(req, status);
+# else
+	rc=MPI_Wait(req, status);
+# endif
+#else
+// nothing to be done here
+#endif
+	return rc;
+}/*}}}*/
 double ISSM_MPI_Wtime(void){/*{{{*/
 
Index: /issm/trunk/src/c/toolkits/mpi/issmmpi.h
===================================================================
--- /issm/trunk/src/c/toolkits/mpi/issmmpi.h	(revision 28012)
+++ /issm/trunk/src/c/toolkits/mpi/issmmpi.h	(revision 28013)
@@ -68,13 +68,19 @@
 	// types
 	#if defined(_HAVE_MEDIPACK_) && !defined(_WRAPPERS_)
-	typedef AMPI_Comm             ISSM_MPI_Comm;
-	typedef AMPI_Datatype         ISSM_MPI_Datatype;
-	typedef AMPI_Op               ISSM_MPI_Op;
-	typedef AMPI_Status           ISSM_MPI_Status;
-	#else
-	typedef MPI_Comm             ISSM_MPI_Comm;
-	typedef MPI_Datatype         ISSM_MPI_Datatype;
-	typedef MPI_Op               ISSM_MPI_Op;
-	typedef MPI_Status           ISSM_MPI_Status;
+	typedef AMPI_Comm     ISSM_MPI_Comm;
+	typedef AMPI_Datatype ISSM_MPI_Datatype;
+	typedef AMPI_Op       ISSM_MPI_Op;
+	typedef AMPI_Status   ISSM_MPI_Status;
+	typedef AMPI_Request  ISSM_MPI_Request;
+	#else
+	typedef MPI_Comm      ISSM_MPI_Comm;
+	typedef MPI_Datatype  ISSM_MPI_Datatype;
+	typedef MPI_Op        ISSM_MPI_Op;
+	typedef MPI_Status    ISSM_MPI_Status;
+	#if defined(_HAVE_AMPI_) && !defined(_WRAPPERS_)
+	typedef AMPI_Request   ISSM_MPI_Request;
+	#else
+	typedef MPI_Request  ISSM_MPI_Request;
+	#endif
 	#endif
 
@@ -97,4 +103,5 @@
 	#define ISSM_MPI_ANY_TAG       AMPI_ANY_TAG
 	#define ISSM_MPI_ANY_SOURCE    AMPI_ANY_SOURCE
+	#define ISSM_MPI_REQUEST_NULL  AMPI_Request()
 
 	#else
@@ -120,4 +127,9 @@
 		#define ISSM_MPI_ANY_TAG       MPI_ANY_TAG
 		#define ISSM_MPI_ANY_SOURCE    MPI_ANY_SOURCE
+		#if defined(_HAVE_AMPI_) && !defined(_WRAPPERS_)
+			#define ISSM_MPI_REQUEST_NULL  AMPI_Request()
+		#else
+			#define ISSM_MPI_REQUEST_NULL  0
+		#endif
 	#endif
 
@@ -128,8 +140,9 @@
 	/*Our ISSM MPI defines: {{{*/
 	// types
-	typedef int                  ISSM_MPI_Comm;
-	typedef int                  ISSM_MPI_Datatype;
-	typedef int                  ISSM_MPI_Op;
-	typedef int                  ISSM_MPI_Status;
+	typedef int  ISSM_MPI_Comm;
+	typedef int  ISSM_MPI_Datatype;
+	typedef int  ISSM_MPI_Op;
+	typedef int  ISSM_MPI_Status;
+	typedef int  ISSM_MPI_Request;
 
 	// data types
@@ -152,4 +165,5 @@
 	#define ISSM_MPI_ANY_TAG       2
 	#define ISSM_MPI_ANY_SOURCE    3
+	#define ISSM_MPI_REQUEST_NULL  0
 	/*}}}*/
 #endif
@@ -208,4 +222,6 @@
 int ISSM_MPI_Scatterv(void *sendbuf, int *sendcnts, int *displs, ISSM_MPI_Datatype sendtype, void *recvbuf, int recvcnt, ISSM_MPI_Datatype recvtype, int root, ISSM_MPI_Comm comm);
 int ISSM_MPI_Send(void *buf, int count, ISSM_MPI_Datatype datatype, int dest, int tag, ISSM_MPI_Comm comm);
+int ISSM_MPI_Isend(void* buf, int count, ISSM_MPI_Datatype datatype, int dest, int tag, ISSM_MPI_Comm comm, ISSM_MPI_Request* req);
+int ISSM_MPI_Wait(ISSM_MPI_Request* req, ISSM_MPI_Status* status);
 double ISSM_MPI_Wtime(void);
 int ISSM_MPI_Comm_split(ISSM_MPI_Comm comm, int color, int key, ISSM_MPI_Comm *newcomm);
Index: /issm/trunk/src/c/toolkits/petsc/objects/PetscSolver.cpp
===================================================================
--- /issm/trunk/src/c/toolkits/petsc/objects/PetscSolver.cpp	(revision 28012)
+++ /issm/trunk/src/c/toolkits/petsc/objects/PetscSolver.cpp	(revision 28013)
@@ -80,6 +80,8 @@
 	#if PETSC_VERSION_LT(3,7,0)
 	PetscOptionsGetString(PETSC_NULL,"-ksp_type",ksp_type,49,&flg);
-	#else
+	#elif PETSC_VERSION_LT(3,19,0)
 	PetscOptionsGetString(NULL,PETSC_NULL,"-ksp_type",ksp_type,49,&flg);
+	#else
+	PetscOptionsGetString(NULL,PETSC_NULLPTR,"-ksp_type",ksp_type,49,&flg);
 	#endif
 	if(flg!=PETSC_TRUE) _error_("could not find option -ksp_type, maybe you are not using the right toolkit?");
@@ -132,11 +134,16 @@
 		/*Set field splits: */
 		KSPGetPC(ksp,&pc);
-		#if (PETSC_VERSION_MAJOR == 3) && (PETSC_VERSION_MINOR == 1)
+
+		#if PETSC_VERSION_LT(3,1,0)
 		PCFieldSplitSetIS(pc,isv);
 		PCFieldSplitSetIS(pc,isp);
-		#else
+		#elif PETSC_VERSION_LT(3,19,0)
 		PCFieldSplitSetIS(pc,PETSC_NULL,isv);
 		PCFieldSplitSetIS(pc,PETSC_NULL,isp);
-		#endif
+		#else
+		PCFieldSplitSetIS(pc,PETSC_NULLPTR,isv);
+		PCFieldSplitSetIS(pc,PETSC_NULLPTR,isp);
+		#endif
+
 
 	}
Index: /issm/trunk/src/c/toolkits/petsc/patches/PetscOptionsDetermineSolverType.cpp
===================================================================
--- /issm/trunk/src/c/toolkits/petsc/patches/PetscOptionsDetermineSolverType.cpp	(revision 28012)
+++ /issm/trunk/src/c/toolkits/petsc/patches/PetscOptionsDetermineSolverType.cpp	(revision 28013)
@@ -29,8 +29,10 @@
 
 	/*retrieve mat_type option: */
-	#if PETSC_VERSION_GE(3,7,0)
+	#if PETSC_VERSION_LT(3,7,0)
+	PetscOptionsGetString(PETSC_NULL,"-mat_type",&option[0],100,&flag);
+	#elif PETSC_VERSION_LT(3,19,0)
 	PetscOptionsGetString(NULL,PETSC_NULL,"-mat_type",&option[0],100,&flag);
-	#else
-	PetscOptionsGetString(PETSC_NULL,"-mat_type",&option[0],100,&flag);
+	#else /*newest version*/
+	PetscOptionsGetString(NULL,PETSC_NULLPTR,"-mat_type",&option[0],100,&flag);
 	#endif
 
@@ -54,23 +56,22 @@
 	}
 
-	#if PETSC_VERSION_MAJOR >= 3
-		#if PETSC_VERSION_MINOR >= 7
-		PetscOptionsGetString(NULL,PETSC_NULL,"-pc_factor_mat_solver_package",&option[0],100,&flag);
-		#else
-		PetscOptionsGetString(PETSC_NULL,"-pc_factor_mat_solver_package",&option[0],100,&flag);
-		#endif
-	if (strcmp(option,"mumps")==0){
-		solver_type=MUMPSPACKAGE_LU;
-	}
-	#endif
+	#if PETSC_VERSION_LT(3,7,0)
+	PetscOptionsGetString(PETSC_NULL,"-pc_factor_mat_solver_package",&option[0],100,&flag);
+   #elif PETSC_VERSION_LT(3,19,0)
+	PetscOptionsGetString(NULL,PETSC_NULL,"-pc_factor_mat_solver_package",&option[0],100,&flag);
+   #else
+	PetscOptionsGetString(NULL,PETSC_NULLPTR,"-pc_factor_mat_solver_package",&option[0],100,&flag);
+   #endif
 
-	#if PETSC_VERSION_GE(3,7,0)
+	#if PETSC_VERSION_LT(3,7,0)
+	PetscOptionsGetString(PETSC_NULL,"-issm_option_solver",&option[0],100,&flag);
+   #elif PETSC_VERSION_LT(3,19,0)
 	PetscOptionsGetString(NULL,PETSC_NULL,"-issm_option_solver",&option[0],100,&flag);
-	#else
-	PetscOptionsGetString(PETSC_NULL,"-issm_option_solver",&option[0],100,&flag);
-	#endif
-	if(strcmp(option,"FS")==0 || strcmp(option,"stokes")==0){
-		solver_type=FSSolverEnum;
-	}
+   #else
+	PetscOptionsGetString(NULL,PETSC_NULLPTR,"-issm_option_solver",&option[0],100,&flag);
+   #endif
+
+	if(strcmp(option,"mumps")==0) solver_type=MUMPSPACKAGE_LU;
+	if(strcmp(option,"FS")==0 || strcmp(option,"stokes")==0) solver_type=FSSolverEnum;
 
 	*psolver_type=solver_type;
Index: /issm/trunk/src/m/Makefile.am
===================================================================
--- /issm/trunk/src/m/Makefile.am	(revision 28012)
+++ /issm/trunk/src/m/Makefile.am	(revision 28013)
@@ -37,4 +37,5 @@
 	${ISSM_DIR}/src/m/miscellaneous/*.m \
 	${ISSM_DIR}/src/m/modules/*.m \
+	${ISSM_DIR}/src/m/modeldata/*.m \
 	${ISSM_DIR}/src/m/os/*.m \
 	${ISSM_DIR}/src/m/parameterization/*.m \
Index: /issm/trunk/src/m/archive/arch.py
===================================================================
--- /issm/trunk/src/m/archive/arch.py	(revision 28012)
+++ /issm/trunk/src/m/archive/arch.py	(revision 28013)
@@ -12,6 +12,5 @@
     """
 
-    nargs = len(args)
-    if nargs % 2 != 0:
+    if len(args) % 2 != 0:
         raise ValueError('Incorrect number of arguments.')
     # open file
Index: /issm/trunk/src/m/boundaryconditions/getlovenumbers.py
===================================================================
--- /issm/trunk/src/m/boundaryconditions/getlovenumbers.py	(revision 28012)
+++ /issm/trunk/src/m/boundaryconditions/getlovenumbers.py	(revision 28013)
@@ -4,5 +4,5 @@
 
 
-def getlovenumbers(*args): #{{{
+def getlovenumbers(*args):  # {{{
     """GETLOVENUMBERS - provide love numbers retrieved from: 
     http://www.srosat.com/iag-jsg/loveNb.php in a chosen reference frame
@@ -10085,3 +10085,3 @@
 
     return series
-#}}}
+# }}}
Index: /issm/trunk/src/m/classes/SMBarma.m
===================================================================
--- /issm/trunk/src/m/classes/SMBarma.m	(revision 28012)
+++ /issm/trunk/src/m/classes/SMBarma.m	(revision 28013)
@@ -10,16 +10,16 @@
 		num_params        = 0;
 		arma_timestep     = 0;
-		ar_order          = 0;
+		ar_order			 = 0;
 		arlag_coefs       = NaN;
-		ma_order          = 0;
+		ma_order			 = 0;
 		malag_coefs       = NaN;
 		polynomialparams  = NaN;
 		datebreaks        = NaN;
-		basin_id          = NaN;
+		basin_id			 = NaN;
 		lapserates        = NaN;
 		elevationbins     = NaN;
 		refelevation      = NaN;
 		steps_per_step    = 1;
-		averaging         = 0;
+		averaging			= 0;
 		requested_outputs = {};
 	end
@@ -37,11 +37,7 @@
 		end % }}}
 		function list = defaultoutputs(self,md) % {{{
-			list = {''};
+			list = {'SmbMassBalance'};
 		end % }}}
 		function self = initialize(self,md) % {{{
-			if isnan(self.trend)
-				self.trend = zeros(1,self.num_basins); %no trend in SMB
-				disp('      smb.trend (trend) not specified: value set to 0');
-			end
 			if (self.ar_order==0)
 				self.ar_order = 1; %dummy 1 value for autoregression
@@ -51,5 +47,5 @@
 			if (self.ma_order==0)
 				self.ma_order = 1; %dummy 1 value for moving-average
-				self.arlag_coefs      = zeros(self.num_basins,self.ma_order); %moving-average coefficients all set to 0 
+				self.malag_coefs      = zeros(self.num_basins,self.ma_order); %moving-average coefficients all set to 0 
 				disp('      smb.ma_order (order of moving-average model) not specified: order of moving-average model set to 0');
 			end
@@ -80,5 +76,5 @@
 				md = checkfield(md,'fieldname','smb.num_params','numel',1,'NaN',1,'Inf',1,'>',0);
 				md = checkfield(md,'fieldname','smb.num_breaks','numel',1,'NaN',1,'Inf',1,'>=',0);
-				md = checkfield(md,'fieldname','smb.basin_id','Inf',1,'>=',0,'<=',md.smb.num_basins,'size',[md.mesh.numberofelements,1]);
+				md = checkfield(md,'fieldname','smb.basin_id','Inf',1,'>=',0,'<=',nbas,'size',[md.mesh.numberofelements,1]);
 				if(nbas>1 && nbrk>=1 && nprm>1)
 					md = checkfield(md,'fieldname','smb.polynomialparams','NaN',1,'Inf',1,'size',[nbas,nbrk+1,nprm],'numel',nbas*(nbrk+1)*nprm); 
@@ -93,6 +89,6 @@
 				md = checkfield(md,'fieldname','smb.ma_order','numel',1,'NaN',1,'Inf',1,'>=',0);
 				md = checkfield(md,'fieldname','smb.arma_timestep','numel',1,'NaN',1,'Inf',1,'>=',md.timestepping.time_step); %arma time step cannot be finer than ISSM timestep
-				md = checkfield(md,'fieldname','smb.arlag_coefs','NaN',1,'Inf',1,'size',[md.smb.num_basins,md.smb.ar_order]);
-				md = checkfield(md,'fieldname','smb.malag_coefs','NaN',1,'Inf',1,'size',[md.smb.num_basins,md.smb.ma_order]);
+				md = checkfield(md,'fieldname','smb.arlag_coefs','NaN',1,'Inf',1,'size',[nbas,md.smb.ar_order]);
+				md = checkfield(md,'fieldname','smb.malag_coefs','NaN',1,'Inf',1,'size',[nbas,md.smb.ma_order]);
 				
 				if(nbrk>0)
@@ -104,19 +100,26 @@
 				end
 				if (any(isnan(md.smb.refelevation)==0) || numel(md.smb.refelevation)>1)
-               md = checkfield(md,'fieldname','smb.refelevation','NaN',1,'Inf',1,'>=',0,'size',[1,md.smb.num_basins],'numel',md.smb.num_basins);
-            end
-				[nbas,nbins] = size(md.smb.lapserates);
-				if (any(isnan(reshape(md.smb.lapserates,[1,nbas*nbins]))==0) || numel(md.smb.lapserates)>1)
-					md = checkfield(md,'fieldname','smb.lapserates','NaN',1,'Inf',1,'size',[md.smb.num_basins,nbins],'numel',md.smb.num_basins*nbins);
-					md = checkfield(md,'fieldname','smb.elevationbins','NaN',1,'Inf',1,'size',[md.smb.num_basins,nbins-1],'numel',md.smb.num_basins*(nbins-1));
+					md = checkfield(md,'fieldname','smb.refelevation','NaN',1,'Inf',1,'>=',0,'size',[1,nbas],'numel',nbas);
+				end
+				nbas     = size(md.smb.lapserates,1);
+				nbins    = size(md.smb.lapserates,2);
+				ntmlapse = size(md.smb.lapserates,3);
+				if(ntmlapse>1 && ntmlapse~=12)
+					error('3rd dimension of md.smb.lapserates must be of size 1 or 12 (for monthly lapse rates)');
+				end
+				if (any(isnan(reshape(md.smb.lapserates,[1,nbas*nbins*ntmlapse]))==0) || numel(md.smb.lapserates)>1)
+					md = checkfield(md,'fieldname','smb.lapserates','NaN',1,'Inf',1,'size',[nbas,nbins,ntmlapse],'numel',nbas*nbins*ntmlapse);
+					md = checkfield(md,'fieldname','smb.elevationbins','NaN',1,'Inf',1,'size',[nbas,max(1,nbins-1),ntmlapse],'numel',nbas*max(1,nbins-1)*ntmlapse);
 					if(issorted(md.smb.elevationbins,2)==0)
 						error('md.smb.elevationbins should have rows in order of increasing elevation');
 					end
-				elseif (isnan(md.smb.elevationbins(1,1))==0 || numel(md.smb.elevationbins)>1)
+				elseif (isnan(md.smb.elevationbins(1,1,1))==0 || numel(md.smb.elevationbins)>1)
 					%elevationbins specified but not lapserates: this will inevitably lead to inconsistencies
-					[nbas,nbins] = size(md.smb.elevationbins);
-					nbins        = nbins+1;
-					md = checkfield(md,'fieldname','smb.lapserates','NaN',1,'Inf',1,'size',[md.smb.num_basins,nbins],'numel',md.smb.num_basins*nbins);
-					md = checkfield(md,'fieldname','smb.elevationbins','NaN',1,'Inf',1,'size',[md.smb.num_basins,nbins-1],'numel',md.smb.num_basins*(nbins-1));
+					nbas     = size(md.smb.elevationbins,1);
+					nbins    = size(md.smb.elevationbins,2);
+					nbins    = nbins+1;
+					ntmlapse = size(md.smb.elevationbins,3);
+					md = checkfield(md,'fieldname','smb.lapserates','NaN',1,'Inf',1,'size',[nbas,max(1,nbins-1),ntmlapse],'numel',nbas*nbins*ntmlapse);
+					md = checkfield(md,'fieldname','smb.elevationbins','NaN',1,'Inf',1,'size',[nbas,max(1,nbins-1),ntmlapse],'numel',nbas*max(1,nbins-1)*ntmlapse);
 				end
 			end
@@ -130,7 +133,7 @@
 			fielddisplay(self,'basin_id','basin number assigned to each element [unitless]');
 			fielddisplay(self,'num_breaks','number of different breakpoints in the piecewise-polynomial (separating num_breaks+1 periods)');
-         fielddisplay(self,'num_params','number of different parameters in the piecewise-polynomial (1:intercept only, 2:with linear trend, 3:with quadratic trend, etc.)');
-         fielddisplay(self,'polynomialparams','coefficients for the polynomial (const,trend,quadratic,etc.),dim1 for basins,dim2 for periods,dim3 for orders');
-         disp(sprintf('%51s  ex: polyparams=cat(3,intercepts,trendlinearcoefs,trendquadraticcoefs)',' '));
+			fielddisplay(self,'num_params','number of different parameters in the piecewise-polynomial (1:intercept only, 2:with linear trend, 3:with quadratic trend, etc.)');
+			fielddisplay(self,'polynomialparams','coefficients for the polynomial (const,trend,quadratic,etc.),dim1 for basins,dim2 for periods,dim3 for orders');
+			disp(sprintf('%51s  ex: polyparams=cat(3,intercepts,trendlinearcoefs,trendquadraticcoefs)',' '));
 			fielddisplay(self,'datebreaks','dates at which the breakpoints in the piecewise polynomial occur (1 row per basin) [yr]');
 			fielddisplay(self,'ar_order','order of the autoregressive model [unitless]');
@@ -139,6 +142,6 @@
 			fielddisplay(self,'arlag_coefs','basin-specific vectors of AR lag coefficients [unitless]');
 			fielddisplay(self,'malag_coefs','basin-specific vectors of MA lag coefficients [unitless]');
-			fielddisplay(self,'lapserates','basin-specific SMB lapse rates applied in each elevation bin, 1 row per basin, 1 column per bin [m ice eq yr^-1 m^-1] (default: no lapse rate)');
-			fielddisplay(self,'elevationbins','basin-specific separations between elevation bins, 1 row per basin, 1 column per limit between bins [m] (default: no basin separation)');
+			fielddisplay(self,'lapserates','basin-specific SMB lapse rates applied in each elevation bin, 1 row per basin, 1 column per bin, dimension 3 can be of size 12 to prescribe monthly varying values [m ice eq yr^-1 m^-1] (default: no lapse rate)');
+			fielddisplay(self,'elevationbins','basin-specific separations between elevation bins, 1 row per basin, 1 column per limit between bins, dimension 3 can be of size 12 to prescribe monthly varying values [m] (default: no basin separation)');
 			fielddisplay(self,'refelevation','basin-specific reference elevations at which SMB is calculated, and from which SMB is downscaled using lapserates (default: basin mean elevation) [m]');
 			fielddisplay(self, 'steps_per_step', 'number of smb steps per time step');
@@ -154,16 +157,24 @@
 			yts=md.constants.yts;
 			nbas = md.smb.num_basins;
-         nprm = md.smb.num_params;
-         nper = md.smb.num_breaks+1;
+			nprm = md.smb.num_params;
+			nper = md.smb.num_breaks+1;
 
 			templapserates    = md.smb.lapserates;
 			tempelevationbins = md.smb.elevationbins;
 			temprefelevation  = md.smb.refelevation;
-			[nbas,nbins]      = size(md.smb.lapserates);
-			if(any(isnan(reshape(md.smb.lapserates,[1,nbas*nbins]))))
-				templapserates = zeros(md.smb.num_basins,2);
+			nbas     = size(md.smb.lapserates,1);
+			nbins    = size(md.smb.lapserates,2);
+			ntmlapse = size(md.smb.lapserates,3);
+			if(any(isnan(reshape(md.smb.lapserates,[1,nbas*nbins*ntmlapse]))))
+				templapserates = zeros(md.smb.num_basins,2,12);
 				disp('      smb.lapserates not specified: set to 0');
-			   tempelevationbins = zeros(md.smb.num_basins,1); %dummy elevation bins
-			end
+			   tempelevationbins = zeros(md.smb.num_basins,1,12); %dummy elevation bins
+			elseif(ntmlapse==1)
+				templapserates    = repmat(templapserates,1,1,12); %same values each month
+				tempelevationbins = repmat(tempelevationbins,1,1,12); %same values each month
+			end
+			nbas     = size(templapserates,1);
+			nbins    = size(templapserates,2);
+			ntmlapse = size(templapserates,3);
 			if(any(isnan(md.smb.refelevation)))
 				temprefelevation = zeros(1,md.smb.num_basins);
@@ -177,52 +188,59 @@
 					temprefelevation(ii) = sum(areas(indices).*elemsh)/sum(areas(indices));
 				end
-				if(any(reshape(md.smb.lapserates,[1,nbas*nbins])~=0))
+				if(any(reshape(templapserates,[1,nbas*nbins*12])~=0))
 					disp('      smb.refelevation not specified: Reference elevations set to mean surface elevation of basins');
 				end
 			end
-			[nbas,nbins] = size(templapserates);
+			temp2dlapserates    = zeros(nbas,nbins*12);
+			temp2delevationbins = zeros(nbas,max(1,nbins-1)*12);
+			for(ii=[1:12])
+				jj = 1+(ii-1)*nbins;
+				temp2dlapserates(:,jj:jj+nbins-1)    = templapserates(:,:,ii);
+				kk = 1+(ii-1)*(nbins-1);
+				temp2delevationbins(:,kk:kk+nbins-2) = tempelevationbins(:,:,ii);
+			end
 
 			% Scale the parameters %
-         polyparamsScaled   = md.smb.polynomialparams;
-         polyparams2dScaled = zeros(nbas,nper*nprm);
+			polyparamsScaled   = md.smb.polynomialparams;
+			polyparams2dScaled = zeros(nbas,nper*nprm);
 			if(nprm>1)
-            % Case 3D %
-            if(nbas>1 && nper>1)
-               for(ii=[1:nprm])
-                  polyparamsScaled(:,:,ii) = polyparamsScaled(:,:,ii)*((1/yts)^(ii));
-               end
-               % Fit in 2D array %
-               for(ii=[1:nprm])
-                  jj = 1+(ii-1)*nper;
-                  polyparams2dScaled(:,jj:jj+nper-1) = polyparamsScaled(:,:,ii);
-               end
-            % Case 2D and higher-order params at increasing row index %
-            elseif(nbas==1)
-               for(ii=[1:nprm])
-                  polyparamsScaled(ii,:) = polyparamsScaled(ii,:)*((1/yts)^(ii));
-               end
-               % Fit in row array %
-               for(ii=[1:nprm])
-                  jj = 1+(ii-1)*nper;
-                  polyparams2dScaled(1,jj:jj+nper-1) = polyparamsScaled(ii,:);
-               end
-            % Case 2D and higher-order params at incrasing column index %
-            elseif(nper==1)
-               for(ii=[1:nprm])
-                  polyparamsScaled(:,ii) = polyparamsScaled(:,ii)*((1/yts)^(ii));
-               end
-               % 2D array is already in correct format %
-               polyparams2dScaled = polyparamsScaled;
-            end
-         else
+				% Case 3D %
+				if(nbas>1 && nper>1)
+					for(ii=[1:nprm])
+						polyparamsScaled(:,:,ii) = polyparamsScaled(:,:,ii)*((1/yts)^(ii));
+					end
+					% Fit in 2D array %
+					for(ii=[1:nprm])
+						jj = 1+(ii-1)*nper;
+						polyparams2dScaled(:,jj:jj+nper-1) = polyparamsScaled(:,:,ii);
+					end
+				% Case 2D and higher-order params at increasing row index %
+				elseif(nbas==1)
+					for(ii=[1:nprm])
+						polyparamsScaled(ii,:) = polyparamsScaled(ii,:)*((1/yts)^(ii));
+					end
+					% Fit in row array %
+					for(ii=[1:nprm])
+						jj = 1+(ii-1)*nper;
+						polyparams2dScaled(1,jj:jj+nper-1) = polyparamsScaled(ii,:);
+					end
+				% Case 2D and higher-order params at incrasing column index %
+				elseif(nper==1)
+					for(ii=[1:nprm])
+						polyparamsScaled(:,ii) = polyparamsScaled(:,ii)*((1/yts)^(ii));
+					end
+					% 2D array is already in correct format %
+					polyparams2dScaled = polyparamsScaled;
+				end
+			else
 				polyparamsScaled   = polyparamsScaled*(1/yts);
-            % 2D array is already in correct format %
-            polyparams2dScaled = polyparamsScaled;
-         end
+				% 2D array is already in correct format %
+				polyparams2dScaled = polyparamsScaled;
+			end
 			if(nper==1) %a single period (no break date)
-            dbreaks = zeros(nbas,1); %dummy
-         else
-            dbreaks = md.smb.datebreaks;
-         end
+				dbreaks = zeros(nbas,1); %dummy
+			else
+				dbreaks = md.smb.datebreaks;
+			end
 
 			WriteData(fid,prefix,'name','md.smb.model','data',13,'format','Integer');
@@ -238,6 +256,6 @@
 			WriteData(fid,prefix,'object',self,'class','smb','fieldname','malag_coefs','format','DoubleMat','name','md.smb.malag_coefs','yts',yts);
 			WriteData(fid,prefix,'data',dbreaks,'name','md.smb.datebreaks','format','DoubleMat','scale',yts);
-			WriteData(fid,prefix,'data',templapserates,'format','DoubleMat','name','md.smb.lapserates','scale',1./yts,'yts',yts);
-			WriteData(fid,prefix,'data',tempelevationbins,'format','DoubleMat','name','md.smb.elevationbins');
+			WriteData(fid,prefix,'data',temp2dlapserates,'format','DoubleMat','name','md.smb.lapserates','scale',1./yts,'yts',yts);
+			WriteData(fid,prefix,'data',temp2delevationbins,'format','DoubleMat','name','md.smb.elevationbins');
 			WriteData(fid,prefix,'data',temprefelevation,'format','DoubleMat','name','md.smb.refelevation');
 			WriteData(fid,prefix,'data',nbins,'format','Integer','name','md.smb.num_bins');
@@ -249,6 +267,6 @@
 			pos  = find(ismember(outputs,'default'));
 			if ~isempty(pos),
-				outputs(pos) = [];                         %remove 'default' from outputs
-				outputs      = [outputs defaultoutputs(self,md)]; %add defaults
+				outputs(pos) = [];											%remove 'default' from outputs
+				outputs      = [outputs defaultoutputs(self,md)];	%add defaults
 			end
 			WriteData(fid,prefix,'data',outputs,'name','md.smb.requested_outputs','format','StringArray');
Index: /issm/trunk/src/m/classes/SMBarma.py
===================================================================
--- /issm/trunk/src/m/classes/SMBarma.py	(revision 28012)
+++ /issm/trunk/src/m/classes/SMBarma.py	(revision 28013)
@@ -3,10 +3,10 @@
 from checkfield import *
 from fielddisplay import fielddisplay
+from GetAreas import *
 from project3d import *
 from WriteData import *
-from GetAreas import *
 
 class SMBarma(object):
-    """SMBARMA class definition
+    """SMBarma class definition
 
     Usage:
@@ -20,16 +20,20 @@
         self.polynomialparams = np.nan
         self.ar_order = 0
+        self.ma_order = 0
         self.arlag_coefs = np.nan
+        self.ma_order = 0
         self.malag_coefs = np.nan
+        self.polynomialparams = np.nan
+        self.datebreaks = np.nan
         self.basin_id = np.nan
         self.lapserates = np.nan
         self.elevationbins = np.nan
         self.refelevation = np.nan
+        self.datebreaks = np.nan
         self.steps_per_step = 1
         self.averaging = 0
         self.requested_outputs = []
 
-        nargs = len(args)
-        if nargs == 0:
+        if len(args) == 0:
             self.setdefaultparameters()
         else:
@@ -50,6 +54,6 @@
         s += '{}\n'.format(fielddisplay(self, 'arlag_coefs', 'basin-specific vectors of AR lag coefficients [unitless]'))
         s += '{}\n'.format(fielddisplay(self, 'malag_coefs', 'basin-specific vectors of MA lag coefficients [unitless]'))
-        s += '{}\n'.format(fielddisplay(self, 'lapserates', 'basin-specific SMB lapse rates applied in each elevation bin, 1 row per basin, 1 column per bin [m ice eq yr^-1 m^-1] (default: no lapse rate)'))
-        s += '{}\n'.format(fielddisplay(self, 'elevationbins', 'basin-specific SMB lapse rates applied in range of SMB<0 [m ice eq yr^-1 m^-1] (default: no lapse rate)'))
+        s += '{}\n'.format(fielddisplay(self, 'lapserates', 'basin-specific SMB lapse rates applied in each elevation bin, 1 row per basin, 1 column per bin, dimension 3 can be of size 12 to prescribe monthly varying values [m ice eq yr^-1 m^-1] (default: no lapse rate)'))
+        s += '{}\n'.format(fielddisplay(self, 'elevationbins', 'basin-specific separations between elevation bins, 1 row per basin, 1 column per limit between bins, dimension 3 can be of size 12 to prescribe monthly varying values [m] (default: no basin separation)'))
         s += '{}\n'.format(fielddisplay(self, 'refelevation', 'basin-specific reference elevations at which SMB is calculated, and from which SMB is downscaled using lapserates (default: basin mean elevation) [m]'))
         s += '{}\n'.format(fielddisplay(self, 'steps_per_step', 'number of smb steps per time step'))
@@ -62,5 +66,5 @@
     # }}}
 
-    def setdefaultparameters(self): #{{{
+    def setdefaultparameters(self):  # {{{
         self.ar_order = 0.0 # Autoregression model of order 0
         self.ma_order = 0.0 # Moving-average model of order 0
@@ -72,15 +76,16 @@
 
     def defaultoutputs(self, md):  # {{{
-        return []
+        return ['SmbMassBalance']
     # }}}
 
     def initialize(self, md):  # {{{
-        if np.all(np.isnan(self.trend)):
-            self.trend = np.zeros((1, self.num_basins)) # No trend in SMB
-            print('      smb.trend (trend) not specified: value set to 0')
         if self.ar_order == 0:
             self.ar_order = 1 # Dummy 1 value for autoregression
             self.arlag_coefs = np.zeros((self.num_basins, self.ar_order)) # Autoregression coefficients all set to 0
             print('      smb.ar_order (order of autoregressive model) not specified: order of autoregressive model set to 0')
+        if self.ma_order == 0:
+            self.ma_order = 1 # Dummy 1 value for moving-average
+            self.malag_coefs = np.zeros((self.num_basins, self.ma_order)) # Moving-average coefficients all set to 0
+            print('      smb.ma_order (order of moving-average model) not specified: order of moving-average model set to 0')
         if self.arma_timestep == 0:
             self.arma_timestep = md.timestepping.time_step # ARMA model has no prescribed time step
@@ -96,60 +101,76 @@
 
     def checkconsistency(self, md, solution, analyses):  # {{{
+        """
+        TODO:
+        - Ensure that checks on shape of self.lapserates are same as those under MATLAB as matrix addressing is quite different here
+        """
         if 'MasstransportAnalysis' in analyses:
-            nbas = md.smb.num_basins;
-            nprm = md.smb.num_params;
-            nbrk = md.smb.num_breaks;
+            nbas = md.smb.num_basins
+            nprm = md.smb.num_params
+            nbrk = md.smb.num_breaks
             md = checkfield(md, 'fieldname', 'smb.num_basins', 'numel', 1, 'NaN', 1, 'Inf', 1, '>', 0)
             md = checkfield(md, 'fieldname', 'smb.num_params', 'numel', 1, 'NaN', 1, 'Inf', 1, '>', 0)
             md = checkfield(md, 'fieldname', 'smb.num_breaks', 'numel', 1, 'NaN', 1, 'Inf', 1, '>=', 0)
             md = checkfield(md, 'fieldname', 'smb.basin_id', 'Inf', 1, '>=', 0, '<=', md.smb.num_basins, 'size', [md.mesh.numberofelements])
-            if len(np.shape(self.polynomialparams)) == 1:
-                self.polynomialparams = np.array([[self.polynomialparams]])
-            if(nbas>1 and nbrk>=1 and nprm>1):
-                md = checkfield(md,'fieldname','smb.polynomialparams','NaN',1,'Inf',1,'size',[nbas,nbrk+1,nprm],'numel',nbas*(nbrk+1)*nprm)
-            elif(nbas==1):
-                md = checkfield(md,'fieldname','smb.polynomialparams','NaN',1,'Inf',1,'size',[nprm,nbrk+1],'numel',nbas*(nbrk+1)*nprm)
-            elif(nbrk==0):
-                md = checkfield(md,'fieldname','smb.polynomialparams','NaN',1,'Inf',1,'size',[nbas,nprm],'numel',nbas*(nbrk+1)*nprm)
-            elif(nprm==1):
-                md = checkfield(md,'fieldname','smb.polynomialparams','NaN',1,'Inf',1,'size',[nbas,nbrk],'numel',nbas*(nbrk+1)*nprm)
+            # if len(np.shape(self.polynomialparams)) == 1:
+            #     self.polynomialparams = np.array([[self.polynomialparams]])
+            if nbas > 1 and nbrk >= 1 and nprm > 1:
+                md = checkfield(md, 'fieldname', 'smb.polynomialparams', 'NaN', 1, 'Inf', 1, 'size', [nbas, nbrk + 1, nprm], 'numel', nbas * (nbrk + 1) * nprm)
+            elif nbas == 1:
+                md = checkfield(md, 'fieldname', 'smb.polynomialparams', 'NaN', 1, 'Inf', 1, 'size', [nprm, nbrk + 1], 'numel', nbas * (nbrk + 1) * nprm)
+            elif nbrk == 0:
+                md = checkfield(md, 'fieldname', 'smb.polynomialparams', 'NaN', 1, 'Inf', 1, 'size', [nbas, nprm], 'numel', nbas * (nbrk + 1) * nprm)
+            elif nprm == 1:
+                md = checkfield(md, 'fieldname', 'smb.polynomialparams', 'NaN', 1, 'Inf', 1, 'size', [nbas, nbrk], 'numel', nbas * (nbrk + 1) * nprm)
             md = checkfield(md, 'fieldname', 'smb.ar_order', 'numel', 1, 'NaN', 1, 'Inf', 1, '>=', 0)
+            md = checkfield(md, 'fieldname', 'smb.ma_order', 'numel', 1, 'NaN', 1, 'Inf', 1, '>=', 0)
             md = checkfield(md, 'fieldname', 'smb.arma_timestep', 'numel', 1, 'NaN', 1, 'Inf', 1, '>=', md.timestepping.time_step) # Autoregression time step cannot be finer than ISSM timestep
-            md = checkfield(md, 'fieldname', 'smb.arlag_coefs', 'NaN', 1, 'Inf', 1, 'size', [md.smb.num_basins, md.smb.ar_order])
-            md = checkfield(md, 'fieldname', 'smb.malag_coefs', 'NaN', 1, 'Inf', 1, 'size', [md.smb.num_basins, md.smb.ma_order])
-            if(nbrk>0):
+            md = checkfield(md, 'fieldname', 'smb.arlag_coefs', 'NaN', 1, 'Inf', 1, 'size', [nbas, md.smb.ar_order])
+            md = checkfield(md, 'fieldname', 'smb.malag_coefs', 'NaN', 1, 'Inf', 1, 'size', [nbas, md.smb.ma_order])
+            if nbrk > 0:
                 md = checkfield(md, 'fieldname', 'smb.datebreaks', 'NaN', 1, 'Inf', 1, 'size', [nbas,nbrk])
-            elif(np.size(md.smb.datebreaks)==0 or np.all(np.isnan(md.smb.datebreaks))):
+            elif np.size(md.smb.datebreaks) == 0 or np.all(np.isnan(md.smb.datebreaks)):
                 pass
             else:
                 raise RuntimeError('md.smb.num_breaks is 0 but md.smb.datebreaks is not empty')
 
-            if(np.any(np.isnan(self.refelevation) is False) or np.size(self.refelevation) > 1):
+            if np.any(np.isnan(self.refelevation) is False) or np.size(self.refelevation) > 1:
                 if len(np.shape(self.refelevation)) == 1:
                     self.refelevation = np.array([self.refelevation])
-                md = checkfield(md, 'fieldname', 'smb.refelevation', 'NaN', 1, 'Inf', 1, '>=', 0, 'size', [1, md.smb.num_basins], 'numel', md.smb.num_basins)
-
-            if(np.any(np.isnan(self.lapserates) is False) or np.size(self.lapserates) > 1):
+                md = checkfield(md, 'fieldname', 'smb.refelevation', 'NaN', 1, 'Inf', 1, '>=', 0, 'size', [1, nbas], 'numel', nbas)
+
+            if (np.any(np.isnan(self.lapserates) is False) or np.size(self.lapserates) > 1):
+                nbas = md.smb.num_basins
                 if len(np.shape(self.lapserates)) == 1:
-                    self.lapserates = np.array([self.lapserates])
                     nbins = 1
-                else:
+                    self.lapserates = np.reshape(self.lapserates,[nbas,nbins,1])
+                elif(len(np.shape(self.lapserates)) == 2):
                     nbins = np.shape(self.lapserates)[1]
-                if len(np.shape(self.elevationbins)) == 1:
-                    self.elevationbins = np.array([self.elevationbins])
-                md = checkfield(md, 'fieldname', 'smb.lapserates', 'NaN', 1, 'Inf', 1, 'size', [md.smb.num_basins, nbins], 'numel', md.smb.num_basins*nbins)
-                md = checkfield(md, 'fieldname', 'smb.elevationbins', 'NaN', 1, 'Inf', 1, 'size', [md.smb.num_basins, nbins-1], 'numel', md.smb.num_basins*(nbins-1))
-                for rr in range(md.smb.num_basins):
+                    self.lapserates = np.reshape(self.lapserates,[nbas,nbins,1])
+                elif(len(np.shape(self.lapserates)) == 3):
+                    nbins = np.shape(self.lapserates)[1]
+                ntmlapse = np.shape(self.lapserates)[2]
+                if len(np.shape(self.elevationbins)) < 3:
+                    self.elevationbins = np.reshape(self.elevationbins,[nbas,max(1,nbins-1),ntmlapse])
+                md = checkfield(md, 'fieldname', 'smb.lapserates', 'NaN', 1, 'Inf', 1, 'size', [nbas,nbins,ntmlapse], 'numel', md.smb.num_basins*nbins*ntmlapse)
+                md = checkfield(md, 'fieldname', 'smb.elevationbins', 'NaN', 1, 'Inf', 1, 'size', [nbas,max(1,nbins-1),ntmlapse], 'numel', nbas*max(1,nbins-1)*ntmlapse)
+                for rr in range(nbas):
                     if(np.all(self.elevationbins[rr,0:-1]<=self.elevationbins[rr,1:])==False):
                         raise TypeError('md.smb.elevationbins should have rows in order of increasing elevation')
-            elif(np.any(np.isnan(self.elevationbins) is False) or np.size(self.elevationbins) > 1):
-                #elevationbins specified but not lapserates: this will inevitably lead to inconsistencies
+            elif (np.any(np.isnan(self.elevationbins) is False) or np.size(self.elevationbins) > 1):
+                # Elevationbins specified but not lapserates: this will inevitably lead to inconsistencies
+                nbas = md.smb.num_basins
                 if len(np.shape(self.elevationbins)) == 1:
-                    self.elevationbins = np.array([self.elevationbins])
                     nbins = 1
-                else:
-                    nbins = np.shape(self.elevationbins)[1]+1
-                md = checkfield(md, 'fieldname', 'smb.lapserates', 'NaN', 1, 'Inf', 1, 'size', [md.smb.num_basins, nbins], 'numel', md.smb.num_basins*nbins)
-                md = checkfield(md, 'fieldname', 'smb.elevationbins', 'NaN', 1, 'Inf', 1, 'size', [md.smb.num_basins, nbins-1], 'numel', md.smb.num_basins*(nbins-1))
+                    self.elevationbins = np.reshape(self.elevationbins,[nbas,nbins,1])
+                elif(len(np.shape(self.lapserates)) == 2):
+                    nbins = np.shape(self.elevationbins)[1]
+                    self.elevationbins = np.reshape(self.elevationbins,[nbas,nbins,1])
+                elif(len(np.shape(self.lapserates)) == 3):
+                    nbins = np.shape(self.lapserates)[1]
+                nbins = nbins - 1
+                ntmlapse = np.shape(self.lapserates)[2]
+                md = checkfield(md, 'fieldname', 'smb.lapserates', 'NaN', 1, 'Inf', 1, 'size', [nbas, nbins * ntmlapse], 'numel', nbas * nbins * ntmlapse)
+                md = checkfield(md, 'fieldname', 'smb.elevationbins', 'NaN', 1, 'Inf', 1, 'size', [nbas, max(1, nbins - 1) * ntmlapse], 'numel', nbas * max(1, nbins - 1) * ntmlapse)
 
         md = checkfield(md, 'fieldname', 'smb.steps_per_step', '>=', 1, 'numel', [1])
@@ -161,50 +182,65 @@
     def marshall(self, prefix, md, fid):  # {{{
         yts = md.constants.yts
-        nbas = md.smb.num_basins;
-        nprm = md.smb.num_params;
-        nper = md.smb.num_breaks+1;
-        templapserates    = np.copy(md.smb.lapserates)
-        tempelevationbins = np.copy(md.smb.elevationbins)
+        nbas = md.smb.num_basins
+        nprm = md.smb.num_params
+        nper = md.smb.num_breaks + 1
+        if(np.any(np.isnan(md.smb.lapserates))):
+            templapserates = np.zeros((nbas, 2, 12))
+            print('      smb.lapserates not specified: set to 0')
+            tempelevationbins = np.zeros((nbas, 1, 12)) # Dummy elevation bins
+            nbins    = 2
+            ntmlapse = 12
+        else:
+            if len(np.shape(md.smb.lapserates)) == 1:
+                nbins    = 1
+                ntmlapse = 1
+            elif len(np.shape(md.smb.lapserates)) == 2:
+                nbins    = np.shape(md.smb.lapserates)[1]
+                ntmlapse = 1
+            elif len(np.shape(md.smb.lapserates)) == 3:
+                nbins    = np.shape(md.smb.lapserates)[1]
+                ntmlapse = np.shape(md.smb.lapserates)[2]
+            templapserates    = np.reshape(md.smb.lapserates,[nbas, nbins, ntmlapse])
+            tempelevationbins = np.reshape(md.smb.elevationbins, [nbas, max(1, nbins - 1), ntmlapse])
         temprefelevation  = np.copy(md.smb.refelevation)
-        # Scale the parameters #
+        # Scale the parameters
         polyparamsScaled   = np.copy(md.smb.polynomialparams)
-        polyparams2dScaled = np.zeros((nbas,nper*nprm))
-        if(nprm>1):
-            # Case 3D #
-            if(nbas>1 and nper>1):
-                for ii in range(nprm):
-                    polyparamsScaled[:,:,ii] = polyparamsScaled[:,:,ii]*(1/yts)**(ii+1)
-                # Fit in 2D array #
-                for ii in range(nprm):
-                    polyparams2dScaled[:,ii*nper:(ii+1)*nper] = 1*polyparamsScaled[:,:,ii]
-            # Case 2D and higher-order params at increasing row index #
-            elif(nbas==1):
-                for ii in range(nprm):
-                    polyparamsScaled[ii,:] = polyparamsScaled[ii,:]*(1/yts)**(ii+1)
-                # Fit in row array #
-                for ii in range(nprm):
-                    polyparams2dScaled[0,ii*nper:(ii+1)*nper] = 1*polyparamsScaled[ii,:]
-            # Case 2D and higher-order params at incrasing column index #
-            elif(nper==1):
-                for ii in range(nprm):
-                    polyparamsScaled[:,ii] = polyparamsScaled[:,ii]*(1/yts)**(ii+1)
-                # 2D array is already in correct format #
+        polyparams2dScaled = np.zeros((nbas, nper * nprm))
+        if nprm > 1:
+            # Case 3D
+            if nbas > 1 and nper > 1:
+                for ii in range(nprm):
+                    polyparamsScaled[:, :, ii] = polyparamsScaled[:, :, ii] * (1 / yts) ** (ii + 1)
+                # Fit in 2D array
+                for ii in range(nprm):
+                    polyparams2dScaled[:, ii * nper:(ii + 1) * nper] = 1 * polyparamsScaled[:, :, ii]
+            # Case 2D and higher-order params at increasing row index
+            elif nbas == 1:
+                for ii in range(nprm):
+                    polyparamsScaled[ii, :] = polyparamsScaled[ii, :] * (1 / yts) ** (ii + 1)
+                # Fit in row array
+                for ii in range(nprm):
+                    polyparams2dScaled[0, ii * nper:(ii + 1) * nper] = 1 * polyparamsScaled[ii, :]
+            # Case 2D and higher-order params at increasing column index
+            elif nper == 1:
+                for ii in range(nprm):
+                    polyparamsScaled[:, ii] = polyparamsScaled[:, ii] * (1 / yts) ** (ii + 1)
+                # 2D array is already in correct format
                 polyparams2dScaled = np.copy(polyparamsScaled)
         else:
-            polyparamsScaled   = polyparamsScaled*(1/yts)
-            # 2D array is already in correct format #
+            polyparamsScaled   = polyparamsScaled * (1 / yts)
+            # 2D array is already in correct format
             polyparams2dScaled = np.copy(polyparamsScaled)
-        
-        if(nper==1):
-            dbreaks = np.zeros((nbas,1))
+
+        if nper == 1:
+            dbreaks = np.zeros((nbas, 1))
         else:
             dbreaks = np.copy(md.smb.datebreaks)
 
-        if(np.any(np.isnan(md.smb.lapserates))):
-            templapserates = np.zeros((md.smb.num_basins,2))
-            print('      smb.lapserates not specified: set to 0')
-            tempelevationbins = np.zeros((md.smb.num_basins,1)) #dummy elevation bins
-        if(np.any(np.isnan(md.smb.refelevation))):
-            temprefelevation = np.zeros((md.smb.num_basins)).reshape(1,md.smb.num_basins)
+        if ntmlapse == 1:
+            templapserates    = np.repeat(templapserates, 12, axis = 2)
+            tempelevationbins = np.repeat(tempelevationbins, 12, axis = 2)
+        if np.any(np.isnan(md.smb.refelevation)):
+            temprefelevation = np.zeros((nbas)).reshape(1, nbas)
             areas = GetAreas(md.mesh.elements, md.mesh.x, md.mesh.y)
             for ii, bid in enumerate(np.unique(md.smb.basin_id)):
@@ -217,4 +253,9 @@
                 print('      smb.refelevation not specified: Reference elevations set to mean surface elevation of basins')
         nbins = np.shape(templapserates)[1]
+        temp2dlapserates    = np.zeros((nbas, nbins * 12))
+        temp2delevationbins = np.zeros((nbas, max(12, (nbins - 1) * 12)))
+        for ii in range(12):
+            temp2dlapserates[:, ii * nbins:(ii + 1) * nbins] = templapserates[:, :, ii]
+            temp2delevationbins[:, ii * (nbins - 1):(ii + 1) * (nbins - 1)] = tempelevationbins[:, :, ii]
 
         WriteData(fid, prefix, 'name', 'md.smb.model', 'data', 13, 'format', 'Integer')
@@ -230,6 +271,6 @@
         WriteData(fid, prefix, 'object', self, 'class', 'smb', 'fieldname', 'malag_coefs', 'format', 'DoubleMat', 'name', 'md.smb.malag_coefs', 'yts', yts)
         WriteData(fid, prefix, 'data', dbreaks, 'name', 'md.smb.datebreaks', 'format', 'DoubleMat','scale',yts)
-        WriteData(fid, prefix, 'data', templapserates, 'name', 'md.smb.lapserates', 'format', 'DoubleMat', 'scale', 1 / yts, 'yts', yts)
-        WriteData(fid, prefix, 'data', tempelevationbins, 'name', 'md.smb.elevationbins', 'format', 'DoubleMat')
+        WriteData(fid, prefix, 'data', temp2dlapserates, 'name', 'md.smb.lapserates', 'format', 'DoubleMat', 'scale', 1 / yts, 'yts', yts)
+        WriteData(fid, prefix, 'data', temp2delevationbins, 'name', 'md.smb.elevationbins', 'format', 'DoubleMat')
         WriteData(fid, prefix, 'data', temprefelevation, 'name', 'md.smb.refelevation', 'format', 'DoubleMat')
         WriteData(fid, prefix, 'data', nbins, 'name', 'md.smb.num_bins', 'format', 'Integer')
Index: /issm/trunk/src/m/classes/SMBcomponents.m
===================================================================
--- /issm/trunk/src/m/classes/SMBcomponents.m	(revision 28012)
+++ /issm/trunk/src/m/classes/SMBcomponents.m	(revision 28013)
@@ -6,11 +6,10 @@
 classdef SMBcomponents
 	properties (SetAccess=public)
-		accumulation = NaN;
-		runoff = NaN;
-		evaporation = NaN;
-		steps_per_step = 1;
-		averaging = 0;
-		requested_outputs     = {};
-		isclimatology;
+		accumulation      = NaN;
+		runoff            = NaN;
+		evaporation       = NaN;
+		steps_per_step    = 1;
+		averaging         = 0;
+		requested_outputs = {};
 	end
 	methods
@@ -18,4 +17,5 @@
 			switch nargin
 				case 0
+					self=setdefaultparameters(self);
 				otherwise
 					error('constructor not supported');
@@ -43,5 +43,5 @@
 		function list = defaultoutputs(self,md) % {{{
 
-			list = {''};
+			list = {'SmbMassBalance'};
 
 		end % }}}
@@ -98,4 +98,10 @@
 
 		end % }}}
+		function self = setdefaultparameters(self) % {{{
+
+			%output default:
+			self.requested_outputs={'default'};
+
+		end % }}}
 	end
 end
Index: /issm/trunk/src/m/classes/SMBcomponents.py
===================================================================
--- /issm/trunk/src/m/classes/SMBcomponents.py	(revision 28012)
+++ /issm/trunk/src/m/classes/SMBcomponents.py	(revision 28013)
@@ -21,9 +21,7 @@
         self.averaging = 0
         self.requested_outputs = []
-        self.isclimatology = np.nan
 
-        nargs = len(args)
-        if nargs == 0:
-            pass
+        if len(args) == 0:
+            self.setdefaultparameters()
         else:
             raise Exception('constructor not supported')
@@ -52,5 +50,5 @@
 
     def defaultoutputs(self, md):  # {{{
-        return []
+        return ['SmbMassBalance']
     # }}}
 
@@ -101,2 +99,7 @@
 
     # }}}
+
+    def setdefaultparameters(self):  # {{{
+        self.requested_outputs = ['default']
+        return self
+    # }}}
Index: /issm/trunk/src/m/classes/SMBd18opdd.m
===================================================================
--- /issm/trunk/src/m/classes/SMBd18opdd.m	(revision 28012)
+++ /issm/trunk/src/m/classes/SMBd18opdd.m	(revision 28013)
@@ -53,5 +53,5 @@
 		function list = defaultoutputs(self,md) % {{{
 
-			list = {''};
+			list = {'SmbMassBalance'};
 
 		end % }}}
@@ -80,5 +80,5 @@
 			self.f          = 0.169;
 			self.issetpddfac = 0;
-
+			self.requested_outputs={'default'};
 		end % }}}
 		function md = checkconsistency(self,md,solution,analyses) % {{{
@@ -172,6 +172,6 @@
 			WriteData(fid,prefix,'object',self,'class','smb','fieldname','Tdiff','format','DoubleMat','mattype',1,'timeserieslength',2,'yts',md.constants.yts);
 			WriteData(fid,prefix,'object',self,'class','smb','fieldname','sealev','format','DoubleMat','mattype',1,'timeserieslength',2,'yts',md.constants.yts);
-			WriteData(fid, prefix, 'object', self, 'fieldname', 'steps_per_step', 'format', 'Integer');
-			WriteData(fid, prefix, 'object', self, 'fieldname', 'averaging', 'format', 'Integer');
+			WriteData(fid,prefix,'object',self,'fieldname','steps_per_step','format','Integer');
+			WriteData(fid,prefix,'object',self,'fieldname','averaging','format','Integer');
 
 			if self.isd18opd
Index: /issm/trunk/src/m/classes/SMBd18opdd.py
===================================================================
--- /issm/trunk/src/m/classes/SMBd18opdd.py	(revision 28012)
+++ /issm/trunk/src/m/classes/SMBd18opdd.py	(revision 28013)
@@ -13,5 +13,5 @@
           SMBd18opdd = SMBd18opdd()
     """
-    def __init__(self):  # {{{
+    def __init__(self, *args):  # {{{
         self.desfac = 0.
         self.s0p = float('NaN')
@@ -38,47 +38,48 @@
         self.steps_per_step = 1
         self.averaging = 0
-
-    #set defaults
-        self.setdefaultparameters()
         self.requested_outputs = []
-    #}}}
+
+        if len(args) == 0:
+            self.setdefaultparameters()
+        else:
+            raise Exception('constructor not supported')
+    # }}}
     def __repr__(self):  # {{{
-        string = "   surface forcings parameters:"
-
-        string = "%s\n%s" % (string, fielddisplay(self, 'isd18opd', 'is delta18o parametrisation from present day temperature and precipitation activated (0 or 1, default is 0)'))
-        string = "%s\n%s" % (string, fielddisplay(self, 'issetpddfac', 'is user passing in defined pdd factors (0 or 1, default is 0)'))
-        string = "%s\n%s" % (string, fielddisplay(self, 'desfac', 'desertification elevation factor (between 0 and 1, default is 0.5) [m]'))
-        string = "%s\n%s" % (string, fielddisplay(self, 's0p', 'should be set to elevation from precip source (between 0 and a few 1000s m, default is 0) [m]'))
-        string = "%s\n%s" % (string, fielddisplay(self, 's0t', 'should be set to elevation from temperature source (between 0 and a few 1000s m, default is 0) [m]'))
-        string = "%s\n%s" % (string, fielddisplay(self, 'rlaps', 'present day lapse rate [degree/km]'))
-        if self.isd18opd:
-            string = "%s\n%s" % (string, fielddisplay(self, 'temperatures_presentday', 'monthly present day surface temperatures [K], required if delta18o/mungsm is activated'))
-            string = "%s\n%s" % (string, fielddisplay(self, 'precipitations_presentday', 'monthly surface precipitation [m/yr water eq], required if delta18o or mungsm is activated'))
-            string = "%s\n%s" % (string, fielddisplay(self, 'istemperaturescaled', 'if delta18o parametrisation from present day temperature and precipitation is activated, is temperature scaled to delta18o value? (0 or 1, default is 1)'))
-            string = "%s\n%s" % (string, fielddisplay(self, 'isprecipscaled', 'if delta18o parametrisation from present day temperature and precipitation is activated, is precipitation scaled to delta18o value? (0 or 1, default is 1)'))
+        s = '   surface forcings parameters:\n'
+        s += '{}\n'.format(fielddisplay(self, 'isd18opd', 'is delta18o parametrisation from present day temperature and precipitation activated (0 or 1, default is 0)'))
+        s += '{}\n'.format(fielddisplay(self, 'issetpddfac', 'is user passing in defined pdd factors (0 or 1, default is 0)'))
+        s += '{}\n'.format(fielddisplay(self, 'desfac', 'desertification elevation factor (between 0 and 1, default is 0.5) [m]'))
+        s += '{}\n'.format(ielddisplay(self, 's0p', 'should be set to elevation from precip source (between 0 and a few 1000s m, default is 0) [m]'))
+        s += '{}\n'.format(fielddisplay(self, 's0t', 'should be set to elevation from temperature source (between 0 and a few 1000s m, default is 0) [m]'))
+        s += '{}\n'.format(fielddisplay(self, 'rlaps', 'present day lapse rate [degree/km]'))
+
+        if self.isd18opd:
+            s += '{}\n'.format(fielddisplay(self, 'temperatures_presentday', 'monthly present day surface temperatures [K], required if delta18o/mungsm is activated'))
+            s += '{}\n'.format(fielddisplay(self, 'precipitations_presentday', 'monthly surface precipitation [m/yr water eq], required if delta18o or mungsm is activated'))
+            s += '{}\n'.format(fielddisplay(self, 'istemperaturescaled', 'if delta18o parametrisation from present day temperature and precipitation is activated, is temperature scaled to delta18o value? (0 or 1, default is 1)'))
+            s += '{}\n'.format(fielddisplay(self, 'isprecipscaled', 'if delta18o parametrisation from present day temperature and precipitation is activated, is precipitation scaled to delta18o value? (0 or 1, default is 1)'))
 
             if self.istemperaturescaled == 0:
-                string = "%s\n%s" % (string, fielddisplay(self, 'temperatures_reconstructed', 'monthly historical surface temperatures [K], required if delta18o/mungsm/d18opd is activated and istemperaturescaled is not activated'))
+                s += '{}\n'.format(fielddisplay(self, 'temperatures_reconstructed', 'monthly historical surface temperatures [K], required if delta18o/mungsm/d18opd is activated and istemperaturescaled is not activated'))
 
             if self.isprecipscaled == 0:
-                string = "%s\n%s" % (string, fielddisplay(self, 'precipitations_reconstructed', 'monthly historical precipitation [m/yr water eq], required if delta18o/mungsm/d18opd is activated and isprecipscaled is not activated'))
-
-            string = "%s\n%s" % (string, fielddisplay(self, 'delta18o', 'delta18o [per mil], required if pdd is activated and delta18o activated'))
-            string = "%s\n%s" % (string, fielddisplay(self, 'dpermil', 'degree per mil, required if d18opd is activated'))
-            string = "%s\n%s" % (string, fielddisplay(self, 'f', 'precip/temperature scaling factor, required if d18opd is activated'))
+                s += '{}\n'.format(fielddisplay(self, 'precipitations_reconstructed', 'monthly historical precipitation [m/yr water eq], required if delta18o/mungsm/d18opd is activated and isprecipscaled is not activated'))
+
+            s += '{}\n'.format(fielddisplay(self, 'delta18o', 'delta18o [per mil], required if pdd is activated and delta18o activated'))
+            s += '{}\n'.format(fielddisplay(self, 'dpermil', 'degree per mil, required if d18opd is activated'))
+            s += '{}\n'.format(fielddisplay(self, 'f', 'precip/temperature scaling factor, required if d18opd is activated'))
 
         if self.issetpddfac == 1:
-            string = "%s\n%s" % (string, fielddisplay(self, 'pddfac_snow', 'Pdd factor for snow for all the domain [mm ice equiv/day/degree C]'))
-            string = "%s\n%s" % (string, fielddisplay(self, 'pddfac_ice', 'Pdd factor for ice for all the domain [mm ice equiv/day/degree C]'))
-        string = "%s\n%s" % (string, fielddisplay(self, 'steps_per_step', 'number of smb steps per time step'))
-        string = "%s\n%s" % (string, fielddisplay(self, 'averaging', 'averaging methods from short to long steps'))
-        string = "%s\n\t\t%s" % (string, '0: Arithmetic (default)')
-        string = "%s\n\t\t%s" % (string, '1: Geometric')
-        string = "%s\n\t\t%s" % (string, '2: Harmonic')
-
-        string = "%s\n%s" % (string, fielddisplay(self, 'requested_outputs', 'additional outputs requested'))
-
-        return string
-    #}}}
+            s += '{}\n'.format(fielddisplay(self, 'pddfac_snow', 'Pdd factor for snow for all the domain [mm ice equiv/day/degree C]'))
+            s += '{}\n'.format(fielddisplay(self, 'pddfac_ice', 'Pdd factor for ice for all the domain [mm ice equiv/day/degree C]'))
+
+        s += '{}\n'.format(fielddisplay(self, 'steps_per_step', 'number of smb steps per time step'))
+        s += '{}\n'.format(fielddisplay(self, 'averaging', 'averaging methods from short to long steps'))
+        s += '\t\t{}\n'.format('0: Arithmetic (default)')
+        s += '\t\t{}\n'.format('1: Geometric')
+        s += '\t\t{}\n'.format('2: Harmonic')
+        s += '{}\n'.format(fielddisplay(self, 'requested_outputs', 'additional outputs requested'))
+        return s
+    # }}}
     def extrude(self, md):  # {{{
         if self.isd18opd:
@@ -93,8 +94,8 @@
         self.s0t = project3d(md, 'vector', self.s0t, 'type', 'node')
         return self
-    #}}}
+    # }}}
     def defaultoutputs(self, md):  # {{{
-        return []
-    #}}}
+        return ['SmbMassBalance']
+    # }}}
     def initialize(self, md):  # {{{
         if np.all(np.isnan(self.s0p)):
@@ -108,5 +109,5 @@
     # }}}
     def setdefaultparameters(self):  # {{{
-        #pdd method not used in default mode
+        # pdd method not used in default mode
         self.ismungsm = 0
         self.isd18opd = 1
@@ -119,6 +120,7 @@
         self.f = 0.169
         self.issetpddfac = 0
+        self.requested_outputs = ['default']
         return self
-    #}}}
+    # }}}
     def checkconsistency(self, md, solution, analyses):  # {{{
         if 'MasstransportAnalysis' in analyses:
@@ -197,5 +199,5 @@
             WriteData(fid, prefix, 'object', self, 'class', 'smb', 'fieldname', 'pddfac_ice', 'format', 'DoubleMat', 'mattype', 1, 'timeserieslength', md.mesh.numberofvertices + 1, 'yts', md.constants.yts)
 
-    #process requested outputs
+        # Process requested outputs
         outputs = self.requested_outputs
         indices = [i for i, x in enumerate(outputs) if x == 'default']
@@ -204,4 +206,3 @@
             outputs = outputscopy
         WriteData(fid, prefix, 'data', outputs, 'name', 'md.smb.requested_outputs', 'format', 'StringArray')
-
-    # }}}
+    # }}}
Index: /issm/trunk/src/m/classes/SMBdebrisEvatt.m
===================================================================
--- /issm/trunk/src/m/classes/SMBdebrisEvatt.m	(revision 28013)
+++ /issm/trunk/src/m/classes/SMBdebrisEvatt.m	(revision 28013)
@@ -0,0 +1,204 @@
+%SMBdebrisEvatt Class definition
+%
+%   Usage:
+%      SMBdebrisEvatt=SMBdebrisEvatt();
+
+classdef SMBdebrisEvatt
+	properties (SetAccess=public)
+
+		precipitation  = NaN;
+		temperature   = NaN;
+		dsradiation    = NaN;
+                dlradiation    = NaN;
+                windspeed      = NaN;
+                airhumidity    = NaN;
+		precipitation_anomaly = NaN;
+		temperature_anomaly   = NaN;
+		dsradiation_anomaly   = NaN;
+		dlradiation_anomaly   = NaN;
+		windspeed_anomaly     = NaN;
+		airhumidity_anomaly   = NaN;
+		s0t                   = NaN;
+		snowheight            = NaN;
+		qlaps                 = 0;
+		rlaps                 = 0;
+		dsgrad		      = 0;
+		dlgrad		      = 0;
+		windspeedgrad	      = 0;
+		humiditygrad	      = 0;
+		isAnderson	      = 0;
+		iscryokarst	      = 0;
+		AndersonD0	      = 0;
+		steps_per_step        = 1;
+		averaging             = 0;
+		requested_outputs     = {};
+		icealbedo	      = NaN;
+		snowalbedo            = NaN;
+		debrisalbedo	      = NaN;
+	end
+	methods
+		function self = SMBdebrisEvatt(varargin) % {{{
+			switch nargin
+				case 0
+					self=setdefaultparameters(self);
+				otherwise
+					error('constructor not supported');
+			end
+		end % }}}
+		function self = extrude(self,md) % {{{
+			self.precipitation=project3d(md,'vector',self.precipitation,'type','node');
+			self.temperature=project3d(md,'vector',self.temperature,'type','node');
+			self.dsradiation=project3d(md,'vector',self.dsradiation,'type','node');
+			self.dlradiation=project3d(md,'vector',self.dlradiation,'type','node');
+			self.windspeed=project3d(md,'vector',self.windspeed,'type','node');
+			self.airhumidity=project3d(md,'vector',self.airhumidity,'type','node');
+			self.temperature_anomaly=project3d(md,'vector',self.temperature_anomaly,'type','node');
+                        self.precipitation_anomaly=project3d(md,'vector',self.precipitation_anomaly,'type','node');
+			self.dsradiation_anomaly=project3d(md,'vector',self.temperature_anomaly,'type','node');
+	                self.dlradiation_anomaly=project3d(md,'vector',self.temperature_anomaly,'type','node');
+         	        self.windspeed_anomaly=project3d(md,'vector',self.temperature_anomaly,'type','node');
+                	self.airhumidity_anomaly=project3d(md,'vector',self.temperature_anomaly,'type','node');
+
+			self.s0t=project3d(md,'vector',self.s0t,'type','node');
+			self.snowheight=project3d(md,'vector',self.snowheight,'type','node');
+
+		end % }}}
+		function list = defaultoutputs(self,md) % {{{
+			list = {'SmbMassBalance'};
+		end % }}}
+		function self = initialize(self,md) % {{{
+
+			if isnan(self.s0t),
+				self.s0t=zeros(md.mesh.numberofvertices,1);
+				disp('      no SMBdebrisEvatt.s0t specified: values set as zero');
+			end
+
+		end % }}}
+		function self = setdefaultparameters(self) % {{{
+
+			self.qlaps         = 0.1;
+			self.rlaps         = 7.4;
+			self.dsgrad	   = 13.;
+			self.dlgrad	   = 29;
+			self.windspeedgrad = -0.2;
+			self.humiditygrad  = 0;
+			self.icealbedo	   = 0.3;
+			self.snowalbedo    = 0.75;
+		 	self.debrisalbedo  = 0.07;
+			self.isAnderson    = 0;
+			self.iscryokarst   = 0;
+			self.AndersonD0    = 0.5;
+			self.requested_outputs={'default'};
+
+		end % }}}
+		function md = checkconsistency(self,md,solution,analyses) % {{{
+
+			if (strcmp(solution,'TransientSolution') & md.transient.issmb == 0), return; end
+
+			if ismember('MasstransportAnalysis',analyses),
+				md = checkfield(md,'fieldname','smb.temperature','NaN',1,'Inf',1,'size',[md.mesh.numberofvertices 365]);
+				md = checkfield(md,'fieldname','smb.precipitation','NaN',1,'Inf',1,'size',[md.mesh.numberofvertices 365]);
+				md = checkfield(md,'fieldname','smb.dsradiation','NaN',1,'Inf',1,'size',[md.mesh.numberofvertices 365]);
+				md = checkfield(md,'fieldname','smb.dlradiation','NaN',1,'Inf',1,'size',[md.mesh.numberofvertices 365]);
+				md = checkfield(md,'fieldname','smb.windspeed','NaN',1,'Inf',1,'size',[md.mesh.numberofvertices 365]);
+				md = checkfield(md,'fieldname','smb.airhumidity','NaN',1,'Inf',1,'size',[md.mesh.numberofvertices 365]);
+				md = checkfield(md,'fieldname','smb.snowheight','>=',0,'NaN',1,'Inf',1,'size',[md.mesh.numberofvertices 1]);
+			end
+			md = checkfield(md,'fieldname','smb.steps_per_step','>=',1,'numel',[1]);
+			md = checkfield(md,'fieldname','smb.averaging', 'numel', [1], 'values', [0, 1, 2]);
+			md = checkfield(md,'fieldname','smb.requested_outputs','stringrow',1);
+			md = checkfield(md,'fieldname','smb.icealbedo','>=',0,'NaN',1,'Inf',1);
+			md = checkfield(md,'fieldname','smb.snowalbedo','>=',0,'NaN',1,'Inf',1);
+			md = checkfield(md,'fieldname','smb.debrisalbedo','>=',0,'NaN',1,'Inf',1);
+			md = checkfield(md,'fieldname','smb.isAnderson','numel', [1], 'values', [0, 1]);
+			md = checkfield(md,'fieldname','smb.iscryokarst','numel', [1], 'values', [0, 1]);
+
+		end % }}}
+		function disp(self) % {{{
+	
+			disp(sprintf('Evatt et al. (2015) Debris Model (doi: 10.3189/2015JoG14J235)'));
+			disp(sprintf('If isAnderson==1 -> Eq. 6 from Ferguson & Vieli (2021) ist taken (https://doi.org/10.5194/tc-15-3377-2021)'));
+			disp(sprintf('If iscryokarst==1 -> Eqs. 9,10 from Ferguson & Vieli (2021) are taken (https://doi.org/10.5194/tc-15-3377-2021)'));
+			disp(sprintf('Clean-ice SMB is taken from the Evatt et al. (2015) EBM with debris=0'));
+
+			fielddisplay(self,'isAnderson','do we use the Anderson parametrization (default is 0)');
+			fielddisplay(self,'iscryokarst','do we use a cryokarst parametrization (default is 0)');
+			fielddisplay(self,'temperature',' surface temperature [K]');
+			fielddisplay(self,'precipitation',' surface precipitation [m/yr water eq]');
+			fielddisplay(self,'dsradiation',' downwelling shortwave radiation [W m-2]');
+                        fielddisplay(self,'dlradiation',' downwelling longwave radiation [W m-2]');
+			fielddisplay(self,'windspeed',' surface wind speed [m s-1]');
+                        fielddisplay(self,'airhumidity',' near-surface specific humidity [kg kg-1]');
+			fielddisplay(self,'temperature_anomaly','anomaly to  reference temperature (additive)');
+                        fielddisplay(self,'precipitation_anomaly','anomaly to  precipitation (multiplicative)');
+                        fielddisplay(self,'dsradiation_anomaly','anomaly to  reference downwelling shortwave radiation');
+                        fielddisplay(self,'dlradiation_anomaly','anomaly to  reference downwelling longwave radiation (additive');
+                        fielddisplay(self,'windspeed_anomaly','anomaly to  reference surface wind speed (additive)');
+                        fielddisplay(self,'airhumidity_anomaly','anomaly to  reference near-surface specific humidity (additive)');
+
+			fielddisplay(self,'s0t','should be set to elevation from RCM/GCM source (between 0 and a few 1000s m, default is 0) [m]');
+			fielddisplay(self,'snowheight','guess of snowheight at the end of the summer, will be further evolved');
+			fielddisplay(self,'rlaps','present day temperature lapse rate (default is 7.4 degree/km)');
+			fielddisplay(self,'dsgrad','present day SW height gradient (default is 1.3 W/m^2/km)');
+			fielddisplay(self,'dlgrad','present day LW height gradient (default is 2.9 W/m^2/km)');
+			fielddisplay(self,'windspeedgrad','present day wind speed height gradient (default is 0.02 m/s/km)');
+			fielddisplay(self,'humiditygrad','present day humidity height gradient (default is 0)');
+			fielddisplay(self,'qlaps','precip change (default is 0.1/km');
+			fielddisplay(self,'icealbedo','albedo for ice (default is 0.3)');
+			fielddisplay(self,'snowalbedo','albedo for snow (default is 0.75)');
+			fielddisplay(self,'debrisalbedo','albedo for debris (default is 0.07)');
+			fielddisplay(self,'AndersonD0','parameter to represent the debris effect (default is 0.5)');
+			fielddisplay(self,'steps_per_step', 'number of smb steps per time step');
+			fielddisplay(self,'averaging','averaging methods from short to long steps');
+			disp(sprintf('%51s  0: Arithmetic (default)',' '));
+			disp(sprintf('%51s  1: Geometric',' '));
+			disp(sprintf('%51s  2: Harmonic',' '));
+			fielddisplay(self,'requested_outputs','additional outputs requested (TemperaturePDD, SmbAccumulation, SmbMelt, SmbSummerMelt, SmbAlbedo, SmbSummerAlbedo, SmbSnowheight)');
+		end % }}}
+		function marshall(self,prefix,md,fid) % {{{
+
+			yts=md.constants.yts;
+
+			WriteData(fid,prefix,'name','md.smb.model','data',14,'format','Integer');
+
+			WriteData(fid,prefix,'object',self,'class','smb','fieldname','qlaps','format','Double');
+			WriteData(fid,prefix,'object',self,'class','smb','fieldname','s0t','format','DoubleMat','mattype',1);
+			WriteData(fid,prefix,'object',self,'class','smb','fieldname','snowheight','format','DoubleMat','mattype',1);
+			WriteData(fid,prefix,'object',self,'class','smb','fieldname','rlaps','format','Double');
+			WriteData(fid,prefix,'object',self,'class','smb','fieldname','dsgrad','format','Double');
+			WriteData(fid,prefix,'object',self,'class','smb','fieldname','dlgrad','format','Double');
+			WriteData(fid,prefix,'object',self,'class','smb','fieldname','windspeedgrad','format','Double');
+			WriteData(fid,prefix,'object',self,'class','smb','fieldname','humiditygrad','format','Double');
+			WriteData(fid,prefix,'object',self,'class','smb','fieldname','icealbedo','format','Double');
+			WriteData(fid,prefix,'object',self,'class','smb','fieldname','snowalbedo','format','Double');
+			WriteData(fid,prefix,'object',self,'class','smb','fieldname','debrisalbedo','format','Double');
+			WriteData(fid,prefix,'object',self,'class','smb','fieldname','isAnderson','format','Boolean');
+			WriteData(fid,prefix,'object',self,'class','smb','fieldname','iscryokarst','format','Boolean');
+			WriteData(fid,prefix,'object',self,'class','smb','fieldname','AndersonD0','format','Double');
+			WriteData(fid,prefix,'object',self,'class','smb','fieldname','temperature','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'yts',md.constants.yts);
+			WriteData(fid,prefix,'object',self,'class','smb','fieldname','precipitation','format','DoubleMat','mattype',1,'scale',1./yts,'timeserieslength',md.mesh.numberofvertices+1,'yts',md.constants.yts);
+			WriteData(fid,prefix,'object',self,'class','smb','fieldname','dsradiation','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'yts',md.constants.yts);
+			WriteData(fid,prefix,'object',self,'class','smb','fieldname','dlradiation','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'yts',md.constants.yts);
+			WriteData(fid,prefix,'object',self,'class','smb','fieldname','windspeed','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'yts',md.constants.yts);
+			WriteData(fid,prefix,'object',self,'class','smb','fieldname','airhumidity','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'yts',md.constants.yts);
+			WriteData(fid,prefix,'object',self,'class','smb','fieldname','temperature_anomaly','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'yts',md.constants.yts);
+                        WriteData(fid,prefix,'object',self,'class','smb','fieldname','precipitation_anomaly','format','DoubleMat','mattype',1,'scale',1./yts,'timeserieslength',md.mesh.numberofvertices+1,'yts',md.constants.yts);
+			WriteData(fid,prefix,'object',self,'class','smb','fieldname','dsradiation_anomaly','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'yts',md.constants.yts);
+			WriteData(fid,prefix,'object',self,'class','smb','fieldname','dlradiation_anomaly','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'yts',md.constants.yts);
+			WriteData(fid,prefix,'object',self,'class','smb','fieldname','windspeed_anomaly','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'yts',md.constants.yts);
+			WriteData(fid,prefix,'object',self,'class','smb','fieldname','airhumidity_anomaly','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'yts',md.constants.yts);
+			WriteData(fid,prefix,'object',self,'fieldname','steps_per_step','format','Integer');
+			WriteData(fid,prefix,'object',self,'fieldname','averaging','format','Integer')
+
+			%process requested outputs
+			outputs = self.requested_outputs;
+			pos  = find(ismember(outputs,'default'));
+			if ~isempty(pos),
+				outputs(pos) = [];                         %remove 'default' from outputs
+				outputs      = [outputs defaultoutputs(self,md)]; %add defaults
+			end
+			WriteData(fid,prefix,'data',outputs,'name','md.smb.requested_outputs','format','StringArray');
+
+		end % }}}
+	end
+end
Index: /issm/trunk/src/m/classes/SMBdebrisML.m
===================================================================
--- /issm/trunk/src/m/classes/SMBdebrisML.m	(revision 28012)
+++ /issm/trunk/src/m/classes/SMBdebrisML.m	(revision 28013)
@@ -11,5 +11,5 @@
 	end
 	methods
-		function self = SMBhenning(varargin) % {{{
+		function self = SMBdebrisML(varargin) % {{{
 			switch nargin
 				case 0
@@ -32,5 +32,5 @@
 		end % }}}
 		function list = defaultoutputs(self,md) % {{{
-			list = {''};
+			list = {'SmbMassBalance'};
 		end % }}}
 		function self = initialize(self,md) % {{{
@@ -57,6 +57,6 @@
 
 			WriteData(fid,prefix,'name','md.smb.model','data',14,'format','Integer');
-			WriteData(fid, prefix, 'object', self, 'fieldname', 'steps_per_step', 'format', 'Integer');
-			WriteData(fid, prefix, 'object', self, 'fieldname', 'averaging', 'format', 'Integer');
+			WriteData(fid,prefix,'object',self,'fieldname','steps_per_step','format','Integer');
+			WriteData(fid,prefix,'object',self,'fieldname','averaging','format','Integer');
 
 			%process requested outputs
Index: /issm/trunk/src/m/classes/SMBforcing.js
===================================================================
--- /issm/trunk/src/m/classes/SMBforcing.js	(revision 28012)
+++ /issm/trunk/src/m/classes/SMBforcing.js	(revision 28013)
@@ -20,5 +20,5 @@
 	} // }}}
 	this.defaultoutputs = function(){ // {{{
-		return '';
+		return 'SmbMassBalance';
 	}//}}}
     this.classname = function(){ // {{{
Index: /issm/trunk/src/m/classes/SMBforcing.m
===================================================================
--- /issm/trunk/src/m/classes/SMBforcing.m	(revision 28012)
+++ /issm/trunk/src/m/classes/SMBforcing.m	(revision 28013)
@@ -10,5 +10,4 @@
 		requested_outputs = {};
 		averaging         = 0;
-		isclimatology;
 	end
 	methods
@@ -16,5 +15,5 @@
 			switch nargin
 				case 0
-
+					self=setdefaultparameters(self);
 				case 1
 					inputstruct=varargin{1};
@@ -32,5 +31,5 @@
 		end % }}}
 		function list = defaultoutputs(self,md) % {{{
-			list = {''};
+			list = {'SmbMassBalance'};
 		end % }}}
 		function self = extrude(self,md) % {{{
@@ -55,5 +54,5 @@
 			md = checkfield(md,'fieldname','smb.steps_per_step','>=',1,'numel',[1]);
 			md = checkfield(md,'fieldname','smb.requested_outputs','stringrow',1);
-			md = checkfield(md, 'fieldname', 'smb.averaging', 'numel', [1], 'values', [0, 1, 2]);
+			md = checkfield(md,'fieldname','smb.averaging','numel',[1],'values',[0 1 2]);
 		end % }}}
 		function disp(self) % {{{
@@ -72,7 +71,7 @@
 
 			WriteData(fid,prefix,'name','md.smb.model','data',1,'format','Integer');
-			WriteData(fid,prefix,'object',self,'class','smb','fieldname','mass_balance','format','DoubleMat','mattype',1,'scale',1./yts,'timeserieslength',md.mesh.numberofvertices+1,'yts',md.constants.yts);
-			WriteData(fid, prefix, 'object', self, 'fieldname', 'steps_per_step', 'format', 'Integer');
-			WriteData(fid, prefix, 'object', self, 'fieldname', 'averaging', 'format', 'Integer');
+			WriteData(fid,prefix,'object',self,'class','smb','fieldname','mass_balance','format','DoubleMat','mattype',1,'scale',1./yts,'timeserieslength',md.mesh.numberofvertices+1,'yts',yts);
+			WriteData(fid,prefix,'object',self,'fieldname','steps_per_step','format','Integer');
+			WriteData(fid,prefix,'object',self,'fieldname','averaging','format','Integer');
 
 			%process requested outputs
@@ -92,4 +91,10 @@
 
 		end % }}}
+		function self = setdefaultparameters(self) % {{{
+
+			%output default:
+			self.requested_outputs={'default'};
+
+		end % }}}
 	end
 end
Index: /issm/trunk/src/m/classes/SMBforcing.py
===================================================================
--- /issm/trunk/src/m/classes/SMBforcing.py	(revision 28012)
+++ /issm/trunk/src/m/classes/SMBforcing.py	(revision 28013)
@@ -19,12 +19,14 @@
         self.requested_outputs = []
         self.averaging = 0
-        self.isclimatology = np.nan
 
         nargs = len(args)
         if nargs == 0:
-            pass
+            self.setdefaultparameters()
+        elif nargs == 1:
+            # TODO: Replace the following with constructor
+            self.setdefaultparameters()
         else:
             raise Exception('constructor not supported')
-    #}}}
+    # }}}
 
     def __repr__(self):  # {{{
@@ -32,20 +34,20 @@
         s += '{}\n'.format(fielddisplay(self, 'mass_balance', 'surface mass balance [m/yr ice eq]'))
         s += '{}\n'.format(fielddisplay(self, 'steps_per_step', 'number of smb steps per time step'))
-        s += '{}\n'.format(fielddisplay(self, 'requested_outputs', 'additional outputs requested'))
         s += '{}\n'.format(fielddisplay(self, 'averaging', 'averaging methods from short to long steps'))
         s += '\t\t{}\n'.format('0: Arithmetic (default)')
         s += '\t\t{}\n'.format('1: Geometric')
         s += '\t\t{}\n'.format('2: Harmonic')
+        s += '{}\n'.format(fielddisplay(self, 'requested_outputs', 'additional outputs requested'))
         return s
-    #}}}
+    # }}}
 
     def extrude(self, md):  # {{{
         self.mass_balance = project3d(md, 'vector', self.mass_balance, 'type', 'node')
         return self
-    #}}}
+    # }}}
 
     def defaultoutputs(self, md):  # {{{
-        return []
-    #}}}
+        return ['SmbMassBalance']
+    # }}}
 
     def initialize(self, md):  # {{{
@@ -54,5 +56,5 @@
             print("      no smb.mass_balance specified: values set as zero")
         return self
-    #}}}
+    # }}}
 
     def checkconsistency(self, md, solution, analyses):  # {{{
@@ -69,5 +71,5 @@
     # }}}
 
-    def marshall(self, prefix, md, fid):    # {{{
+    def marshall(self, prefix, md, fid):  # {{{
         yts = md.constants.yts
         WriteData(fid, prefix, 'name', 'md.smb.model', 'data', 1, 'format', 'Integer')
@@ -84,2 +86,7 @@
         WriteData(fid, prefix, 'data', outputs, 'name', 'md.smb.requested_outputs', 'format', 'StringArray')
     # }}}
+
+    def setdefaultparameters(self):  #{{{
+        self.requested_outputs = ['SmbMassBalance']
+        return self
+    # }}}
Index: /issm/trunk/src/m/classes/SMBgemb.m
===================================================================
--- /issm/trunk/src/m/classes/SMBgemb.m	(revision 28012)
+++ /issm/trunk/src/m/classes/SMBgemb.m	(revision 28013)
@@ -14,15 +14,14 @@
 
 		%solution choices
-		isgraingrowth;
-		isalbedo;
-		isshortwave;
-		isthermal;
-		isaccumulation;
-		ismelt;
-		isdensification;
-		isturbulentflux;
-		isconstrainsurfaceT;
-		isclimatology;
-		isdeltaLWup;
+		isgraingrowth       = 0;
+		isalbedo            = 0;
+		isshortwave         = 0;
+		isthermal           = 0;
+		isaccumulation      = 0;
+		ismelt              = 0;
+		isdensification     = 0;
+		isturbulentflux     = 0;
+		isconstrainsurfaceT = 0;
+		isdeltaLWup         = 0;
 
 		%inputs:
@@ -123,5 +122,5 @@
 		%or else apply direct input value from aValue, allowing albedo to be altered.
 		%Default value is rho water (1023 kg m-3).
-		teThresh = NaN; %Apply eIdx method to all areas with grain radii below this value,
+		teThresh = NaN; %Apply eIdx method to all areas with grain radii above this value (mm),
 		%or else apply direct input value from teValue, allowing emissivity to be altered.
 		%Default value is a effective grain radius of 10 mm.
@@ -201,5 +200,5 @@
 			fielddisplay(self,'dulwrfValue','Specified bias to be applied to the outward long wave radiation every element (W/m-2, +upward)');
 			fielddisplay(self,'teValue','Outward longwave radiation thermal emissivity forcing at every element (default in code is 1)');
-			fielddisplay(self,'teThresh',{'Apply eIdx method to all areas with effective grain radius below this value,','or else apply direct input value from teValue, allowing emissivity to be altered.'});
+			fielddisplay(self,'teThresh',{'Apply eIdx method to all areas with effective grain radius above this value (mm),','or else apply direct input value from teValue, allowing emissivity to be altered.'});
 			fielddisplay(self,'eIdx',{'method for calculating emissivity (default is 1)',...
 				'0: direct input from teValue parameter, no use of teThresh',...
@@ -234,5 +233,5 @@
 						fielddisplay(self,'cotValue','Cloud Optical Thickness');
 						fielddisplay(self,'ccsnowValue','concentration of light absorbing carbon for snow [ppm1]');
-						fielddisplay(self,'cciceValue','concentration of light absorbing carbon for snow [ppm1]');
+						fielddisplay(self,'cciceValue','concentration of light absorbing carbon for ice [ppm1]');
 					end
 				case 3
@@ -274,5 +273,5 @@
 			self.V=project3d(md,'vector',self.V,'type','element');
 			self.dswrf=project3d(md,'vector',self.dswrf,'type','element');
-			self.dslrf=project3d(md,'vector',self.dslrf,'type','element');
+			self.dlwrf=project3d(md,'vector',self.dlwrf,'type','element');
 			self.P=project3d(md,'vector',self.P,'type','element');
 			self.eAir=project3d(md,'vector',self.eAir,'type','element');
Index: /issm/trunk/src/m/classes/SMBgemb.py
===================================================================
--- /issm/trunk/src/m/classes/SMBgemb.py	(revision 28012)
+++ /issm/trunk/src/m/classes/SMBgemb.py	(revision 28013)
@@ -22,15 +22,14 @@
 
         #solution choices
-        self.isgraingrowth = 0
-        self.isalbedo = 0
-        self.isshortwave = 0
-        self.isthermal = 0
-        self.isaccumulation = 0
-        self.ismelt = 0
-        self.isdensification = 0
-        self.isturbulentflux = 0
+        self.isgraingrowth       = 0
+        self.isalbedo            = 0
+        self.isshortwave         = 0
+        self.isthermal           = 0
+        self.isaccumulation      = 0
+        self.ismelt              = 0
+        self.isdensification     = 0
+        self.isturbulentflux     = 0
         self.isconstrainsurfaceT = 0
-        self.isdeltaLWup = 0
-        self.isclimatology = np.nan
+        self.isdeltaLWup         = 0
 
         # Inputs
@@ -130,5 +129,5 @@
         # or else apply direct input value from aValue, allowing albedo to be altered.
         # Default value is rho water (1023 kg m-3).
-        teThresh                    = np.nan    #Apply eIdx method to all areas with grain radii below this value,
+        teThresh                    = np.nan    #Apply eIdx method to all areas with grain radii above this value (mm),
         #or else apply direct input value from teValue, allowing emissivity to be altered.
         #Default value is a effective grain radius of 10 mm.
@@ -157,5 +156,5 @@
         else:
             raise Exception('constructor not supported: need mesh and geometry to set defaults')
-        #}}}
+        # }}}
 
     def __repr__(self):  # {{{
@@ -206,5 +205,5 @@
         s += '{}\n'.format(fielddisplay(self, 'dulwrfValue', 'Specified bias to be applied to the outward long wave radiation every element (W/m-2, +upward)'))
         s += '{}\n'.format(fielddisplay(self, 'teValue', 'Outward longwave radiation thermal emissivity forcing at every element (default in code is 1)'))
-        s += '{}\n'.format(fielddisplay(self, 'teThresh', ['Apply eIdx method to all areas with effective grain radius below this value,', 'or else apply direct input value from teValue, allowing emissivity to be altered.']))
+        s += '{}\n'.format(fielddisplay(self, 'teThresh', ['Apply eIdx method to all areas with effective grain radius above this value (mm),', 'or else apply direct input value from teValue, allowing emissivity to be altered.']))
         s += '{}\n'.format(fielddisplay(self, 'eIdx', ['method for calculating emissivity (default is 1)',
             '0: direct input from teValue parameter, no use of teThresh',
@@ -237,5 +236,5 @@
                 s += '{}\n'.format(fielddisplay(self,'cotValue','Cloud Optical Thickness'))
                 s += '{}\n'.format(fielddisplay(self,'ccsnowValue','concentration of light absorbing carbon for snow [ppm1]'))
-                s += '{}\n'.format(fielddisplay(self,'cciceValue','concentration of light absorbing carbon for snow [ppm1]'))
+                s += '{}\n'.format(fielddisplay(self,'cciceValue','concentration of light absorbing carbon for ice [ppm1]'))
         elif self.aIdx == 3:
             s += '{}\n'.format(fielddisplay(self, 'cldFrac', 'average cloud amount'))
@@ -268,5 +267,5 @@
         s += '{}\n'.format(fielddisplay(self, 'requested_outputs', 'additional outputs requested'))
         return s
-    #}}}
+    # }}}
 
     def extrude(self, md):  # {{{
@@ -274,5 +273,5 @@
         self.V = project3d(md, 'vector', self.V, 'type', 'element')
         self.dswrf = project3d(md, 'vector', self.dswrf, 'type', 'element')
-        self.dslrf = project3d(md, 'vector', self.dslrf, 'type', 'element')
+        self.dlwrf = project3d(md, 'vector', self.dlwrf, 'type', 'element')
         self.P = project3d(md, 'vector', self.P, 'type', 'element')
         self.eAir = project3d(md, 'vector', self.eAir, 'type', 'element')
@@ -317,9 +316,9 @@
 
         return self
-    #}}}
+    # }}}
 
     def defaultoutputs(self, md):  # {{{
         return ['SmbMassBalance','SmbAccumulatedMassBalance']
-    #}}}
+    # }}}
 
     def setdefaultparameters(self, mesh, geometry):  # {{{
@@ -391,5 +390,5 @@
         #           Element.cpp
         self.Sizeini = 2 * np.ones((mesh.numberofelements,))
-    #}}}
+    # }}}
 
     def checkconsistency(self, md, solution, analyses):    # {{{
Index: /issm/trunk/src/m/classes/SMBgradients.m
===================================================================
--- /issm/trunk/src/m/classes/SMBgradients.m	(revision 28012)
+++ /issm/trunk/src/m/classes/SMBgradients.m	(revision 28013)
@@ -29,5 +29,5 @@
 		end % }}}
 		function list = defaultoutputs(self,md) % {{{
-			list = {''};
+			list = {'SmbMassBalance'};
 		end % }}}
 		function self = initialize(self,md) % {{{
@@ -38,5 +38,6 @@
 		function self = setdefaultparameters(self) % {{{
 
-			%Nothing for now
+			%output default:
+			self.requested_outputs={'default'};
 
 		end % }}}
@@ -46,4 +47,7 @@
 				md = checkfield(md,'fieldname','smb.href','timeseries',1,'NaN',1,'Inf',1);
 				md = checkfield(md,'fieldname','smb.smbref','timeseries',1,'NaN',1,'Inf',1);
+				if max(max(abs(md.smb.smbref(1:end-1,:))))<1
+					disp('!!! Warning: SMBgradients now expects smbref to be in m/yr ice eq. instead of mm/yr water eq.');
+				end
 				md = checkfield(md,'fieldname','smb.b_pos','timeseries',1,'NaN',1,'Inf',1);
 				md = checkfield(md,'fieldname','smb.b_neg','timeseries',1,'NaN',1,'Inf',1);
@@ -57,8 +61,8 @@
 
 			disp(sprintf('\n   SMB gradients parameters:'));
-			fielddisplay(self,'href',' reference elevation from which deviation is used to calculate SMB adjustment in smb gradients method [m]');
-			fielddisplay(self,'smbref',' reference smb from which deviation is calculated in smb gradients method [mm/yr water equiv]');
-			fielddisplay(self,'b_pos',' slope of hs - smb regression line for accumulation regime required if smb gradients is activated');
-			fielddisplay(self,'b_neg',' slope of hs - smb regression line for ablation regime required if smb gradients is activated');
+			fielddisplay(self,'href','reference elevation from which deviation is used to calculate SMB adjustment in smb gradients method [m]');
+			fielddisplay(self,'smbref','reference smb from which deviation is calculated in smb gradients method [m/yr ice equiv]');
+			fielddisplay(self,'b_pos','slope of hs - smb regression line for accumulation regime required if smb gradients is activated');
+			fielddisplay(self,'b_neg','slope of hs - smb regression line for ablation regime required if smb gradients is activated');
 			fielddisplay(self, 'steps_per_step', 'number of smb steps per time step');
 			fielddisplay(self, 'averaging', 'averaging methods from short to long steps');
@@ -86,5 +90,5 @@
 			pos  = find(ismember(outputs,'default'));
 			if ~isempty(pos),
-				outputs(pos) = [];                         %remove 'default' from outputs
+				outputs(pos) = [];                                %remove 'default' from outputs
 				outputs      = [outputs defaultoutputs(self,md)]; %add defaults
 			end
Index: /issm/trunk/src/m/classes/SMBgradients.py
===================================================================
--- /issm/trunk/src/m/classes/SMBgradients.py	(revision 28012)
+++ /issm/trunk/src/m/classes/SMBgradients.py	(revision 28013)
@@ -1,2 +1,3 @@
+import numpy as np
 from fielddisplay import fielddisplay
 from checkfield import checkfield
@@ -5,57 +6,66 @@
 
 class SMBgradients(object):
-    """
-    SMBgradients Class definition
+    """SMBgradients Class definition
 
-       Usage:
-          SMBgradients = SMBgradients();
+    Usage:
+        SMBgradients = SMBgradients();
     """
 
     def __init__(self):  # {{{
-        self.href = float('NaN')
-        self.smbref = float('NaN')
-        self.b_pos = float('NaN')
-        self.b_neg = float('NaN')
+        self.href = np.nan
+        self.smbref = np.nan
+        self.b_pos = np.nan
+        self.b_neg = np.nan
         self.steps_per_step = 1
         self.averaging = 0
         self.requested_outputs = []
-    #}}}
+
+        # Set defaults
+        self.setdefaultparameters()
+    # }}}
 
     def __repr__(self):  # {{{
-        string = "   surface forcings parameters:"
+        s = '   surface forcings parameters:\n'
+        s += '{}\n'.format(fielddisplay(self, 'issmbgradients', 'is smb gradients method activated (0 or 1, default is 0)'))
+        s += '{}\n'.format(fielddisplay(self, 'href', 'reference elevation from which deviation is used to calculate SMB adjustment in smb gradients method'))
+        s += '{}\n'.format(fielddisplay(self, 'smbref', 'reference smb from which deviation is calculated in smb gradients method [m/yr ice equiv]'))
+        s += '{}\n'.format(fielddisplay(self, 'b_pos', 'slope of hs - smb regression line for accumulation regime required if smb gradients is activated'))
+        s += '{}\n'.format(fielddisplay(self, 'b_neg', 'slope of hs - smb regression line for ablation regime required if smb gradients is activated'))
+        s += '{}\n'.format(fielddisplay(self, 'steps_per_step', 'number of smb steps per time step'))
+        s += '{}\n'.format(fielddisplay(self, 'averaging', 'averaging methods from short to long steps'))
+        s += '\t\t{}\n'.format('0: Arithmetic (default)')
+        s += '\t\t{}\n'.format('1: Geometric')
+        s += '\t\t{}\n'.format('2: Harmonic')
+        s += '{}\n'.format(fielddisplay(self, 'requested_outputs', 'additional outputs requested'))
 
-        string = "%s\n%s" % (string, fielddisplay(self, 'issmbgradients', 'is smb gradients method activated (0 or 1, default is 0)'))
-        string = "%s\n%s" % (string, fielddisplay(self, 'href', ' reference elevation from which deviation is used to calculate SMB adjustment in smb gradients method'))
-        string = "%s\n%s" % (string, fielddisplay(self, 'smbref', ' reference smb from which deviation is calculated in smb gradients method'))
-        string = "%s\n%s" % (string, fielddisplay(self, 'b_pos', ' slope of hs - smb regression line for accumulation regime required if smb gradients is activated'))
-        string = "%s\n%s" % (string, fielddisplay(self, 'b_neg', ' slope of hs - smb regression line for ablation regime required if smb gradients is activated'))
-        string = "%s\n%s" % (string, fielddisplay(self, 'steps_per_step', 'number of smb steps per time step'))
-        string = "%s\n%s" % (string, fielddisplay(self, 'averaging', 'averaging methods from short to long steps'))
-        string = "%s\n\t\t%s" % (string, '0: Arithmetic (default)')
-        string = "%s\n\t\t%s" % (string, '1: Geometric')
-        string = "%s\n\t\t%s" % (string, '2: Harmonic')
-        string = "%s\n%s" % (string, fielddisplay(self, 'requested_outputs', 'additional outputs requested'))
-
-        return string
-    #}}}
+        return s
+    # }}}
 
     def extrude(self, md):  # {{{
-        #Nothing for now
+        # Nothing for now
         return self
-    #}}}
+    # }}}
 
     def defaultoutputs(self, md):  # {{{
-        return []
-    #}}}
+        return ['SmbMassBalance']
+    # }}}
+
+    def setdefaultparameters(self):  # {{{
+        # Output default
+        self.requested_outputs = ['default']
+        return self
+    # }}}
 
     def initialize(self, md):  # {{{
-        #Nothing for now
+        # Nothing for now
         return self
-    #}}}
+    # }}}
 
-    def checkconsistency(self, md, solution, analyses):    # {{{
+    def checkconsistency(self, md, solution, analyses):  # {{{
         if 'MasstransportAnalysis' in analyses:
             md = checkfield(md, 'fieldname', 'smb.href', 'timeseries', 1, 'NaN', 1, 'Inf', 1)
             md = checkfield(md, 'fieldname', 'smb.smbref', 'timeseries', 1, 'NaN', 1, 'Inf', 1)
+            if np.max(np.max(np.abs(md.smb.smbref[0:-1,]))) < 1:
+                print('!!! Warning: SMBgradients now expects smbref to be in m/yr ice eq. instead of mm/yr water eq.')
             md = checkfield(md, 'fieldname', 'smb.b_pos', 'timeseries', 1, 'NaN', 1, 'Inf', 1)
             md = checkfield(md, 'fieldname', 'smb.b_neg', 'timeseries', 1, 'NaN', 1, 'Inf', 1)
@@ -67,5 +77,5 @@
     # }}}
 
-    def marshall(self, prefix, md, fid):    # {{{
+    def marshall(self, prefix, md, fid):  # {{{
         yts = md.constants.yts
 
@@ -78,5 +88,5 @@
         WriteData(fid, prefix, 'object', self, 'fieldname', 'averaging', 'format', 'Integer')
 
-        #process requested outputs
+        # Process requested outputs
         outputs = self.requested_outputs
         indices = [i for i, x in enumerate(outputs) if x == 'default']
@@ -85,4 +95,3 @@
             outputs = outputscopy
         WriteData(fid, prefix, 'data', outputs, 'name', 'md.smb.requested_outputs', 'format', 'StringArray')
-
     # }}}
Index: /issm/trunk/src/m/classes/SMBgradientscomponents.m
===================================================================
--- /issm/trunk/src/m/classes/SMBgradientscomponents.m	(revision 28012)
+++ /issm/trunk/src/m/classes/SMBgradientscomponents.m	(revision 28013)
@@ -33,5 +33,5 @@
 		end % }}}
 		function list = defaultoutputs(self,md) % {{{
-			list = {'runoff'};
+			list = {'SmbMassBalance'};
 		end % }}}
 		function self = initialize(self,md) % {{{
@@ -42,5 +42,6 @@
 		function self = setdefaultparameters(self) % {{{
 
-		%Nothing for now
+			%output default:
+			self.requested_outputs={'default'};
 
 		end % }}}
Index: /issm/trunk/src/m/classes/SMBgradientscomponents.py
===================================================================
--- /issm/trunk/src/m/classes/SMBgradientscomponents.py	(revision 28012)
+++ /issm/trunk/src/m/classes/SMBgradientscomponents.py	(revision 28013)
@@ -23,5 +23,5 @@
         self.averaging = 0
         self.requested_outputs = ['default']
-    #}}}
+    # }}}
 
     def __repr__(self):  # {{{
@@ -41,22 +41,22 @@
 
         return string
-    #}}}
+    # }}}
 
     def extrude(self, md):  # {{{
         #Nothing for now
         return self
-    #}}}
+    # }}}
 
     def defaultoutputs(self, md):  # {{{
-        list = ['SmbMassBalance', 'SmbRunoff']
+        list = ['SmbMassBalance']
         if self.steps_per_step > 1:
-            list.extend(['SmbMassBalanceSubstep', 'SmbRunoffSubstep'])
+            list.extend(['SmbMassBalanceSubstep'])
         return list
-    #}}}
+    # }}}
 
     def initialize(self, md):  # {{{
         #Nothing for now
         return self
-    #}}}
+    # }}}
 
     def checkconsistency(self, md, solution, analyses):  # {{{
Index: /issm/trunk/src/m/classes/SMBgradientsela.m
===================================================================
--- /issm/trunk/src/m/classes/SMBgradientsela.m	(revision 28012)
+++ /issm/trunk/src/m/classes/SMBgradientsela.m	(revision 28013)
@@ -30,5 +30,5 @@
 		end % }}}
 		function list = defaultoutputs(self,md) % {{{
-			list = {''};
+			list = {'SmbMassBalance'};
 		end % }}}
 		function self = initialize(self,md) % {{{
@@ -39,6 +39,8 @@
 		function self = setdefaultparameters(self) % {{{
 
+			%output default:
 			self.b_max=9999;
 			self.b_min=-9999;
+			self.requested_outputs={'default'};
 
 		end % }}}
Index: /issm/trunk/src/m/classes/SMBgradientsela.py
===================================================================
--- /issm/trunk/src/m/classes/SMBgradientsela.py	(revision 28012)
+++ /issm/trunk/src/m/classes/SMBgradientsela.py	(revision 28013)
@@ -5,12 +5,11 @@
 
 class SMBgradientsela(object):
-    """
-    SMBgradientsela Class definition
+    """SMBGRADIENTSELA class definition
 
-       Usage:
-          SMBgradientsela = SMBgradientsela()
+    Usage:
+        SMBgradientsela = SMBgradientsela()
     """
 
-    def __init__(self):  # {{{
+    def __init__(self, *args):  # {{{
         self.ela = float('NaN')
         self.b_pos = float('NaN')
@@ -21,6 +20,10 @@
         self.averaging = 0
         self.requested_outputs = []
-        self.setdefaultparameters()
-    #}}}
+
+        if len(args) == 0:
+            self.setdefaultparameters()
+        else:
+            error('constructor not supported')
+    # }}}
 
     def __repr__(self):  # {{{
@@ -41,25 +44,26 @@
         string = "%s\n%s" % (string, fielddisplay(self, 'requested_outputs', 'additional outputs requested'))
         return string
-    #}}}
+    # }}}
 
     def extrude(self, md):  # {{{
         #Nothing for now
         return self
-    #}}}
+    # }}}
 
     def defaultoutputs(self, md):  # {{{
-        return []
-    #}}}
+        return ['SmbMassBalance']
+    # }}}
 
     def initialize(self, md):  # {{{
         #Nothing for now
         return self
-    #}}}
+    # }}}
 
     def setdefaultparameters(self):  # {{{
-        self.b_max = 9999.
-        self.b_min = -9999.
+        self.b_max = 9999
+        self.b_min = -9999
+        self.requested_outputs = ['default']
         return self
-    #}}}
+    # }}}
 
     def checkconsistency(self, md, solution, analyses):    # {{{
Index: /issm/trunk/src/m/classes/SMBhenning.m
===================================================================
--- /issm/trunk/src/m/classes/SMBhenning.m	(revision 28012)
+++ /issm/trunk/src/m/classes/SMBhenning.m	(revision 28013)
@@ -35,5 +35,5 @@
 		end % }}}
 		function list = defaultoutputs(self,md) % {{{
-			list = {''};
+			list = {'SmbMassBalance'};
 		end % }}}
 		function self = initialize(self,md) % {{{
@@ -73,6 +73,6 @@
 			WriteData(fid,prefix,'name','md.smb.model','data',7,'format','Integer');
 			WriteData(fid,prefix,'object',self,'class','smb','fieldname','smbref','format','DoubleMat','mattype',1,'scale',1./yts,'timeserieslength',md.mesh.numberofvertices+1,'yts',md.constants.yts);
-			WriteData(fid, prefix, 'object', self, 'fieldname', 'steps_per_step', 'format', 'Integer');
-			WriteData(fid, prefix, 'object', self, 'fieldname', 'averaging', 'format', 'Integer');
+			WriteData(fid,prefix,'object',self,'fieldname','steps_per_step','format','Integer');
+			WriteData(fid,prefix,'object',self,'fieldname','averaging','format','Integer');
 
 			%process requested outputs
Index: /issm/trunk/src/m/classes/SMBhenning.py
===================================================================
--- /issm/trunk/src/m/classes/SMBhenning.py	(revision 28013)
+++ /issm/trunk/src/m/classes/SMBhenning.py	(revision 28013)
@@ -0,0 +1,92 @@
+import numpy as np
+
+from checkfield import checkfield
+from fielddisplay import fielddisplay
+from project3d import project3d
+from WriteData import WriteData
+
+
+class SMBforcing(object):
+    """SMBhenning class definition
+
+    Usage:
+        SMBhenning = SMBhenning()
+    """
+
+    def __init__(self, *args):  # {{{
+        self.smbref = np.nan
+        self.steps_per_step = 1
+        self.averaging = 0
+        self.requested_outputs = []
+
+        nargs = len(args)
+        if nargs == 0:
+            self.setdefaultparameters()
+        elif nargs == 1:
+            # TODO: Replace the following with constructor
+            self.setdefaultparameters()
+        else:
+            raise Exception('constructor not supported')
+    # }}}
+
+    def __repr__(self):  # {{{
+        s = '   surface forcings parameters:\n'
+        s += '{}\n'.format(fielddisplay(self, 'mass_balance', 'surface mass balance [m/yr ice eq]'))
+        s += '{}\n'.format(fielddisplay(self, 'steps_per_step', 'number of smb steps per time step'))
+        s += '{}\n'.format(fielddisplay(self, 'averaging', 'averaging methods from short to long steps'))
+        s += '\t\t{}\n'.format('0: Arithmetic (default)')
+        s += '\t\t{}\n'.format('1: Geometric')
+        s += '\t\t{}\n'.format('2: Harmonic')
+        s += '{}\n'.format(fielddisplay(self, 'requested_outputs', 'additional outputs requested'))
+        return s
+    # }}}
+
+    def extrude(self, md):  # {{{
+        self.smbref = project3d(md, 'vector', self.smbref, 'type', 'node')
+        return self
+    # }}}
+
+    def defaultoutputs(self, md):  # {{{
+        return ['SmbMassBalance']
+    # }}}
+
+    def initialize(self, md):  # {{{
+        if np.all(np.isnan(self.smbref)):
+            self.smbref = np.zeros((md.mesh.numberofvertices))
+            print("      no smb.smbref specified: values set as zero")
+        return self
+    # }}}
+
+    def checkconsistency(self, md, solution, analyses):  # {{{
+        if solution == 'TransientSolution' and not md.transient.issmb:
+            return
+        if 'MasstransportAnalysis' in analyses:
+            md = checkfield(md, 'fieldname', 'smb.mass_balance', 'timeseries', 1, 'NaN', 1, 'Inf', 1)
+        if 'BalancethicknessAnalysis' in analyses:
+            md = checkfield(md, 'fieldname', 'smb.mass_balance', 'size', [md.mesh.numberofvertices], 'NaN', 1, 'Inf', 1)
+        md = checkfield(md, 'fieldname', 'smb.steps_per_step', '>=', 1, 'numel', [1])
+        md = checkfield(md, 'fieldname', 'smb.averaging', 'numel', [1], 'values', [0, 1, 2])
+        md = checkfield(md, 'fieldname', 'smb.requested_outputs', 'stringrow', 1)
+        return md
+    # }}}
+
+    def marshall(self, prefix, md, fid):  # {{{
+        yts = md.constants.yts
+        WriteData(fid, prefix, 'name', 'md.smb.model', 'data', 1, 'format', 'Integer')
+        WriteData(fid, prefix, 'object', self, 'class', 'smb', 'fieldname', 'mass_balance', 'format', 'DoubleMat', 'mattype', 1, 'scale', 1 / yts, 'timeserieslength', md.mesh.numberofvertices + 1, 'yts', yts)
+        WriteData(fid, prefix, 'object', self, 'fieldname', 'steps_per_step', 'format', 'Integer')
+        WriteData(fid, prefix, 'object', self, 'fieldname', 'averaging', 'format', 'Integer')
+
+        #process requested outputs
+        outputs = self.requested_outputs
+        indices = [i for i, x in enumerate(outputs) if x == 'default']
+        if len(indices) > 0:
+            outputscopy = outputs[0:max(0, indices[0] - 1)] + self.defaultoutputs(md) + outputs[indices[0] + 1:]
+            outputs = outputscopy
+        WriteData(fid, prefix, 'data', outputs, 'name', 'md.smb.requested_outputs', 'format', 'StringArray')
+    # }}}
+
+    def setdefaultparameters(self):  #{{{
+        self.requested_outputs = ['default']
+        return self
+    # }}}
Index: /issm/trunk/src/m/classes/SMBmeltcomponents.m
===================================================================
--- /issm/trunk/src/m/classes/SMBmeltcomponents.m	(revision 28012)
+++ /issm/trunk/src/m/classes/SMBmeltcomponents.m	(revision 28013)
@@ -13,5 +13,4 @@
 		averaging = 0;
 		requested_outputs= {};
-		isclimatology;
 	end
 	methods
@@ -19,4 +18,5 @@
 			switch nargin
 				case 0
+					self=setdefaultparameters(self);
 				otherwise
 					error('constructor not supported');
@@ -45,5 +45,5 @@
 		end % }}}
 		function list = defaultoutputs(self,md) % {{{
-			list = {''};
+			list = {'SmbMassBalance'};
 		end % }}}
 		function self = initialize(self,md) % {{{
@@ -107,4 +107,10 @@
 
 		end % }}}
+		function self = setdefaultparameters(self) % {{{
+
+			%output default:
+			self.requested_outputs={'default'};
+
+		end % }}}
 	end
 end
Index: /issm/trunk/src/m/classes/SMBmeltcomponents.py
===================================================================
--- /issm/trunk/src/m/classes/SMBmeltcomponents.py	(revision 28012)
+++ /issm/trunk/src/m/classes/SMBmeltcomponents.py	(revision 28013)
@@ -8,5 +8,5 @@
 
 class SMBmeltcomponents(object):
-    """SMBMELTCOMPONENTS lass definition
+    """SMBMELTCOMPONENTS class definition
 
     Usage:
@@ -15,18 +15,17 @@
 
     def __init__(self, *args):  # {{{
-        self.accumulation = np.nan
-        self.evaporation = np.nan
-        self.melt = np.nan
-        self.refreeze = np.nan
-        self.steps_per_step = 1
-        self.averaging = 0
+        self.accumulation      = np.nan
+        self.evaporation       = np.nan
+        self.melt              = np.nan
+        self.refreeze          = np.nan
+        self.steps_per_step    = 1
+        self.averaging         = 0
         self.requested_outputs = []
-        self.isclimatology = np.nan
 
         if len(args) == 0:
-            pass
+            self.setdefaultparameters()
         else:
             error('constructor not supported')
-    #}}}
+    # }}}
 
     def __repr__(self):  # {{{
@@ -43,5 +42,5 @@
         s += '{}\n'.format(fielddisplay(self, 'requested_outputs', 'additional outputs requested'))
         return s
-    #}}}
+    # }}}
 
     def extrude(self, md):  # {{{
@@ -51,9 +50,9 @@
         self.refreeze = project3d(md, 'vector', self.refreeze, 'type', 'node')
         return self
-    #}}}
+    # }}}
 
     def defaultoutputs(self, md):  # {{{
-        return []
-    #}}}
+        return ['SmbMassBalance']
+    # }}}
 
     def initialize(self, md):  # {{{
@@ -75,5 +74,5 @@
 
         return self
-    #}}}
+    # }}}
 
     def checkconsistency(self, md, solution, analyses):  # {{{
@@ -115,2 +114,7 @@
         WriteData(fid, prefix, 'data', outputs, 'name', 'md.smb.requested_outputs', 'format', 'StringArray')
     # }}}
+
+    def setdefaultparameters(self):  # {{{
+        self.requested_outputs = ['default']
+        return self
+    # }}}
Index: /issm/trunk/src/m/classes/SMBpdd.m
===================================================================
--- /issm/trunk/src/m/classes/SMBpdd.m	(revision 28012)
+++ /issm/trunk/src/m/classes/SMBpdd.m	(revision 28013)
@@ -56,5 +56,5 @@
 		end % }}}
 		function list = defaultoutputs(self,md) % {{{
-			list = {''};
+			list = {'SmbMassBalance'};
 		end % }}}
 		function self = initialize(self,md) % {{{
@@ -78,4 +78,5 @@
 		  self.rlapslgm   = 6.5;
 		  self.issetpddfac = 0;
+		  self.requested_outputs={'default'};
 
 		end % }}}
Index: /issm/trunk/src/m/classes/SMBpdd.py
===================================================================
--- /issm/trunk/src/m/classes/SMBpdd.py	(revision 28012)
+++ /issm/trunk/src/m/classes/SMBpdd.py	(revision 28013)
@@ -38,9 +38,9 @@
         self.steps_per_step = 1
         self.averaging = 0
-
-    #set defaults
+        self.requested_outputs = []
+
+        # Set defaults
         self.setdefaultparameters()
-        self.requested_outputs = []
-    #}}}
+    # }}}
 
     def __repr__(self):  # {{{
@@ -104,5 +104,5 @@
             self.precipitations_presentday = project3d(md, 'vector', self.precipitations_presentday, 'type', 'node')
             self.precipitations_lgm = project3d(md, 'vector', self.precipitations_lgm, 'type', 'node')
-       
+
         if self.issetpddfac:
             self.pddfac_snow = project3d(md, 'vector', self.pddfac_snow, 'type', 'node')
@@ -112,9 +112,9 @@
         self.s0t = project3d(md, 'vector', self.s0t, 'type', 'node')
         return self
-    #}}}
+    # }}}
 
     def defaultoutputs(self, md):  # {{{
-        return []
-    #}}}
+        return ['SmbMassBalance']
+    # }}}
 
     def initialize(self, md):  # {{{
@@ -128,5 +128,5 @@
 
         return self
-    #}}}
+    # }}}
 
     def setdefaultparameters(self):  # {{{
@@ -138,7 +138,8 @@
         self.rlapslgm = 6.5
         self.issetpddfac = 0
+        self.requested_outputs = ['default']
 
         return self
-    #}}}
+    # }}}
 
     def checkconsistency(self, md, solution, analyses):    # {{{
@@ -180,5 +181,5 @@
         md = checkfield(md, 'fieldname', 'masstransport.requested_outputs', 'stringrow', 1)
         return md
-    #}}}
+    # }}}
 
     def marshall(self, prefix, md, fid):    # {{{
@@ -218,5 +219,5 @@
             WriteData(fid, prefix, 'object', self, 'class', 'smb', 'fieldname', 'Tdiff', 'format', 'DoubleMat', 'mattype', 1, 'timeserieslength', 2, 'yts', md.constants.yts)
             WriteData(fid, prefix, 'object', self, 'class', 'smb', 'fieldname', 'sealev', 'format', 'DoubleMat', 'mattype', 1, 'timeserieslength', 2, 'yts', md.constants.yts)
-       
+
         if self.issetpddfac:
             WriteData(fid, prefix, 'object', self, 'class', 'smb', 'fieldname', 'pddfac_snow', 'format', 'DoubleMat', 'mattype', 1, 'timeserieslength', md.mesh.numberofvertices + 1, 'yts', md.constants.yts)
Index: /issm/trunk/src/m/classes/SMBpddSicopolis.m
===================================================================
--- /issm/trunk/src/m/classes/SMBpddSicopolis.m	(revision 28012)
+++ /issm/trunk/src/m/classes/SMBpddSicopolis.m	(revision 28013)
@@ -41,5 +41,5 @@
 		end % }}}
 		function list = defaultoutputs(self,md) % {{{
-			list = {''};
+			list = {'SmbMassBalance'};
 		end % }}}
 		function self = initialize(self,md) % {{{
@@ -69,7 +69,8 @@
 		function self = setdefaultparameters(self) % {{{
 
-			self.isfirnwarming		= 1;
-			self.desfac				= -log(2.0)/1000;
-			self.rlaps				= 7.4;
+			self.isfirnwarming = 1;
+			self.desfac        = -log(2.0)/1000;
+			self.rlaps         = 7.4;
+			self.requested_outputs={'default'};
 
 		end % }}}
Index: /issm/trunk/src/m/classes/SMBpddSicopolis.py
===================================================================
--- /issm/trunk/src/m/classes/SMBpddSicopolis.py	(revision 28012)
+++ /issm/trunk/src/m/classes/SMBpddSicopolis.py	(revision 28013)
@@ -31,6 +31,5 @@
         self.requested_outputs = []
 
-        nargs = len(args)
-        if nargs == 0:
+        if len(args) == 0:
             self.setdefaultparameters()
         else:
@@ -71,5 +70,5 @@
 
     def defaultoutputs(self, md):  # {{{
-        listing = ['']
+        listing = ['SmbMassBalance']
         return listing
     # }}}
@@ -102,4 +101,5 @@
         self.desfac = -np.log(2.0) / 1000
         self.rlaps = 7.4
+        self.requested_outputs = ['default']
         return self
     # }}}
@@ -137,11 +137,10 @@
         WriteData(fid, prefix, 'object', self, 'fieldname', 'averaging', 'format', 'Integer')
 
-        #process requested outputs
+        # Process requested outputs
         outputs = self.requested_outputs
-        pos = np.where('default' in outputs)
-        if not isempty(pos):
-            outputs[pos] = []  #remove 'default' from outputs
-            outputs = [outputs, defaultoutputs(self, md)]  #add defaults
-
+        indices = [i for i, x in enumerate(outputs) if x == 'default']
+        if len(indices) > 0:
+            outputscopy = outputs[0:max(0, indices[0] - 1)] + self.defaultoutputs(md) + outputs[indices[0] + 1:]
+            outputs = outputscopy
         WriteData(fid, prefix, 'data', outputs, 'name', 'md.smb.requested_outputs', 'format', 'StringArray')
     # }}}
Index: /issm/trunk/src/m/classes/SMBsemic.m
===================================================================
--- /issm/trunk/src/m/classes/SMBsemic.m	(revision 28012)
+++ /issm/trunk/src/m/classes/SMBsemic.m	(revision 28013)
@@ -12,14 +12,52 @@
 		dailywindspeed		= NaN;
 		dailypressure		= NaN;
-		dailyairdensity	= NaN;
+		dailyairdensity		= NaN;
 		dailyairhumidity	= NaN;
 		dailytemperature	= NaN;
+		Tamp              = NaN;
+		mask              = NaN;
+		hice              = NaN;
+		hsnow             = NaN;
+		qmr               = NaN;
 		desfac				= 0;
-		rlaps					= 0;
+		desfacElevation   = 0;
+		rlaps				= 0;
 		rdl					= 0;
-		s0gcm					= NaN;
-		steps_per_step = 1;
-		averaging = 0;
-		requested_outputs = {};
+		s0gcm				= NaN;
+		steps_per_step		= 1;
+		averaging			= 0;
+		requested_outputs	= {};
+
+		hcrit             = 0;
+		rcrit             = 0;
+
+		% albedo
+		albedo            = 0; % required for first energy balance calculation of SEMIC
+		albedo_snow       = 0; % required for ISBA method
+		albedo_scheme     = 0; 
+		alb_smax = NaN;
+		alb_smin = NaN;
+		albi = NaN;
+		albl = NaN;
+
+		% albedo parameters depending on albedo_scheme
+		% for slater 
+		tmin = NaN;
+		tmax = NaN;
+
+		% for isba & denby method
+		mcrit = NaN;
+
+		% for isba
+		tau_a = NaN;
+		tau_f = NaN;
+		wcrit = NaN;
+
+		% for alex
+		tmid = NaN;
+		afac = NaN;
+
+		% method
+		ismethod  = 0;
 	end
 	methods
@@ -46,24 +84,87 @@
 		end % }}}
 		function list = defaultoutputs(self,md) % {{{
-			list = {''};
+			list = {'SmbMassBalance'};
+		end % }}}
+		function list = outputlists(self,md) % {{{
+			if self.ismethod == 1
+				list = {'default','SmbMassBalance','SmbMassBalanceSnow','SmbMassBalanceIce',...
+					'SmbMassBalanceSemic','SmbMelt','SmbRefreeze','SmbAccumulation',...
+					'SmbHIce','SmbHSnow','SmbAlbedo','SmbAlbedoSnow','TemperatureSEMIC',...
+					'SmbSemicQmr','TotalSmb','TotalSmbMelt','TotalSmbRefreeze'};
+			else
+				list = {'default','SmbMassBalance'};
+			end
 		end % }}}
 		function self = initialize(self,md) % {{{
-
-			if isnan(self.s0gcm),
-				self.s0gcm=zeros(md.mesh.numberofvertices,1);
-				disp('      no SMBsemic.s0gcm specified: values set as zero');
-			end
-
+			% Explain
+			%  initialize SEMIC smb values, such as s0gcm(surface elevation), albedo,
+			% albedo_snow, hice, hsnow, Tamp... values.
+			% 
+			%
+			% Usage
+			%  md.smb = initialize(md.smb,md);
+
+			if isnan(self.s0gcm)
+				if ~isnan(md.geometry.surface) & (numel(md.geometry.surface) == md.mesh.numberofvertices)
+					self.s0gcm=md.geometry.surface;
+					disp('      no SMBsemic.s0gcm specified: values from md.geometry.surface');
+				else
+					self.s0gcm=zeros(md.mesh.numberofvertices,1);
+					disp('      no SMBsemic.s0gcm specified: values set as zero');
+				end
+			end
+			if isnan(self.mask)
+				self.mask = 2*ones(md.mesh.numberofvertices,1);
+				disp('      no SMBsemic.mask specified: values set as 2 for ice');
+			end
+
+			% update each values.
+			if isnan(self.Tamp)
+				self.Tamp= 3*ones(md.mesh.numberofvertices,1);
+				disp('      no SMBsemic.Tamp specified: values set as 3.0');
+			end
+			self.albedo     = 0.8*ones(md.mesh.numberofvertices,1);
+			self.albedo_snow= 0.5*ones(md.mesh.numberofvertices,1);
+			self.hice       = 10*ones(md.mesh.numberofvertices,1);
+			self.hsnow      = 5*ones(md.mesh.numberofvertices,1);
+			self.qmr        = zeros(md.mesh.numberofvertices,1);
 		end % }}}
 		function self = setdefaultparameters(self) % {{{
 
-			self.desfac		= -log(2.0)/1000;
-			self.rlaps		= 7.4;
-			self.rdl			= 0.29;
-
-		end % }}}zo
+			% albedo parameters
+			self.albedo_scheme   = 0;
+			self.alb_smax = 0.79;
+			self.alb_smin = 0.6;
+			self.albi = 0.41;
+			self.albl = 0.07;
+
+			% albedo parameters for?
+			% for slater
+			self.tmin  = 263.15;
+			self.tmax  = 273.15;
+			% for isba & denby
+			self.mcrit = 6e-8;
+			% for isba
+			self.tau_a = 0.008;
+			self.tau_f = 0.24;
+			self.wcrit = 15.0;
+			% for alex
+			self.tmid  = 273.35;
+			self.afac  = -0.18;
+
+			self.hcrit = 0.028;% from Krapp et al. (2017)
+			self.rcrit = 0.85; % from Krapp et al. (2017)
+		
+			self.desfac		      = -log(2.0)/1000;
+			self.desfacElevation = 2000;
+			self.rlaps		      = 7.4;
+			self.rdl			      = 0.29;
+
+			self.ismethod        = 0;
+			self.requested_outputs={'default'};
+		end % }}}
 		function md = checkconsistency(self,md,solution,analyses) % {{{
 
-			if ismember('MasstransportAnalysis',analyses),
+			if ismember('MasstransportAnalysis',analyses)
 				md = checkfield(md,'fieldname','smb.desfac','<=',1,'numel',1);
 				md = checkfield(md,'fieldname','smb.s0gcm','>=',0,'NaN',1,'Inf',1,'size',[md.mesh.numberofvertices 1]);
@@ -79,9 +180,38 @@
 				md = checkfield(md,'fieldname','smb.dailyairhumidity','timeseries',1,'NaN',1,'Inf',1);
 				md = checkfield(md,'fieldname','smb.dailytemperature','timeseries',1,'NaN',1,'Inf',1);
+
+				% TODO: transient model should be merged with SEMIC developed by Ruckamp et al. (2018)
+
+				md = checkfield(md,'fieldname','smb.ismethod','numel',1,'values',[0,1]);
+				if self.ismethod == 1 % transient mode
+					md = checkfield(md,'fieldname','smb.desfacElevation','>=',0,'numel',1);
+
+					md = checkfield(md,'fieldname','smb.albedo_scheme','NaN',1,'Inf',1,'numel',1,'values',[0,1,2,3,4]);
+					md = checkfield(md,'fieldname','smb.alb_smax','>=',0,'NaN',1,'Inf',1,'numel',1);
+					md = checkfield(md,'fieldname','smb.mask','NaN',1,'Inf',1,'size',[md.mesh.numberofvertices, 1],'values',[0,1,2]);
+
+					% initial values
+					md = checkfield(md,'fieldname','smb.albedo','NaN',1,'Inf',1,'size',[md.mesh.numberofvertices, 1]);
+					md = checkfield(md,'fieldname','smb.albedo_snow','NaN',1,'Inf',1,'size',[md.mesh.numberofvertices, 1]);
+					md = checkfield(md,'fieldname','smb.alb_smax','>=',0,'<=',1,'NaN',1,'Inf',1,'numel',1);
+					md = checkfield(md,'fieldname','smb.alb_smin','<=',1,'NaN',1,'Inf',1,'numel',1);
+					md = checkfield(md,'fieldname','smb.albi','>=',0,'<=',1,'NaN',1,'Inf',1,'numel',1);
+					md = checkfield(md,'fieldname','smb.albl','>=',0,'<=',1,'NaN',1,'Inf',1,'numel',1);
+					md = checkfield(md,'fieldname','smb.hice','NaN',1,'Inf',1,'size',[md.mesh.numberofvertices, 1]);
+					md = checkfield(md,'fieldname','smb.hsnow','NaN',1,'Inf',1,'size',[md.mesh.numberofvertices, 1]);
+					md = checkfield(md,'fieldname','smb.qmr','NaN',1,'Inf',1,'size',[md.mesh.numberofvertices, 1]);
+				end
 			end
 			md = checkfield(md,'fieldname','smb.steps_per_step','>=',1,'numel',[1]);
 			md = checkfield(md,'fieldname','smb.averaging','numel',[1],'values',[0 1 2]);
 			md = checkfield(md,'fieldname','smb.requested_outputs','stringrow',1);
-
+			% check requested_outputs
+			if self.ismethod==1
+				for i = 1:length(self.requested_outputs)
+					if ~any(strcmpi(self.requested_outputs{i},self.outputlists))
+						error(sprintf('ERROR: %s requested_output is not available',self.requested_outputs{i}));
+					end
+				end
+			end
 		end % }}}
 		function disp(self) % {{{
@@ -101,8 +231,60 @@
 			fielddisplay(self,'dailyairhumidity','daily air specific humidity [kg/kg]');
 			fielddisplay(self,'dailytemperature','daily surface air temperature [K]');
-			fielddisplay(self,'rlaps','present day lapse rate (default is 7.4 [degree/km]; )Erokhina et al. 2017)');
+			fielddisplay(self,'rlaps','present day lapse rate (default is 7.4 [degree/km]; Erokhina et al. 2017)');
 			fielddisplay(self,'desfac','desertification elevation factor (default is -log(2.0)/1000 [1/km]; Vizcaino et al. 2010)');
 			fielddisplay(self,'rdl','longwave downward radiation decrease (default is 0.29 [W/m^2/km]; Marty et al. 2002)');
 			fielddisplay(self,'s0gcm','GCM reference elevation; (default is 0) [m]');
+			fielddisplay(self,'albedo_scheme','albedom scheme. 0: none, 1: (default is 0)');
+
+			fielddisplay(self,'ismethod','method for calculating SMB with SEMIC. Default version of SEMIC is really slow. 0: steady, 1: transient (default: 0)');
+			if self.ismethod == 1 % transient mode
+				fielddisplay(self,'desfacElevation','desertification elevation (default is 2000 m; Vizcaino et al. 2010)');
+				fielddisplay(self,'Tamp','amplitude of diurnal cycle [K]');
+				fielddisplay(self,'albedo','initial albedo [no unit]');
+				fielddisplay(self,'albedo_snow','initial albedo for snow [no unit]');
+				fielddisplay(self,'hice','initial thickness of ice [unit: m]');
+				fielddisplay(self,'hsnow','initial thickness of snow [unit: m]');
+				fielddisplay(self,'mask','masking for albedo. 0: ocean, 1: land, 2: ice (default: 2)');
+				fielddisplay(self,'hcrit','critical snow height for albedo [unit: m]');
+				fielddisplay(self,'rcrit','critical refreezing height for albedo [no unit]');
+
+				disp(sprintf('\nSEMIC albedo parameters.'));
+				fielddisplay(self,'albedo_scheme','albedo scheme for SEMIC. 0: none, 1: slater, 2: isba, 3: denby, 4: alex (default is 0)');
+				fielddisplay(self,'alb_smax','maximum snow albedo (default: 0.79)');
+				fielddisplay(self,'alb_smin','minimum snow albedo (default: 0.6)');
+				fielddisplay(self,'albi','background albedo for bare ice (default: 0.41)');
+				fielddisplay(self,'albl','background albedo for bare land (default: 0.07)');
+			end
+			% albedo_scheme - 0: none, 1: slater, 2: isba, 3: denby, 4: alex.
+         if self.albedo_scheme == 0
+            disp(sprintf('\n\tSEMIC snow albedo parameter of None.'));
+				disp(sprintf('\t   albedo of snow is updated from albedo snow max (alb_smax).'));
+            disp(sprintf('\t   alb_snow = abl_smax'));
+			elseif self.albedo_scheme == 1
+				disp(sprintf('\n\tSEMIC snow albedo parameters of Slater et al, (1998).'));
+				disp(sprintf('\t   alb = alb_smax - (alb_smax - alb_smin)*tm^(3.0)'))
+				disp(sprintf('\t   tm  = 1 (tsurf > 273.15 K)'));
+				disp(sprintf('\t         tm = f*(tsurf-tmin) (tmin <= tsurf < 273.15)'));
+				disp(sprintf('\t         0 (tsurf < tmin)'));
+				disp(sprintf('\t   f = 1/(273.15-tmin)'));
+				fielddisplay(self,'tmin','minimum temperature for which albedo decline become effective. (default: 263.15 K)[unit: K])');
+				fielddisplay(self,'tmax','maxmium temperature for which albedo decline become effective. This value should be fixed. (default: 273.15 K)[unit: K])');
+			elseif self.albedo_scheme == 2
+				disp(sprintf('\n\tSEMIC snow albedo parameters for ISBA.? where is citation?'));
+				fielddisplay(self,'mcrit','critical melt rate (default: 6e-8) [unit: m/sec]');
+				fielddisplay(self,'wcrit','critical liquid water content (default: 15) [unit: kg/m2]');
+				fielddisplay(self,'tau_a','dry albedo decline [unit: 1/day]');
+				fielddisplay(self,'tau_f','wet albedo decline [unit: 1/day]');
+			elseif self.albedo_scheme == 3
+				disp(sprintf('\n\tSEMIC snow albedo parameters for Denby et al. (2002 Tellus)'));
+				fielddisplay(self,'mcrit','critical melt rate (default: 6e-8) [unit: m/sec]');
+			elseif self.albedo_scheme == 4
+				disp(sprintf('\n\tSEMIC snow albedo parameters for Alex.?'));
+				fielddisplay(self,'afac','[unit: ?]');
+				fielddisplay(self,'tmid','[unit: ?]');
+			else
+				error(sprintf('ERROR: %d is not supported albedom scheme.',self.albedo_scheme))
+			end
+
 			fielddisplay(self, 'steps_per_step', 'number of smb steps per time step');
 			fielddisplay(self, 'averaging', 'averaging methods from short to long steps');
@@ -114,25 +296,65 @@
 		function marshall(self,prefix,md,fid) % {{{
 
+			yts=md.constants.yts;
+
 			WriteData(fid,prefix,'name','md.smb.model','data',12,'format','Integer');
 
+			WriteData(fid,prefix,'object',self,'class','smb','fieldname','ismethod','format','Integer','values',[0, 1]);
 			WriteData(fid,prefix,'object',self,'class','smb','fieldname','desfac','format','Double');
+			WriteData(fid,prefix,'object',self,'class','smb','fieldname','desfacElevation','format','Double');
 			WriteData(fid,prefix,'object',self,'class','smb','fieldname','s0gcm','format','DoubleMat','mattype',1);
 			WriteData(fid,prefix,'object',self,'class','smb','fieldname','rlaps','format','Double');
 			WriteData(fid,prefix,'object',self,'class','smb','fieldname','rdl','format','Double');
-			WriteData(fid,prefix,'object',self,'class','smb','fieldname','dailysnowfall','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'yts',md.constants.yts);
-			WriteData(fid,prefix,'object',self,'class','smb','fieldname','dailyrainfall','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'yts',md.constants.yts);
-			WriteData(fid,prefix,'object',self,'class','smb','fieldname','dailydsradiation','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'yts',md.constants.yts);
-			WriteData(fid,prefix,'object',self,'class','smb','fieldname','dailydlradiation','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'yts',md.constants.yts);
-			WriteData(fid,prefix,'object',self,'class','smb','fieldname','dailywindspeed','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'yts',md.constants.yts);
-			WriteData(fid,prefix,'object',self,'class','smb','fieldname','dailypressure','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'yts',md.constants.yts);
-			WriteData(fid,prefix,'object',self,'class','smb','fieldname','dailyairdensity','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'yts',md.constants.yts);
-			WriteData(fid,prefix,'object',self,'class','smb','fieldname','dailyairhumidity','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'yts',md.constants.yts);
-			WriteData(fid,prefix,'object',self,'class','smb','fieldname','dailytemperature','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'yts',md.constants.yts);
-			WriteData(fid, prefix, 'object', self, 'fieldname', 'steps_per_step', 'format', 'Integer');
-			WriteData(fid, prefix, 'object', self, 'fieldname', 'averaging', 'format', 'Integer');
+			WriteData(fid,prefix,'object',self,'class','smb','fieldname','dailysnowfall','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'yts',yts);
+			WriteData(fid,prefix,'object',self,'class','smb','fieldname','dailyrainfall','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'yts',yts);
+			WriteData(fid,prefix,'object',self,'class','smb','fieldname','dailydsradiation','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'yts',yts);
+			WriteData(fid,prefix,'object',self,'class','smb','fieldname','dailydlradiation','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'yts',yts);
+			WriteData(fid,prefix,'object',self,'class','smb','fieldname','dailywindspeed','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'yts',yts);
+			WriteData(fid,prefix,'object',self,'class','smb','fieldname','dailypressure','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'yts',yts);
+			WriteData(fid,prefix,'object',self,'class','smb','fieldname','dailyairdensity','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'yts',yts);
+			WriteData(fid,prefix,'object',self,'class','smb','fieldname','dailyairhumidity','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'yts',yts);
+			WriteData(fid,prefix,'object',self,'class','smb','fieldname','dailytemperature','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'yts',yts);
+			% TODO: transient mode should be merged with SEMIC developed by Ruckamp et al. (2018).
+			if self.ismethod == 1% transient mode
+				WriteData(fid,prefix,'object',self,'class','smb','fieldname','Tamp','format','DoubleMat','mattype',1);
+				WriteData(fid,prefix,'object',self,'class','smb','fieldname','mask','format','DoubleMat','mattype',1);
+				WriteData(fid,prefix,'object',self,'class','smb','fieldname','hice','format','DoubleMat','mattype',1);
+				WriteData(fid,prefix,'object',self,'class','smb','fieldname','hsnow','format','DoubleMat','mattype',1);
+				WriteData(fid,prefix,'object',self,'class','smb','fieldname','qmr','format','DoubleMat','mattype',1);
+
+				WriteData(fid,prefix,'object',self,'class','smb','fieldname','hcrit','format','Double');
+				WriteData(fid,prefix,'object',self,'class','smb','fieldname','rcrit','format','Double');
+
+				%albedo
+				WriteData(fid,prefix,'object',self,'class','smb','fieldname','albedo','format','DoubleMat','mattype',1);
+				WriteData(fid,prefix,'object',self,'class','smb','fieldname','albedo_snow','format','DoubleMat','mattype',1);
+				WriteData(fid,prefix,'object',self,'class','smb','fieldname','albedo_scheme','format','Integer');
+				WriteData(fid,prefix,'object',self,'class','smb','fieldname','albi','format','Double');
+				WriteData(fid,prefix,'object',self,'class','smb','fieldname','albl','format','Double');
+				WriteData(fid,prefix,'object',self,'class','smb','fieldname','alb_smin','format','Double');
+				WriteData(fid,prefix,'object',self,'class','smb','fieldname','alb_smax','format','Double');
+
+				%albedo parameters for ?
+				%for slater
+				WriteData(fid,prefix,'object',self,'class','smb','fieldname','tmin','format','Double');
+				WriteData(fid,prefix,'object',self,'class','smb','fieldname','tmax','format','Double');
+				%for isba & denby
+				WriteData(fid,prefix,'object',self,'class','smb','fieldname','mcrit','format','Double');
+				%for isba
+				WriteData(fid,prefix,'object',self,'class','smb','fieldname','wcrit','format','Double');
+				WriteData(fid,prefix,'object',self,'class','smb','fieldname','tau_a','format','Double');
+				WriteData(fid,prefix,'object',self,'class','smb','fieldname','tau_f','format','Double');
+				%for alex
+				WriteData(fid,prefix,'object',self,'class','smb','fieldname','tmid','format','Double');
+				WriteData(fid,prefix,'object',self,'class','smb','fieldname','afac','format','Double');
+			end
+
+			WriteData(fid,prefix,'object',self,'fieldname','steps_per_step','format','Integer');
+			WriteData(fid,prefix,'object',self,'fieldname','averaging','format','Integer');
+
 			%process requested outputs
 			outputs = self.requested_outputs;
 			pos  = find(ismember(outputs,'default'));
-			if ~isempty(pos),
+			if ~isempty(pos)
 				outputs(pos) = []; %remove 'default' from outputs
 				outputs      = [outputs defaultoutputs(self,md)]; %add defaults
Index: /issm/trunk/src/m/classes/SMBsemic.py
===================================================================
--- /issm/trunk/src/m/classes/SMBsemic.py	(revision 28013)
+++ /issm/trunk/src/m/classes/SMBsemic.py	(revision 28013)
@@ -0,0 +1,333 @@
+from math import log
+
+import numpy as np
+
+from checkfield import checkfield
+from fielddisplay import fielddisplay
+from project3d import project3d
+from WriteData import WriteData
+
+
+class SMBsemic(object):
+    """SMBsemic class definition
+
+    Usage:
+        SMBsemic = SMBsemic()
+    """
+
+    def __init__(self, *args):  # {{{
+        self.dailysnowfall = np.nan
+        self.dailyrainfall = np.nan
+        self.dailydsradiation = np.nan
+        self.dailydlradiation = np.nan
+        self.dailypressure = np.nan
+        self.dailyairdensity = np.nan
+        self.dailyairhumidity = np.nan
+        self.dailytemperature = np.nan
+        self.Tamp = np.nan
+        self.mask = np.nan
+        self.hice = np.nan
+        self.hsnow = np.nan
+        self.desfac = 0
+        self.desfacElevation = 0
+        self.rlaps = 0
+        self.rdl = 0
+        self.s0gcm = np.nan
+        self.steps_per_step = 1
+        self.averaging = 0
+        self.requested_outputs = []
+
+        self.hcrit = 0
+        self.rcrit = 0
+
+        # albedo
+        self.albedo = 0 # required for first energy balance calculation of SEMIC
+        self.albedo_snow = 0 # required for ISBA method
+        self.albedo_scheme = 0
+        self.alb_smax = np.nan
+        self.alb_smin = np.nan
+        self.albi = np.nan
+        self.albl = np.nan
+
+        # albedo parameters depending on albedo_scheme
+        # for slater
+        self.tmin = np.nan
+        self.tmax = np.nan
+
+        # for isba & denby method
+        self.mcrit = np.nan
+
+        # for isba
+        self.tau_a = np.nan
+        self.tau_f = np.nan
+        self.wcrit = np.nan
+
+        # for alex
+        self.tmid = np.nan
+        self.afac = np.nan
+
+        # method
+        self.ismethod = 0
+
+        if len(args) == 0:
+            self.setdefaultparameters()
+        else:
+            raise Exception('constructor not supported')
+    # }}}
+
+    def __repr__(self):  # {{{
+        s = '   surface forcings parameters:\n'
+        s += '   Interface for coupling GCM data to the energy balance model SEMIC (Krapp et al (2017) https://doi.org/10.5194/tc-11-1519-2017).\n'
+        s += '   The implemented coupling uses daily mean GCM input to calculate yearly mean smb, accumulation, ablation, and surface temperature.\n'
+        s += '   smb and temperatures are updated every year\n'
+        s += '\n   SEMIC parameters:\n'
+        s += '{}\n'.format(fielddisplay(self, 'dailysnowfall', 'daily surface dailysnowfall [m/s]'))
+        s += '{}\n'.format(fielddisplay(self, 'dailyrainfall', 'daily surface dailyrainfall [m/s]'))
+        s += '{}\n'.format(fielddisplay(self, 'dailydsradiation', 'daily downwelling shortwave radiation [W/m2]'))
+        s += '{}\n'.format(fielddisplay(self, 'dailydlradiation', 'daily downwelling longwave radiation [W/m2]'))
+        s += '{}\n'.format(fielddisplay(self, 'dailywindspeed', 'daily surface wind speed [m/s]'))
+        s += '{}\n'.format(fielddisplay(self, 'dailypressure', 'daily surface pressure [Pa]'))
+        s += '{}\n'.format(fielddisplay(self, 'dailyairdensity', 'daily air density [kg/m3]'))
+        s += '{}\n'.format(fielddisplay(self, 'dailyairhumidity', 'daily air specific humidity [kg/kg]'))
+        s += '{}\n'.format(fielddisplay(self, 'rlaps', 'present day lapse rate (default is 7.4 [degree/km]; Erokhina et al. 2017)'))
+        s += '{}\n'.format(fielddisplay(self, 'desfac', 'desertification elevation factor (default is -log(2.0)/1000 [1/km]; Vizcaino et al. 2010)'))
+        s += '{}\n'.format(fielddisplay(self, 'rdl', 'longwave downward radiation decrease (default is 0.29 [W/m^2/km]; Marty et al. 2002)'))
+        s += '{}\n'.format(fielddisplay(self, 's0gcm', 'GCM reference elevation; (default is 0) [m]'))
+        s += '{}\n'.format(fielddisplay(self,'ismethod','method for calculating SMB with SEMIC. Default version of SEMIC is really slow. 0: steady, 1: transient (default: 0)'))
+        if self.ismethod: # transient mode
+            s += '{}\n'.format(fielddisplay(self,'desfacElevation','desertification elevation (default is 2000 m; Vizcaino et al. 2010)'))
+            s += '{}\n'.format(fielddisplay(self,'Tamp','amplitude of diurnal cycle [K]'))
+            s += '{}\n'.format(fielddisplay(self,'albedo','initial albedo [no unit]'))
+            s += '{}\n'.format(fielddisplay(self,'albedo_snow','initial albedo for snow [no unit]'))
+            s += '{}\n'.format(fielddisplay(self,'hice','initial thickness of ice [unit: m]'))
+            s += '{}\n'.format(fielddisplay(self,'hsnow','initial thickness of snow [unit: m]'))
+            s += '{}\n'.format(fielddisplay(self,'mask','masking for albedo. 0: ocean, 1: land, 2: ice (default: 2)'))
+            s += '{}\n'.format(fielddisplay(self,'hcrit','critical snow height for albedo [unit: m]'))
+            s += '{}\n'.format(fielddisplay(self,'rcrit','critical refreezing height for albedo [no unit]'))
+
+            s += '\nSEMIC albedo parameters.\n'
+            s += '{}\n'.format(fielddisplay(self,'albedo_scheme','albedo scheme for SEMIC. 0: none, 1: slater, 2: isba, 3: denby, 4: alex (default is 0)'))
+            s += '{}\n'.format(fielddisplay(self,'alb_smax','maximum snow albedo (default: 0.79)'))
+            s += '{}\n'.format(fielddisplay(self,'alb_smin','minimum snow albedo (default: 0.6)'))
+            s += '{}\n'.format(fielddisplay(self,'albi','background albedo for bare ice (default: 0.41)'))
+            s += '{}\n'.format(fielddisplay(self,'albl','background albedo for bare land (default: 0.07)'))
+        # albedo_scheme - 0: none, 1: slater, 2: isba, 3: denby, 4: alex.
+        if self.albedo_scheme == 0:
+            s += '\n\tSEMIC snow albedo parameter of None.\n'
+				s += '\t   albedo of snow is updated from albedo snow max (alb_smax).\n'
+            s += '\t   alb_snow = abl_smax \n '
+        elif self.albedo_scheme == 1:
+            s += '\n\tSEMIC snow albedo parameters of Slater et al, (1998).\n'
+            s += '\t   alb = alb_smax - (alb_smax - alb_smin)*tm^(3.0)\n'
+            s += '\t   tm  = 1 (tsurf > 273.15 K)\n'
+            s += '\t         tm = f*(tsurf-tmin) (tmin <= tsurf < 273.15)\n'
+            s += '\t         0 (tsurf < tmin)\n'
+            s += '\t   f = 1/(273.15-tmin)\n'
+            s += '{}\n'.format(fielddisplay(self, 'tmin', 'minimum temperature for which albedo decline become effective. (default: 263.15 K)[unit: K])'))
+            s += '{}\n'.format(fielddisplay(self, 'tmax', 'maxmium temperature for which albedo decline become effective. This value should be fixed. (default: 273.15 K)[unit: K])'))
+        elif self.albedo_scheme == 2:
+            s += '\n\tSEMIC snow albedo parameters of ISBA.? where is citation?\n'
+            s += '{}\n'.format(fielddisplay(self, 'mcrit', 'critical melt rate (default: 6e-8) [unit: m/sec]'))
+            s += '{}\n'.format(fielddisplay(self, 'wcrit', 'critical liquid water content (default: 15) [unit: kg/m2]'))
+            s += '{}\n'.format(fielddisplay(self, 'tau_a', 'dry albedo decline [unit: 1/day]'))
+            s += '{}\n'.format(fielddisplay(self, 'tau_f', 'wet albedo decline [unit: 1/day]'))
+        elif self.albedo_scheme == 3:
+            s += '\n\tSEMIC snow albedo parameters of Denby et al. (2002 Tellus)\n'
+            s += '{}\n'.format(fielddisplay(self,'mcrit','critical melt rate (defaut: 6e-8) [unit: m/sec]'))
+        elif self.albedo_scheme == 4:
+            s += '\n\tSEMIC snow albedo parameters of Alex.?\n'
+            s += '{}\n'.format(fielddisplay(self,'afac','[unit: ?]'))
+            s += '{}\n'.format(fielddisplay(self,'tmid','[unit: ?]'))
+        else:
+            raise Exception('ERROR: {} is not supported albedo scheme.'.format(self.albedo_scheme))
+
+        s += '{}\n'.format(fielddisplay(self, 'steps_per_step', 'number of smb steps per time step'))
+        s += '{}\n'.format(fielddisplay(self, 'averaging', 'averaging methods from short to long steps'))
+        s += '\t\t{}\n'.format('0: Arithmetic (default)')
+        s += '\t\t{}\n'.format('1: Geometric')
+        s += '\t\t{}\n'.format('2: Harmonic')
+        s += '{}\n'.format(fielddisplay(self, 'requested_outputs', 'additional outputs requested'))
+        return s
+    # }}}
+
+    def extrude(self, md):  # {{{
+        self.dailysnowfall = project3d(md, 'vector', self.dailysnowfall, 'type', 'node')
+        self.dailyrainfall = project3d(md, 'vector', self.dailyrainfall, 'type', 'node')
+        self.dailydsradiation = project3d(md, 'vector', self.dailydsradiation, 'type', 'node')
+        self.dailydlradiation = project3d(md, 'vector', self.dailydlradiation, 'type', 'node')
+        self.dailywindspeed = project3d(md, 'vector', self.dailywindspeed, 'type', 'node')
+        self.dailypressure = project3d(md, 'vector', self.dailypressure, 'type', 'node')
+        self.dailyairdensity = project3d(md, 'vector', self.dailyairdensity, 'type', 'node')
+        self.dailyairhumidity = project3d(md, 'vector', self.dailyairhumidity, 'type', 'node')
+        self.dailytemperature = project3d(md, 'vector', self.dailytemperature, 'type', 'node')
+        self.s0gcm = project3d(md, 'vector', self.s0gcm, 'type', 'node')
+        return self
+    # }}}
+
+    def defaultoutputs(self, md):  # {{{
+        return ['SmbMassBalance']
+    # }}}
+
+    def outputlists(self, md):  # {{{
+        if self.ismethod:
+            list = ['default','SmbMassBalance', 'SmbMassBalanceSnow', 'SmbMassBalanceIce',
+                  'SmbMassBalanceSecmi', 'SmbMelt', 'SmbRefreeze','SmbAccumulation',
+                  'SmbHIce', 'SmbHSnow', 'SmbAlbedo', 'SmbAlbedoSnow', 'TemperatureSEMIC',
+                  'SmbSemicQmr', 'TotalSmb', 'TotalSmbMelt', 'TotalSmbRefreeze']
+        else:
+            list = ['default','SmbMassBalance']
+        return list
+    # }}}
+
+    def initialize(self, md):  # {{{
+        if np.all(np.isnan(self.s0gcm)):
+            self.s0gcm = np.zeros((md.mesh.numberofvertices))
+            print('      no SMBsemic.s0gcm specified: values set as zero')
+
+        self.Tamp = 3 * np.ones((md.mesh.numberofvertices,))
+        #self.albedo = 0.8 * np.ones((md.mesh.numberofvertices,))
+        #self.albedo_snow = 0.5 * np.ones((md.mesh.numberofvertices,))
+        self.hice = np.zeros((md.mesh.numberofvertices,))
+        self.hsnow = 5 * np.ones((md.mesh.numberofvertices,))
+
+        return self
+    # }}}
+
+    def setdefaultparameters(self):  #{{{
+        # albedo parameters
+        self.albedo_scheme = 0
+        self.alb_smax = 0.79
+        self.alb_smin = 0.6
+        self.albi = 0.41
+        self.albl = 0.07
+
+        # albedo parameters for?
+        # for slater
+        self.tmin = 263.15
+        self.tmax = 273.15
+
+        # for isba & denby
+        self.mcrit = 6e-8
+
+        # for isba
+        self.tau_a = 0.008
+        self.tau_f = 0.24
+        self.wcrit = 15.0
+
+        # for alex
+        self.tmid = 273.35
+        self.afac = -0.18
+
+        self.hcrit = 0.028 # from Krapp et al. (2017)
+        self.rcrit = 0.85 # from Krapp et al. (2017)
+
+        self.desfac = -log(2.0) / 1000
+        self.desfacElevation = 2000
+        self.rlaps = 7.4
+        self.rdl = 0.29
+
+        self.ismethod = 0
+        self.requested_outputs = ['default']
+        return self
+    # }}}
+
+    def checkconsistency(self, md, solution, analyses):  # {{{
+        if 'MasstransportAnalysis' in analyses:
+            md = checkfield(md, 'fieldname', 'smb.desfac', '<=', 1, 'numel', 1)
+            md = checkfield(md, 'fieldname', 'smb.s0gcm', '>=', 0, 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofvertices, 1])
+            md = checkfield(md, 'fieldname', 'smb.rlaps', '>=', 0, 'numel', 1)
+            md = checkfield(md, 'fieldname', 'smb.rdl', '>=', 0, 'numel', 1)
+            md = checkfield(md, 'fieldname', 'smb.dailysnowfall', 'timeseries', 1, 'NaN', 1, 'Inf', 1)
+            md = checkfield(md, 'fieldname', 'smb.dailyrainfall', 'timeseries', 1, 'NaN', 1, 'Inf', 1)
+            md = checkfield(md, 'fieldname', 'smb.dailydsradiation', 'timeseries', 1, 'NaN', 1, 'Inf', 1)
+            md = checkfield(md, 'fieldname', 'smb.dailydlradiation', 'timeseries', 1, 'NaN', 1, 'Inf', 1)
+            md = checkfield(md, 'fieldname', 'smb.dailywindspeed', 'timeseries', 1, 'NaN', 1, 'Inf', 1)
+            md = checkfield(md, 'fieldname', 'smb.dailypressure', 'timeseries', 1, 'NaN', 1, 'Inf', 1)
+            md = checkfield(md, 'fieldname', 'smb.dailyairdensity', 'timeseries', 1, 'NaN', 1, 'Inf', 1)
+            md = checkfield(md, 'fieldname', 'smb.dailyairhumidity', 'timeseries', 1, 'NaN', 1, 'Inf', 1)
+            md = checkfield(md, 'fieldname', 'smb.dailytemperature', 'timeseries', 1, 'NaN', 1, 'Inf', 1)
+
+            # TODO: transient model should be merged with SEMIC developed by Ruckamp et al. (2018)
+            md = checkfield(md, 'fieldname', 'smb.ismethod', 'numel', 1, 'values', [0, 1])
+            if self.ismethod: # transient mode
+                md = checkfield(md, 'fieldname', 'smb.desfacElevation', '>=', 0, 'numel', 1)
+                md = checkfield(md, 'fieldname', 'smb.albedo_scheme', 'NaN', 1, 'Inf', 1, 'numel', 1, 'values', [0, 1, 2, 3, 4])
+                md = checkfield(md, 'fieldname', 'smb.alb_smax', '>=', 0, 'NaN', 1, 'Inf', 1, 'numel', 1)
+                md = checkfield(md, 'fieldname', 'smb.mask', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofvertices, 1], 'values', [0, 1, 2])
+
+                # initial values
+                md = checkfield(md, 'fieldname', 'smb.albedo', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofvertices, 1])
+                md = checkfield(md, 'fieldname', 'smb.albedo_snow', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofvertices, 1])
+                md = checkfield(md, 'fieldname', 'smb.alb_smax', '>=', 0, '<=', 1, 'NaN', 1, 'Inf', 1, 'numel', 1)
+                md = checkfield(md, 'fieldname', 'smb.alb_smin', '>=', 0, '<=', 1, 'NaN', 1, 'Inf', 1, 'numel', 1)
+                md = checkfield(md, 'fieldname', 'smb.albi', '>=', 0, '<=', 1, 'NaN', 1, 'Inf', 1, 'numel', 1)
+                md = checkfield(md, 'fieldname', 'smb.albl', '>=', 0, '<=', 1, 'NaN', 1, 'Inf', 1, 'numel', 1)
+                md = checkfield(md, 'fieldname', 'smb.hice', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofvertices, 1])
+                md = checkfield(md, 'fieldname', 'smb.hsnow', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofvertices, 1])
+        md = checkfield(md, 'fieldname', 'smb.steps_per_step', '>=', 1, 'numel', [1])
+        md = checkfield(md, 'fieldname', 'smb.averaging', 'numel', [1], 'values', [0, 1, 2])
+        md = checkfield(md, 'fieldname', 'smb.requested_outputs', 'stringrow', 1)
+        return md
+    # }}}
+
+    def marshall(self, prefix, md, fid):  # {{{
+        yts = md.constants.yts
+        WriteData(fid, prefix, 'name', 'md.smb.model', 'data', 12, 'format', 'Integer')
+        WriteData(fid, prefix, 'object', self, 'class', 'smb', 'fieldname', 'ismethod', 'format', 'Integer', 'values', [0, 1])
+        WriteData(fid, prefix, 'object', self, 'class' ,'smb', 'fieldname', 'desfac', 'format', 'Double')
+        WriteData(fid, prefix, 'object', self, 'class', 'smb', 'fieldname', 'desfacElevation', 'format', 'Double')
+        WriteData(fid, prefix, 'object', self, 'class' ,'smb', 'fieldname', 's0gcm', 'format', 'DoubleMat', 'mattype', 1)
+        WriteData(fid, prefix, 'object', self, 'class' ,'smb', 'fieldname', 'rlaps', 'format', 'Double')
+        WriteData(fid, prefix, 'object', self, 'class' ,'smb', 'fieldname', 'rdl', 'format', 'Double')
+        WriteData(fid, prefix, 'object', self, 'class' ,'smb', 'fieldname', 'dailysnowfall', 'format', 'DoubleMat', 'mattype', 1, 'timeserieslength', md.mesh.numberofvertices + 1, 'yts', yts)
+        WriteData(fid, prefix, 'object', self, 'class' ,'smb', 'fieldname', 'dailyrainfall', 'format', 'DoubleMat', 'mattype', 1, 'timeserieslength', md.mesh.numberofvertices + 1, 'yts', yts)
+        WriteData(fid, prefix, 'object', self, 'class' ,'smb', 'fieldname', 'dailydsradiation', 'format', 'DoubleMat', 'mattype', 1, 'timeserieslength', md.mesh.numberofvertices + 1, 'yts', yts)
+        WriteData(fid, prefix, 'object', self, 'class' ,'smb', 'fieldname', 'dailydlradiation', 'format', 'DoubleMat', 'mattype', 1, 'timeserieslength', md.mesh.numberofvertices + 1, 'yts', yts)
+        WriteData(fid, prefix, 'object', self, 'class' ,'smb', 'fieldname', 'dailywindspeed', 'format', 'DoubleMat', 'mattype', 1, 'timeserieslength', md.mesh.numberofvertices + 1, 'yts', yts)
+        WriteData(fid, prefix, 'object', self, 'class' ,'smb', 'fieldname', 'dailypressure', 'format', 'DoubleMat', 'mattype', 1, 'timeserieslength', md.mesh.numberofvertices + 1, 'yts', yts)
+        WriteData(fid, prefix, 'object', self, 'class' ,'smb', 'fieldname', 'dailyairdensity', 'format', 'DoubleMat', 'mattype', 1, 'timeserieslength', md.mesh.numberofvertices + 1, 'yts', yts)
+        WriteData(fid, prefix, 'object', self, 'class' ,'smb', 'fieldname', 'dailyairhumidity', 'format', 'DoubleMat', 'mattype', 1, 'timeserieslength', md.mesh.numberofvertices + 1, 'yts', yts)
+        WriteData(fid, prefix, 'object', self, 'class' ,'smb', 'fieldname', 'dailytemperature', 'format', 'DoubleMat', 'mattype', 1, 'timeserieslength', md.mesh.numberofvertices + 1, 'yts', yts)
+        # TODO: transient mode should be merged with SEMIC developed by Ruckamp et al. (2018).
+        if self.ismethod: # transient mode
+            WriteData(fid, prefix, 'object', self, 'class', 'smb', 'fieldname', 'Tamp', 'format', 'DoubleMat', 'mattype', 1)
+            WriteData(fid, prefix, 'object', self, 'class', 'smb', 'fieldname', 'mask', 'format', 'DoubleMat', 'mattype', 1)
+            WriteData(fid, prefix, 'object', self, 'class', 'smb', 'fieldname', 'hice', 'format', 'DoubleMat', 'mattype', 1)
+            WriteData(fid, prefix, 'object', self, 'class', 'smb', 'fieldname', 'hsnow', 'format', 'DoubleMat', 'mattype', 1)
+
+            WriteData(fid, prefix, 'object', self, 'class', 'smb', 'fieldname', 'hcrit', 'format', 'Double')
+            WriteData(fid, prefix, 'object', self, 'class', 'smb', 'fieldname', 'rcrit', 'format', 'Double')
+
+            # albedo
+            WriteData(fid, prefix, 'object', self, 'class', 'smb', 'fieldname', 'albedo', 'format', 'DoubleMat', 'mattype', 1)
+            WriteData(fid, prefix, 'object', self, 'class', 'smb', 'fieldname', 'albedo_snow', 'format', 'DoubleMat', 'mattype', 1)
+            WriteData(fid, prefix, 'object', self, 'class', 'smb', 'fieldname', 'albedo_scheme', 'format', 'Integer')
+            WriteData(fid, prefix, 'object', self, 'class', 'smb', 'fieldname', 'albi', 'format', 'Double')
+            WriteData(fid, prefix, 'object', self, 'class', 'smb', 'fieldname', 'albl', 'format', 'Double')
+            WriteData(fid, prefix, 'object', self, 'class', 'smb', 'fieldname', 'alb_smin', 'format', 'Double')
+            WriteData(fid, prefix, 'object', self, 'class', 'smb', 'fieldname', 'alb_smax', 'format', 'Double')
+
+            # albedo parameters for ?
+            # for slater
+            WriteData(fid, prefix, 'object', self, 'class', 'smb', 'fieldname', 'tmin', 'format', 'Double')
+            WriteData(fid, prefix, 'object', self, 'class', 'smb', 'fieldname', 'tmax', 'format', 'Double')
+            # for isba & denby
+            WriteData(fid, prefix, 'object', self, 'class', 'smb', 'fieldname', 'mcrit', 'format', 'Double')
+            # for isba
+            WriteData(fid, prefix, 'object', self, 'class', 'smb', 'fieldname', 'wcrit', 'format', 'Double')
+            WriteData(fid, prefix, 'object', self, 'class', 'smb', 'fieldname', 'tau_a', 'format', 'Double')
+            WriteData(fid, prefix, 'object', self, 'class', 'smb', 'fieldname', 'tau_f', 'format', 'Double')
+            # for alex
+            WriteData(fid, prefix, 'object', self, 'class', 'smb', 'fieldname', 'tmid', 'format', 'Double')
+            WriteData(fid, prefix, 'object', self, 'class', 'smb', 'fieldname', 'afac', 'format', 'Double')
+        WriteData(fid, prefix, 'object', self, 'fieldname', 'steps_per_step', 'format', 'Integer')
+        WriteData(fid, prefix, 'object', self, 'fieldname', 'averaging', 'format', 'Integer')
+
+        # Process requested outputs
+        outputs = self.requested_outputs
+        indices = [i for i, x in enumerate(outputs) if x == 'default']
+        if len(indices) > 0:
+            outputscopy = outputs[0:max(0, indices[0] - 1)] + self.defaultoutputs(md) + outputs[indices[0] + 1:]
+            outputs = outputscopy
+        WriteData(fid, prefix, 'data', outputs, 'name', 'md.smb.requested_outputs', 'format', 'StringArray')
+    # }}}
Index: /issm/trunk/src/m/classes/adm1qn3inversion.m
===================================================================
--- /issm/trunk/src/m/classes/adm1qn3inversion.m	(revision 28012)
+++ /issm/trunk/src/m/classes/adm1qn3inversion.m	(revision 28013)
@@ -10,4 +10,5 @@
 		maxiter                     = 0
 		dxmin                       = 0
+		dfmin_frac                  = 0
 		gttol                       = 0
 
@@ -38,6 +39,7 @@
 
 			%m1qn3 parameters
-			self.dxmin  = 0.1;
-			self.gttol = 1e-4;
+			self.dxmin      = 0.1;
+			self.dfmin_frac = 1.;
+			self.gttol      = 1e-4;
 
 		end % }}}
@@ -55,4 +57,5 @@
 			md = checkfield(md,'fieldname','inversion.maxiter','numel',1,'>=',0);
 			md = checkfield(md,'fieldname','inversion.dxmin','numel',1,'>',0);
+         md = checkfield(md,'fieldname','inversion.dfmin_frac','numel',1,'>=',0., '<=', 1.);
 			md = checkfield(md,'fieldname','inversion.gttol','numel',1,'>',0);
 
@@ -64,4 +67,5 @@
 			fielddisplay(self,'maxiter','maximum number of Function evaluation (forward run)');
 			fielddisplay(self,'dxmin','convergence criterion: two points less than dxmin from eachother (sup-norm) are considered identical');
+         fielddisplay(self,'dfmin_frac','expected reduction of J during the first step (e.g., 0.3=30% reduction in cost function)');
 			fielddisplay(self,'gttol','convergence criterion: ||g(X)||/||g(X0)|| (g(X0): gradient at initial guess X0)');
 		end % }}}
@@ -76,4 +80,5 @@
 			WriteData(fid,prefix,'object',self,'class','inversion','fieldname','maxiter','format','Integer');
 			WriteData(fid,prefix,'object',self,'class','inversion','fieldname','dxmin','format','Double');
+         WriteData(fid,prefix,'object',self,'class','inversion','fieldname','dfmin_frac','format','Double');
 			WriteData(fid,prefix,'object',self,'class','inversion','fieldname','gttol','format','Double');
 
@@ -85,4 +90,5 @@
 			writejsdouble(fid,[modelname '.inversion.maxiter'],self.maxiter);
 			writejsdouble(fid,[modelname '.inversion.dxmin'],self.dxmin);
+         writejsdouble(fid,[modelname '.inversion.dfmin_frac'],self.dfmin_frac);
 			writejsdouble(fid,[modelname '.inversion.gttol'],self.gttol);
 
Index: /issm/trunk/src/m/classes/amr.py
===================================================================
--- /issm/trunk/src/m/classes/amr.py	(revision 28012)
+++ /issm/trunk/src/m/classes/amr.py	(revision 28013)
@@ -11,5 +11,5 @@
     """
 
-    def __init__(self): #{{{
+    def __init__(self):  # {{{
         self.hmin = 0
         self.hmax = 0
@@ -33,7 +33,7 @@
 
         self.setdefaultparameters()
-    #}}}
+    # }}}
 
-    def __repr__(self): #{{{
+    def __repr__(self):  # {{{
         s = '   amr parameters:\n'
         s += '{}\n'.format(fielddisplay(self, 'hmin', 'minimum element length'))
@@ -56,7 +56,7 @@
         s += '{}\n'.format(fielddisplay(self, 'restart', 'indicates if ReMesh() will call before first time step'))
         return s
-    #}}}
+    # }}}
 
-    def setdefaultparameters(self): #{{{
+    def setdefaultparameters(self):  # {{{
         self.hmin = 100
         self.hmax = 100e3
@@ -89,7 +89,7 @@
         self.restart = 0
         return self
-    #}}}
+    # }}}
 
-    def checkconsistency(self, md, solution, analyses): #{{{
+    def checkconsistency(self, md, solution, analyses):  # {{{
         md = checkfield(md, 'fieldname', 'amr.hmax', 'numel', [1], '>', 0, 'NaN', 1)
         md = checkfield(md, 'fieldname', 'amr.hmin', 'numel', [1], '>', 0, '<', self.hmax, 'NaN', 1)
@@ -112,5 +112,5 @@
    # }}}
 
-    def marshall(self, prefix, md, fid): #{{{
+    def marshall(self, prefix, md, fid):  # {{{
         WriteData(fid, prefix, 'name', 'md.amr.type', 'data', 1, 'format', 'Integer')
         WriteData(fid, prefix, 'object', self, 'fieldname', 'hmin', 'format', 'Double')
@@ -133,3 +133,3 @@
         WriteData(fid, prefix, 'object', self, 'fieldname', 'deviatoricerror_maximum', 'format', 'Double')
         WriteData(fid, prefix, 'object', self, 'class', 'amr', 'fieldname', 'restart', 'format', 'Integer')
-    #}}}
+    # }}}
Index: /issm/trunk/src/m/classes/autodiff.m
===================================================================
--- /issm/trunk/src/m/classes/autodiff.m	(revision 28012)
+++ /issm/trunk/src/m/classes/autodiff.m	(revision 28013)
@@ -58,9 +58,17 @@
 			for i=1:numel(self.dependents),
 				dep=self.dependents{i};
-				md=checkconsistency(dep,md,solution,analyses);
+				if isempty(dep)
+					md = checkmessage(md,['md.autodiff.dependents{' num2str(i) '} is empty!']);
+				else
+					md=checkconsistency(dep,md,solution,analyses);
+				end
 			end
 			for i=1:numel(self.independents),
 				indep=self.independents{i};
-				md=checkconsistency(indep,md,i,solution,analyses,self.driver);
+				if isempty(indep)
+					md = checkmessage(md,['md.autodiff.independents{' num2str(i) '} is empty!']);
+				else
+					md=checkconsistency(indep,md,i,solution,analyses,self.driver);
+				end
 			end
 
@@ -107,14 +115,9 @@
 			if(num_dependent_objects),
 				names={};
-				indices=zeros(num_dependent_objects,1);
-
 				for i=1:num_dependent_objects,
 					dep=self.dependents{i};
-
 					names{i}=dep.name;
-					indices(i)=dep.index;
 				end
 				WriteData(fid,prefix,'data',names,'name','md.autodiff.dependent_object_names','format','StringArray');
-				WriteData(fid,prefix,'data',indices,'name','md.autodiff.dependent_object_indices','format','IntMat','mattype',3);
 			end
 			%}}}
@@ -122,46 +125,12 @@
 			num_independent_objects=numel(self.independents);
 			WriteData(fid,prefix,'data',num_independent_objects,'name','md.autodiff.num_independent_objects','format','Integer');
-
-			if(num_independent_objects),
-				names=cell(num_independent_objects,1);
-				types=zeros(num_independent_objects,1);
-
-				max_parameters_pre = [];
-				min_parameters_pre = [];
-				M_size = false;
-				for i=1:num_independent_objects,
-					indep=self.independents{i};
-					if M_size == false && indep.control_size>1
-						M_size = true;
-					end
-				end
-
-				for i=1:num_independent_objects,
-					indep=self.independents{i};
-
-					names{i}=indep.name;
-					types(i)=indep.typetoscalar();
-					
-					if indep.control_size == 1 && M_size == true
-						indep.min_parameters = [indep.min_parameters;NaN];
-						indep.max_parameters = [indep.max_parameters;NaN];
-					end
-					min_parameters_pre=[min_parameters_pre,indep.min_parameters];
-					max_parameters_pre=[max_parameters_pre,indep.max_parameters];
-					scaling_factors(i)=indep.control_scaling_factor;
-					control_sizes(i) = indep.control_size;
-
-				end
-
-				max_parameters=reshape(max_parameters_pre,size(max_parameters_pre,1),sum(control_sizes));
-				min_parameters=reshape(min_parameters_pre,size(min_parameters_pre,1),sum(control_sizes));
-
-				WriteData(fid,prefix,'data',names,'name','md.autodiff.independent_object_names','format','StringArray');
-				WriteData(fid,prefix,'data',types,'name','md.autodiff.independent_object_types','format','IntMat','mattype',3);
-				WriteData(fid,prefix,'data',min_parameters,'name','md.autodiff.independent_min_parameters','format','DoubleMat','mattype',3);
-	         WriteData(fid,prefix,'data',max_parameters,'name','md.autodiff.independent_max_parameters','format','DoubleMat','mattype',3);
-	         WriteData(fid,prefix,'data',scaling_factors,'name','md.autodiff.independent_scaling_factors','format','IntMat','mattype',3);
-				WriteData(fid,prefix,'data',control_sizes,'name','md.autodiff.independent_control_sizes','format','IntMat','mattype',3);
-
+			for i=1:num_independent_objects
+				indep=self.independents{i};
+				WriteData(fid,prefix,'data',indep.name,'name','md.autodiff.independent_name','format','String');
+				WriteData(fid,prefix,'data',indep.typetoscalar(),'name','md.autodiff.independent_type','format','Integer');
+				WriteData(fid,prefix,'data',indep.min_parameters,'name','md.autodiff.independent_min_parameters','format','DoubleMat','mattype',3);
+				WriteData(fid,prefix,'data',indep.max_parameters,'name','md.autodiff.independent_max_parameters','format','DoubleMat','mattype',3);
+				WriteData(fid,prefix,'data',indep.control_scaling_factor,'name','md.autodiff.independent_scaling_factor','format','Double');
+				WriteData(fid,prefix,'data',indep.control_size,'name','md.autodiff.independent_control_size','format','Integer');
 			end
 			%}}}
Index: /issm/trunk/src/m/classes/autodiff.py
===================================================================
--- /issm/trunk/src/m/classes/autodiff.py	(revision 28012)
+++ /issm/trunk/src/m/classes/autodiff.py	(revision 28013)
@@ -9,9 +9,8 @@
 
 class autodiff(object):
-    """
-    AUTODIFF class definition
-
-       Usage:
-          autodiff = autodiff()
+    """autodiff class definition
+
+    Usage:
+        autodiff = autodiff()
     """
     def __init__(self, *args):  # {{{
@@ -20,11 +19,11 @@
         self.independents = []
         self.driver = 'fos_forward'
-        self.obufsize = float('NaN')
-        self.lbufsize = float('NaN')
-        self.cbufsize = float('NaN')
-        self.tbufsize = float('NaN')
-        self.gcTriggerMaxSize = float('NaN')
-        self.gcTriggerRatio = float('NaN')
-        self.tapeAlloc = float('NaN')
+        self.obufsize = np.nan
+        self.lbufsize = np.nan
+        self.cbufsize = np.nan
+        self.tbufsize = np.nan
+        self.gcTriggerMaxSize = np.nan
+        self.gcTriggerRatio = np.nan
+        self.tapeAlloc = np.nan
         if not len(args):
             self.setdefaultparameters()
@@ -34,16 +33,16 @@
 
     def __repr__(self):  # {{{
-        s = "      automatic differentiation parameters:\n"
-        s += "%s\n" % fielddisplay(self, 'isautodiff', "indicates if the automatic differentiation is activated")
-        s += "%s\n" % fielddisplay(self, 'dependents', "list of dependent variables")
-        s += "%s\n" % fielddisplay(self, 'independents', "list of independent variables")
-        s += "%s\n" % fielddisplay(self, 'driver', "ADOLC driver ('fos_forward' or 'fov_forward')")
-        s += "%s\n" % fielddisplay(self, 'obufsize', "Number of operations per buffer (== OBUFSIZE in usrparms.h)")
-        s += "%s\n" % fielddisplay(self, 'lbufsize', "Number of locations per buffer (== LBUFSIZE in usrparms.h)")
-        s += "%s\n" % fielddisplay(self, 'cbufsize', "Number of values per buffer (== CBUFSIZE in usrparms.h)")
-        s += "%s\n" % fielddisplay(self, 'tbufsize', "Number of taylors per buffer (<=TBUFSIZE in usrparms.h)")
-        s += "%s\n" % fielddisplay(self, 'gcTriggerRatio', "free location block sorting / consolidation triggered if the ratio between allocated and used locations exceeds gcTriggerRatio")
-        s += "%s\n" % fielddisplay(self, 'gcTriggerMaxSize', "free location block sorting / consolidation triggered if the allocated locations exceed gcTriggerMaxSize)")
-        s += "%s\n" % fielddisplay(self, 'tapeAlloc', 'Iteration count of a priori memory allocation of the AD tape')
+        s = '      automatic differentiation parameters:\n'
+        s += '{}\n'.format(fielddisplay(self, 'isautodiff', "indicates if the automatic differentiation is activated"))
+        s += '{}\n'.format(fielddisplay(self, 'dependents', "list of dependent variables"))
+        s += '{}\n'.format(fielddisplay(self, 'independents', "list of independent variables"))
+        s += '{}\n'.format(fielddisplay(self, 'driver', "ADOLC driver ('fos_forward' or 'fov_forward')"))
+        s += '{}\n'.format(fielddisplay(self, 'obufsize', "Number of operations per buffer (== OBUFSIZE in usrparms.h)"))
+        s += '{}\n'.format(fielddisplay(self, 'lbufsize', "Number of locations per buffer (== LBUFSIZE in usrparms.h)"))
+        s += '{}\n'.format(fielddisplay(self, 'cbufsize', "Number of values per buffer (== CBUFSIZE in usrparms.h)"))
+        s += '{}\n'.format(fielddisplay(self, 'tbufsize', "Number of taylors per buffer (<=TBUFSIZE in usrparms.h)"))
+        s += '{}\n'.format(fielddisplay(self, 'gcTriggerRatio', "free location block sorting / consolidation triggered if the ratio between allocated and used locations exceeds gcTriggerRatio"))
+        s += '{}\n'.format(fielddisplay(self, 'gcTriggerMaxSize', "free location block sorting / consolidation triggered if the allocated locations exceed gcTriggerMaxSize)"))
+        s += '{}\n'.format(fielddisplay(self, 'tapeAlloc', 'Iteration count of a priori memory allocation of the AD tape'))
 
         return s
@@ -62,5 +61,5 @@
 
     def checkconsistency(self, md, solution, analyses):  # {{{
-        #Early return
+        # Early return
         if not self.isautodiff:
             return md
@@ -74,8 +73,8 @@
         md = checkfield(md, 'fieldname', 'autodiff.tapeAlloc', '>=', 0)
 
-    #Driver value:
+        # Driver value
         md = checkfield(md, 'fieldname', 'autodiff.driver', 'values', ['fos_forward', 'fov_forward', 'fov_forward_all', 'fos_reverse', 'fov_reverse', 'fov_reverse_all'])
 
-    #go through our dependents and independents and check consistency:
+        # Go through our dependents and independents and check consistency
         for dep in self.dependents:
             dep.checkconsistency(md, solution, analyses)
@@ -90,5 +89,5 @@
         WriteData(fid, prefix, 'object', self, 'fieldname', 'driver', 'format', 'String')
 
-        #early return
+        # Early return
         if not self.isautodiff:
             WriteData(fid, prefix, 'data', False, 'name', 'md.autodiff.mass_flux_segments_present', 'format', 'Boolean')
@@ -96,5 +95,5 @@
             return
 
-        #buffer sizes {{{
+        # Buffer sizes
         WriteData(fid, prefix, 'object', self, 'fieldname', 'obufsize', 'format', 'Double')
         WriteData(fid, prefix, 'object', self, 'fieldname', 'lbufsize', 'format', 'Double')
@@ -104,6 +103,6 @@
         WriteData(fid, prefix, 'object', self, 'fieldname', 'gcTriggerMaxSize', 'format', 'Double')
         WriteData(fid, prefix, 'object', self, 'fieldname', 'tapeAlloc', 'format', 'Integer')
-        #}}}
-        #process dependent variables {{{
+
+        # Process dependent variables
         num_dependent_objects = len(self.dependents)
         WriteData(fid, prefix, 'data', num_dependent_objects, 'name', 'md.autodiff.num_dependent_objects', 'format', 'Integer')
@@ -111,29 +110,22 @@
         if num_dependent_objects:
             names = []
-            indices = np.zeros(num_dependent_objects)
-
             for i, dep in enumerate(self.dependents):
                 names.append(dep.name)
-                indices[i] = dep.index
 
             WriteData(fid, prefix, 'data', names, 'name', 'md.autodiff.dependent_object_names', 'format', 'StringArray')
-            WriteData(fid, prefix, 'data', indices, 'name', 'md.autodiff.dependent_object_indices', 'format', 'IntMat', 'mattype', 3)
-            #}}}
-        #process independent variables {{{
+
+        # Process independent variables
         num_independent_objects = len(self.independents)
         WriteData(fid, prefix, 'data', num_independent_objects, 'name', 'md.autodiff.num_independent_objects', 'format', 'Integer')
 
-        if num_independent_objects:
-            names = [None] * num_independent_objects
-            types = np.zeros(num_independent_objects)
-
-            for i, indep in enumerate(self.independents):
-                names[i] = indep.name
-                types[i] = indep.typetoscalar()
-
-            WriteData(fid, prefix, 'data', names, 'name', 'md.autodiff.independent_object_names', 'format', 'StringArray')
-            WriteData(fid, prefix, 'data', types, 'name', 'md.autodiff.independent_object_types', 'format', 'IntMat', 'mattype', 3)
-            #}}}
-        #if driver is fos_forward, build index:  {{{
+        for indep in self.independents:
+            WriteData(fid, prefix, 'data', indep.name, 'name', 'md.autodiff.independent_name', 'format', 'String')
+            WriteData(fid, prefix, 'data', indep.typetoscalar(), 'name', 'md.autodiff.independent_type', 'format', 'Integer')
+            WriteData(fid, prefix, 'data', indep.min_parameters, 'name','md.autodiff.independent_min_parameters','format', 'DoubleMat', 'mattype', 3)
+            WriteData(fid, prefix, 'data', indep.max_parameters, 'name', 'md.autodiff.independent_max_parameters', 'format', 'DoubleMat', 'mattype', 3)
+            WriteData(fid, prefix, 'data', indep.control_scaling_factor, 'name', 'md.autodiff.independent_scaling_factor', 'format', 'Double')
+            WriteData(fid, prefix, 'data', indep.control_size, 'name', 'md.autodiff.independent_control_size', 'format', 'Integer')
+
+        # If driver is fos_forward, build index
         if strcmpi(self.driver, 'fos_forward'):
             index = 0
@@ -149,8 +141,8 @@
                         index += indep.nods
 
-            index -= 1  #get c - index numbering going
+            index -= 1  # get c-index numbering going
             WriteData(fid, prefix, 'data', index, 'name', 'md.autodiff.fos_forward_index', 'format', 'Integer')
-            #}}}
-        #if driver is fos_reverse, build index:  {{{
+
+        # If driver is fos_reverse, build index
         if strcmpi(self.driver, 'fos_reverse'):
             index = 0
@@ -163,8 +155,8 @@
                     index += 1
 
-            index -= 1  #get c - index numbering going
+            index -= 1  # get c-index numbering going
             WriteData(fid, prefix, 'data', index, 'name', 'md.autodiff.fos_reverse_index', 'format', 'Integer')
-            #}}}
-        #if driver is fov_forward, build indices:  {{{
+
+        # If driver is fov_forward, build indices
         if strcmpi(self.driver, 'fov_forward'):
             indices = 0
@@ -180,8 +172,8 @@
                         indices += indep.nods
 
-            indices -= 1  #get c - indices numbering going
+            indices -= 1  # get c-indices numbering going
             WriteData(fid, prefix, 'data', indices, 'name', 'md.autodiff.fov_forward_indices', 'format', 'IntMat', 'mattype', 3)
-            #}}}
-        #deal with mass fluxes:  {{{
+
+        # Deal with mass fluxes
         mass_flux_segments = [dep.segments for dep in self.dependents if strcmpi(dep.name, 'MassFlux')]
 
@@ -192,16 +184,18 @@
             flag = False
         WriteData(fid, prefix, 'data', flag, 'name', 'md.autodiff.mass_flux_segments_present', 'format', 'Boolean')
-        #}}}
-        #deal with trace keep on: {{{
+
+        # Deal with trace keep on
         keep = False
 
-        #From ADOLC userdoc:
-        # The optional integer argument keep of trace on determines whether the numerical values of all active variables are
-        # recorded in a buffered temporary array or file called the taylor stack. This option takes effect if keep = 1 and
-        # prepares the scene for an immediately following gradient evaluation by a call to a routine implementing the reverse
-        # mode as described in the Section 4 and Section 5.
+        # From ADOLC userdoc:
+        # The optional integer argument keep of trace on determines whether the 
+        # numerical values of all active variables are recorded in a buffered 
+        # temporary array or file called the taylor stack. This option takes 
+        # effect if keep = 1 and prepares the scene for an immediately 
+        # following gradient evaluation by a call to a routine implementing the 
+        # reverse mode as described in the Section 4 and Section 5.
         #
         if len(self.driver) <= 3:
-            keep = False  #there is no "_reverse" string within the driver string:
+            keep = False  # there is no "_reverse" string within the driver string
         else:
             if strncmpi(self.driver[3:], '_reverse', 8):
@@ -210,5 +204,5 @@
                 keep = False
         WriteData(fid, prefix, 'data', keep, 'name', 'md.autodiff.keep', 'format', 'Boolean')
-    #}}}
+    # }}}
 
         return
Index: /issm/trunk/src/m/classes/balancethickness.py
===================================================================
--- /issm/trunk/src/m/classes/balancethickness.py	(revision 28012)
+++ /issm/trunk/src/m/classes/balancethickness.py	(revision 28013)
@@ -23,5 +23,5 @@
         self.setdefaultparameters()
 
-    #}}}
+    # }}}
 
     def __repr__(self):  # {{{
@@ -32,5 +32,5 @@
         string = "%s\n%s" % (string, fielddisplay(self, 'stabilization', "0: None, 1: SU, 2: SSA's artificial diffusivity, 3:DG"))
         return string
-    #}}}
+    # }}}
 
     def setdefaultparameters(self):  # {{{
@@ -38,5 +38,5 @@
         self.stabilization = 1
         return self
-    #}}}
+    # }}}
 
     def checkconsistency(self, md, solution, analyses):  # {{{
Index: /issm/trunk/src/m/classes/basalforcings.m
===================================================================
--- /issm/trunk/src/m/classes/basalforcings.m	(revision 28012)
+++ /issm/trunk/src/m/classes/basalforcings.m	(revision 28013)
@@ -8,4 +8,5 @@
 		groundedice_melting_rate  = NaN;
 		floatingice_melting_rate  = NaN;
+		perturbation_melting_rate = NaN;
 		geothermalflux            = NaN;
 	end
@@ -26,4 +27,5 @@
 			fielddisplay(self,'groundedice_melting_rate','basal melting rate (positive if melting) [m/yr]');
 			fielddisplay(self,'floatingice_melting_rate','basal melting rate (positive if melting) [m/yr]');
+			fielddisplay(self,'perturbation_melting_rate','(optional) perturbation in basal melting rate under floating ice [m/yr]');
 			fielddisplay(self,'geothermalflux','geothermal heat flux [W/m^2]');
 
@@ -32,4 +34,5 @@
 			self.groundedice_melting_rate=project3d(md,'vector',self.groundedice_melting_rate,'type','node','layer',1); 
 			self.floatingice_melting_rate=project3d(md,'vector',self.floatingice_melting_rate,'type','node','layer',1); 
+			self.perturbation_melting_rate=project3d(md,'vector',self.perturbation_melting_rate,'type','node','layer',1); 
 			self.geothermalflux=project3d(md,'vector',self.geothermalflux,'type','node','layer',1); %bedrock only gets geothermal flux
 		end % }}}
@@ -74,4 +77,5 @@
 			WriteData(fid,prefix,'object',self,'fieldname','floatingice_melting_rate','format','DoubleMat','mattype',1,'scale',1./yts,'timeserieslength',md.mesh.numberofvertices+1,'yts',yts)
 			WriteData(fid,prefix,'object',self,'fieldname','geothermalflux','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'yts',yts);
+			WriteData(fid,prefix,'object',self,'fieldname','perturbation_melting_rate','format','DoubleMat','name','md.basalforcings.perturbation_melting_rate','mattype',1,'scale',1./yts,'timeserieslength',md.mesh.numberofvertices+1,'yts',md.constants.yts)
 		end % }}}
 		function savemodeljs(self,fid,modelname) % {{{
@@ -79,4 +83,5 @@
 			writejs1Darray(fid,[modelname '.basalforcings.groundedice_melting_rate'],self.groundedice_melting_rate);
 			writejs1Darray(fid,[modelname '.basalforcings.floatingice_melting_rate'],self.floatingice_melting_rate);
+			writejs1Darray(fid,[modelname '.basalforcings.perturbation_melting_rate'],self.perturbation_melting_rate);
 			writejs1Darray(fid,[modelname '.basalforcings.geothermalflux'],self.geothermalflux);
 
Index: /issm/trunk/src/m/classes/basalforcings.py
===================================================================
--- /issm/trunk/src/m/classes/basalforcings.py	(revision 28012)
+++ /issm/trunk/src/m/classes/basalforcings.py	(revision 28013)
@@ -17,21 +17,24 @@
         self.groundedice_melting_rate = np.nan
         self.floatingice_melting_rate = np.nan
+        self.perturbation_melting_rate = np.nan
         self.geothermalflux = np.nan
 
         self.setdefaultparameters()
-    #}}}
+    # }}}
     def __repr__(self):  # {{{
         s = '   basal forcings parameters:\n'
         s += '{}\n'.format(fielddisplay(self, 'groundedice_melting_rate', 'basal melting rate (positive if melting) [m/yr]'))
         s += '{}\n'.format(fielddisplay(self, 'floatingice_melting_rate', 'basal melting rate (positive if melting) [m/yr]'))
+        s += '{}\n'.format(fielddisplay(self, 'perturbation_melting_rate', '(optional) perturbation in basal melting rate under floating ice [m/yr]'))
         s += '{}\n'.format(fielddisplay(self, 'geothermalflux', 'geothermal heat flux [W/m^2]'))
         return s
-    #}}}
+    # }}}
     def extrude(self, md):  # {{{
         self.groundedice_melting_rate = project3d(md, 'vector', self.groundedice_melting_rate, 'type', 'node', 'layer', 1)
+        self.perturbation_melting_rate = project3d(md, 'vector', self.perturbation_melting_rate, 'type', 'node', 'layer', 1)
         self.floatingice_melting_rate = project3d(md, 'vector', self.floatingice_melting_rate, 'type', 'node', 'layer', 1)
         self.geothermalflux = project3d(md, 'vector', self.geothermalflux, 'type', 'node', 'layer', 1) # Bedrock only gets geothermal flux
         return self
-    #}}}
+    # }}}
     def initialize(self, md):  # {{{
         if np.all(np.isnan(self.groundedice_melting_rate)):
@@ -42,8 +45,8 @@
             print('      no basalforcings.floatingice_melting_rate specified: values set as zero')
         return self
-    #}}}
+    # }}}
     def setdefaultparameters(self):  # {{{
         return self
-    #}}}
+    # }}}
     def checkconsistency(self, md, solution, analyses):  # {{{
         if 'MasstransportAnalysis' in analyses and not solution == 'TransientSolution' and not md.transient.ismasstransport:
@@ -65,3 +68,4 @@
         WriteData(fid, prefix, 'object', self, 'fieldname', 'floatingice_melting_rate', 'format', 'DoubleMat', 'mattype', 1, 'scale', 1 / yts, 'timeserieslength', md.mesh.numberofvertices + 1, 'yts', yts)
         WriteData(fid, prefix, 'object', self, 'fieldname', 'geothermalflux', 'format', 'DoubleMat', 'mattype', 1, 'timeserieslength', md.mesh.numberofvertices + 1, 'yts', yts)
+        WriteData(fid, prefix, 'object', self, 'fieldname', 'perturbation_melting_rate', 'format', 'DoubleMat', 'name', 'md.basalforcings.perturbation_melting_rate', 'mattype', 1, 'scale', 1 / yts, 'timeserieslength', md.mesh.numberofvertices + 1, 'yts', yts)
     # }}}
Index: /issm/trunk/src/m/classes/basalforcingsbeckmanngoosse.m
===================================================================
--- /issm/trunk/src/m/classes/basalforcingsbeckmanngoosse.m	(revision 28012)
+++ /issm/trunk/src/m/classes/basalforcingsbeckmanngoosse.m	(revision 28013)
@@ -139,5 +139,5 @@
 				WriteData(fid,prefix,'object',self,'fieldname','ocean_salinity','format','DoubleMat','name','md.basalforcings.ocean_salinity','mattype',1,'timeserieslength',md.mesh.numberofvertices+1);
 			elseif(self.isthermalforcing==1)
-				WriteData(fid,prefix,'object',self,'fieldname','ocean_thermalforcing','format','DoubleMat','name','md.basalforcings.ocean_thermalforcing','mattype',1,'timeserieslength',md.mesh.numberofvertices+1);
+				WriteData(fid,prefix,'object',self,'fieldname','ocean_thermalforcing','format','DoubleMat','name','md.basalforcings.ocean_thermalforcing','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'yts',md.constants.yts);
 			end
 		end % }}}
Index: /issm/trunk/src/m/classes/basalforcingsismip6.m
===================================================================
--- /issm/trunk/src/m/classes/basalforcingsismip6.m	(revision 28012)
+++ /issm/trunk/src/m/classes/basalforcingsismip6.m	(revision 28013)
@@ -58,5 +58,5 @@
 			md = checkfield(md,'fieldname','basalforcings.basin_id','Inf',1,'>=',0,'<=',md.basalforcings.num_basins,'size',[md.mesh.numberofelements 1]);
 			md = checkfield(md,'fieldname','basalforcings.gamma_0','numel',1,'NaN',1,'Inf',1,'>',0);
-			md = checkfield(md,'fieldname','basalforcings.tf_depths','NaN',1,'Inf',1);
+			md = checkfield(md,'fieldname','basalforcings.tf_depths','NaN',1,'Inf',1,'size',[1,NaN],'<=',0);
 			md = checkfield(md,'fieldname','basalforcings.delta_t','NaN',1,'Inf',1,'numel',md.basalforcings.num_basins,'size',[1,md.basalforcings.num_basins]);
 			md = checkfield(md,'fieldname','basalforcings.islocal','values',[0 1]);
@@ -78,5 +78,5 @@
 			fielddisplay(self,'basin_id','basin number assigned to each node (unitless)');
 			fielddisplay(self,'gamma_0','melt rate coefficient (m/yr)');
-			fielddisplay(self,'tf_depths','Number of vertical layers in ocean thermal forcing dataset');
+			fielddisplay(self,'tf_depths','elevation of vertical layers in ocean thermal forcing dataset');
 			fielddisplay(self,'tf','thermal forcing (ocean temperature minus freezing point) (degrees C)');
 			fielddisplay(self,'delta_t','Ocean temperature correction per basin (degrees C)');
Index: /issm/trunk/src/m/classes/basin.py
===================================================================
--- /issm/trunk/src/m/classes/basin.py	(revision 28012)
+++ /issm/trunk/src/m/classes/basin.py	(revision 28013)
@@ -18,5 +18,5 @@
 
 
-class basin(object): #{{{
+class basin(object):  # {{{
     """BASIN class definition
 
@@ -25,5 +25,5 @@
     """
 
-    def __init__(self, *args): #{{{
+    def __init__(self, *args):  # {{{
         self.boundaries = []
         self.name       = ''
@@ -55,7 +55,7 @@
 
             self.proj = proj
-    #}}}
-
-    def __repr__(self): # {{{
+    # }}}
+
+    def __repr__(self):  # {{{
         s = '   basin parameters:\n'
         s += '{}\n'.format(fielddisplay(self, 'continent', 'continent name'))
@@ -67,7 +67,7 @@
 
         return s
-    #}}}
-
-    def setdefaultparameters(self): # {{{
+    # }}}
+
+    def setdefaultparameters(self):  # {{{
         self.name       = ''
         self.continent  = ''
@@ -76,7 +76,7 @@
 
         return self
-    #}}}
-
-    def isnameany(self, *args): #{{{
+    # }}}
+
+    def isnameany(self, *args):  # {{{
         boolean = 0
         for arg in args:
@@ -90,7 +90,7 @@
                 break
         return boolean
-    #}}}
-
-    def iscontinentany(self, *args): #{{{
+    # }}}
+
+    def iscontinentany(self, *args):  # {{{
         boolean = 0
         for arg in args:
@@ -104,7 +104,7 @@
                 break
         return boolean
-    #}}}
-
-    def outputname(self, *args): #{{{
+    # }}}
+
+    def outputname(self, *args):  # {{{
         #recover options
         options = pairoptions(*args)
@@ -118,19 +118,19 @@
 
         return output
-    #}}}
-
-    def plot(self, *args): #{{{
+    # }}}
+
+    def plot(self, *args):  # {{{
         #add option
         for i in range(len(self.boundaries)):
             self.boundaries[i].plot('proj', self.proj, *args)
-    #}}}
-
-    def plot3d(self, *args): #{{{
+    # }}}
+
+    def plot3d(self, *args):  # {{{
         #add option
         for i in range(len(self.boundaries)):
             self.boundaries[i].plot3d(*args)
-    #}}}
-
-    def contour(self, *args): #{{{
+    # }}}
+
+    def contour(self, *args):  # {{{
         #recover options
         options = pairoptions(*args)
@@ -162,7 +162,7 @@
 
         return out
-    #}}}
-
-    def checkconsistency(self, *args): #{{{
+    # }}}
+
+    def checkconsistency(self, *args):  # {{{
         #recover options
         options = pairoptions(*args)
@@ -180,12 +180,12 @@
             boundary == self.boundaries[i]
             boundary.checkconsistency()
-    #}}}
-
-    def contourplot(self, *args): #{{{
+    # }}}
+
+    def contourplot(self, *args):  # {{{
         contour = self.contour()
         plot(contour.x, contour.y, 'r*-')
-    #}}}
-
-    def shapefilecrop(self, *args): #{{{
+    # }}}
+
+    def shapefilecrop(self, *args):  # {{{
         #recover options
         options = pairoptions(*args)
@@ -252,4 +252,4 @@
 
         return output
-    #}}}
-#}}}
+    # }}}
+# }}}
Index: /issm/trunk/src/m/classes/boundary.py
===================================================================
--- /issm/trunk/src/m/classes/boundary.py	(revision 28012)
+++ /issm/trunk/src/m/classes/boundary.py	(revision 28013)
@@ -10,5 +10,5 @@
 
 
-class boundary(object): #{{{
+class boundary(object):  # {{{
     """BOUNDARY class definition
 
@@ -17,5 +17,5 @@
     """
 
-    def __init__(self, *args): #{{{
+    def __init__(self, *args):  # {{{
         self.boundaries = []
         self.name       = ''
@@ -51,7 +51,7 @@
 
             self.proj = proj
-    #}}}
+    # }}}
 
-    def __repr__(self): #{{{
+    def __repr__(self):  # {{{
         s = '   boundary parameters:\n'
         s += '{}\n'.format(fielddisplay(self, 'shppath', 'path to filename for this boundary'))
@@ -61,7 +61,7 @@
 
         return s
-    #}}}
+    # }}}
 
-    def setdefaultparameters(self): #{{{
+    def setdefaultparameters(self):  # {{{
         self.shppath = ''
         self.shpfilename = ''
@@ -70,13 +70,13 @@
 
         return self
-    #}}}
+    # }}}
 
-    def name(self): #{{{
+    def name(self):  # {{{
         output = self.shpfilename
 
         return output
-    #}}}
+    # }}}
 
-    def edges(self): #{{{
+    def edges(self):  # {{{
         #read domain
         path, name, ext = fileparts(self.shpfilename)
@@ -92,7 +92,7 @@
 
         return output
-    #}}}
+    # }}}
 
-    def plot(self, *args): #{{{
+    def plot(self, *args):  # {{{
         #recover options
         options = pairoptions(*args)
@@ -131,10 +131,10 @@
             # y = domain[i].y * unitmultiplier
             # if len(x) == 1:
-        #}}}
+        # }}}
 
         #TODO: Finish translating from MATLAB after test2004.py runs without plot
-    #}}}
+    # }}}
 
-    def checkconsistency(self, *args): #{{{
+    def checkconsistency(self, *args):  # {{{
         #recover options
         options = pairoptions(*args)
@@ -157,5 +157,5 @@
                     raise Exception('boundary {} has two vertices extermely close to one another'.format(shpfilename))
 
-    def plot3d(self, *args): #{{{
+    def plot3d(self, *args):  # {{{
         #recover options
         options = pairoptions(*args)
@@ -181,4 +181,4 @@
 
         #TODO: Finish translating from MATLAB after test2004.py runs without plot
-    #}}}
-#}}}
+    # }}}
+# }}}
Index: /issm/trunk/src/m/classes/calving.py
===================================================================
--- /issm/trunk/src/m/classes/calving.py	(revision 28012)
+++ /issm/trunk/src/m/classes/calving.py	(revision 28013)
@@ -17,5 +17,5 @@
         self.calvingrate = np.nan
         #self.setdefaultparameters() # Uncomment if/when setdefaultparameters is used
-    #}}}
+    # }}}
 
     def __repr__(self):  # {{{
@@ -23,14 +23,14 @@
         s += '{}\n'.format(fielddisplay(self, 'calvingrate', 'calving rate at given location [m/a]'))
         return s
-    #}}}
+    # }}}
 
     def setdefaultparameters(self):  # {{{
         return self
-    #}}}
+    # }}}
 
     def extrude(self, md):  # {{{
         self.calvingrate = project3d(md, 'vector', self.calvingrate, 'type', 'node')
         return self
-    #}}}
+    # }}}
 
     def checkconsistency(self, md, solution, analyses):  # {{{
Index: /issm/trunk/src/m/classes/calvingcalvingmip.m
===================================================================
--- /issm/trunk/src/m/classes/calvingcalvingmip.m	(revision 28013)
+++ /issm/trunk/src/m/classes/calvingcalvingmip.m	(revision 28013)
@@ -0,0 +1,57 @@
+%CALVINGCALVINGMIP class definition
+%   For calvingMIP laws and coefficients
+%   Usage:
+%      calvingcalvingmip=calvingcalvingmip();
+
+classdef calvingcalvingmip
+	properties (SetAccess=public) 
+		min_thickness = 0.;
+		experiment = 1;
+	end
+	methods
+		function self = calvingcalvingmip(varargin) % {{{
+			switch nargin
+				case 0
+					self=setdefaultparameters(self);
+				case 1
+					inputstruct=varargin{1};
+					list1 = properties('calvingcalvingmip');
+					list2 = fieldnames(inputstruct);
+					for i=1:length(list1)
+						fieldname = list1{i};
+						if ismember(fieldname,list2),
+							self.(fieldname) = inputstruct.(fieldname);
+						end
+					end
+				otherwise
+					error('constructor not supported');
+			end
+		end % }}}
+		function self = extrude(self,md) % {{{
+		end % }}}
+		function self = setdefaultparameters(self) % {{{
+			%For now we turn this off by setting the threshold to 0
+			self.min_thickness = 0.;
+
+			self.experiment = 1;
+		end % }}}
+		function md = checkconsistency(self,md,solution,analyses) % {{{
+			%Early return
+			if (~strcmp(solution,'TransientSolution') | md.transient.ismovingfront==0), return; end
+
+			md = checkfield(md,'fieldname','calving.min_thickness','>=',0,'NaN',1,'Inf',1,'numel',1);
+			md = checkfield(md,'fieldname','calving.experiment','values',[0, 1, 2, 3, 4, 5]);
+		end % }}}
+		function disp(self) % {{{
+			disp(sprintf('   CalvingMIP parameters:'));
+			fielddisplay(self,'experiment','Experiment in CalvingMIP');
+			fielddisplay(self,'min_thickness','minimum thickness below which no ice is allowed [m]');
+		end % }}}
+		function marshall(self,prefix,md,fid) % {{{
+			yts=md.constants.yts;
+			WriteData(fid,prefix,'name','md.calving.law','data',12,'format','Integer');
+			WriteData(fid,prefix,'object',self,'fieldname','min_thickness','format','Double');
+			WriteData(fid,prefix,'object',self,'fieldname','experiment','format','Integer');
+		end % }}}
+	end
+end
Index: /issm/trunk/src/m/classes/calvingcrevassedepth.py
===================================================================
--- /issm/trunk/src/m/classes/calvingcrevassedepth.py	(revision 28012)
+++ /issm/trunk/src/m/classes/calvingcrevassedepth.py	(revision 28013)
@@ -18,5 +18,5 @@
 
         #self.setdefaultparameters() # Uncomment if/when setdefaultparameters is used
-    #}}}
+    # }}}
     def __repr__(self):  # {{{
         s = '   Calving Pi parameters:'
@@ -25,11 +25,11 @@
         s += '{}\n'.format(fielddisplay(self, 'water_height', 'water height in the crevasse [m]'))
         return s
-    #}}}
+    # }}}
     def setdefaultparameters(self):  # {{{
         return self
-    #}}}
+    # }}}
     def extrude(self, md):  # {{{
         return self
-    #}}}
+    # }}}
     def checkconsistency(self, md, solution, analyses):  # {{{
         #Early return
Index: /issm/trunk/src/m/classes/calvingdev.py
===================================================================
--- /issm/trunk/src/m/classes/calvingdev.py	(revision 28012)
+++ /issm/trunk/src/m/classes/calvingdev.py	(revision 28013)
@@ -21,5 +21,5 @@
         self.setdefaultparameters()
 
-    #}}}
+    # }}}
 
     def __repr__(self):  # {{{
@@ -29,9 +29,9 @@
 
         return string
-    #}}}
+    # }}}
 
     def extrude(self, md):  # {{{
         return self
-    #}}}
+    # }}}
 
     def setdefaultparameters(self):  # {{{
@@ -40,5 +40,5 @@
         self.stress_threshold_floatingice = 150e3
         return self
-    #}}}
+    # }}}
 
     def checkconsistency(self, md, solution, analyses):  # {{{
Index: /issm/trunk/src/m/classes/calvinglevermann.py
===================================================================
--- /issm/trunk/src/m/classes/calvinglevermann.py	(revision 28012)
+++ /issm/trunk/src/m/classes/calvinglevermann.py	(revision 28013)
@@ -20,5 +20,5 @@
         self.setdefaultparameters()
 
-    #}}}
+    # }}}
 
     def __repr__(self):  # {{{
@@ -27,15 +27,15 @@
 
         return string
-    #}}}
+    # }}}
 
     def extrude(self, md):  # {{{
         self.coeff = project3d(md, 'vector', self.coeff, 'type', 'node')
         return self
-    #}}}
+    # }}}
 
     def setdefaultparameters(self):  # {{{
         #Proportionality coefficient in Levermann model
         self.coeff = 2e13
-    #}}}
+    # }}}
 
     def checkconsistency(self, md, solution, analyses):  # {{{
Index: /issm/trunk/src/m/classes/calvingminthickness.py
===================================================================
--- /issm/trunk/src/m/classes/calvingminthickness.py	(revision 28012)
+++ /issm/trunk/src/m/classes/calvingminthickness.py	(revision 28013)
@@ -19,5 +19,5 @@
         self.setdefaultparameters()
 
-    #}}}
+    # }}}
 
     def __repr__(self):  # {{{
@@ -25,14 +25,14 @@
         string = "%s\n%s" % (string, fielddisplay(self, 'min_thickness', 'minimum thickness below which no ice is allowed'))
         return string
-    #}}}
+    # }}}
 
     def extrude(self, md):  # {{{
         return self
-    #}}}
+    # }}}
 
     def setdefaultparameters(self):  # {{{
         #minimum thickness is 100 m by default
         self.min_thickness = 100.
-    #}}}
+    # }}}
 
     def checkconsistency(self, md, solution, analyses):  # {{{
Index: /issm/trunk/src/m/classes/calvingparameterization.m
===================================================================
--- /issm/trunk/src/m/classes/calvingparameterization.m	(revision 28012)
+++ /issm/trunk/src/m/classes/calvingparameterization.m	(revision 28013)
@@ -1,4 +1,4 @@
 %CALVINGPARAMETERIZATION class definition
-%	For test calving laws and coefficients
+%   For test calving laws and coefficients
 %   Usage:
 %      calvingparameterization=calvingparameterization();
@@ -41,5 +41,5 @@
 			self.min_thickness = 0.;
 
-			%parameters for the spatial temporal seperation 
+			%Parameters for the spatial temporal separation
 			%The coefficient follows: gamma= f(x)
 			% 0 - f(x) = y_{o} + \alpha (x+x_{o})
@@ -48,13 +48,13 @@
 			% the amplifier
 			self.theta = 0;
-			% the slope alpha 
+			% the slope alpha
 			self.alpha = 0;
-			% offset in x-axis 
+			% offset in x-axis
 			self.xoffset = 0;
-			% offset in y-axis 
+			% offset in y-axis
 			self.yoffset = 0;
 			% velocity thresholds to reduce calving rate
-			vel_upperbound = 6000; % m/a
-			vel_lowerbound = 0; % m/a
+			self.vel_upperbound = 6000; % m/a
+			self.vel_lowerbound = 0; % m/a
 		end % }}}
 		function md = checkconsistency(self,md,solution,analyses) % {{{
Index: /issm/trunk/src/m/classes/calvingparameterization.py
===================================================================
--- /issm/trunk/src/m/classes/calvingparameterization.py	(revision 28013)
+++ /issm/trunk/src/m/classes/calvingparameterization.py	(revision 28013)
@@ -0,0 +1,111 @@
+from checkfield import checkfield
+from fielddisplay import fielddisplay
+from WriteData import WriteData
+
+
+class calvingparameterization(object):
+    """calvingparameterization class definition
+    For test calving laws and coefficients
+
+    Usage:
+        calvingparameterization = calvingparameterization()
+    """
+
+    def __init__(self, *args):  # {{{
+        self.min_thickness = 0
+        self.use_param = 0
+        self.theta = 0
+        self.alpha = 0
+        self.xoffset = 0
+        self.yoffset = 0
+        self.vel_upperbound = 0
+        self.vel_threshold = 0
+        self.vel_lowerbound = 0
+
+        nargs = len(args)
+        if nargs == 0:
+            self.setdefaultparameters()
+        elif nargs == 1:
+            # TODO: Replace the following with constructor
+            self.setdefaultparameters()
+        else:
+            raise Exception('constructor not supported')
+    # }}}
+
+    def __repr__(self):  # {{{
+        s = '   Calving test parameters:\n'
+        s += '{}\n'.format(fielddisplay(self, 'min_thickness', 'minimum thickness below which no ice is allowed [m]'))
+        s += '{}\n'.format(fielddisplay(self, 'use_param', '-1 - just use frontal ablation rate, 0 - f(x) = y_{o} + \alpha (x+x_{o}), 1 - f(x)=y_{o}-\frac{\theta}{2}\tanh(\alpha(x+x_{o})), 2 - tanh(thickness), 3 - tanh(normalized vel), 4 - tanh(truncated vel), 5 - linear(truncated vel)'))
+        s += '{}\n'.format(fielddisplay(self, 'theta', 'the amplifier'))
+        s += '{}\n'.format(fielddisplay(self, 'alpha', 'the slope'))
+        s += '{}\n'.format(fielddisplay(self, 'xoffset', 'offset in x-axis'))
+        s += '{}\n'.format(fielddisplay(self, 'yoffset', 'offset in y-axis'))
+        s += '{}\n'.format(fielddisplay(self, 'vel_lowerbound', 'lowerbound of ice velocity to reduce the calving rate [m/a]'))
+        s += '{}\n'.format(fielddisplay(self, 'vel_threshold', 'threshold of ice velocity to reduce the calving rate [m/a]'))
+        s += '{}\n'.format(fielddisplay(self, 'vel_upperbound', 'upperbound of ice velocity to reduce the calving rate [m/a]'))
+        return s
+    # }}}
+
+    def extrude(self, md):  # {{{
+        return self
+    # }}}
+
+    def setdefaultparameters(self):  # {{{
+        # For now we turn this off by setting the threshold to 0
+        self.min_thickness = 0.
+
+        # Parameters for the spatial temporal separation
+        # The coefficient follows: gamma= f(x)
+        # 0 - f(x) = y_{o} + \alpha (x+x_{o})
+        # 1 - f(x)=y_{o}-\frac{\theta}{2}\tanh(\alpha(x+x_{o}))
+        self.use_param = 0
+
+        # The amplifier
+        self.theta = 0
+
+        # The slope alpha
+        self.alpha = 0
+
+        # Offset in x-axis
+        self.xoffset
+
+        # Offset in y-axis
+        self.yoffset
+
+        # Velocity thresholds to reduce calving rate
+        self.vel_upperbound = 6000 # m/a
+        self.vel_lowerbound = 0 # m/a
+        return self
+    # }}}
+
+    def checkconsistency(self, md, solution, analyses):  # {{{
+        # Early return
+        if not solution == 'TransientSolution' or not md.transient.ismovingfront:
+            return
+
+        md = checkfield(md, 'fieldname', 'calving.min_thickness', '>=', 0, 'NaN', 1, 'Inf', 1, 'numel', 1)
+        md = checkfield(md, 'fieldname', 'calving.use_param', 'values', [-1, 0, 1, 2, 3, 4, 5])
+        md = checkfield(md, 'fieldname', 'calving.theta', 'NaN', 1, 'Inf', 1, 'numel', 1)
+        md = checkfield(md, 'fieldname', 'calving.alpha', 'NaN', 1, 'Inf', 1, 'numel', 1)
+        md = checkfield(md, 'fieldname', 'calving.xoffset', 'NaN', 1, 'Inf', 1, 'numel', 1)
+        md = checkfield(md, 'fieldname', 'calving.yoffset', 'NaN', 1, 'Inf', 1, 'numel', 1)
+        md = checkfield(md, 'fieldname', 'calving.vel_lowerbound', 'NaN', 1, 'Inf', 1, 'numel', 1)
+        md = checkfield(md, 'fieldname', 'calving.vel_threshold', 'NaN', 1, 'Inf', 1, 'numel', 1)
+        md = checkfield(md, 'fieldname', 'calving.vel_upperbound', 'NaN', 1, 'Inf', 1, 'numel', 1)
+
+        return md
+    # }}}
+
+    def marshall(self, prefix, md, fid):  # {{{
+        yts = md.constants.yts
+        WriteData(fid, prefix, 'name', 'md.calving.law', 'data', 9, 'format', 'Integer')
+        WriteData(fid, prefix, 'object', self, 'fieldname', 'min_thickness', 'format', 'Double')
+        WriteData(fid, prefix, 'object', self, 'fieldname', 'use_param', 'format', 'Integer')
+        WriteData(fid, prefix, 'object', self, 'fieldname', 'theta', 'format', 'Double')
+        WriteData(fid, prefix, 'object', self, 'fieldname', 'alpha', 'format', 'Double')
+        WriteData(fid, prefix, 'object', self, 'fieldname', 'xoffset', 'format', 'Double')
+        WriteData(fid, prefix, 'object', self, 'fieldname', 'yoffset', 'format', 'Double')
+        WriteData(fid, prefix, 'object', self, 'fieldname', 'vel_lowerbound', 'format', 'Double', 'scale', 1. / yts)
+        WriteData(fid, prefix, 'object', self, 'fieldname', 'vel_threshold','format', 'Double', 'scale', 1. / yts)
+        WriteData(fid, prefix, 'object', self, 'fieldname', 'vel_upperbound', 'format', 'Double', 'scale', 1. / yts)
+    # }}}
Index: /issm/trunk/src/m/classes/calvingvonmises.py
===================================================================
--- /issm/trunk/src/m/classes/calvingvonmises.py	(revision 28012)
+++ /issm/trunk/src/m/classes/calvingvonmises.py	(revision 28013)
@@ -19,5 +19,5 @@
         self.setdefaultparameters()
 
-    #}}}
+    # }}}
 
     def __repr__(self):  # {{{
@@ -27,9 +27,9 @@
         s += '{}\n'.format(fielddisplay(self, 'min_thickness', 'minimum thickness below which no ice is allowed [m]'))
         return s
-    #}}}
+    # }}}
 
     def extrude(self, md):  # {{{
         return self
-    #}}}
+    # }}}
 
     def setdefaultparameters(self):  # {{{
@@ -41,5 +41,5 @@
         self.min_thickness = 0.
         return self
-    #}}}
+    # }}}
 
     def checkconsistency(self, md, solution, analyses):  # {{{
Index: /issm/trunk/src/m/classes/calvingvonmisesAD.m
===================================================================
--- /issm/trunk/src/m/classes/calvingvonmisesAD.m	(revision 28013)
+++ /issm/trunk/src/m/classes/calvingvonmisesAD.m	(revision 28013)
@@ -0,0 +1,82 @@
+%CALVINGVONMISESAD class definition
+%
+%   Usage:
+%      calvingvonmisesAD=calvingvonmisesAD();
+
+classdef calvingvonmisesAD
+	properties (SetAccess=public) 
+		basin_id							  = NaN;
+		num_basins           = 0;
+		stress_threshold_groundedice = 0.;
+		stress_threshold_floatingice = 0.;
+		min_thickness = 0.;
+	end
+	methods
+		function self = calvingvonmises(varargin) % {{{
+			switch nargin
+				case 0
+					self=setdefaultparameters(self);
+				case 1
+					inputstruct=varargin{1};
+					list1 = properties('calvingvonmisesAD');
+					list2 = fieldnames(inputstruct);
+					for i=1:length(list1)
+						fieldname = list1{i};
+						if ismember(fieldname,list2),
+							self.(fieldname) = inputstruct.(fieldname);
+						end
+					end
+				otherwise
+					error('constructor not supported');
+			end
+		end % }}}
+		function self = extrude(self,md) % {{{
+		end % }}}
+		function self = setdefaultparameters(self) % {{{
+
+			self.basin_id             = NaN;
+			num_basins           = 0;
+			
+			%Default sigma max
+			self.stress_threshold_groundedice = 1e6;
+			self.stress_threshold_floatingice = 150e3;
+
+			%For now we turn this off by setting the threshold to 0
+			self.min_thickness = 0.;
+		end % }}}
+		function md = checkconsistency(self,md,solution,analyses) % {{{
+			%Early return
+			if (~strcmp(solution,'TransientSolution') | md.transient.ismovingfront==0), return; end
+
+			md = checkfield(md,'fieldname','calving.basin_id','Inf',1,'>=',0,'<=',md.frontalforcings.num_basins,'size',[md.mesh.numberofelements 1]);
+			md = checkfield(md,'fieldname','frontalforcings.num_basins','numel',1,'NaN',1,'Inf',1,'>',0);
+			md = checkfield(md,'fieldname','calving.stress_threshold_groundedice','>',0,'NaN',1,'Inf',1,'size',[md.calving.num_basins 1]);
+			md = checkfield(md,'fieldname','calving.stress_threshold_floatingice','>',0,'NaN',1,'Inf',1,'size',[md.calving.num_basins 1]);
+			%md = checkfield(md,'fieldname','calving.stress_threshold_groundedice','>',0,'NaN',1,'Inf',1,'size',[1 1]);
+			%md = checkfield(md,'fieldname','calving.stress_threshold_floatingice','>',0,'NaN',1,'Inf',1,'size',[1 1]);
+			md = checkfield(md,'fieldname','calving.min_thickness','>=',0,'NaN',1,'Inf',1,'numel',1);
+		end % }}}
+		function disp(self) % {{{
+			disp(sprintf('   Calving VonMises (AD) parameters:'));
+			fielddisplay(self,'basin_id','basin ID for elements');
+			fielddisplay(self,'num_basins', 'number of basins');
+			fielddisplay(self,'stress_threshold_groundedice','sigma_max applied to grounded ice only [Pa]');
+			fielddisplay(self,'stress_threshold_floatingice','sigma_max applied to floating ice only [Pa]');
+			fielddisplay(self,'min_thickness','minimum thickness below which no ice is allowed [m]');
+
+		end % }}}
+		function marshall(self,prefix,md,fid) % {{{
+			yts=md.constants.yts;
+			WriteData(fid,prefix,'name','md.calving.law','data',11,'format','Integer');
+			WriteData(fid,prefix,'object',self,'fieldname','num_basins','format','Integer');
+			WriteData(fid,prefix,'object',self,'fieldname','basin_id','data',self.basin_id-1,'name','md.calving.basin_id','format','IntMat','mattype',2); %0-indexed
+			%WriteData(fid,prefix,'object',self,'fieldname','stress_threshold_groundedice','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'yts',md.constants.yts);
+			%WriteData(fid,prefix,'object',self,'fieldname','stress_threshold_floatingice','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'yts',md.constants.yts);
+			%WriteData(fid,prefix,'object',self,'fieldname','stress_threshold_groundedice','format','Double');
+			%WriteData(fid,prefix,'object',self,'fieldname','stress_threshold_floatingice','format','Double');
+			WriteData(fid,prefix,'object',self,'fieldname','stress_threshold_groundedice','format','DoubleMat');
+			WriteData(fid,prefix,'object',self,'fieldname','stress_threshold_floatingice','format','DoubleMat');
+			WriteData(fid,prefix,'object',self,'fieldname','min_thickness','format','Double');
+		end % }}}
+	end
+end
Index: /issm/trunk/src/m/classes/cfdragcoeffabsgrad.m
===================================================================
--- /issm/trunk/src/m/classes/cfdragcoeffabsgrad.m	(revision 28012)
+++ /issm/trunk/src/m/classes/cfdragcoeffabsgrad.m	(revision 28013)
@@ -18,5 +18,4 @@
 		weights            = NaN; %weight coefficients for every vertex
 		weights_string     = ''; %string to identify this particular set of weights
-		cumulated          = NaN; %do we cumulate cfdragcoeffabsgrad through time?
 	end
 	
Index: /issm/trunk/src/m/classes/cfdragcoeffabsgradtransient.m
===================================================================
--- /issm/trunk/src/m/classes/cfdragcoeffabsgradtransient.m	(revision 28013)
+++ /issm/trunk/src/m/classes/cfdragcoeffabsgradtransient.m	(revision 28013)
@@ -0,0 +1,69 @@
+%CFDRAGCOEFFABSGRADTRANSIENT class definition
+%
+%   Usage:
+%      cfdragcoeffabsgradtransient=cfdragcoeffabsgradtransient();
+%      cfdragcoeffabsgradtransient=cfdragcoeffabsgradtransient('name','SurfaceAltimetry',...
+%                    'definitionstring','Outputdefinition1',... 
+%                    'weights',ones(md.mesh.numberofvertices+1,1));
+%
+%
+
+classdef cfdragcoeffabsgradtransient
+	properties (SetAccess=public)
+		%cfdragcoeffabsgrad
+		name               = '';
+		definitionstring   = ''; %string that identifies this output definition uniquely, from 'Outputdefinition[1-100]'
+		weights            = NaN; %weight coefficients for every vertex
+	end
+	
+	methods
+		function self = extrude(self,md) % {{{
+			if ~isnan(self.weights)
+				self.weights=project3d(md,'vector',self.weights,'type','node');
+			end
+		end % }}}
+		function self = cfdragcoeffabsgradtransient(varargin) % {{{
+			if nargin==0,
+				self=setdefaultparameters(self);
+			else
+				%use provided options to change fields
+				options=pairoptions(varargin{:});
+
+				%get name
+				self.name=getfieldvalue(options,'name','');
+				self.definitionstring=getfieldvalue(options,'definitionstring');
+				self.weights=getfieldvalue(options,'weights',NaN);
+			end
+		end % }}}
+		function self = setdefaultparameters(self) % {{{
+		end % }}}
+		function md = checkconsistency(self,md,solution,analyses) % {{{
+
+			if ~ischar(self.name),
+				error('cfdragcoeffabsgrad error message: ''name'' field should be a string!');
+			end
+			OutputdefinitionStringArray={};
+			for i=1:200
+				OutputdefinitionStringArray{i}=strcat('Outputdefinition',num2str(i));
+			end
+			md = checkfield(md,'fieldname','self.definitionstring','field',self.definitionstring,'values',OutputdefinitionStringArray);
+			md = checkfield(md,'fieldname','self.weights','field',self.weights,'size',[md.mesh.numberofvertices+1 NaN],'NaN',1,'Inf',1);
+
+		end % }}}
+		function md = disp(self) % {{{
+		
+			disp(sprintf('   TimeMisfit:\n'));
+
+			fielddisplay(self,'name','identifier for this cfdragcoeffabsgradtransient response');
+			fielddisplay(self,'definitionstring','string that identifies this output definition uniquely, from ''Outputdefinition[1-10]''');
+			fielddisplay(self,'weights','weights (at vertices) to apply to the cfdragcoeffabsgradtransient');
+
+		end % }}}
+		function md = marshall(self,prefix,md,fid) % {{{
+
+		WriteData(fid,prefix,'data',self.name,'name','md.cfdragcoeffabsgradtransient.name','format','String');
+		WriteData(fid,prefix,'data',self.definitionstring,'name','md.cfdragcoeffabsgradtransient.definitionstring','format','String');
+		WriteData(fid,prefix,'data',self.weights,'name','md.cfdragcoeffabsgradtransient.weights','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'yts',md.constants.yts);
+		end % }}}
+	end
+end
Index: /issm/trunk/src/m/classes/cflevelsetmisfit.m
===================================================================
--- /issm/trunk/src/m/classes/cflevelsetmisfit.m	(revision 28012)
+++ /issm/trunk/src/m/classes/cflevelsetmisfit.m	(revision 28013)
@@ -25,5 +25,4 @@
 		weights_string     = ''; %string to identify this particular set of weights
 		datatime				 = 0; %time in years from start that the data is from 
-		cumulated          = NaN; %do we cumulate cflevelsetmisfit through time?
 	end
 	
Index: /issm/trunk/src/m/classes/cfrheologybbarabsgrad.m
===================================================================
--- /issm/trunk/src/m/classes/cfrheologybbarabsgrad.m	(revision 28013)
+++ /issm/trunk/src/m/classes/cfrheologybbarabsgrad.m	(revision 28013)
@@ -0,0 +1,78 @@
+%CFRHEOLOGYBBARABSGRAD class definition
+%
+%   Usage:
+%      cfrheologybbarabsgrad=cfdragcoeffabsgrad();
+%      cfrheologybbarabsgrad=cfdragcoeffabsgrad('name','SurfaceAltimetry',...
+%                    'definitionstring','Outputdefinition1',... 
+%							'model_string','Surface',...
+%                    'weights',ones(md.mesh.numberofvertices,1),...
+%                    'weights_string','WeightsSurfaceObservations');
+%
+%
+
+classdef cfrheologybbarabsgrad
+	properties (SetAccess=public)
+		%cfrheologybbarabsgrad
+		name               = '';
+		definitionstring   = ''; %string that identifies this output definition uniquely, from 'Outputdefinition[1-100]'
+		weights            = NaN; %weight coefficients for every vertex
+		weights_string     = ''; %string to identify this particular set of weights
+		cumulated          = NaN; %do we cumulate cfrheologybbarabsgrad through time?
+	end
+	
+	methods
+		function self = extrude(self,md) % {{{
+			if ~isnan(self.weights)
+				self.weights=project3d(md,'vector',self.weights,'type','node');
+			end
+		end % }}}
+		function self = cfrheologybbarabsgrad(varargin) % {{{
+			if nargin==0,
+				self=setdefaultparameters(self);
+			else
+				%use provided options to change fields
+				options=pairoptions(varargin{:});
+
+				%get name
+				self.name=getfieldvalue(options,'name','');
+				self.definitionstring=getfieldvalue(options,'definitionstring');
+				self.weights=getfieldvalue(options,'weights',NaN);
+				self.weights_string=getfieldvalue(options,'weights_string','');
+
+			end
+		end % }}}
+		function self = setdefaultparameters(self) % {{{
+		end % }}}
+		function md = checkconsistency(self,md,solution,analyses) % {{{
+
+			if ~ischar(self.name),
+				error('cfrheologybbarabsgrad error message: ''name'' field should be a string!');
+			end
+			OutputdefinitionStringArray={};
+			for i=1:200
+				OutputdefinitionStringArray{i}=strcat('Outputdefinition',num2str(i));
+			end
+			md = checkfield(md,'fieldname','self.definitionstring','field',self.definitionstring,'values',OutputdefinitionStringArray);
+
+			md = checkfield(md,'fieldname','self.weights','field',self.weights,'timeseries',1,'NaN',1,'Inf',1);
+
+		end % }}}
+		function md = disp(self) % {{{
+		
+			disp(sprintf('   TimeMisfit:\n'));
+
+			fielddisplay(self,'name','identifier for this cfrheologybbarabsgrad response');
+			fielddisplay(self,'definitionstring','string that identifies this output definition uniquely, from ''Outputdefinition[1-10]''');
+			fielddisplay(self,'weights','weights (at vertices) to apply to the cfrheologybbarabsgrad');
+			fielddisplay(self,'weights_string','string for weights for identification purposes');
+
+		end % }}}
+		function md = marshall(self,prefix,md,fid) % {{{
+
+		WriteData(fid,prefix,'data',self.name,'name','md.cfrheologybbarabsgrad.name','format','String');
+		WriteData(fid,prefix,'data',self.definitionstring,'name','md.cfrheologybbarabsgrad.definitionstring','format','String');
+		WriteData(fid,prefix,'data',self.weights,'name','md.cfrheologybbarabsgrad.weights','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'yts',md.constants.yts);
+		WriteData(fid,prefix,'data',self.weights_string,'name','md.cfrheologybbarabsgrad.weights_string','format','String');
+		end % }}}
+	end
+end
Index: /issm/trunk/src/m/classes/cfrheologybbarabsgradtransient.m
===================================================================
--- /issm/trunk/src/m/classes/cfrheologybbarabsgradtransient.m	(revision 28013)
+++ /issm/trunk/src/m/classes/cfrheologybbarabsgradtransient.m	(revision 28013)
@@ -0,0 +1,68 @@
+%CFRHEOLOGYBBARABSGRADTRANSIENT class definition
+%
+%   Usage:
+%      cfrheologybbarabsgradtransient=cfdragcoeffabsgradtransient();
+%      cfrheologybbarabsgradtransient=cfdragcoeffabsgradtransient('name','SurfaceAltimetry',...
+%                    'definitionstring','Outputdefinition1',... 
+%                    'weights',ones(md.mesh.numberofvertices+1,1))%
+%
+
+classdef cfrheologybbarabsgradtransient
+	properties (SetAccess=public)
+		%cfrheologybbarabsgrad
+		name               = '';
+		definitionstring   = ''; %string that identifies this output definition uniquely, from 'Outputdefinition[1-100]'
+		weights            = NaN; %weight coefficients for every vertex
+	end
+	
+	methods
+		function self = extrude(self,md) % {{{
+			if ~isnan(self.weights)
+				self.weights=project3d(md,'vector',self.weights,'type','node');
+			end
+		end % }}}
+		function self = cfrheologybbarabsgradtransient(varargin) % {{{
+			if nargin==0,
+				self=setdefaultparameters(self);
+			else
+				%use provided options to change fields
+				options=pairoptions(varargin{:});
+
+				%get name
+				self.name=getfieldvalue(options,'name','');
+				self.definitionstring=getfieldvalue(options,'definitionstring');
+				self.weights=getfieldvalue(options,'weights',NaN);
+			end
+		end % }}}
+		function self = setdefaultparameters(self) % {{{
+		end % }}}
+		function md = checkconsistency(self,md,solution,analyses) % {{{
+
+			if ~ischar(self.name),
+				error('cfrheologybbarabsgradtransient error message: ''name'' field should be a string!');
+			end
+			OutputdefinitionStringArray={};
+			for i=1:200
+				OutputdefinitionStringArray{i}=strcat('Outputdefinition',num2str(i));
+			end
+			md = checkfield(md,'fieldname','self.definitionstring','field',self.definitionstring,'values',OutputdefinitionStringArray);
+			md = checkfield(md,'fieldname','self.weights','field',self.weights,'size',[md.mesh.numberofvertices+1 NaN],'NaN',1,'Inf',1);
+
+		end % }}}
+		function md = disp(self) % {{{
+		
+			disp(sprintf('   TimeMisfit:\n'));
+
+			fielddisplay(self,'name','identifier for this cfrheologybbarabsgrad response');
+			fielddisplay(self,'definitionstring','string that identifies this output definition uniquely, from ''Outputdefinition[1-10]''');
+			fielddisplay(self,'weights','weights (at vertices) to apply to the cfrheologybbarabsgrad');
+
+		end % }}}
+		function md = marshall(self,prefix,md,fid) % {{{
+
+		WriteData(fid,prefix,'data',self.name,'name','md.cfrheologybbarabsgradtransient.name','format','String');
+		WriteData(fid,prefix,'data',self.definitionstring,'name','md.cfrheologybbarabsgradtransient.definitionstring','format','String');
+		WriteData(fid,prefix,'data',self.weights,'name','md.cfrheologybbarabsgradtransient.weights','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'yts',md.constants.yts);
+		end % }}}
+	end
+end
Index: /issm/trunk/src/m/classes/cfsurfacelogvel.m
===================================================================
--- /issm/trunk/src/m/classes/cfsurfacelogvel.m	(revision 28012)
+++ /issm/trunk/src/m/classes/cfsurfacelogvel.m	(revision 28013)
@@ -25,5 +25,4 @@
 		weights_string     = ''; %string to identify this particular set of weights
 		datatime				 = 0; %time in years from start that the data is from 
-		cumulated          = NaN; %do we cumulate cfsurfacelogvel through time?
 	end
 	
Index: /issm/trunk/src/m/classes/cfsurfacesquare.m
===================================================================
--- /issm/trunk/src/m/classes/cfsurfacesquare.m	(revision 28012)
+++ /issm/trunk/src/m/classes/cfsurfacesquare.m	(revision 28013)
@@ -25,5 +25,4 @@
 		weights_string     = ''; %string to identify this particular set of weights
 		datatime				 = 0; %time in years from start that the data is from 
-		cumulated          = NaN; %do we cumulate cfsurfacesquare through time?
 	end
 	
Index: /issm/trunk/src/m/classes/cfsurfacesquaretransient.m
===================================================================
--- /issm/trunk/src/m/classes/cfsurfacesquaretransient.m	(revision 28013)
+++ /issm/trunk/src/m/classes/cfsurfacesquaretransient.m	(revision 28013)
@@ -0,0 +1,83 @@
+%MISFIT class definition
+%
+%   Usage:
+%      cfsurfacesquaretransient=cfsurfacesquaretransient();
+%      cfsurfacesquaretransient=cfsurfacesquaretransient('name','SurfaceAltimetry',...
+%                    'definitionstring','Outputdefinition1',... 
+%							'model_string','Surface',...
+%                    'observations',[md.geometry.surface;0],...
+%                    'weights',ones(md.mesh.numberofvertices+1,1));
+%
+%
+
+classdef cfsurfacesquaretransient
+	properties (SetAccess=public)
+		%cfsurfacesquaretransient
+		name                = '';
+		definitionstring    = ''; %string that identifies this output definition uniquely, from 'Outputdefinition[1-100]'
+		model_string        = ''; %string for field that is modeled
+		observations        = NaN;%observed field that we compare the model against
+		weights             = NaN;%weight coefficients for every vertex
+	end
+	
+	methods
+		function self = extrude(self,md) % {{{
+			if ~isnan(self.weights)
+				self.weights=project3d(md,'vector',self.weights,'type','node');
+			end
+			if ~isnan(self.observations)
+				self.observations=project3d(md,'vector',self.observations,'type','node');
+			end
+		end % }}}
+		function self = cfsurfacesquaretransient(varargin) % {{{
+			if nargin==0,
+				self=setdefaultparameters(self);
+			else
+				%use provided options to change fields
+				options=pairoptions(varargin{:});
+
+				%get name
+				self.name                = getfieldvalue(options,'name','');
+				self.definitionstring    = getfieldvalue(options,'definitionstring');
+				self.model_string        = getfieldvalue(options,'model_string');
+				self.observations        = getfieldvalue(options,'observations',NaN);
+				self.weights             = getfieldvalue(options,'weights',NaN);
+			end
+		end % }}}
+		function self = setdefaultparameters(self) % {{{
+		end % }}}
+		function md = checkconsistency(self,md,solution,analyses) % {{{
+
+			if ~ischar(self.name),
+				error('cfsurfacesquaretransient error message: ''name'' field should be a string!');
+			end
+			OutputdefinitionStringArray={};
+			for i=1:100
+				OutputdefinitionStringArray{i}=strcat('Outputdefinition',num2str(i));
+			end
+			md = checkfield(md,'fieldname','self.definitionstring','field',self.definitionstring,'values',OutputdefinitionStringArray);
+			md = checkfield(md,'fieldname','self.observations','field',self.observations,'size',[md.mesh.numberofvertices+1 NaN],'NaN',1,'Inf',1);
+			md = checkfield(md,'fieldname','self.weights','field',self.weights,'size',[md.mesh.numberofvertices+1 NaN],'NaN',1,'Inf',1);
+
+		end % }}}
+		function md = disp(self) % {{{
+		
+			disp(sprintf('   cfsurfacesquaretransient:\n'));
+
+			fielddisplay(self,'name','identifier for this cfsurfacesquaretransient response');
+			fielddisplay(self,'definitionstring','string that identifies this output definition uniquely, from ''Outputdefinition[1-10]''');
+			fielddisplay(self,'model_string','string for field that is modeled');
+			fielddisplay(self,'observations','observed field that we compare the model against');
+			fielddisplay(self,'weights','weights (at vertices) to apply to the cfsurfacesquaretransient');
+
+		end % }}}
+		function md = marshall(self,prefix,md,fid) % {{{
+
+		WriteData(fid,prefix,'data',self.name,'name','md.cfsurfacesquaretransient.name','format','String');
+		WriteData(fid,prefix,'data',self.definitionstring,'name','md.cfsurfacesquaretransient.definitionstring','format','String');
+		WriteData(fid,prefix,'data',self.model_string,'name','md.cfsurfacesquaretransient.model_string','format','String');
+		WriteData(fid,prefix,'data',self.observations,'name','md.cfsurfacesquaretransient.observations','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'yts',md.constants.yts);
+		WriteData(fid,prefix,'data',self.weights,'name','md.cfsurfacesquaretransient.weights','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'yts',md.constants.yts);
+		end % }}}
+	end
+end
Index: /issm/trunk/src/m/classes/clusters/computecanada.m
===================================================================
--- /issm/trunk/src/m/classes/clusters/computecanada.m	(revision 28012)
+++ /issm/trunk/src/m/classes/clusters/computecanada.m	(revision 28013)
@@ -62,4 +62,6 @@
 			 if ~(cluster.cpuspertask > 0), md = checkmessage(md,'cpuspertask must be > 0'); end
 			 if ~(cluster.port >= 0), md = checkmessage(md,'port must be >= 0'); end
+			 if isempty(cluster.email), md = checkmessage(md,'email empty'); end
+			 if isempty(cluster.mailtype), md = checkmessage(md,'mailtype empty'); end
 			 if isempty(cluster.projectaccount), md = checkmessage(md,'projectaccount empty'); end
 			 if isempty(cluster.codepath), md = checkmessage(md,'codepath empty'); end
@@ -88,5 +90,5 @@
 			 fprintf(fid,'#SBATCH --mem-per-cpu=%igb\n',cluster.memory); %memory in in gigabytes
 			 fprintf(fid,'#SBATCH --mail-user=%s\n',cluster.email); %email
-			 fprintf(fid,'#SBATCH --mail-type=%s',cluster.mailtype); 
+			 fprintf(fid,'#SBATCH --mail-type=%s\n',cluster.mailtype); 
 			 fprintf(fid,'#SBATCH --output=%s.outlog \n',modelname);
 			 fprintf(fid,'#SBATCH --error=%s.errlog \n\n',modelname);
Index: /issm/trunk/src/m/classes/clusters/discovery.m
===================================================================
--- /issm/trunk/src/m/classes/clusters/discovery.m	(revision 28012)
+++ /issm/trunk/src/m/classes/clusters/discovery.m	(revision 28013)
@@ -19,5 +19,5 @@
 		memory        = 2;  %in Gb
 		email         = 'END,FAIL';
-
+		deleteckptdata= 0;
 	end
 	%}}}
@@ -42,4 +42,5 @@
 			disp(sprintf('    memory: %i Gb',cluster.memory));
 			disp(sprintf('    email: %s (notifications: BEGIN,END,FAIL)',cluster.email));
+			disp(sprintf('    deleteckptdata: %i',cluster.deleteckptdata));
 			disp(sprintf('    codepath:      %s',cluster.codepath));
 			disp(sprintf('    executionpath: %s',cluster.executionpath));
@@ -104,5 +105,5 @@
 			fprintf(fid,'#SBATCH --nodes=%i\n',cluster.numnodes);
 			fprintf(fid,'#SBATCH --ntasks-per-node=%i\n',cluster.cpuspernode);
-			fprintf(fid,'#SBATCH --time=%s\n',datestr(cluster.time/24,'HH:MM:SS')); %walltime is in HH:MM:SS format. cluster.time is in hour
+			fprintf(fid,'#SBATCH --time=%s\n',eraseBetween(datestr(cluster.time/24,'dd-HH:MM:SS'),1,1)); %walltime is in d-HH:MM:SS format. cluster.time is in hour
 			fprintf(fid,'#SBATCH --mem=%iG\n',cluster.memory);
 			if ~isempty(cluster.email)
@@ -116,4 +117,8 @@
 			if ~io_gather, %concatenate the output files:
 				fprintf(fid,'cat %s.outbin.* > %s.outbin',modelname,modelname);
+			end
+
+			if (cluster.deleteckptdata)
+				fprintf(fid,'rm -rf *.rst *.ckpt\n');
 			end
 			fclose(fid);
Index: /issm/trunk/src/m/classes/clusters/eis_nasa_smce.py
===================================================================
--- /issm/trunk/src/m/classes/clusters/eis_nasa_smce.py	(revision 28012)
+++ /issm/trunk/src/m/classes/clusters/eis_nasa_smce.py	(revision 28013)
@@ -86,9 +86,9 @@
     def checkconsistency(self, md, solution, analyses):  # {{{
         # Now, check cluster.cpuspernode according to processor type
-        if self.processor == 'skylake':
-            if self.cpuspernode > 14 or self.cpuspernode < 1:
-                md = md.checkmessage('cpuspernode should be between 1 and 14 for \'skyw\' processors in hyperthreading mode')
-        else:
-            md = md.checkmessage('unknown processor type, should be \'skylake\'')
+        #if self.processor == 'skylake':
+        #    if self.cpuspernode > 14 or self.cpuspernode < 1:
+        #        md = md.checkmessage('cpuspernode should be between 1 and 14 for \'skyw\' processors in hyperthreading mode')
+        #else:
+        #    md = md.checkmessage('unknown processor type, should be \'skylake\'')
 
         # Miscellaneous
Index: /issm/trunk/src/m/classes/clusters/generic.m
===================================================================
--- /issm/trunk/src/m/classes/clusters/generic.m	(revision 28012)
+++ /issm/trunk/src/m/classes/clusters/generic.m	(revision 28013)
@@ -33,5 +33,5 @@
 
 			%Change the defaults if ispc
-			if ispc & ~ismingw,
+			if ispc,
 				cluster.codepath      = [issmdir() '\bin'];
 				cluster.etcpath       = [issmdir() '\etc'];
@@ -99,31 +99,24 @@
 
 			if ~ispc(),
+				% Check that executable exists at the right path
+				if ~exist([cluster.codepath '/' executable],'file'),
+					error(['File ' cluster.codepath '/' executable ' does not exist']);
+				end
+
+				% Process codepath and prepend empty spaces with \ to avoid errors in queuing script
+				codepath=strrep(cluster.codepath,' ','\ ');
+
+				% Write queuing script
 				fid=fopen([modelname '.queue'],'w');
 				fprintf(fid,'#!%s\n',cluster.shell);
-				if ~isvalgrind,
-					if cluster.interactive
-						if IssmConfig('_HAVE_MPI_'),
-							fprintf(fid,'mpiexec -np %i %s/%s %s %s %s\n',cluster.np,cluster.codepath,executable,solution,[cluster.executionpath '/' dirname],modelname);
-						else
-							fprintf(fid,'%s/%s %s %s %s',cluster.codepath,executable,solution,[cluster.executionpath '/' dirname],modelname);
-						end
-					else
-						if IssmConfig('_HAVE_MPI_'),
-							fprintf(fid,'mpiexec -np %i %s/%s %s %s %s 2> %s.errlog > %s.outlog &',cluster.np,cluster.codepath,executable,solution,[cluster.executionpath '/' dirname],modelname,modelname,modelname);
-						else
-							fprintf(fid,'%s/%s %s %s %s 2> %s.errlog > %s.outlog &',cluster.codepath,executable,solution,[cluster.executionpath '/' dirname],modelname,modelname,modelname);
-						end
-					end
-				elseif isgprof,
-					fprintf(fid,'\n gprof %s/issm.exe gmon.out > %s.performance',cluster.codepath,modelname);
-				else
+				if isvalgrind,
 					%Add --gen-suppressions=all to get suppression lines
 					%fprintf(fid,'LD_PRELOAD=%s \\\n',cluster.valgrindlib); it could be deleted
 					if ismac,
 						if IssmConfig('_HAVE_MPI_'),
-							fprintf(fid,'mpiexec -np %i %s --leak-check=full --error-limit=no --dsymutil=yes --suppressions=%s %s/%s %s %s %s 2> %s.errlog > %s.outlog ',...
+							fprintf(fid,'mpiexec -np %i %s --leak-check=full --leak-check=full --show-leak-kinds=all --error-limit=no --dsymutil=yes --suppressions=%s %s/%s %s %s %s 2> %s.errlog > %s.outlog ',...
 							cluster.np,cluster.valgrind,cluster.valgrindsup,cluster.codepath,executable,solution,[cluster.executionpath '/' dirname], modelname,modelname,modelname);
 						else
-							fprintf(fid,'%s --leak-check=full --dsymutil=yes --error-limit=no --suppressions=%s %s/%s %s %s %s 2> %s.errlog > %s.outlog',...
+							fprintf(fid,'%s --leak-check=full --dsymutil=yes --error-limit=no --leak-check=full --show-leak-kinds=all --suppressions=%s %s/%s %s %s %s 2> %s.errlog > %s.outlog',...
 							cluster.valgrind,cluster.valgrindsup,cluster.codepath,executable,solution,[cluster.executionpath '/' dirname], modelname,modelname,modelname);
 						end
@@ -137,4 +130,20 @@
 						end
 					end
+				elseif isgprof,
+					fprintf(fid,'\n gprof %s/issm.exe gmon.out > %s.performance',cluster.codepath,modelname);
+				else
+					if cluster.interactive
+						if IssmConfig('_HAVE_MPI_'),
+							fprintf(fid,'mpiexec -np %i %s/%s %s %s %s\n',cluster.np,cluster.codepath,executable,solution,[cluster.executionpath '/' dirname],modelname);
+						else
+							fprintf(fid,'%s/%s %s %s %s',cluster.codepath,executable,solution,[cluster.executionpath '/' dirname],modelname);
+						end
+					else
+						if IssmConfig('_HAVE_MPI_'),
+							fprintf(fid,'mpiexec -np %i %s/%s %s %s %s 2> %s.errlog > %s.outlog &',cluster.np,cluster.codepath,executable,solution,[cluster.executionpath '/' dirname],modelname,modelname,modelname);
+						else
+							fprintf(fid,'%s/%s %s %s %s 2> %s.errlog > %s.outlog &',cluster.codepath,executable,solution,[cluster.executionpath '/' dirname],modelname,modelname,modelname);
+						end
+					end
 				end
 				if ~io_gather, %concatenate the output files:
@@ -144,18 +153,11 @@
 
 			else % Windows
-
 				fid=fopen([modelname '.bat'],'w');
 				fprintf(fid,'@echo off\n');
 
-				% if IssmConfig('_HAVE_PETSC_MPI_'),
-				% 	warning('parallel runs not allowed yet in Windows. Defaulting to 1 cpus');
-				% 	cluster.np=1;
-				% end
-
 				if cluster.np>1,
-					% fprintf(fid,'"C:\\Program Files\\MPICH2\\bin\\mpiexec.exe" -n %i "%s/%s" %s ./ %s',cluster.np,cluster.codepath,executable,solution,modelname);
 					fprintf(fid,'"C:\\Program Files\\Microsoft MPI\\Bin\\mpiexec.exe" -n %i "%s/%s" %s ./ %s',cluster.np,cluster.codepath,executable,solution,modelname);
 				else
-					fprintf(fid,'"%s/%s" %s ./ %s',cluster.codepath,executable,solution,modelname);
+					fprintf(fid,'"%s\\%s" %s ./ %s',cluster.codepath,executable,solution,modelname);
 				end
 				fclose(fid);
Index: /issm/trunk/src/m/classes/clusters/generic.py
===================================================================
--- /issm/trunk/src/m/classes/clusters/generic.py	(revision 28012)
+++ /issm/trunk/src/m/classes/clusters/generic.py	(revision 28013)
@@ -18,5 +18,5 @@
 
 class generic(object):
-    """GENERIC cluster class definition
+    """generic cluster class definition
 
     Usage:
Index: /issm/trunk/src/m/classes/clusters/generic_static.m
===================================================================
--- /issm/trunk/src/m/classes/clusters/generic_static.m	(revision 28012)
+++ /issm/trunk/src/m/classes/clusters/generic_static.m	(revision 28013)
@@ -3,4 +3,10 @@
 %   Usage:
 %      cluster=generic_static('name','astrid','np',3);
+%      cluster=generic('name',oshostname(),'np',3,'login','username');
+%
+%   TODO:
+%   - Add support for restart to Windows (under MSYS2), then activate tests 125 
+%   and 126 in test suite
+%
 
 classdef generic_static
@@ -17,4 +23,5 @@
 	methods
 		function cluster=generic_static(varargin) % {{{
+
 			%use provided options to change fields
 			options=pairoptions(varargin{:});
@@ -69,19 +76,31 @@
 			end
 
-			% Check that executable exists at the right path
-			if ~exist([cluster.codepath '/' executable],'file'),
-				error(['File ' cluster.codepath '/' executable ' does not exist']);
+			if ~ispc(),
+				% Check that executable exists at the right path
+				if ~exist([cluster.codepath '/' executable],'file'),
+					error(['File ' cluster.codepath '/' executable ' does not exist']);
+				end
+
+				% Process codepath and prepend empty spaces with \ to avoid errors in queuing script
+				codepath=strrep(cluster.codepath,' ','\ ');
+
+				% Write queuing script
+				fid=fopen([modelname '.queue'],'w');
+				fprintf(fid,'#!%s\n',cluster.shell);
+				fprintf(fid,['%s/mpiexec -np %i %s/%s %s %s %s \n'],codepath,cluster.np,codepath,executable,solution,'./',modelname);
+				fclose(fid);
+			else % Windows
+				fid=fopen([modelname '.bat'],'w');
+				fprintf(fid,'@echo off\n');
+
+				if cluster.np>1,
+					fprintf(fid,'"%s\\mpiexec.exe" -n %i "%s/%s" %s ./ %s',cluster.codepath,cluster.np,cluster.codepath,executable,solution,modelname);
+				else
+					fprintf(fid,'"%s\\%s" %s ./ %s',cluster.codepath,executable,solution,modelname);
+				end
+				fclose(fid);
 			end
 
-			% Process codepath and prepend empty spaces with \ to avoid errors in queuing script
-			codepath=strrep(cluster.codepath,' ','\ ');
-
-			% Write queuing script
-			fid=fopen([modelname '.queue'],'w');
-			fprintf(fid,'#!%s\n',cluster.shell);
-			fprintf(fid,['%s/mpiexec -np %i %s/%s %s %s %s \n'],codepath,cluster.np,codepath,executable,solution,'./',modelname);
-			fclose(fid);
-
-			% Create an errlog and outlog file
+			%Create an errlog and outlog file
 			fid=fopen([modelname '.errlog'],'w');
 			fclose(fid);
Index: /issm/trunk/src/m/classes/clusters/local.py
===================================================================
--- /issm/trunk/src/m/classes/clusters/local.py	(revision 28012)
+++ /issm/trunk/src/m/classes/clusters/local.py	(revision 28013)
@@ -8,5 +8,5 @@
 
 
-class local(object): #{{{
+class local(object):  # {{{
     """LOCAL class definition
 
@@ -16,5 +16,5 @@
     """
 
-    def __init__(self, *args): #{{{
+    def __init__(self, *args):  # {{{
         self.name           = ''
         self.np             = 1
@@ -34,5 +34,5 @@
         self = options.AssignObjectFields(self)
 
-    def __repr__(cluster): #{{{
+    def __repr__(cluster):  # {{{
         # Display the object
         s = 'class {} = \n'.format(type(cluster).__name__)
@@ -46,7 +46,7 @@
 
         return s
-    #}}}
+    # }}}
 
-    def checkconsistency(self, md, solution, analyses): #{{{
+    def checkconsistency(self, md, solution, analyses):  # {{{
         if cluster.np < 1:
             md.checkmessage('number of processors should be at least 1')
@@ -56,7 +56,7 @@
 
         return md
-    #}}}
+    # }}}
 
-    def BuildQueueScript(cluster, dirname, modelname, solution, io_gather, isvalgrind, isgporf, isdakota, isoceancoupling): #{{{
+    def BuildQueueScript(cluster, dirname, modelname, solution, io_gather, isvalgrind, isgporf, isdakota, isoceancoupling):  # {{{
         # Which executable are we calling?
         executable = 'issm.exe' # Default
@@ -70,14 +70,14 @@
         fid.close()
 
-    def UploadQueueJob(cluster, modelname, dirname, filelist): #{{{
+    def UploadQueueJob(cluster, modelname, dirname, filelist):  # {{{
         # Do nothing really
         pass
-    #}}}
+    # }}}
 
-    def LaunchQueueJob(cluster, modelname, dirname, filelist, restart, batch): #{{{
+    def LaunchQueueJob(cluster, modelname, dirname, filelist, restart, batch):  # {{{
         subprocess.call('source ' + modelname + '.queue')
-    #}}}
+    # }}}
 
-    def Download(cluster, dirname, filelist): #{{{
+    def Download(cluster, dirname, filelist):  # {{{
         pass
-    #}}}
+    # }}}
Index: /issm/trunk/src/m/classes/clusters/localpfe.m
===================================================================
--- /issm/trunk/src/m/classes/clusters/localpfe.m	(revision 28012)
+++ /issm/trunk/src/m/classes/clusters/localpfe.m	(revision 28013)
@@ -92,7 +92,23 @@
 			fclose(fid);
 
-
-			%in interactive mode, create a run file, and errlog and outlog file
-			if cluster.interactive,
+			%in interactive mode, create a run file, and errlog and outlog file
+			if cluster.interactive,
+				fid=fopen([modelname '.run'],'w');
+				if cluster.interactive==10,
+						fprintf(fid,'module unload mpi-mvapich2/1.4.1/gcc\n');
+						fprintf(fid,'mpiexec -np %i %s/%s %s %s %s\n',cluster.np,cluster.codepath,executable,solution,[pwd() '/run'],modelname);
+				else
+					if ~isvalgrind,
+						fprintf(fid,'mpiexec -np %i %s/%s %s %s %s\n',cluster.np,cluster.codepath,executable,solution,cluster.executionpath,modelname);
+						%fprintf(fid,'mpiexec -np %i %s/%s %s %s %s\n',cluster.nprocs(),cluster.codepath,executable,solution,[cluster.executionpath '/Interactive' num2str(cluster.interactive)],modelname);
+					else
+						fprintf(fid,'mpiexec -np %i valgrind --leak-check=full %s/%s %s %s %s\n',cluster.np,cluster.codepath,executable,solution,[cluster.executionpath '/Interactive' num2str(cluster.interactive)],modelname);
+					end
+				end
+				if ~io_gather, %concatenate the output files:
+					fprintf(fid,'cat %s.outbin.* > %s.outbin',modelname,modelname);
+				end
+				fclose(fid);
+
 				fid=fopen([modelname '.errlog'],'w'); fclose(fid);
 				fid=fopen([modelname '.outlog'],'w'); fclose(fid);
@@ -224,5 +240,5 @@
 				end
 				if cluster.interactive,
-					compressstring = [compressstring ' ' modelname '.errlog ' modelname '.outlog '];
+					compressstring = [compressstring ' ' modelname '.run '  modelname '.errlog ' modelname '.outlog '];
 				end
 				system(compressstring);
Index: /issm/trunk/src/m/classes/clusters/pace.m
===================================================================
--- /issm/trunk/src/m/classes/clusters/pace.m	(revision 28012)
+++ /issm/trunk/src/m/classes/clusters/pace.m	(revision 28013)
@@ -9,5 +9,5 @@
 	properties (SetAccess=public)
 	% {{{
-		name            = 'login-phoenix-4.pace.gatech.edu' %Phoenix cluster name
+		name            = 'login-phoenix-slurm.pace.gatech.edu' %Phoenix cluster name
 		login           = ''; %personal login
 		numnodes        = 1; %number of nodes requested
@@ -17,5 +17,5 @@
 		queue           = 'inferno'; %queue
 		time            = 60; %time requested per run [minutes]
-		accountname     = 'GT-arobel3-atlas'; %group account name
+		accountname     = 'gts-arobel3-atlas'; %group account name
 		codepath        = ''; %path to issm binaries
 		executionpath   = ''; %path for execution folder
@@ -62,17 +62,20 @@
 			fid=fopen([modelname '.queue'],'w');
 			fprintf(fid,'#!/bin/sh\n');
-			fprintf(fid,'#PBS -l walltime=%i\n',cluster.time*60); %conversion of walltime from minutes to seconds.
-			fprintf(fid,'#PBS -N %s\n',modelname);
-			fprintf(fid,'#PBS -l nodes=1:ppn=%i\n',cluster.np);
-			fprintf(fid,'#PBS -l pmem=%igb\n',cluster.mem);
-			fprintf(fid,'#PBS -q %s\n',cluster.queue);
-			fprintf(fid,'#PBS -A %s\n',cluster.accountname);
-		
-			fprintf(fid,'#PBS -o %s/%s/%s.outlog \n',cluster.executionpath,dirname,modelname);
-         fprintf(fid,'#PBS -e %s/%s/%s.errlog \n\n',cluster.executionpath,dirname,modelname);
 
-			fprintf(fid,'export PBS_O_WORKDIR=%s\n',[cluster.executionpath '/' dirname]);
-			fprintf(fid,'cd $PBS_O_WORKDIR\n');
-			fprintf(fid,'mpiexec -np %i %s/%s %s %s %s \n',cluster.np,cluster.codepath,executable,solution,[cluster.executionpath '/' dirname],modelname);	
+			fprintf(fid,'#SBATCH -t%i\n',cluster.time);
+         fprintf(fid,'#SBATCH -J%s\n',modelname);
+         fprintf(fid,'#SBATCH -N 1 --ntasks-per-node=%i\n',cluster.np);
+         %fprintf(fid,'#SBATCH -N %i\n',cluster.numnodes);
+         %fprintf(fid,'#SBATCH --ntasks=1\n');
+         %fprintf(fid,'#SBATCH --cpus-per-task=%i\n',cluster.np);
+         fprintf(fid,'#SBATCH --mem-per-cpu=%iG\n',cluster.mem);
+         fprintf(fid,'#SBATCH -p%s\n',cluster.queue);
+         fprintf(fid,'#SBATCH -A %s\n',cluster.accountname);
+         fprintf(fid,'#SBATCH -o%s/%s/%s.outlog \n',cluster.executionpath,dirname,modelname);
+         fprintf(fid,'#SBATCH -e%s/%s/%s.errlog \n\n',cluster.executionpath,dirname,modelname);
+         fprintf(fid,'export SLURM_SUBMIT_DIR=%s\n',[cluster.executionpath '/' dirname]);
+         fprintf(fid,'cd $SLURM_SUBMIT_DIR\n');
+         fprintf(fid,'export LD_LIBRARY_PATH=/opt/slurm/current/lib:/opt/pmix/current/lib:$LD_LIBRARY_PATH \n');
+         fprintf(fid,'srun --mpi=pmi2 -n %i %s/%s %s %s %s \n',cluster.np,cluster.codepath,executable,solution,[cluster.executionpath '/' dirname],modelname);
 
 			fclose(fid);
@@ -97,8 +100,8 @@
 			disp('launching solution sequence on remote cluster');
 			if ~isempty(restart)
-				launchcommand=['cd ' cluster.executionpath ' && cd ' dirname ' && qsub ' modelname '.queue '];
+				launchcommand=['cd ' cluster.executionpath ' && cd ' dirname ' && sbatch ' modelname '.queue '];
 			else
 				launchcommand=['cd ' cluster.executionpath ' && rm -rf ./' dirname ' && mkdir ' dirname ...
-					' && cd ' dirname ' && mv ../' dirname '.tar.gz ./ && tar -zxf ' dirname '.tar.gz  && qsub ' modelname '.queue '];
+					' && cd ' dirname ' && mv ../' dirname '.tar.gz ./ && tar -zxf ' dirname '.tar.gz  && sbatch ' modelname '.queue '];
 			end
 			issmssh(cluster.name,cluster.login,cluster.port,launchcommand);
Index: /issm/trunk/src/m/classes/clusters/pfe.m
===================================================================
--- /issm/trunk/src/m/classes/clusters/pfe.m	(revision 28012)
+++ /issm/trunk/src/m/classes/clusters/pfe.m	(revision 28013)
@@ -11,5 +11,5 @@
 		name           = 'pfe'
 		login          = '';
-		modules        = {'comp-intel/2018.3.222' 'mpi-intel/2018.3.222' 'scicon/app-tools'};
+		modules        = {'comp-intel/2018.3.222' '/nasa/intel/impi/2021.3/modulefiles/mpi/2021.3.0' 'scicon/app-tools'};
 		numnodes       = 20;
 		cpuspernode    = 8;
@@ -181,5 +181,5 @@
 			fprintf(fid,'cd %s/%s/\n\n',cluster.executionpath,dirname);
 			if ~isvalgrind,
-				fprintf(fid,'/u/scicon/tools/bin/toss3/several_tries mpiexec -np %i mbind.x -cs -n%i %s/%s %s %s/%s %s\n',cluster.nprocs(),cluster.cpuspernode,cluster.codepath,executable,solution,cluster.executionpath,dirname,modelname);
+				fprintf(fid,'/u/scicon/tools/bin/several_tries mpiexec -np %i mbind.x -cs -n%i %s/%s %s %s/%s %s\n',cluster.nprocs(),cluster.cpuspernode,cluster.codepath,executable,solution,cluster.executionpath,dirname,modelname);
 			else
 				fprintf(fid,'mpiexec -np %i valgrind --leak-check=full %s/%s %s %s %s\n',cluster.nprocs(),cluster.codepath,executable,solution,[cluster.executionpath '/' dirname],modelname);
Index: /issm/trunk/src/m/classes/clusters/saga.py
===================================================================
--- /issm/trunk/src/m/classes/clusters/saga.py	(revision 28012)
+++ /issm/trunk/src/m/classes/clusters/saga.py	(revision 28013)
@@ -140,7 +140,7 @@
             #fid.write('srun {} --tool=callgrind {}/{} {} {}/{} {} 2>{}.errlog>{}.outlog \n'.format(self.valgrind, self.codepath, executable, solution, self.executionpath, dirname, modelname, modelname, modelname))
             # leak check
-            fid.write('srun {} --leak-check=full {}/{} {} {}/{} {} 2>{}.errlog>{}.outlog '.format(self.valgrind, self.codepath, executable, solution, self.executionpath, dirname, modelname, modelname, modelname))
+            fid.write('mpirun --bind-to none {} --leak-check=full {}/{} {} {}/{} {} 2>{}.errlog>{}.outlog '.format(self.valgrind, self.codepath, executable, solution, self.executionpath, dirname, modelname, modelname, modelname))
         else:
-            fid.write('time srun {}/{} {} {}/{} {}\n'.format(self.codepath, executable, solution, self.executionpath, dirname, modelname))
+            fid.write('time mpirun --bind-to none {}/{} {} {}/{} {}\n'.format(self.codepath, executable, solution, self.executionpath, dirname, modelname))
         fid.close()
 
Index: /issm/trunk/src/m/classes/constants.py
===================================================================
--- /issm/trunk/src/m/classes/constants.py	(revision 28012)
+++ /issm/trunk/src/m/classes/constants.py	(revision 28013)
@@ -20,5 +20,5 @@
         #set defaults
         self.setdefaultparameters()
-    #}}}
+    # }}}
     def __repr__(self):  # {{{
         s = '   constants parameters:\n'
@@ -29,5 +29,5 @@
         s += '{}\n'.format(fielddisplay(self, 'gravitational_constant', 'Newtonian constant of gravitation [m^3/kg/s^2]'))
         return s
-    #}}}
+    # }}}
     def setdefaultparameters(self):  # {{{
         # Acceleration due to gravity (m / s^2)
@@ -47,5 +47,5 @@
 
         return self
-    #}}}
+    # }}}
     def checkconsistency(self, md, solution, analyses):  # {{{
         md = checkfield(md, 'fieldname', 'constants.g', '>=', 0, 'size', [1]) # We allow 0 for validation tests
Index: /issm/trunk/src/m/classes/damage.py
===================================================================
--- /issm/trunk/src/m/classes/damage.py	(revision 28012)
+++ /issm/trunk/src/m/classes/damage.py	(revision 28013)
@@ -74,5 +74,5 @@
             self.spcdamage = project3d(md, 'vector', self.spcdamage, 'type', 'node')
             return self
-    #}}}
+    # }}}
 
     def setdefaultparameters(self):  # {{{
@@ -109,5 +109,5 @@
             list = ['DamageD']
         return list
-    #}}}
+    # }}}
 
     def checkconsistency(self, md, solution, analyses):  # {{{
Index: /issm/trunk/src/m/classes/debris.m
===================================================================
--- /issm/trunk/src/m/classes/debris.m	(revision 28012)
+++ /issm/trunk/src/m/classes/debris.m	(revision 28013)
@@ -6,14 +6,15 @@
 classdef debris
 	properties (SetAccess=public)
-		 spcthickness           = NaN;
-		 min_thickness          = 0;
-		 stabilization          = 0;
-		 packingfraction	= 0;
-		 removalmodel		= 0;
-		 displacementmodel	= 0;
-		 removal_slope_threshold= 0;
-		 removal_stress_threshold=0;
-		 vertex_pairing         = NaN;
-		 requested_outputs      = {};
+		spcthickness             = NaN;
+		min_thickness            = 0;
+		stabilization            = 0;
+		packingfraction          = 0;
+		removalmodel             = 0;
+		displacementmodel        = 0;
+		max_displacementvelocity = 0;
+		removal_slope_threshold  = 0;
+		removal_stress_threshold = 0;
+		vertex_pairing           = NaN;
+		requested_outputs        = {};
 	end
 	methods (Static)
@@ -23,6 +24,4 @@
 			% old fields must be recovered (make sure they are in the deprecated
 			% model properties)
-
-
 
 			if verLessThan('matlab','7.9'),
@@ -68,5 +67,5 @@
 		function list = defaultoutputs(self,md) % {{{
 
-			list = {'DebrisThickness'};
+			list = {'DebrisThickness','DebrisMaskNodeActivation','VxDebris','VyDebris'};
 
 		end % }}}
@@ -89,8 +88,11 @@
 
 			%Slope threshold for removalmodel (1)
-                 	self.removal_slope_threshold=0;
-                 	
+			self.removal_slope_threshold=0;
+
 			%Stress threshold for removalmodel (2)
 			self.removal_stress_threshold=0;
+
+			%Max velocity for displacementmodel (1)
+			self.max_displacementvelocity=0;
 
 			%default output
@@ -100,12 +102,13 @@
 
 			%Early return
-			if ~ismember('MasstransportAnalysis',analyses) |  (strcmp(solution,'TransientSolution') & md.transient.isdebris==0), return; end
+			if ~ismember('MasstransportAnalysis',analyses) | (strcmp(solution,'TransientSolution') & md.transient.isdebris==0), return; end
 
 			md = checkfield(md,'fieldname','debris.spcthickness');
-			md = checkfield(md,'fieldname','debris.stabilization','values',[0 1 2 3]);
+			md = checkfield(md,'fieldname','debris.stabilization','values',[0 1 2 3 4 5]);
 			md = checkfield(md,'fieldname','debris.min_thickness','>=',0);
 			md = checkfield(md,'fieldname','debris.packingfraction','>=',0);
 			md = checkfield(md,'fieldname','debris.removalmodel','values',[0 1 2]);
 			md = checkfield(md,'fieldname','debris.displacementmodel','values',[0 1 2]);
+			md = checkfield(md,'fieldname','debris.max_displacementvelocity','>=',0);
 			md = checkfield(md,'fieldname','debris.removal_slope_threshold','>=',0);
 			md = checkfield(md,'fieldname','debris.removal_stress_threshold','>=',0);
@@ -116,5 +119,5 @@
 		end % }}}
 		function disp(self) % {{{
-			disp(sprintf('   Debris solution parameters:'));
+			disp(sprintf('   debris solution parameters:'));
 			fielddisplay(self,'spcthickness','debris thickness constraints (NaN means no constraint) [m]');
 			fielddisplay(self,'min_thickness','minimum debris thickness allowed [m]');
@@ -122,5 +125,6 @@
 			fielddisplay(self,'stabilization','0: no stabilization, 1: artificial diffusion, 2: streamline upwinding, 3: streamline upwind Petrov-Galerkin (SUPG)');
 			fielddisplay(self,'removalmodel','frontal removal of debris. 0: no removal, 1: Slope-triggered debris removal, 2: driving-stress triggered debris removal');
-			fielddisplay(self,'displacementmodel','debris displacement. 0: no displacement, 1: ...');
+			fielddisplay(self,'displacementmodel','debris displacement. 0: no displacement, 1: additional debris velocity above the critical slope/stress threshold');
+			fielddisplay(self,'max_displacementvelocity','maximum velocity of debris transport (v_ice + v_displacement) (m/a)');
 			fielddisplay(self,'removal_slope_threshold','critical slope (degrees) for removalmodel (1)');
 			fielddisplay(self,'removal_stress_threshold','critical stress (Pa) for removalmodel (2)');
@@ -132,7 +136,4 @@
 		end % }}}
 		function marshall(self,prefix,md,fid) % {{{
-
-			yts=md.constants.yts;
-
 			WriteData(fid,prefix,'object',self,'fieldname','spcthickness','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'yts',md.constants.yts);
 			WriteData(fid,prefix,'object',self,'fieldname','min_thickness','format','Double');
@@ -140,4 +141,5 @@
 			WriteData(fid,prefix,'object',self,'fieldname','removalmodel','format','Integer');
 			WriteData(fid,prefix,'object',self,'fieldname','displacementmodel','format','Integer');
+			WriteData(fid,prefix,'object',self,'fieldname','max_displacementvelocity','format','Double');
 			WriteData(fid,prefix,'object',self,'fieldname','removal_slope_threshold','format','Double');
 			WriteData(fid,prefix,'object',self,'fieldname','removal_stress_threshold','format','Double');
@@ -161,4 +163,5 @@
 			writejsdouble(fid,[modelname '.debris.removalmodel'],self.removalmodel);
 			writejsdouble(fid,[modelname '.debris.displacementmodel'],self.displacementmodel);
+			writejsdouble(fid,[modelname '.debris.max_displacementvelocity'],self.displacementmodel);
 			writejsdouble(fid,[modelname '.debris.removal_slope_threshold'],self.removal_slope_threshold);
 			writejsdouble(fid,[modelname '.debris.removal_stress_threshold'],self.removal_stress_threshold);
Index: /issm/trunk/src/m/classes/debris.py
===================================================================
--- /issm/trunk/src/m/classes/debris.py	(revision 28013)
+++ /issm/trunk/src/m/classes/debris.py	(revision 28013)
@@ -0,0 +1,136 @@
+import numpy as np
+
+from checkfield import checkfield
+from fielddisplay import fielddisplay
+from project3d import project3d
+from WriteData import WriteData
+
+
+class debris(object):
+    """debris class definition
+
+    Usage:
+        debris = debris()
+    """
+
+    def __init__(self, *args):  # {{{
+        self.spcthickness = np.nan
+        self.min_thickness = 0
+        self.stabilization = 0
+        self.packingfraction = 0
+        self.removalmodel = 0
+        self.displacementmodel = 0
+        self.max_displacementvelocity = 0
+        self.removal_slope_threshold = 0
+        self.removal_stress_threshold = 0
+        self.vertex_pairing = np.nan
+        self.requested_outputs = []
+
+        nargs = len(args)
+        if nargs == 0:
+            self.setdefaultparameters()
+        elif nargs == 1:
+            # TODO: Replace the following with constructor
+            self.setdefaultparameters()
+        else:
+            raise Exception('constructor not supported')
+    # }}}
+
+    def __repr__(self):  # {{{
+        s = '   debris solution parameters:\n'
+        s += '{}\n'.format(fielddisplay(self,'spcthickness','debris thickness constraints (NaN means no constraint) [m]'))
+        s += '{}\n'.format(fielddisplay(self,'min_thickness','minimum debris thickness allowed [m]'))
+        s += '{}\n'.format(fielddisplay(self,'packingfraction','fraction of debris covered in the ice'))
+        s += '{}\n'.format(fielddisplay(self,'stabilization','0: no stabilization, 1: artificial diffusion, 2: streamline upwinding, 3: streamline upwind Petrov-Galerkin (SUPG)'))
+        s += '{}\n'.format(fielddisplay(self,'removalmodel','frontal removal of debris. 0: no removal, 1: Slope-triggered debris removal, 2: driving-stress triggered debris removal'))
+        s += '{}\n'.format(fielddisplay(self,'displacementmodel','debris displacement. 0: no displacement, 1: ...'))
+        s += '{}\n'.format(fielddisplay(self,'max_displacementvelocity','maximum velocity of debris transport (v_ice + v_displacement) (m/a)'))
+        s += '{}\n'.format(fielddisplay(self,'removal_slope_threshold','critical slope (degrees) for removalmodel (1)'))
+        s += '{}\n'.format(fielddisplay(self,'removal_stress_threshold','critical stress (Pa) for removalmodel (2)'))
+
+        s += '\n      {}\n'.format('Penalty options:')
+        s += '{}\n'.format(fielddisplay(self,'vertex_pairing','pairs of vertices that are penalized'))
+        s += '{}\n'.format(fielddisplay(self,'requested_outputs','additional outputs requested'))
+        return s
+    # }}}
+
+    def defaultoutputs(self, md):  # {{{
+        return ['DebrisThickness', 'DebrisMaskNodeActivation', 'VxDebris', 'VyDebris']
+    # }}}
+
+    def setdefaultparameters(self):  # {{{
+        # Type of stabilization to use 0:nothing 1:artificial_diffusivity 3:Discontinuous Galerkin
+        self.stabilization = 2
+
+        # Minimum debris thickness that can be used
+        self.min_thickness = 0
+
+        # Fraction of debris covered in the ice
+        self.packingfraction = 0.01
+
+        # Type of frontal debris removal
+        self.removalmodel = 0
+
+        # Type of debris displacement
+        self.displacementmodel = 0
+
+        # Slope threshold for removalmodel (1)
+        self.removal_slope_threshold = 0
+
+        # Stress threshold for removalmodel (2)
+        self.removal_stress_threshold = 0
+
+        # Max velocity for displacementmodel (1)
+        self.max_displacementvelocity = 0
+
+        # Default output
+        self.requested_outputs = ['default']
+        return self
+    # }}}
+
+    def checkconsistency(self, md, solution, analyses):  # {{{
+        # Early return
+        if not 'MasstransportAnalysis' in analyses or solution == 'TransientSolution' and not md.transient.isdebris:
+            return md
+
+        md = checkfield(md, 'fieldname', 'debris.spcthickness')
+        md = checkfield(md, 'fieldname', 'debris.stabilization', 'values', [0, 1, 2, 3, 4, 5])
+        md = checkfield(md, 'fieldname', 'debris.min_thickness', '>=', 0)
+        md = checkfield(md, 'fieldname', 'debris.packingfraction', '>=', 0)
+        md = checkfield(md, 'fieldname', 'debris.removalmodel', 'values', [0, 1, 2])
+        md = checkfield(md, 'fieldname', 'debris.displacementmodel', 'values', [0, 1, 2])
+        md = checkfield(md, 'fieldname', 'debris.max_displacementvelocity', '>=', 0)
+        md = checkfield(md, 'fieldname', 'debris.removal_slope_threshold', '>=', 0)
+        md = checkfield(md, 'fieldname', 'debris.removal_stress_threshold', '>=', 0)
+        md = checkfield(md, 'fieldname', 'debris.requested_outputs', 'stringrow', 1)
+
+        if not np.any(np.isnan(md.stressbalance.vertex_pairing)) and len(md.stressbalance.vertex_pairing) > 0:
+            md = checkfield(md, 'fieldname', 'stressbalance.vertex_pairing', '>', 0)
+        return md
+    # }}}
+
+    def marshall(self, prefix, md, fid):  # {{{
+        WriteData(fid, prefix, 'object', self, 'fieldname', 'spcthickness', 'format', 'DoubleMat', 'mattype', 1, 'timeserieslength', md.mesh.numberofvertices + 1, 'yts', md.constants.yts)
+        WriteData(fid, prefix, 'object', self, 'fieldname', 'min_thickness', 'format', 'Double')
+        WriteData(fid, prefix, 'object', self, 'fieldname', 'stabilization', 'format', 'Integer')
+        WriteData(fid, prefix, 'object', self, 'fieldname', 'removalmodel', 'format', 'Integer')
+        WriteData(fid, prefix, 'object', self, 'fieldname', 'displacementmodel', 'format', 'Integer')
+        WriteData(fid, prefix, 'object', self, 'fieldname', 'max_displacementvelocity', 'format', 'Double')
+        WriteData(fid, prefix, 'object', self, 'fieldname', 'removal_slope_threshold', 'format', 'Double')
+        WriteData(fid, prefix, 'object', self, 'fieldname', 'removal_stress_threshold', 'format', 'Double')
+        WriteData(fid, prefix, 'object', self, 'fieldname', 'packingfraction', 'format', 'Double')
+        WriteData(fid, prefix, 'object', self, 'fieldname', 'vertex_pairing', 'format', 'DoubleMat', 'mattype', 3)
+
+        # Process requested outputs
+        outputs = self.requested_outputs
+        indices = [i for i, x in enumerate(outputs) if x == 'default']
+        if len(indices) > 0:
+            outputscopy = outputs[0:max(0, indices[0] - 1)] + self.defaultoutputs(md) + outputs[indices[0] + 1:]
+            outputs = outputscopy
+        WriteData(fid, prefix, 'data', outputs, 'name', 'md.debris.requested_outputs', 'format', 'StringArray')
+    # }}}
+
+    def extrude(self, md):  #{{{
+        self.spcthickness = project3d(md, 'vector', self.spcthickness, 'type', 'node')
+        return
+    # }}}
Index: /issm/trunk/src/m/classes/debug.py
===================================================================
--- /issm/trunk/src/m/classes/debug.py	(revision 28012)
+++ /issm/trunk/src/m/classes/debug.py	(revision 28013)
@@ -19,5 +19,5 @@
         self.setdefaultparameters()
 
-    #}}}
+    # }}}
 
     def __repr__(self):  # {{{
@@ -28,9 +28,9 @@
         string = "%s\n%s" % (string, fielddisplay(self, 'profiling', 'enables profiling (memory, flops, time)'))
         return string
-    #}}}
+    # }}}
 
     def setdefaultparameters(self):  # {{{
         return self
-    #}}}
+    # }}}
 
     def marshall(self, prefix, md, fid):  # {{{
Index: /issm/trunk/src/m/classes/dependent.m
===================================================================
--- /issm/trunk/src/m/classes/dependent.m	(revision 28012)
+++ /issm/trunk/src/m/classes/dependent.m	(revision 28013)
@@ -10,5 +10,4 @@
 		exp                  = '';
 		segments             = [];
-		index                = -1;
 		nods                 = 0;
 	end
@@ -22,5 +21,4 @@
 			self.exp=getfieldvalue(options,'exp','');
 			self.segments=getfieldvalue(options,'segments',[]);
-			self.index=getfieldvalue(options,'index',-1);
 			self.nods=getfieldvalue(options,'nods',0);
 
Index: /issm/trunk/src/m/classes/dsl.py
===================================================================
--- /issm/trunk/src/m/classes/dsl.py	(revision 28012)
+++ /issm/trunk/src/m/classes/dsl.py	(revision 28013)
@@ -23,5 +23,5 @@
         else:
             raise Exception('constructor not supported')
-    #}}}
+    # }}}
 
     def __repr__(self):  #{{{
@@ -31,5 +31,5 @@
         s += '{}\n'.format(fielddisplay(self, 'sea_water_pressure_at_sea_floor', 'Corresponds to bpo field in CMIP5 archives. Specified as a spatio-temporally variable quantity (in m equivalent, not in Pa!).'))
         return s
-    #}}}
+    # }}}
 
     def setdefaultparameters(self):  #{{{
@@ -37,5 +37,5 @@
         self.sea_surface_height_above_geoid = np.nan
         self.sea_water_pressure_at_sea_floor = np.nan
-    #}}}
+    # }}}
 
     def checkconsistency(self, md, solution, analyses):  #{{{
@@ -65,5 +65,5 @@
         self.sea_water_pressure_at_sea_floor = project3d(md, 'vector', self.sea_water_pressure_at_sea_floor, 'type', 'node', 'layer', 1)
         return self
-    #}}}
+    # }}}
 
     def initialize(self, md):  #{{{
@@ -79,3 +79,3 @@
             self.sea_water_pressure_at_sea_floor = np.append(np.zeros((md.mesh.numberofvertices, 1)), 0).reshape(-1, 1)
             print('      no dsl.sea_water_pressure_at_sea_floor specified: transient values set at zero')
-    #}}}
+    # }}}
Index: /issm/trunk/src/m/classes/dslmme.py
===================================================================
--- /issm/trunk/src/m/classes/dslmme.py	(revision 28012)
+++ /issm/trunk/src/m/classes/dslmme.py	(revision 28013)
@@ -18,11 +18,9 @@
         self.sea_water_pressure_at_sea_floor = [] #Corresponds to bpo field in CMIP5 archives. Specified as a spatio-temporally variable quantity (in m equivalent, not in Pa!) for each ensemble.
 
-        nargs = len(args)
-
-        if nargs == 0:
+        if len(args) == 0:
             self.setdefaultparameters()
         else:
             raise Exception('constructor not supported')
-    #}}}
+    # }}}
 
     def __repr__(self):  # {{{
@@ -33,9 +31,9 @@
         s += '{}\n'.format(fielddisplay(self, 'sea_water_pressure_at_sea_floor', 'Corresponds to bpo field in CMIP5 archives. Specified as a spatio-temporally variable quantity (in m equivalent, not in Pa!) for each ensemble.'))
         return s
-    #}}}
+    # }}}
 
     def setdefaultparameters(self):  #{{{
         return self
-    #}}}
+    # }}}
 
     def checkconsistency(self, md, solution, analyses):  # {{{
@@ -54,5 +52,5 @@
 
         return md
-    #}}}
+    # }}}
 
     def marshall(self, prefix, md, fid):  #{{{
@@ -63,5 +61,5 @@
         WriteData(fid, prefix, 'object', self, 'fieldname', 'sea_water_pressure_at_sea_floor', 'format', 'MatArray', 'timeserieslength', md.mesh.numberofvertices + 1)
         WriteData(fid, prefix, 'object', self, 'fieldname', 'sea_surface_height_above_geoid', 'format', 'MatArray', 'timeserieslength', md.mesh.numberofvertices + 1)
-    #}}}
+    # }}}
 
     def extrude(self, md):  #{{{
@@ -71,3 +69,3 @@
 
         return self
-    #}}}
+    # }}}
Index: /issm/trunk/src/m/classes/esa.py
===================================================================
--- /issm/trunk/src/m/classes/esa.py	(revision 28012)
+++ /issm/trunk/src/m/classes/esa.py	(revision 28013)
@@ -25,5 +25,5 @@
     #set defaults
         self.setdefaultparameters()
-    #}}}
+    # }}}
 
     def __repr__(self):  # {{{
@@ -52,5 +52,5 @@
         self.requested_outputs = ['default']
         return self
-    #}}}
+    # }}}
 
     def checkconsistency(self, md, solution, analyses):  # {{{
Index: /issm/trunk/src/m/classes/flowequation.py
===================================================================
--- /issm/trunk/src/m/classes/flowequation.py	(revision 28012)
+++ /issm/trunk/src/m/classes/flowequation.py	(revision 28013)
@@ -39,5 +39,5 @@
         
         self.setdefaultparameters()
-    #}}}
+    # }}}
 
     def __repr__(self):  # {{{
@@ -60,5 +60,5 @@
         s += '{}\n'.format(fielddisplay(self, 'borderFS', "vertices on FS' border (for tiling)"))
         return s
-    #}}}
+    # }}}
 
     def setdefaultparameters(self):  # {{{
@@ -72,5 +72,5 @@
         self.fe_FS = 'MINIcondensed'
         return self
-    #}}}
+    # }}}
 
     def extrude(self, md):  # {{{
@@ -81,5 +81,5 @@
         self.borderFS = project3d(md, 'vector', self.borderFS, 'type', 'node')
         return self
-    #}}}
+    # }}}
 
     def checkconsistency(self, md, solution, analyses):  # {{{
Index: /issm/trunk/src/m/classes/fourierlove.py
===================================================================
--- /issm/trunk/src/m/classes/fourierlove.py	(revision 28012)
+++ /issm/trunk/src/m/classes/fourierlove.py	(revision 28013)
@@ -13,5 +13,5 @@
     """
 
-    def __init__(self): #{{{
+    def __init__(self):  # {{{
         self.nfreq = 0
         self.frequencies = 0
@@ -37,7 +37,7 @@
 
         self.setdefaultparameters()
-    #}}}
+    # }}}
 
-    def __repr__(self): #{{{
+    def __repr__(self):  # {{{
         # TODO:
         # - Correct display to match MATLAB
@@ -78,7 +78,7 @@
 
         return s
-    #}}}
+    # }}}
 
-    def setdefaultparameters(self): #{{{
+    def setdefaultparameters(self):  # {{{
         # We setup an elastic love number computation by default
         self.nfreq = 1
@@ -104,7 +104,7 @@
         self.core_mantle_boundary = 2
         self.complex_computation = 0
-    #}}}
+    # }}}
 
-    def checkconsistency(self, md, solution, analyses): #{{{
+    def checkconsistency(self, md, solution, analyses):  # {{{
         if 'LoveAnalysis' not in analyses:
             return md
@@ -149,7 +149,7 @@
 
         return md
-    #}}}
+    # }}}
 
-    def marshall(self, prefix, md, fid): #{{{
+    def marshall(self, prefix, md, fid):  # {{{
         WriteData(fid, prefix, 'object', self, 'fieldname', 'nfreq', 'format', 'Integer')
         WriteData(fid, prefix, 'object', self, 'fieldname', 'frequencies', 'format', 'DoubleMat', 'mattype',3)
@@ -173,11 +173,11 @@
         WriteData(fid, prefix, 'object', self, 'fieldname', 'inner_core_boundary', 'format', 'Integer')
         WriteData(fid, prefix, 'object', self, 'fieldname', 'core_mantle_boundary', 'format', 'Integer')
-    #}}}
+    # }}}
 
-    def extrude(self, md): #{{{
+    def extrude(self, md):  # {{{
         return self
-    #}}}
+    # }}}
 
-    def build_frequencies_from_time(self): #{{{
+    def build_frequencies_from_time(self):  # {{{
         if not self.istemporal:
             raise RuntimeError('cannot build frequencies for temporal love numbers if love.istemporal==0')
@@ -192,3 +192,3 @@
                     self.frequencies[(i - 1) * 2 * self.n_temporal_iterations + j] = j * np.log(2) / self.time[i] / 2 / np.pi
         return self
-    #}}}
+    # }}}
Index: /issm/trunk/src/m/classes/friction.m
===================================================================
--- /issm/trunk/src/m/classes/friction.m	(revision 28012)
+++ /issm/trunk/src/m/classes/friction.m	(revision 28013)
@@ -10,4 +10,5 @@
 		q                        = NaN;
 		coupling                 = 0;
+		linearize                = 0;
 		effective_pressure       = NaN;
 		effective_pressure_limit = 0;
@@ -34,5 +35,6 @@
 		function self = setdefaultparameters(self) % {{{
 
-			self.coupling = 0;
+			self.linearize = 0;
+			self.coupling  = 0;
 			self.effective_pressure_limit = 0;
 
@@ -47,4 +49,5 @@
 			md = checkfield(md,'fieldname','friction.q','NaN',1,'Inf',1,'size',[md.mesh.numberofelements 1]);
 			md = checkfield(md,'fieldname','friction.p','NaN',1,'Inf',1,'size',[md.mesh.numberofelements 1]);
+			md = checkfield(md,'fieldname','friction.linearize','numel',[1],'values',[0:2]);
 			md = checkfield(md,'fieldname','friction.coupling','numel',[1],'values',[0:4]);
 			md = checkfield(md,'fieldname','friction.effective_pressure_limit','numel',[1],'>=',0);
@@ -58,6 +61,7 @@
 			fielddisplay(self,'p','p exponent');
 			fielddisplay(self,'q','q exponent');
+			fielddisplay(self,'coupling','Coupling flag 0: uniform sheet (negative pressure ok, default), 1: ice pressure only, 2: water pressure assuming uniform sheet (no negative pressure), 3: use provided effective_pressure, 4: use coupled model (not implemented yet)');
+			fielddisplay(self,'linearize','0: not linearized, 1: interpolated linearly, 2: constant per element (default is 0)');
 			fielddisplay(self,'effective_pressure','Effective Pressure for the forcing if not coupled [Pa]');
-			fielddisplay(self,'coupling','Coupling flag 0: uniform sheet (negative pressure ok, default), 1: ice pressure only, 2: water pressure assuming uniform sheet (no negative pressure), 3: use provided effective_pressure, 4: use coupled model (not implemented yet)');
 			fielddisplay(self,'effective_pressure_limit','Neff do not allow to fall below a certain limit: effective_pressure_limit*rho_ice*g*thickness (default 0)');
 		end % }}}
@@ -75,4 +79,5 @@
 			WriteData(fid,prefix,'object',self,'fieldname','q','format','DoubleMat','mattype',2);
 			WriteData(fid,prefix,'class','friction','object',self,'fieldname','coupling','format','Integer');
+			WriteData(fid,prefix,'class','friction','object',self,'fieldname','linearize','format','Integer');
 			WriteData(fid,prefix,'object',self,'class','friction','fieldname','effective_pressure_limit','format','Double');
 			if self.coupling==3 || self.coupling==4
@@ -86,4 +91,5 @@
 			writejs1Darray(fid,[modelname '.friction.q'],self.q);
 			writejs1Darray(fid,[modelname '.friction.coupling'],self.coupling);
+			writejs1Darray(fid,[modelname '.friction.linearize'],self.linearize);
 			writejs1Darray(fid,[modelname '.friction.effective_pressure'],self.effective_pressure);
 			writejs1Darray(fid,[modelname '.friction.effective_pressure_limit'],self.effective_pressure_limit);
Index: /issm/trunk/src/m/classes/friction.py
===================================================================
--- /issm/trunk/src/m/classes/friction.py	(revision 28012)
+++ /issm/trunk/src/m/classes/friction.py	(revision 28013)
@@ -8,5 +8,5 @@
 
 class friction(object):
-    """FRICTION class definition
+    """friction class definition
 
     Usage:
@@ -19,23 +19,27 @@
         self.q = np.nan
         self.coupling = 0
+        self.linearize = 0
         self.effective_pressure = np.nan
         self.effective_pressure_limit = 0
         self.setdefaultparameters()
-    #}}}
+    # }}}
     def __repr__(self):  # {{{
         s = 'Basal shear stress parameters: Sigma_b = coefficient^2 * Neff ^r * |u_b|^(s - 1) * u_b,\n'
         s += '(effective stress Neff = rho_ice * g * thickness + rho_water * g * base, r = q / p and s = 1 / p)\n'
-        s += '{}\n'.format(fielddisplay(self, "coefficient", "friction coefficient [SI]"))
-        s += '{}\n'.format(fielddisplay(self, "p", "p exponent"))
-        s += '{}\n'.format(fielddisplay(self, "q", "q exponent"))
+        s += '{}\n'.format(fielddisplay(self, 'coefficient', 'friction coefficient [SI]'))
+        s += '{}\n'.format(fielddisplay(self, 'p', 'p exponent'))
+        s += '{}\n'.format(fielddisplay(self, 'q', 'q exponent'))
         s += '{}\n'.format(fielddisplay(self, 'coupling', 'Coupling flag 0: uniform sheet (negative pressure ok, default), 1: ice pressure only, 2: water pressure assuming uniform sheet (no negative pressure), 3: use provided effective_pressure, 4: used coupled model (not implemented yet)'))
+        s += '{}\n'.format(fielddisplay(self, 'linearize', '0: not linearized, 1: interpolated linearly, 2: constant per element (default is 0)'))
         s += '{}\n'.format(fielddisplay(self, 'effective_pressure', 'Effective Pressure for the forcing if not coupled [Pa]'))
         s += '{}\n'.format(fielddisplay(self, 'effective_pressure_limit', 'Neff do not allow to fall below a certain limit: effective_pressure_limit * rho_ice * g * thickness (default 0)'))
         return s
-    #}}}
+    # }}}
     def setdefaultparameters(self):  # {{{
+        self.linearize = 0
+        self.coupling = 0
         self.effective_pressure_limit = 0
         return self
-    #}}}
+    # }}}
     def extrude(self, md):  # {{{
         self.coefficient = project3d(md, 'vector', self.coefficient, 'type', 'node', 'layer', 1)
@@ -45,9 +49,9 @@
             self.effective_pressure = project3d(md, 'vector', self.effective_pressure, 'type', 'node', 'layer', 1)
         return self
-    #}}}
+    # }}}
     def defaultoutputs(self, md):  # {{{
         list = []
         return list
-    #}}}
+    # }}}
     def checkconsistency(self, md, solution, analyses):  # {{{
         # Early return
@@ -59,4 +63,5 @@
         md = checkfield(md, 'fieldname', 'friction.q', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofelements])
         md = checkfield(md, 'fieldname', 'friction.p', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofelements])
+        md = checkfield(md, 'fieldname', 'friction.linearize', 'numel', [1], 'values', [0, 1, 2])
         md = checkfield(md, 'fieldname', 'friction.coupling', 'numel', [1], 'values', [0, 1, 2, 3, 4])
         md = checkfield(md, 'fieldname', 'friction.effective_pressure_limit', 'numel', [1], '>=', 0)
@@ -77,4 +82,5 @@
         WriteData(fid, prefix, 'object', self, 'fieldname', 'q', 'format', 'DoubleMat', 'mattype', 2)
         WriteData(fid, prefix, 'class', 'friction', 'object', self, 'fieldname', 'coupling', 'format', 'Integer')
+        WriteData(fid, prefix, 'class', 'friction', 'object', self, 'fieldname', 'linearize', 'format', 'Integer')
         WriteData(fid, prefix, 'object', self, 'class', 'friction', 'fieldname', 'effective_pressure_limit', 'format', 'Double')
         if self.coupling == 3 or self.coupling == 4:
Index: /issm/trunk/src/m/classes/frictioncoulomb.py
===================================================================
--- /issm/trunk/src/m/classes/frictioncoulomb.py	(revision 28012)
+++ /issm/trunk/src/m/classes/frictioncoulomb.py	(revision 28013)
@@ -24,5 +24,5 @@
 
         self.setdefaultparameters()
-    #}}}
+    # }}}
 
     def __repr__(self):  # {{{
@@ -37,10 +37,10 @@
         s += '{}\n'.format(fielddisplay(self, 'effective_pressure_limit', 'Neff do not allow to fall below a certain limit: effective_pressure_limit * rho_ice * g * thickness (default 0)'))
         return s
-    #}}}
+    # }}}
 
     def setdefaultparameters(self):  # {{{
         self.effective_pressure_limit = 0
         return self
-    #}}}
+    # }}}
 
     def extrude(self, md):  # {{{
@@ -56,5 +56,5 @@
             raise ValueError('not supported yet')
         return self
-    #}}}
+    # }}}
     def checkconsistency(self, md, solution, analyses):  # {{{
         # Early return
Index: /issm/trunk/src/m/classes/frictionhydro.py
===================================================================
--- /issm/trunk/src/m/classes/frictionhydro.py	(revision 28012)
+++ /issm/trunk/src/m/classes/frictionhydro.py	(revision 28013)
@@ -81,3 +81,3 @@
         elif self.coupling > 4:
             raise ValueError('md.friction.coupling larger than 4, not supported yet')
-    #}}}
+    # }}}
Index: /issm/trunk/src/m/classes/frictionjosh.py
===================================================================
--- /issm/trunk/src/m/classes/frictionjosh.py	(revision 28012)
+++ /issm/trunk/src/m/classes/frictionjosh.py	(revision 28013)
@@ -22,5 +22,5 @@
         self.setdefaultparameters()
         #self.requested_outputs = []
-    #}}}
+    # }}}
 
     def __repr__(self):  # {{{
@@ -33,5 +33,5 @@
         #s += '{}\n'.format(fielddisplay(self, 'requested_outputs', 'additional outputs requested'))
         return s
-    #}}}
+    # }}}
 
     def extrude(self, md):  # {{{
@@ -39,5 +39,5 @@
         self.pressure_adjusted_temperature = project3d(md, 'vector', self.pressure_adjusted_temperature, 'type', 'node', 'layer', 1)
         return self
-    #}}}
+    # }}}
 
     def setdefaultparameters(self):  # {{{
@@ -46,10 +46,10 @@
         self.effective_pressure_limit = 0
         return self
-    #}}}
+    # }}}
 
     def defaultoutputs(self, md):  # {{{
         list = []
         return list
-    #}}}
+    # }}}
 
     def checkconsistency(self, md, solution, analyses):  # {{{
Index: /issm/trunk/src/m/classes/frictionpism.py
===================================================================
--- /issm/trunk/src/m/classes/frictionpism.py	(revision 28012)
+++ /issm/trunk/src/m/classes/frictionpism.py	(revision 28013)
@@ -24,5 +24,5 @@
         self.setdefaultparameters()
         self.requested_outputs = []
-    #}}}
+    # }}}
 
     def extrude(self, md):  # {{{
Index: /issm/trunk/src/m/classes/frictionregcoulomb.m
===================================================================
--- /issm/trunk/src/m/classes/frictionregcoulomb.m	(revision 28013)
+++ /issm/trunk/src/m/classes/frictionregcoulomb.m	(revision 28013)
@@ -0,0 +1,61 @@
+%FRICTIONREGCOULOMB class definition
+%
+%   Usage:
+%      frictionregcoulomb=frictionregcoulomb();
+
+classdef frictionregcoulomb
+	properties (SetAccess=public) 
+		C  = NaN;
+		u0 = 0.;
+		m  = NaN;
+	end
+	methods
+		function self = frictionregcoulomb(varargin) % {{{
+			switch nargin
+				case 0
+					self=setdefaultparameters(self);
+				case 1
+					self=structtoobj(frictionregcoulomb(),varargin{1});
+				otherwise
+					error('constructor not supported');
+			end
+		end % }}}
+		function self = extrude(self,md) % {{{
+			self.C    = project3d(md,'vector',self.C,'type','node');
+			self.m    = project3d(md,'vector',self.m,'type','element');
+		end % }}}
+		function self = setdefaultparameters(self) % {{{
+
+			self.u0 = 1000;
+
+		end % }}}
+		function md = checkconsistency(self,md,solution,analyses) % {{{
+
+			%Early return
+			if ~ismember('StressbalanceAnalysis',analyses) & ~ismember('ThermalAnalysis',analyses), return; end
+			md = checkfield(md,'fieldname','friction.C','timeseries',1,'NaN',1,'Inf',1,'>=',0.);
+			md = checkfield(md,'fieldname','friction.u0','NaN',1,'Inf',1,'>',0.,'numel',1);
+			md = checkfield(md,'fieldname','friction.m','NaN',1,'Inf',1,'>',0.,'size',[md.mesh.numberofelements,1]);
+		end % }}}
+		function disp(self) % {{{
+			%See Joughin et al. 2019 (equivalent form by Matt Trevers, poster at AGU 2022) https://agupubs.onlinelibrary.wiley.com/doi/full/10.1029/2019GL082526
+			disp('Regularized Couloub friction law (Joughin et al., 2019) parameters:');
+			disp('   Regularized Couloub friction law reads:');
+			disp('                       C^2 |u|^(1/m)         ');
+			disp('      tau_b = -  ____________________________');
+			disp('                        (|u|/u0 + 1)^(1/m)   ');
+			disp(' ');
+			fielddisplay(self,'C','friction coefficient [SI]');
+			fielddisplay(self,'m','m exponent (set to m=3 in original paper)');
+			fielddisplay(self,'u0','velocity controling plastic limit');
+		end % }}}
+		function marshall(self,prefix,md,fid) % {{{
+			yts=md.constants.yts;
+
+			WriteData(fid,prefix,'name','md.friction.law','data',14,'format','Integer');
+			WriteData(fid,prefix,'class','friction','object',self,'fieldname','C','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'yts',md.constants.yts);
+			WriteData(fid,prefix,'class','friction','object',self,'fieldname','u0','format','Double','scale',1/yts);
+			WriteData(fid,prefix,'class','friction','object',self,'fieldname','m','format','DoubleMat','mattype',2);
+		end % }}}
+	end
+end
Index: /issm/trunk/src/m/classes/frictionregcoulomb2.m
===================================================================
--- /issm/trunk/src/m/classes/frictionregcoulomb2.m	(revision 28013)
+++ /issm/trunk/src/m/classes/frictionregcoulomb2.m	(revision 28013)
@@ -0,0 +1,66 @@
+%FRICTIONREGCOULOMB2 class definition
+%
+%   Usage:
+%      frictionregcoulomb=frictionregcoulomb();
+
+classdef frictionregcoulomb2
+	properties (SetAccess=public) 
+		C  = NaN;
+		K  = NaN;
+		m  = NaN;
+		effective_pressure_limit = 0;
+	end
+	methods
+		function self = frictionregcoulomb2(varargin) % {{{
+			switch nargin
+				case 0
+					self=setdefaultparameters(self);
+				case 1
+					self=structtoobj(frictionregcoulomb2(),varargin{1});
+				otherwise
+					error('constructor not supported');
+			end
+		end % }}}
+		function self = extrude(self,md) % {{{
+			self.C    = project3d(md,'vector',self.C,'type','node');
+			self.m    = project3d(md,'vector',self.m,'type','element');
+			self.K    = project3d(md,'vector',self.K,'type','node');
+		end % }}}
+		function self = setdefaultparameters(self) % {{{
+
+			self.effective_pressure_limit = 0;
+
+		end % }}}
+		function md = checkconsistency(self,md,solution,analyses) % {{{
+
+			%Early return
+			if ~ismember('StressbalanceAnalysis',analyses) & ~ismember('ThermalAnalysis',analyses), return; end
+			md = checkfield(md,'fieldname','friction.C','timeseries',1,'NaN',1,'Inf',1,'>=',0.);
+			md = checkfield(md,'fieldname','friction.K','NaN',1,'Inf',1,'>',0.);
+			md = checkfield(md,'fieldname','friction.m','NaN',1,'Inf',1,'>',0.,'size',[md.mesh.numberofelements,1]);
+			md = checkfield(md,'fieldname','friction.effective_pressure_limit','numel',[1],'>=',0);
+		end % }}}
+		function disp(self) % {{{
+			%See Zoet and Iverson 2020 or Choi et al., 2022 
+			disp('Regularized Couloub friction law 2  parameters:');
+			disp('   Regularized Couloub friction law reads:');
+			disp('                       C N |u|^(1/m)         ');
+			disp('      tau_b = -  ____________________________');
+			disp('                        (|u| + (K*N)^m)^(1/m)   ');
+			disp(' ');
+			fielddisplay(self,'C','friction coefficient [SI]');
+			fielddisplay(self,'m','m exponent');
+			fielddisplay(self,'K','(K*N)^m to be velocity controling plastic limit');
+			fielddisplay(self,'effective_pressure_limit','Neff do not allow to fall below a certain limit: effective_pressure_limit*rho_ice*g*thickness (default 0)');
+		end % }}}
+		function marshall(self,prefix,md,fid) % {{{
+			yts=md.constants.yts;
+
+			WriteData(fid,prefix,'name','md.friction.law','data',15,'format','Integer');
+			WriteData(fid,prefix,'class','friction','object',self,'fieldname','C','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'yts',md.constants.yts);
+			WriteData(fid,prefix,'class','friction','object',self,'fieldname','K','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'yts',md.constants.yts);
+			WriteData(fid,prefix,'class','friction','object',self,'fieldname','m','format','DoubleMat','mattype',2);
+			WriteData(fid,prefix,'object',self,'class','friction','fieldname','effective_pressure_limit','format','Double');
+		end % }}}
+	end
+end
Index: /issm/trunk/src/m/classes/frictionschoof.py
===================================================================
--- /issm/trunk/src/m/classes/frictionschoof.py	(revision 28012)
+++ /issm/trunk/src/m/classes/frictionschoof.py	(revision 28013)
@@ -29,5 +29,5 @@
         else:
             raise Exception('constructor not supported')
-    #}}}
+    # }}}
     def __repr__(self):  # {{{
         # See Brondex et al. 2019 https://www.the-cryosphere.net/13/177/2019/
@@ -45,9 +45,9 @@
         s += "{}\n".format(fielddisplay(self, 'effective_pressure_limit', 'fNeff do not allow to fall below a certain limit: effective_pressure_limit*rho_ice*g*thickness (default 0)'))
         return s
-    #}}}
+    # }}}
     def setdefaultparameters(self):  # {{{
         self.effective_pressure_limit = 0
         return self
-    #}}}
+    # }}}
     def extrude(self, md):  # {{{
         self.C = project3d(md, 'vector', self.C, 'type', 'node')
@@ -57,5 +57,5 @@
             self.effective_pressure = project3d(md, 'vector', self.effective_pressure, 'type', 'node', 'layer', 1)
         return self
-    #}}}
+    # }}}
     def checkconsistency(self, md, solution, analyses):  # {{{
         # Early return
Index: /issm/trunk/src/m/classes/frictionshakti.py
===================================================================
--- /issm/trunk/src/m/classes/frictionshakti.py	(revision 28012)
+++ /issm/trunk/src/m/classes/frictionshakti.py	(revision 28013)
@@ -23,5 +23,5 @@
         elif nargs == 1:
             self = structtoobj(self, args[0])
-    #}}}
+    # }}}
 
     def __repr__(self):  # {{{
@@ -30,14 +30,14 @@
         s += '{}\n'.format(fielddisplay(self, 'coefficient', 'friction coefficient [SI]'))
         return s
-    #}}}
+    # }}}
 
     def setdefaultparameters(self):  # {{{
         return self
-    #}}}
+    # }}}
 
     def extrude(self, md):  # {{{
         self.coefficient = project3d(md, 'vector', self.coefficient, 'type', 'node', 'layer', 1)
         return self
-    #}}}
+    # }}}
 
     def checkconsistency(self, md, solution, analyses):  # {{{
Index: /issm/trunk/src/m/classes/frictionwaterlayer.py
===================================================================
--- /issm/trunk/src/m/classes/frictionwaterlayer.py	(revision 28012)
+++ /issm/trunk/src/m/classes/frictionwaterlayer.py	(revision 28013)
@@ -27,5 +27,5 @@
         elif nargs == 1:
             self = structtoobj(self, args[0])
-    #}}}
+    # }}}
 
     def __repr__(self):  # {{{
@@ -37,9 +37,9 @@
         s = "{}\n".format(fielddisplay(self, 'water_layer', 'water thickness at the base of the ice (m)'))
         return s
-    #}}}
+    # }}}
 
     def setdefaultparameters(self):  # {{{
         return self
-    #}}}
+    # }}}
 
     def checkconsistency(self, md, solution, analyses):  #{{{
@@ -70,3 +70,3 @@
         WriteData(fid, prefix, 'class', 'friction', 'object', self, 'fieldname', 'q', 'format', 'DoubleMat', 'mattype', 2)
         WriteData(fid, prefix, 'class', 'friction', 'object', self, 'fieldname', 'water_layer', 'format', 'DoubleMat', 'mattype', 1, 'timeserieslength', md.mesh.numberofvertices + 1, 'yts', md.constants.yts)
-    #}}}
+    # }}}
Index: /issm/trunk/src/m/classes/frictionweertman.m
===================================================================
--- /issm/trunk/src/m/classes/frictionweertman.m	(revision 28012)
+++ /issm/trunk/src/m/classes/frictionweertman.m	(revision 28013)
@@ -8,4 +8,5 @@
 		C = NaN;
 		m = NaN;
+		linearize  = 0;
 	end
    methods (Static)
@@ -24,8 +25,11 @@
 		end % }}}
 		function self = extrude(self,md) % {{{
-			md.friction.C=project3d(md,'vector',md.friction.C,'type','node','layer',1);
-			md.friction.m=project3d(md,'vector',md.friction.m,'type','element');
+			disp('-------------- file: frictionweertman.m line: 27'); 
+			self.C=project3d(md,'vector',self.C,'type','node','layer',1);
+			self.m=project3d(md,'vector',self.m,'type','element');
 		end % }}}
 		function self = setdefaultparameters(self) % {{{
+
+			self.linearize = 0;
 
 		end % }}}
@@ -36,4 +40,5 @@
 			md = checkfield(md,'fieldname','friction.C','timeseries',1,'NaN',1,'Inf',1);
 			md = checkfield(md,'fieldname','friction.m','NaN',1,'Inf',1,'size',[md.mesh.numberofelements 1]);
+			md = checkfield(md,'fieldname','friction.linearize','numel',[1],'values',[0:2]);
 		end % }}}
 		function disp(self) % {{{
@@ -47,4 +52,5 @@
 			fielddisplay(self,'C','friction coefficient [SI]');
 			fielddisplay(self,'m','m exponent');
+			fielddisplay(self,'linearize','0: not linearized, 1: interpolated linearly, 2: constant per element (default is 0)');
 		end % }}}
 		function marshall(self,prefix,md,fid) % {{{
@@ -54,6 +60,5 @@
 			WriteData(fid,prefix,'class','friction','object',self,'fieldname','C','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'yts',md.constants.yts);
 			WriteData(fid,prefix,'class','friction','object',self,'fieldname','m','format','DoubleMat','mattype',2);
-			
-
+			WriteData(fid,prefix,'class','friction','object',self,'fieldname','linearize','format','Integer');
 		end % }}}
 	end
Index: /issm/trunk/src/m/classes/frictionweertman.py
===================================================================
--- /issm/trunk/src/m/classes/frictionweertman.py	(revision 28012)
+++ /issm/trunk/src/m/classes/frictionweertman.py	(revision 28013)
@@ -15,9 +15,10 @@
         self.C = float('NaN')
         self.m = float('NaN')
+        self.linearize = 0
 
     #set defaults
         self.setdefaultparameters()
 
-    #}}}
+    # }}}
 
     def __repr__(self):  # {{{
@@ -26,10 +27,12 @@
         string = "%s\n%s" % (string, fielddisplay(self, "C", "friction coefficient [SI]"))
         string = "%s\n%s" % (string, fielddisplay(self, "m", "m exponent"))
+        string = "%s\n%s" % (string, fielddisplay(self, "linearize", "0: not linearized, 1: interpolated linearly, 2: constant per element (default is 0)"))
         return string
-    #}}}
+    # }}}
 
     def setdefaultparameters(self):  # {{{
+        self.linearize = 0
         return self
-    #}}}
+    # }}}
 
     def checkconsistency(self, md, solution, analyses):  # {{{
@@ -41,4 +44,5 @@
         md = checkfield(md, 'fieldname', 'friction.C', 'timeseries', 1, 'NaN', 1, 'Inf', 1)
         md = checkfield(md, 'fieldname', 'friction.m', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofelements])
+        md = checkfield(md, 'fieldname', 'friction.linearize', 'numel', [1], 'values', [0, 1, 2])
 
         return md
@@ -49,3 +53,4 @@
         WriteData(fid, prefix, 'class', 'friction', 'object', self, 'fieldname', 'C', 'format', 'DoubleMat', 'mattype', 1)
         WriteData(fid, prefix, 'class', 'friction', 'object', self, 'fieldname', 'm', 'format', 'DoubleMat', 'mattype', 2)
+        WriteData(fid, prefix, 'class', 'friction', 'object', self, 'fieldname', 'linearize', 'format', 'Integer')
     # }}}
Index: /issm/trunk/src/m/classes/frontalforcings.m
===================================================================
--- /issm/trunk/src/m/classes/frontalforcings.m	(revision 28012)
+++ /issm/trunk/src/m/classes/frontalforcings.m	(revision 28013)
@@ -1,3 +1,3 @@
-%FRONTAL FORCINGS class definition
+%FRONTALFORCINGS class definition
 %
 %   Usage:
Index: /issm/trunk/src/m/classes/frontalforcings.py
===================================================================
--- /issm/trunk/src/m/classes/frontalforcings.py	(revision 28012)
+++ /issm/trunk/src/m/classes/frontalforcings.py	(revision 28013)
@@ -1,38 +1,50 @@
+import numpy as np
+
+from checkfield import checkfield
 from fielddisplay import fielddisplay
 from project3d import project3d
-from checkfield import checkfield
 from WriteData import WriteData
 
 
 class frontalforcings(object):
-    """
-    FRONTAL FORCINGS class definition
+    """frontalforcings class definition
 
-       Usage:
-          frontalforcings = frontalforcings()
+    Usage:
+        frontalforcings = frontalforcings()
     """
 
-    def __init__(self):  # {{{
-        self.meltingrate = float('NaN')
+    def __init__(self, *args):  # {{{
+        self.meltingrate = np.nan
+        self.ablationrate = np.nan
 
-    #set defaults
-        self.setdefaultparameters()
-    #}}}
+        nargs = len(args)
+        if nargs == 0:
+            self.setdefaultparameters()
+        elif nargs == 1:
+            # TODO: Replace the following with constructor
+            self.setdefaultparameters()
+        else:
+            raise Exception('constructor not supported')
+    # }}}
 
     def __repr__(self):  # {{{
-        string = '   Frontalforcings parameters:'
-        string = "%s\n%s" % (string, fielddisplay(self, 'meltingrate', 'melting rate at given location [m / a]'))
+        s = '   Frontalforcings parameters:'
+        s += '{}\n'.format(fielddisplay(self, 'meltingrate', 'melting rate at given location [m/a]'))
+        s += '{}\n'.format(fielddisplay(self, 'ablationrate', 'frontal ablation rate at given location [m/a], it contains both calving and melting'))
 
-        return string
-    #}}}
+        return s
+    # }}}
 
     def extrude(self, md):  # {{{
         self.meltingrate = project3d(md, 'vector', self.meltingrate, 'type', 'node')
+        self.ablationrate = project3d(md, 'vector', self.ablationrate, 'type', 'node')
         return self
-    #}}}
+    # }}}
 
     def setdefaultparameters(self):  # {{{
+        self.meltingrate = np.nan
+        self.ablationrate = np.nan
         return self
-    #}}}
+    # }}}
 
     def checkconsistency(self, md, solution, analyses):  # {{{
@@ -42,4 +54,6 @@
 
         md = checkfield(md, 'fieldname', 'frontalforcings.meltingrate', 'NaN', 1, 'Inf', 1, 'timeseries', 1, '>=', 0)
+        if not np.isnan(md.frontalforcings.ablationrate):
+            md = checkfield(md, 'fieldname', 'frontalforcings.ablationrate', 'NaN', 1, 'Inf', 1, 'timeseries', 1)
         return md
     # }}}
@@ -49,3 +63,5 @@
         WriteData(fid, prefix, 'name', 'md.frontalforcings.parameterization', 'data', 1, 'format', 'Integer')
         WriteData(fid, prefix, 'object', self, 'fieldname', 'meltingrate', 'format', 'DoubleMat', 'mattype', 1, 'timeserieslength', md.mesh.numberofvertices + 1, 'yts', yts, 'scale', 1. / yts)
+        if not np.isnan(md.frontalforcings.ablationrate):
+            WriteData(fid, prefix, 'object', self, 'fieldname', 'ablationrate', 'format', 'DoubleMat', 'mattype', 1, 'timeserieslength', md.mesh.numberofvertices + 1, 'yts', yts, 'scale', 1. / yts)
     # }}}
Index: /issm/trunk/src/m/classes/frontalforcingsrignot.py
===================================================================
--- /issm/trunk/src/m/classes/frontalforcingsrignot.py	(revision 28012)
+++ /issm/trunk/src/m/classes/frontalforcingsrignot.py	(revision 28013)
@@ -26,5 +26,5 @@
             error('constructor not supported')
 
-    #}}}
+    # }}}
 
     def __repr__(self):  # {{{
@@ -35,7 +35,7 @@
         s += '{}\n'.format(fielddisplay(self, 'thermalforcing', 'thermal forcing [∘C]'))
         return s
-    #}}}
+    # }}}
 
-    def setdefaultparameters(self): # {{{
+    def setdefaultparameters(self):  # {{{
         self.basin_id = np.nan
         self.num_basins = 0
@@ -44,9 +44,9 @@
 
         return self
-    #}}}
+    # }}}
 
     def extrude(self, md):  # {{{
         return self
-    #}}}
+    # }}}
 
     def checkconsistency(self, md, solution, analyses):  # {{{
Index: /issm/trunk/src/m/classes/frontalforcingsrignotarma.m
===================================================================
--- /issm/trunk/src/m/classes/frontalforcingsrignotarma.m	(revision 28012)
+++ /issm/trunk/src/m/classes/frontalforcingsrignotarma.m	(revision 28013)
@@ -22,4 +22,15 @@
 		basin_id                 = NaN;
 		subglacial_discharge     = NaN;
+		isdischargearma          = 0;
+		sd_ar_order              = 0;
+		sd_ma_order              = 0;
+		sd_arma_timestep         = 0;
+		sd_arlag_coefs           = NaN;
+		sd_malag_coefs           = NaN;
+		sd_monthlyfrac           = NaN;
+		sd_num_breaks            = 0;
+		sd_num_params            = 0;
+		sd_polynomialparams      = NaN;
+		sd_datebreaks            = NaN;
 	end
 	methods
@@ -66,5 +77,4 @@
          md = checkfield(md,'fieldname','frontalforcings.num_breaks','numel',1,'NaN',1,'Inf',1,'>=',0);
          md = checkfield(md,'fieldname','frontalforcings.basin_id','Inf',1,'>=',0,'<=',md.frontalforcings.num_basins,'size',[md.mesh.numberofelements 1]);
-         md = checkfield(md,'fieldname','frontalforcings.subglacial_discharge','>=',0,'NaN',1,'Inf',1,'timeseries',1);
          if(nbas>1 && nbrk>=1 && nprm>1)
             md = checkfield(md,'fieldname','frontalforcings.polynomialparams','NaN',1,'Inf',1,'size',[nbas,nbrk+1,nprm],'numel',nbas*(nbrk+1)*nprm);
@@ -127,4 +137,41 @@
 		   end
 
+			%%% Checking subglacial discharge %%%
+			md = checkfield(md,'fieldname','frontalforcings.isdischargearma','values',[0 1]);
+			if(~self.isdischargearma)
+				md = checkfield(md,'fieldname','frontalforcings.subglacial_discharge','>=',0,'NaN',1,'Inf',1,'timeseries',1);
+			else
+				sdnbrk  = md.frontalforcings.sd_num_breaks; 
+				sdnprm  = md.frontalforcings.sd_num_params;
+				md = checkfield(md,'fieldname','frontalforcings.sd_ar_order','numel',1,'NaN',1,'Inf',1,'>=',0);
+				md = checkfield(md,'fieldname','frontalforcings.sd_ma_order','numel',1,'NaN',1,'Inf',1,'>=',0);
+         	md = checkfield(md,'fieldname','frontalforcings.sd_arma_timestep','numel',1,'NaN',1,'Inf',1,'>=',max(1,md.timestepping.time_step)); %ARMA time step cannot be finer than ISSM timestep and annual timestep
+         	md = checkfield(md,'fieldname','frontalforcings.sd_arlag_coefs','NaN',1,'Inf',1,'size',[md.frontalforcings.num_basins,md.frontalforcings.sd_ar_order]);
+         	md = checkfield(md,'fieldname','frontalforcings.sd_malag_coefs','NaN',1,'Inf',1,'size',[md.frontalforcings.num_basins,md.frontalforcings.sd_ma_order]);
+         	md = checkfield(md,'fieldname','frontalforcings.sd_monthlyfrac','NaN',1,'Inf',1,'size',[md.frontalforcings.num_basins,12]);
+				if(any(abs(sum(self.sd_monthlyfrac,2)-1)>1e-3))
+					error('the 12 entries for each basin of md.frontalforcings.sd_monthlyfrac should add up to 1');
+				end
+	         md = checkfield(md,'fieldname','frontalforcings.sd_num_params','numel',1,'NaN',1,'Inf',1,'>',0);
+		      md = checkfield(md,'fieldname','frontalforcings.sd_num_breaks','numel',1,'NaN',1,'Inf',1,'>=',0);
+	         if(nbas>1 && sdnbrk>=1 && sdnprm>1)
+	            md = checkfield(md,'fieldname','frontalforcings.sd_polynomialparams','NaN',1,'Inf',1,'size',[nbas,sdnbrk+1,sdnprm],'numel',nbas*(sdnbrk+1)*sdnprm);
+	         elseif(nbas==1)
+	            md = checkfield(md,'fieldname','frontalforcings.sd_polynomialparams','NaN',1,'Inf',1,'size',[sdnprm,sdnbrk+1],'numel',nbas*(sdnbrk+1)*sdnprm);
+	         elseif(sdnbrk==0)
+	            md = checkfield(md,'fieldname','frontalforcings.sd_polynomialparams','NaN',1,'Inf',1,'size',[nbas,sdnprm],'numel',nbas*(sdnbrk+1)*sdnprm);
+	         elseif(sdnprm==1)
+	            md = checkfield(md,'fieldname','frontalforcings.sd_polynomialparams','NaN',1,'Inf',1,'size',[nbas,sdnbrk+1],'numel',nbas*(sdnbrk+1)*sdnprm);
+	         end
+	         if(sdnbrk>0)
+	            md = checkfield(md,'fieldname','frontalforcings.sd_datebreaks','NaN',1,'Inf',1,'size',[nbas,sdnbrk]);
+	         elseif(numel(md.frontalforcings.sd_datebreaks)==0 || all(isnan(md.frontalforcings.sd_datebreaks)))
+	            ;
+	         else
+	            error('md.frontalforcings.sd_num_breaks is 0 but md.frontalforcings.sd_datebreaks is not empty');
+	         end
+
+  			end
+
       end % }}}
 		function disp(self) % {{{
@@ -132,5 +179,4 @@
 			fielddisplay(self,'num_basins','number of different basins');
          fielddisplay(self,'basin_id','basin number assigned to each element [unitless]');
-         fielddisplay(self,'subglacial_discharge','sum of subglacial discharge for each basin [m/d]');
 			fielddisplay(self,'num_breaks','number of different breakpoints in the piecewise-polynomial (separating num_breaks+1 periods)');
 			fielddisplay(self,'num_params','number of different parameters in the piecewise-polynomial (1:intercept only, 2:with linear trend, 3:with quadratic trend, etc.)');
@@ -147,5 +193,18 @@
          fielddisplay(self,'monthlyvals_numbreaks','number of breakpoints in the piecewise-linear functions of monthly values');
          fielddisplay(self,'monthlyvals_datebreaks','dates at which the breakpoints in the piecewise-linear monthly values occur (1 row per basin)');
-		end % }}}
+         fielddisplay(self,'isdischargearma','whether an ARMA model is also used for the subglacial discharge (if 0: subglacial_discharge is used, if 1: sd_ parameters are used)');
+         fielddisplay(self,'subglacial_discharge','sum of subglacial discharge for each basin [m^3/d]');
+			disp(sprintf('%51s  if isdischargearma is 1, sd_variables are used (sd arma model variable: sum of subglacial discharge for each basin [m^3/d])',' '));
+         fielddisplay(self,'sd_ar_order','order of the subglacial discharge autoregressive model [unitless]');
+         fielddisplay(self,'sd_ma_order','order of the subglacial discharge moving-average model [unitless]');
+         fielddisplay(self,'sd_arma_timestep','time resolution of the subglacial discharge autoregressive model [yr]');
+         fielddisplay(self,'sd_arlag_coefs','basin-specific vectors of AR lag coefficients for subglacial discharge [unitless]');
+         fielddisplay(self,'sd_malag_coefs','basin-specific vectors of MA lag coefficients for subglacial discharge [unitless]');
+         fielddisplay(self,'sd_monthlyfrac','basin-specific vectors of 12 values with fraction of the annual discharge occuring every month [unitless]');
+			fielddisplay(self,'sd_num_params','number of different parameters in the subglacial discharge piecewise-polynomial (1:intercept only, 2:with linear trend, 3:with quadratic trend, etc.)');
+			fielddisplay(self,'sd_num_breaks','number of different breakpoints in the subglacial discharge piecewise-polynomial (separating sd_num_breaks+1 periods)');
+         fielddisplay(self,'sd_datebreaks','dates at which the breakpoints in the piecewise polynomial occur (1 row per basin) [yr]');
+         fielddisplay(self,'sd_polynomialparams','coefficients for the sd_polynomial (const,trend,quadratic,etc.),dim1 for basins,dim2 for periods,dim3 for orders');
+ 		end % }}}
 		function marshall(self,prefix,md,fid) % {{{
 			yts=md.constants.yts;
@@ -228,9 +287,54 @@
 			end
 
+			%%% Deal with the subglacial discharge polynomial %%%
+			if(self.isdischargearma)
+				sdnprm  = md.frontalforcings.sd_num_params;
+				sdnper  = md.frontalforcings.sd_num_breaks+1;
+				sdpolyparamsScaled   = md.frontalforcings.sd_polynomialparams;
+	         sdpolyparams2dScaled = zeros(nbas,sdnper*sdnprm);
+	         if(sdnprm>1)
+	            % Case 3D %
+	            if(nbas>1 && sdnper>1)
+	               for(ii=[1:sdnprm])
+	                  sdpolyparamsScaled(:,:,ii) = sdpolyparamsScaled(:,:,ii)*((1/yts)^(ii-1));
+	               end
+	               % Fit in 2D array %
+	               for(ii=[1:sdnprm])
+	                  jj = 1+(ii-1)*sdnper;
+	                  sdpolyparams2dScaled(:,jj:jj+sdnper-1) = sdpolyparamsScaled(:,:,ii);
+	               end
+	            % Case 2D and higher-order params at increasing row index %
+	            elseif(nbas==1)
+	               for(ii=[1:sdnprm])
+	                  sdpolyparamsScaled(ii,:) = sdpolyparamsScaled(ii,:)*((1/yts)^(ii-1));
+	               end
+	               % Fit in row array %
+	               for(ii=[1:sdnprm])
+	                  jj = 1+(ii-1)*sdnper;
+	                  sdpolyparams2dScaled(1,jj:jj+sdnper-1) = sdpolyparamsScaled(ii,:);
+	               end
+	            % Case 2D and higher-order params at incrasing column index %
+	            elseif(sdnper==1)
+	               for(ii=[1:sdnprm])
+	                  sdpolyparamsScaled(:,ii) = sdpolyparamsScaled(:,ii)*((1/yts)^(ii-1));
+	               end
+	               % 2D array is already in correct format %
+						sdpolyparams2dScaled = sdpolyparamsScaled;
+	            end
+	         else
+					% 2D array is already in correct format and no need for scaling %
+               sdpolyparams2dScaled = sdpolyparamsScaled;
+	         end
+	         if(sdnper==1) %a single period (no break date)
+	            sd_dbreaks = zeros(nbas,1); %dummy
+	         else
+	            sd_dbreaks = md.frontalforcings.sd_datebreaks;
+	         end
+			end
+
 			WriteData(fid,prefix,'name','md.frontalforcings.parameterization','data',3,'format','Integer');
 			WriteData(fid,prefix,'object',self,'class','frontalforcings','fieldname','num_basins','format','Integer');
 			WriteData(fid,prefix,'object',self,'class','frontalforcings','fieldname','num_breaks','format','Integer');
 			WriteData(fid,prefix,'object',self,'class','frontalforcings','fieldname','num_params','format','Integer');
-			WriteData(fid,prefix,'object',self,'class','frontalforcings','fieldname','subglacial_discharge','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'yts',yts);
          WriteData(fid,prefix,'object',self,'class','frontalforcings','fieldname','ar_order','format','Integer');
          WriteData(fid,prefix,'object',self,'class','frontalforcings','fieldname','ma_order','format','Integer');
@@ -245,4 +349,19 @@
          WriteData(fid,prefix,'data',interceptsM,'name','md.frontalforcings.monthlyvals_intercepts','format','DoubleMat');
          WriteData(fid,prefix,'data',trendsM,'name','md.frontalforcings.monthlyvals_trends','format','DoubleMat','scale',1/yts);
+			WriteData(fid,prefix,'object',self,'fieldname','isdischargearma','format','Boolean');
+			if(self.isdischargearma==0)
+				WriteData(fid,prefix,'object',self,'class','frontalforcings','fieldname','subglacial_discharge','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'yts',yts);
+			else
+				WriteData(fid,prefix,'object',self,'class','frontalforcings','fieldname','sd_num_breaks','format','Integer');
+				WriteData(fid,prefix,'object',self,'class','frontalforcings','fieldname','sd_num_params','format','Integer');
+	         WriteData(fid,prefix,'object',self,'class','frontalforcings','fieldname','sd_ar_order','format','Integer');
+	         WriteData(fid,prefix,'object',self,'class','frontalforcings','fieldname','sd_ma_order','format','Integer');
+	         WriteData(fid,prefix,'object',self,'class','frontalforcings','fieldname','sd_arma_timestep','format','Double','scale',yts);
+				WriteData(fid,prefix,'data',sdpolyparams2dScaled,'name','md.frontalforcings.sd_polynomialparams','format','DoubleMat');
+	         WriteData(fid,prefix,'object',self,'class','frontalforcings','fieldname','sd_arlag_coefs','format','DoubleMat','name','md.frontalforcings.sd_arlag_coefs','yts',yts);
+	         WriteData(fid,prefix,'object',self,'class','frontalforcings','fieldname','sd_malag_coefs','format','DoubleMat','name','md.frontalforcings.sd_malag_coefs','yts',yts);
+	         WriteData(fid,prefix,'data',sd_dbreaks,'name','md.frontalforcings.sd_datebreaks','format','DoubleMat','scale',yts);
+	         WriteData(fid,prefix,'object',self,'class','frontalforcings','fieldname','sd_monthlyfrac','format','DoubleMat','name','md.frontalforcings.sd_monthlyfrac','yts',yts);
+			end
 		end % }}}
 	end
Index: /issm/trunk/src/m/classes/frontalforcingsrignotarma.py
===================================================================
--- /issm/trunk/src/m/classes/frontalforcingsrignotarma.py	(revision 28012)
+++ /issm/trunk/src/m/classes/frontalforcingsrignotarma.py	(revision 28013)
@@ -31,4 +31,15 @@
         self.basin_id = np.nan
         self.subglacial_discharge = np.nan
+        self.isdischargearma = 0
+        self.sd_ar_order = 0
+        self.sd_ma_order = 0
+        self.sd_arma_timestep = 0
+        self.sd_arlag_coefs = np.nan
+        self.sd_malag_coefs = np.nan
+        self.sd_monthlyfrac = np.nan
+        self.sd_num_breaks  = 0
+        self.sd_num_params  = 0
+        self.sd_polynomialparams = np.nan
+        self.sd_datebreaks = np.nan
 
         if len(args) == 0:
@@ -41,5 +52,4 @@
         s += '{}\n'.format(fielddisplay(self, 'num_basins', 'number of different basins [unitless]'))
         s += '{}\n'.format(fielddisplay(self, 'basin_id', 'basin number assigned to each element [unitless]'))
-        s += '{}\n'.format(fielddisplay(self, 'subglacial_discharge', 'sum of subglacial discharge for each basin [m/d]'))
         s += '{}\n'.format(fielddisplay(self, 'num_breaks', 'number of different breakpoints in the piecewise-polynomial (separating num_breaks+1 periods)'))
         s += '{}\n'.format(fielddisplay(self, 'num_params', 'number of different parameters in the piecewise-polynomial (1:intercept only, 2:with linear trend, 3:with quadratic trend, etc.)'))
@@ -51,6 +61,18 @@
         s += '{}\n'.format(fielddisplay(self, 'arlag_coefs', 'basin-specific vectors of AR lag coefficients [unitless]'))
         s += '{}\n'.format(fielddisplay(self, 'malag_coefs', 'basin-specific vectors of MA lag coefficients [unitless]'))
+        s += '{}\n'.format(fielddisplay(self, 'isdischargearma','whether an ARMA model is also used for the subglacial discharge (if 0: subglacial_discharge is used, if 1: sd_ parameters are used)'))
+        s += '{}\n'.format(fielddisplay(self, 'subglacial_discharge', 'sum of subglacial discharge for each basin [m/d]'))
+        s += '{}\n'.format(fielddisplay(self, 'sd_ar_order','order of the subglacial discharge autoregressive model [unitless]'))
+        s += '{}\n'.format(fielddisplay(self, 'sd_ma_order','order of the subglacial discharge moving-average model [unitless]'))
+        s += '{}\n'.format(fielddisplay(self, 'sd_arma_timestep','time resolution of the subglacial discharge autoregressive model [yr]'))
+        s += '{}\n'.format(fielddisplay(self, 'sd_arlag_coefs','basin-specific vectors of AR lag coefficients for subglacial discharge [unitless]'))
+        s += '{}\n'.format(fielddisplay(self, 'sd_malag_coefs','basin-specific vectors of MA lag coefficients for subglacial discharge [unitless]'))
+        s += '{}\n'.format(fielddisplay(self, 'sd_monthlyfrac','basin-specific vectors of 12 values with fraction of the annual discharge occuring every month [unitless]'))
+        s += '{}\n'.format(fielddisplay(self, 'sd_num_params','number of different parameters in the subglacial discharge piecewise-polynomial (1:intercept only, 2:with linear trend, 3:with quadratic trend, etc.)'))
+        s += '{}\n'.format(fielddisplay(self, 'sd_num_breaks','number of different breakpoints in the subglacial discharge piecewise-polynomial (separating sd_num_breaks+1 periods)'))
+        s += '{}\n'.format(fielddisplay(self, 'sd_datebreaks','dates at which the breakpoints in the piecewise polynomial occur (1 row per basin) [yr]'))
+        s += '{}\n'.format(fielddisplay(self, 'sd_polynomialparams','coefficients for the sd_polynomial (const,trend,quadratic,etc.),dim1 for basins,dim2 for periods,dim3 for orders'))
         return s
-    #}}}
+    # }}}
 
     def setdefaultparameters(self):  # {{{
@@ -61,5 +83,5 @@
         self.ma_order = 0.0  # Moving-average model of order 0
         return self
-    #}}}
+    # }}}
 
     def checkconsistency(self, md, solution, analyses):  # {{{
@@ -130,4 +152,38 @@
             raise RuntimeError('md.frontalforcings.monthlyvals_numbreaks is 0 but md.frontalforcings.monthlyvals_datebreaks is not empty')
 
+        ### Chacking subglacial discharge ###
+        md = checkfield(md, 'fieldname', 'frontalforcings.isdischargearma', 'values', [0, 1])
+        if(self.isdischargearma==0):
+            md = checkfield(md,'fieldname','frontalforcings.subglacial_discharge','>=',0,'NaN',1,'Inf',1,'timeseries',1)
+        else:
+            sdnbrk  = md.frontalforcings.sd_num_breaks
+            sdnprm  = md.frontalforcings.sd_num_params
+            md = checkfield(md,'fieldname','frontalforcings.sd_ar_order','numel',1,'NaN',1,'Inf',1,'>=',0)
+            md = checkfield(md,'fieldname','frontalforcings.sd_ma_order','numel',1,'NaN',1,'Inf',1,'>=',0)
+            md = checkfield(md,'fieldname','frontalforcings.sd_arma_timestep','numel',1,'NaN',1,'Inf',1,'>=',max(1,md.timestepping.time_step)) #ARMA time step cannot be finer than ISSM timestep and annual timestep
+            md = checkfield(md,'fieldname','frontalforcings.sd_arlag_coefs','NaN',1,'Inf',1,'size',[md.frontalforcings.num_basins,md.frontalforcings.sd_ar_order])
+            md = checkfield(md,'fieldname','frontalforcings.sd_malag_coefs','NaN',1,'Inf',1,'size',[md.frontalforcings.num_basins,md.frontalforcings.sd_ma_order])
+            md = checkfield(md,'fieldname','frontalforcings.sd_monthlyfrac','NaN',1,'Inf',1,'size',[md.frontalforcings.num_basins,12])
+            if(np.any(abs(np.sum(self.sd_monthlyfrac,axis=1)-1)>1e-3)):
+                raise RuntimeError('the 12 entries for each basin of md.frontalforcings.sd_monthlyfrac should add up to 1')
+            md = checkfield(md,'fieldname','frontalforcings.sd_num_params','numel',1,'NaN',1,'Inf',1,'>',0)
+            md = checkfield(md,'fieldname','frontalforcings.sd_num_breaks','numel',1,'NaN',1,'Inf',1,'>=',0)
+            if len(np.shape(self.sd_polynomialparams)) == 1:
+                self.sd_polynomialparams = np.array([[self.sd_polynomialparams]])
+            if(nbas>1 and sdnbrk>=1 and sdnprm>1):
+                md = checkfield(md,'fieldname','frontalforcings.sd_polynomialparams','NaN',1,'Inf',1,'size',[nbas,sdnbrk+1,sdnprm],'numel',nbas*(sdnbrk+1)*sdnprm)
+            elif(nbas==1):
+                md = checkfield(md,'fieldname','frontalforcings.sd_polynomialparams','NaN',1,'Inf',1,'size',[nprm,nbrk+1],'numel',nbas*(sdnbrk+1)*sdnprm)
+            elif(sdnbrk==0):
+                md = checkfield(md,'fieldname','frontalforcings.sd_polynomialparams','NaN',1,'Inf',1,'size',[nbas,sdnprm],'numel',nbas*(sdnbrk+1)*sdnprm)
+            elif(sdnprm==1):
+                md = checkfield(md,'fieldname','frontalforcings.sd_polynomialparams','NaN',1,'Inf',1,'size',[nbas,sdnbrk],'numel',nbas*(sdnbrk+1)*sdnprm)
+            if(sdnbrk>0):
+                md = checkfield(md, 'fieldname', 'frontalforcings.sd_datebreaks', 'NaN', 1, 'Inf', 1, 'size', [nbas,sdnbrk])
+            elif(np.size(md.frontalforcings.sd_datebreaks)==0 or np.all(np.isnan(md.frontalforcings.sd_datebreaks))):
+                pass
+            else:
+                raise RuntimeError('md.frontalforcings.sd_num_breaks is 0 but md.frontalforcings.sd_datebreaks is not empty')
+
         return md
     # }}}
@@ -201,9 +257,46 @@
             dMbreaks = np.copy(md.frontalforcings.monthlyvals_datebreaks)
 
+        ### Deal with the subglacial discharge polynomial ###
+        if(self.isdischargearma):
+            sdnprm  = md.frontalforcings.sd_num_params
+            sdnper  = md.frontalforcings.sd_num_breaks+1
+            sdpolyparamsScaled   = np.copy(md.frontalforcings.sd_polynomialparams)
+            sdpolyparams2dScaled = np.zeros((nbas,sdnper*sdnprm))
+            if(sdnprm>1):
+                # Case 3D #
+                if(nbas>1 and sdnper>1):
+                    for ii in range(sdnprm):
+                        sdpolyparamsScaled[:,:,ii] = sdpolyparamsScaled[:,:,ii]*(1/yts)**ii
+                    # Fit in 2D array #
+                    for ii in range(sdnprm):
+                        sdpolyparams2dScaled[:,ii*sdnper:(ii+1)*sdnper] = 1*sdpolyparamsScaled[:,:,ii]
+                # Case 2D and higher-order params at increasing row index #
+                elif(nbas==1):
+                    for ii in range(sdnprm):
+                        sdpolyparamsScaled[ii,:] = sdpolyparamsScaled[ii,:]*(1/yts)**ii
+                    # Fit in row array #
+                    for ii in range(nprm):
+                        sdpolyparams2dScaled[0,ii*sdnper:(ii+1)*sdnper] = 1*sdpolyparamsScaled[ii,:]
+                # Case 2D and higher-order params at incrasing column index #
+                elif(sdnper==1):
+                    for ii in range(sdnprm):
+                        sdpolyparamsScaled[:,ii] = sdpolyparamsScaled[:,ii]*(1/yts)**ii
+                    # 2D array is already in correct format #
+                    sdpolyparams2dScaled = np.copy(sdpolyparamsScaled)
+            else:
+                # 2D array is already in correct format and no need for scaling #
+                sdpolyparams2dScaled = np.copy(sdpolyparamsScaled)
+            if(sdnper==1):
+                sd_dbreaks = np.zeros((nbas,1))
+            else:
+                sd_dbreaks = np.copy(md.frontalforcings.sd_datebreaks)
+
+
+
+
         WriteData(fid, prefix, 'name', 'md.frontalforcings.parameterization', 'data', 3, 'format', 'Integer')
         WriteData(fid, prefix, 'object', self, 'class', 'frontalforcings', 'fieldname', 'num_basins', 'format', 'Integer')
         WriteData(fid, prefix, 'object', self, 'class', 'frontalforcings', 'fieldname', 'num_breaks', 'format', 'Integer')
         WriteData(fid, prefix, 'object', self, 'class', 'frontalforcings', 'fieldname', 'num_params', 'format', 'Integer')
-        WriteData(fid, prefix, 'object', self, 'class', 'frontalforcings', 'fieldname', 'subglacial_discharge', 'format', 'DoubleMat', 'mattype', 1, 'timeserieslength', md.mesh.numberofvertices + 1, 'yts', yts)
         WriteData(fid, prefix, 'object', self, 'class', 'frontalforcings', 'fieldname', 'ar_order', 'format', 'Integer')
         WriteData(fid, prefix, 'object', self, 'class', 'frontalforcings', 'fieldname', 'ma_order', 'format', 'Integer')
@@ -218,5 +311,19 @@
         WriteData(fid,prefix,'data',interceptsM,'name','md.frontalforcings.monthlyvals_intercepts','format','DoubleMat')
         WriteData(fid,prefix,'data',trendsM,'name','md.frontalforcings.monthlyvals_trends','format','DoubleMat','scale',1/yts)
-    # }}}
-
-
+        WriteData(fid,prefix,'object',self,'fieldname','isdischargearma','format','Boolean')
+        if(self.isdischargearma==0):
+            WriteData(fid, prefix, 'object', self, 'class', 'frontalforcings', 'fieldname', 'subglacial_discharge', 'format', 'DoubleMat', 'mattype', 1, 'timeserieslength', md.mesh.numberofvertices + 1, 'yts', yts)
+        else:
+            WriteData(fid,prefix,'object',self,'class','frontalforcings','fieldname','sd_num_breaks','format','Integer')
+            WriteData(fid,prefix,'object',self,'class','frontalforcings','fieldname','sd_num_params','format','Integer')
+            WriteData(fid,prefix,'object',self,'class','frontalforcings','fieldname','sd_ar_order','format','Integer')
+            WriteData(fid,prefix,'object',self,'class','frontalforcings','fieldname','sd_ma_order','format','Integer')
+            WriteData(fid,prefix,'object',self,'class','frontalforcings','fieldname','sd_arma_timestep','format','Double','scale',yts)
+            WriteData(fid,prefix,'data',sdpolyparams2dScaled,'name','md.frontalforcings.sd_polynomialparams','format','DoubleMat')
+            WriteData(fid,prefix,'object',self,'class','frontalforcings','fieldname','sd_arlag_coefs','format','DoubleMat','name','md.frontalforcings.sd_arlag_coefs','yts',yts)
+            WriteData(fid,prefix,'object',self,'class','frontalforcings','fieldname','sd_malag_coefs','format','DoubleMat','name','md.frontalforcings.sd_malag_coefs','yts',yts)
+            WriteData(fid,prefix,'data',sd_dbreaks,'name','md.frontalforcings.sd_datebreaks','format','DoubleMat','scale',yts)
+            WriteData(fid,prefix,'object',self,'class','frontalforcings','fieldname','sd_monthlyfrac','format','DoubleMat','name','md.frontalforcings.sd_monthlyfrac','yts',yts)
+    # }}}
+
+
Index: /issm/trunk/src/m/classes/giacaron.py
===================================================================
--- /issm/trunk/src/m/classes/giacaron.py	(revision 28012)
+++ /issm/trunk/src/m/classes/giacaron.py	(revision 28013)
@@ -16,5 +16,5 @@
     """
 
-    def __init__(self, *args): #{{{
+    def __init__(self, *args):  # {{{
         #Physical constants
         self.gravitational_constant         = np.nan
@@ -51,3 +51,3 @@
         else:
             raise Exception('constructor not supported')
-    #}}}
+    # }}}
Index: /issm/trunk/src/m/classes/groundingline.py
===================================================================
--- /issm/trunk/src/m/classes/groundingline.py	(revision 28012)
+++ /issm/trunk/src/m/classes/groundingline.py	(revision 28013)
@@ -37,5 +37,5 @@
         return ['Surface', 'Base','MaskOceanLevelset']
 
-    #}}}
+    # }}}
 
     def setdefaultparameters(self):  # {{{
Index: /issm/trunk/src/m/classes/hydrologyarmapw.m
===================================================================
--- /issm/trunk/src/m/classes/hydrologyarmapw.m	(revision 28013)
+++ /issm/trunk/src/m/classes/hydrologyarmapw.m	(revision 28013)
@@ -0,0 +1,190 @@
+%HYDROLOGYARMAPW class definition
+%
+%   Usage:
+%      hydrologyarmapw=hydrologyarmapw();
+
+classdef hydrologyarmapw
+	properties (SetAccess=public) 
+		num_basins               = 0;
+      num_params               = 0;
+      num_breaks               = 0;
+		basin_id                 = NaN;
+      monthlyfactors           = NaN;
+		polynomialparams         = NaN;
+		ar_order                 = 0;
+      ma_order                 = 0;
+      arma_timestep            = 0;
+		arlag_coefs              = NaN;
+      malag_coefs              = NaN;
+		datebreaks               = NaN;
+	end
+	methods
+		function self = extrude(self,md) % {{{
+			self.basin_id    = project3d(md,'vector',self.basin_id,'type','element');
+		end % }}}
+		function self = hydrologyarmapw(varargin) % {{{
+			switch nargin
+				case 0
+					self=setdefaultparameters(self);
+				case 1
+					self=structtoobj(self,varargin{1});
+				otherwise
+					error('constructor not supported');
+			end
+		end % }}}
+		function list = defaultoutputs(self,md) % {{{
+			list = {'FrictionWaterPressure'};
+		end % }}}    
+		function self = setdefaultparameters(self) % {{{
+			%No default parameters
+		end % }}}
+		function md = checkconsistency(self,md,solution,analyses) % {{{
+
+			%Early return
+			if ~ismember('HydrologyArmapwAnalysis',analyses)
+				return;
+			end
+
+			nbas  = md.hydrology.num_basins;
+         nprm  = md.hydrology.num_params;
+         nbrk  = md.hydrology.num_breaks;
+
+			md = checkfield(md,'fieldname','hydrology.num_basins','numel',1,'NaN',1,'Inf',1,'>',0);
+			md = checkfield(md,'fieldname','hydrology.num_breaks','numel',1,'NaN',1,'Inf',1,'>=',0);
+			md = checkfield(md,'fieldname','hydrology.num_params','numel',1,'NaN',1,'Inf',1,'>',0);
+			md = checkfield(md,'fieldname','hydrology.basin_id','Inf',1,'>=',0,'<=',md.hydrology.num_basins,'size',[md.mesh.numberofelements,1]);
+			
+			% Check if monthly factors are provided %
+			if(numel(md.hydrology.monthlyfactors)>1 || ~isnan(md.hydrology.monthlyfactors))
+				md = checkfield(md,'fieldname','hydrology.monthlyfactors','NaN',1,'Inf',1,'size',[md.hydrology.num_basins,12]);
+				isseasonality = false;
+				for(rr=[1:md.hydrology.num_basins])
+					for(cc=[1:12])
+						if(md.hydrology.monthlyfactors(rr,cc)~=1)
+							isseasonality = true;
+						end
+					end
+				end
+				if(isseasonality && md.timestepping.time_step>=1)
+					error('md.timestepping.time_step is too large to use hydrologyarmapw() with monthlyfactors');
+				end
+			end
+
+			if(nbas>1 && nbrk>=1 && nprm>1)
+            md = checkfield(md,'fieldname','hydrology.polynomialparams','NaN',1,'Inf',1,'size',[nbas,nbrk+1,nprm],'numel',nbas*(nbrk+1)*nprm);
+         elseif(nbas==1)
+            md = checkfield(md,'fieldname','hydrology.polynomialparams','NaN',1,'Inf',1,'size',[nprm,nbrk+1],'numel',nbas*(nbrk+1)*nprm);
+         elseif(nbrk==0)
+            md = checkfield(md,'fieldname','hydrology.polynomialparams','NaN',1,'Inf',1,'size',[nbas,nprm],'numel',nbas*(nbrk+1)*nprm);
+         elseif(nprm==1)
+            md = checkfield(md,'fieldname','hydrology.polynomialparams','NaN',1,'Inf',1,'size',[nbas,nbrk+1],'numel',nbas*(nbrk+1)*nprm);
+         end
+
+			md = checkfield(md,'fieldname','hydrology.ar_order','numel',1,'NaN',1,'Inf',1,'>=',0);
+         md = checkfield(md,'fieldname','hydrology.ma_order','numel',1,'NaN',1,'Inf',1,'>=',0);
+         md = checkfield(md,'fieldname','hydrology.arma_timestep','numel',1,'NaN',1,'Inf',1,'>=',md.timestepping.time_step); %ARMA time step cannot be finer than ISSM timestep
+         md = checkfield(md,'fieldname','hydrology.arlag_coefs','NaN',1,'Inf',1,'size',[md.hydrology.num_basins,md.hydrology.ar_order]);
+         md = checkfield(md,'fieldname','hydrology.malag_coefs','NaN',1,'Inf',1,'size',[md.hydrology.num_basins,md.hydrology.ma_order]);
+
+			if(nbrk>0)
+            md = checkfield(md,'fieldname','hydrology.datebreaks','NaN',1,'Inf',1,'size',[nbas,nbrk]);
+         elseif(numel(md.hydrology.datebreaks)==0 || all(isnan(md.hydrology.datebreaks)))
+            ;
+         else
+            error('md.hydrology.num_breaks is 0 but md.hydrology.datebreaks is not empty');
+         end
+
+		end % }}}
+		function disp(self) % {{{
+			disp(sprintf('   hydrologyarmapw'));
+			disp(sprintf('   subglacial water pressure is calculated as Pw=monthlyfactor[month]*(rho_water*g*bed+Pw_arma) where Pw_arma is the perturbation calculated as an ARMA process'));
+			disp(sprintf('   polynomialparams includes the constant, linear trend, quadratic trend, etc. of the ARMA process'));
+			disp(sprintf('   arlag_coefs and malag_coefs include the coefficients of the ARMA process'));
+			fielddisplay(self,'num_basins','number of different basins');
+			fielddisplay(self,'basin_id','basin number assigned to each element');
+			fielddisplay(self,'num_breaks','number of different breakpoints in the piecewise-polynomial (separating num_breaks+1 periods)');
+         fielddisplay(self,'num_params','number of different parameters in the piecewise-polynomial (1:intercept only, 2:with linear trend, 3:with quadratic trend, etc.)');
+			fielddisplay(self,'monthlyfactors','monthly multiplicative factor on the subglacial water pressure, specified per basin (size:[num_basins,12])');
+			fielddisplay(self,'polynomialparams','coefficients for the polynomial (const,trend,quadratic,etc.),dim1 for basins,dim2 for periods,dim3 for orders');
+         disp(sprintf('%51s  ex: polyparams=cat(3,intercepts,trendlinearcoefs,trendquadraticcoefs)',' '));
+         fielddisplay(self,'datebreaks','dates at which the breakpoints in the piecewise polynomial occur (1 row per basin) [yr]');
+         fielddisplay(self,'ar_order','order of the autoregressive model [unitless]');
+         fielddisplay(self,'ma_order','order of the moving-average model [unitless]');
+         fielddisplay(self,'arma_timestep','time resolution of the autoregressive model [yr]');
+         fielddisplay(self,'arlag_coefs','basin-specific vectors of AR lag coefficients [unitless]');
+         fielddisplay(self,'malag_coefs','basin-specific vectors of MA lag coefficients [unitless]');
+		end % }}}
+		function marshall(self,prefix,md,fid) % {{{
+
+			yts=md.constants.yts;
+         %%% Deal with polynomial %%%
+         nbas  = md.hydrology.num_basins;
+         nprm  = md.hydrology.num_params;
+         nper  = md.hydrology.num_breaks+1;
+         % Scale the parameters %
+         polyparamsScaled   = md.hydrology.polynomialparams;
+         polyparams2dScaled = zeros(nbas,nper*nprm);
+         if(nprm>1)
+            % Case 3D %
+            if(nbas>1 && nper>1)
+               for(ii=[1:nprm])
+                  polyparamsScaled(:,:,ii) = polyparamsScaled(:,:,ii)*((1/yts)^(ii-1));
+               end
+               % Fit in 2D array %
+               for(ii=[1:nprm])
+                  jj = 1+(ii-1)*nper;
+                  polyparams2dScaled(:,jj:jj+nper-1) = polyparamsScaled(:,:,ii);
+               end
+            % Case 2D and higher-order params at increasing row index %
+            elseif(nbas==1)
+               for(ii=[1:nprm])
+                  polyparamsScaled(ii,:) = polyparamsScaled(ii,:)*((1/yts)^(ii-1));
+               end
+               % Fit in row array %
+               for(ii=[1:nprm])
+                  jj = 1+(ii-1)*nper;
+                  polyparams2dScaled(1,jj:jj+nper-1) = polyparamsScaled(ii,:);
+               end
+            % Case 2D and higher-order params at incrasing column index %
+            elseif(nper==1)
+               for(ii=[1:nprm])
+                  polyparamsScaled(:,ii) = polyparamsScaled(:,ii)*((1/yts)^(ii-1));
+               end
+               % 2D array is already in correct format %
+               polyparams2dScaled = polyparamsScaled;
+            end
+         else
+            % 2D array is already in correct format and no need for scaling %
+            polyparams2dScaled = polyparamsScaled;
+         end
+         if(nper==1) %a single period (no break date)
+            dbreaks = zeros(nbas,1); %dummy
+         else
+            dbreaks = md.hydrology.datebreaks;
+         end
+
+			% If no monthlyfactors provided: set them all to 1 %
+			if(numel(md.hydrology.monthlyfactors)==1)
+				tempmonthlyfactors = ones(nbas,12);
+			else
+				tempmonthlyfactors = md.hydrology.monthlyfactors;
+			end
+
+			WriteData(fid,prefix,'name','md.hydrology.model','data',7,'format','Integer');
+			WriteData(fid,prefix,'object',self,'class','hydrology','fieldname','num_basins','format','Integer');
+			WriteData(fid,prefix,'object',self,'class','hydrology','fieldname','num_breaks','format','Integer');
+			WriteData(fid,prefix,'object',self,'class','hydrology','fieldname','num_params','format','Integer');
+         WriteData(fid,prefix,'object',self,'class','hydrology','fieldname','basin_id','data',self.basin_id-1,'name','md.hydrology.basin_id','format','IntMat','mattype',2); %0-indexed
+			WriteData(fid,prefix,'object',self,'class','hydrology','fieldname','ar_order','format','Integer');
+         WriteData(fid,prefix,'object',self,'class','hydrology','fieldname','ma_order','format','Integer');
+         WriteData(fid,prefix,'object',self,'class','hydrology','fieldname','arma_timestep','format','Double','scale',yts);
+         WriteData(fid,prefix,'data',polyparams2dScaled,'name','md.hydrology.polynomialparams','format','DoubleMat');
+			WriteData(fid,prefix,'object',self,'class','hydrology','fieldname','arlag_coefs','format','DoubleMat','name','md.hydrology.arlag_coefs','yts',yts);
+         WriteData(fid,prefix,'object',self,'class','hydrology','fieldname','malag_coefs','format','DoubleMat','name','md.hydrology.malag_coefs','yts',yts);
+			WriteData(fid,prefix,'data',dbreaks,'name','md.hydrology.datebreaks','format','DoubleMat','scale',yts);
+			WriteData(fid,prefix,'data',tempmonthlyfactors,'name','md.hydrology.monthlyfactors','format','DoubleMat');
+			WriteData(fid,prefix,'data',{'FrictionWaterPressure'},'name','md.hydrology.requested_outputs','format','StringArray');
+		end % }}}
+	end
+end
+
Index: /issm/trunk/src/m/classes/hydrologyarmapw.py
===================================================================
--- /issm/trunk/src/m/classes/hydrologyarmapw.py	(revision 28013)
+++ /issm/trunk/src/m/classes/hydrologyarmapw.py	(revision 28013)
@@ -0,0 +1,174 @@
+import numpy as np
+
+from checkfield import *
+from fielddisplay import fielddisplay
+from project3d import *
+from WriteData import *
+from GetAreas import *
+
+class hydrologyarmapw(object):
+    """HYDROLOGYARMAPW class definition
+
+    Usage:
+        hydrologyarmapw = hydrologyarmapw()
+    """
+
+    def __init__(self, *args):  # {{{
+        self.num_basins = 0
+        self.num_params = 0
+        self.num_breaks = 0
+        self.polynomialparams = np.nan
+        self.arma_timestep = 0
+        self.ar_order = 0
+        self.ma_order = 0
+        self.arlag_coefs = np.nan
+        self.malag_coefs = np.nan
+        self.datebreaks = np.nan
+        self.basin_id = np.nan
+        self.monthlyfactors = np.nan
+
+        if len(args) == 0:
+            self.setdefaultparameters()
+        else:
+            raise Exception('constructor not supported')
+    # }}}
+
+    def __repr__(self):  # {{{
+        s = '   hydrologyarmapw\n'
+        s += 'subglacial water pressure is calculated as Pw=monthlyfactor[month]*(rho_water*g*bed+Pw_arma) where Pw_arma is the perturbation calculated as an ARMA process\n'
+        s += 'polynomialparams includes the constant, linear trend, quadratic trend, etc. of the ARMA process\n'
+        s += 'arlag_coefs and malag_coefs include the coefficients of the ARMA process\n'
+        s += '{}\n'.format(fielddisplay(self, 'num_basins', 'number of different basins [unitless]'))
+        s += '{}\n'.format(fielddisplay(self, 'basin_id', 'basin number assigned to each element [unitless]'))
+        s += '{}\n'.format(fielddisplay(self, 'num_breaks', 'number of different breakpoints in the piecewise-polynomial (separating num_breaks+1 periods)'))
+        s += '{}\n'.format(fielddisplay(self, 'num_params', 'number of different parameters in the piecewise-polynomial (1:intercept only, 2:with linear trend, 3:with quadratic trend, etc.)'))
+        s += '{}\n'.format(fielddisplay(self, 'monthlyfactors', 'monthly multiplicative factor on the subglacial water pressure, specified per basin (size:[num_basins,12])'))
+        s += '{}\n'.format(fielddisplay(self, 'polynomialparams', 'coefficients for the polynomial (const,trend,quadratic,etc.),dim1 for basins,dim2 for periods,dim3 for orders, ex: polyparams=cat(num_params,intercepts,trendlinearcoefs,trendquadraticcoefs)'))
+        s += '{}\n'.format(fielddisplay(self, 'datebreaks', 'dates at which the breakpoints in the piecewise polynomial occur (1 row per basin) [yr]'))
+        s += '{}\n'.format(fielddisplay(self, 'ar_order', 'order of the autoregressive model [unitless]'))
+        s += '{}\n'.format(fielddisplay(self, 'ma_order', 'order of the moving-average model [unitless]'))
+        s += '{}\n'.format(fielddisplay(self, 'arma_timestep', 'time resolution of the ARMA model [yr]'))
+        s += '{}\n'.format(fielddisplay(self, 'arlag_coefs', 'basin-specific vectors of AR lag coefficients [unitless]'))
+        s += '{}\n'.format(fielddisplay(self, 'malag_coefs', 'basin-specific vectors of MA lag coefficients [unitless]'))
+        return s
+    # }}}
+
+    def setdefaultparameters(self):  # {{{
+        # No default parameters
+        return self # Nothing for now
+    # }}}
+
+    def extrude(self, md):  # {{{
+        self.basin_id = project3d(md,'vector',self.basin_id,'type','element')
+        return self # Nothing for now
+    # }}}
+
+    def defaultoutputs(self, md):  # {{{
+        return ['FrictionWaterPressure']
+    # }}}
+
+    def checkconsistency(self, md, solution, analyses):  # {{{
+        #Early return
+        if 'HydrologyArmapwAnalysis' not in analyses:
+            return md
+
+        nbas = md.hydrology.num_basins
+        nprm = md.hydrology.num_params
+        nbrk = md.hydrology.num_breaks
+        
+        md = checkfield(md, 'fieldname', 'hydrology.num_basins', 'numel', 1, 'NaN', 1, 'Inf', 1, '>', 0)
+        md = checkfield(md, 'fieldname', 'hydrology.num_params', 'numel', 1, 'NaN', 1, 'Inf', 1, '>', 0)
+        md = checkfield(md, 'fieldname', 'hydrology.num_breaks', 'numel', 1, 'NaN', 1, 'Inf', 1, '>=', 0)
+        md = checkfield(md, 'fieldname', 'hydrology.basin_id', 'Inf', 1, '>=', 0, '<=', md.hydrology.num_basins, 'size', [md.mesh.numberofelements])
+
+        # Check if monthlyfactors are provided
+        if(np.size(md.hydrology.monthlyfactors)>1 or np.all(np.isnan(md.hydrology.monthlyfactors))==False):
+            md = checkfield(md,'fieldname','hydrology.monthlyfactors','NaN',1,'Inf',1,'size',[md.hydrology.num_basins,12])
+            if(np.any(md.hydrology.monthlyfactors!=1) and md.timestepping.time_step>=1):
+                raise RuntimeError('md.timestepping.time_step is too large to use hydrologyarmapw() with monthlyfactors')
+
+        if len(np.shape(self.polynomialparams)) == 1:
+            self.polynomialparams = np.array([[self.polynomialparams]])
+        if(nbas>1 and nbrk>=1 and nprm>1):
+            md = checkfield(md,'fieldname','hydrology.polynomialparams','NaN',1,'Inf',1,'size',[nbas,nbrk+1,nprm],'numel',nbas*(nbrk+1)*nprm) 
+        elif(nbas==1):
+            md = checkfield(md,'fieldname','hydrology.polynomialparams','NaN',1,'Inf',1,'size',[nprm,nbrk+1],'numel',nbas*(nbrk+1)*nprm) 
+        elif(nbrk==0):
+            md = checkfield(md,'fieldname','hydrology.polynomialparams','NaN',1,'Inf',1,'size',[nbas,nprm],'numel',nbas*(nbrk+1)*nprm)
+        elif(nprm==1):
+            md = checkfield(md,'fieldname','hydrology.polynomialparams','NaN',1,'Inf',1,'size',[nbas,nbrk+1],'numel',nbas*(nbrk+1)*nprm)
+        md = checkfield(md, 'fieldname', 'hydrology.ar_order', 'numel', 1, 'NaN', 1, 'Inf', 1, '>=', 0)
+        md = checkfield(md, 'fieldname', 'hydrology.ma_order', 'numel', 1, 'NaN', 1, 'Inf', 1, '>=', 0)
+        md = checkfield(md, 'fieldname', 'hydrology.arma_timestep', 'numel', 1, 'NaN', 1, 'Inf', 1, '>=', md.timestepping.time_step) # Autoregression time step cannot be finer than ISSM timestep
+        md = checkfield(md, 'fieldname', 'hydrology.arlag_coefs', 'NaN', 1, 'Inf', 1, 'size', [md.hydrology.num_basins, md.hydrology.ar_order])
+        md = checkfield(md, 'fieldname', 'hydrology.malag_coefs', 'NaN', 1, 'Inf', 1, 'size', [md.hydrology.num_basins, md.hydrology.ma_order])
+        if(nbrk>0):
+            md = checkfield(md, 'fieldname', 'hydrology.datebreaks', 'NaN', 1, 'Inf', 1, 'size', [nbas,nbrk])
+        elif(np.size(md.hydrology.datebreaks)==0 or np.all(np.isnan(md.hydrology.datebreaks))):
+            pass
+        else:
+            raise RuntimeError('md.hydrology.num_breaks is 0 but md.hydrology.datebreaks is not empty')
+
+        return md
+    # }}}
+
+    def marshall(self, prefix, md, fid):  # {{{
+        yts = md.constants.yts
+        nbas = md.hydrology.num_basins;
+        nprm = md.hydrology.num_params;
+        nper = md.hydrology.num_breaks+1;
+        # Scale the parameters #
+        polyparamsScaled   = np.copy(md.hydrology.polynomialparams)
+        polyparams2dScaled = np.zeros((nbas,nper*nprm))
+        if(nprm>1):
+            # Case 3D #
+            if(nbas>1 and nper>1):
+                for ii in range(nprm):
+                    polyparamsScaled[:,:,ii] = polyparamsScaled[:,:,ii]*(1/yts)**(ii)
+                # Fit in 2D array #
+                for ii in range(nprm):
+                    polyparams2dScaled[:,ii*nper:(ii+1)*nper] = 1*polyparamsScaled[:,:,ii]
+            # Case 2D and higher-order params at increasing row index #
+            elif(nbas==1):
+                for ii in range(nprm):
+                    polyparamsScaled[ii,:] = polyparamsScaled[ii,:]*(1/yts)**(ii)
+                # Fit in row array #
+                for ii in range(nprm):
+                    polyparams2dScaled[0,ii*nper:(ii+1)*nper] = 1*polyparamsScaled[ii,:]
+            # Case 2D and higher-order params at incrasing column index #
+            elif(nper==1):
+                for ii in range(nprm):
+                    polyparamsScaled[:,ii] = polyparamsScaled[:,ii]*(1/yts)**(ii)
+                # 2D array is already in correct format #
+                polyparams2dScaled = np.copy(polyparamsScaled)
+        else:
+            # 2D array is already in correct format and no need for scaling#
+            polyparams2dScaled = np.copy(polyparamsScaled)
+        
+        if(nper==1):
+            dbreaks = np.zeros((nbas,1))
+        else:
+            dbreaks = np.copy(md.hydrology.datebreaks)
+
+        # If no monthlyfactors provided: set them all to 1 #
+        if(np.size(md.hydrology.monthlyfactors)==1):
+            tempmonthlyfactors = np.ones((nbas,12))
+        else:
+            tempmonthlyfactors = np.copy(md.hydrology.monthlyfactors)
+
+        WriteData(fid, prefix, 'name', 'md.hydrology.model', 'data', 7, 'format', 'Integer')
+        WriteData(fid, prefix, 'object', self, 'class', 'hydrology', 'fieldname', 'num_basins', 'format', 'Integer')
+        WriteData(fid, prefix, 'object', self, 'class', 'hydrology', 'fieldname', 'num_breaks', 'format', 'Integer')
+        WriteData(fid, prefix, 'object', self, 'class', 'hydrology', 'fieldname', 'num_params', 'format', 'Integer')
+        WriteData(fid, prefix, 'object', self, 'class', 'hydrology', 'fieldname', 'ar_order', 'format', 'Integer')
+        WriteData(fid, prefix, 'object', self, 'class', 'hydrology', 'fieldname', 'ma_order', 'format', 'Integer')
+        WriteData(fid, prefix, 'object', self, 'class', 'hydrology', 'fieldname', 'arma_timestep', 'format', 'Double', 'scale', yts)
+        WriteData(fid, prefix, 'object', self, 'class', 'hydrology', 'fieldname', 'basin_id', 'data', self.basin_id - 1, 'name', 'md.hydrology.basin_id', 'format', 'IntMat', 'mattype', 2)  # 0-indexed
+        WriteData(fid, prefix, 'data', polyparams2dScaled, 'name', 'md.hydrology.polynomialparams', 'format', 'DoubleMat')
+        WriteData(fid, prefix, 'object', self, 'class', 'hydrology', 'fieldname', 'arlag_coefs', 'format', 'DoubleMat', 'name', 'md.hydrology.arlag_coefs', 'yts', yts)
+        WriteData(fid, prefix, 'object', self, 'class', 'hydrology', 'fieldname', 'malag_coefs', 'format', 'DoubleMat', 'name', 'md.hydrology.malag_coefs', 'yts', yts)
+        WriteData(fid, prefix, 'data', dbreaks, 'name', 'md.hydrology.datebreaks', 'format', 'DoubleMat','scale',yts)
+        WriteData(fid,prefix,'data',tempmonthlyfactors,'name','md.hydrology.monthlyfactors','format','DoubleMat')
+        WriteData(fid,prefix,'data',{'FrictionWaterPressure'},'name','md.hydrology.requested_outputs','format','StringArray')
+
+    # }}}
Index: /issm/trunk/src/m/classes/hydrologyglads.m
===================================================================
--- /issm/trunk/src/m/classes/hydrologyglads.m	(revision 28012)
+++ /issm/trunk/src/m/classes/hydrologyglads.m	(revision 28013)
@@ -11,9 +11,14 @@
 		cavity_spacing            = 0.;
 		bump_height               = NaN;
+		omega                     = 0; 
+		sheet_alpha               = NaN; 
+		sheet_beta                = NaN; 
 
 		%Channels
 		ischannels           = 0;
-		channel_conductivity = 0.;
+		channel_conductivity = NaN;
 		channel_sheet_width  = 0.;
+		channel_alpha        = NaN; 
+		channel_beta         = NaN; 
 
 		%Other
@@ -24,4 +29,5 @@
 		requested_outputs    = {};
 		melt_flag            = 0;
+		istransition         = 0;
 	end
 	methods
@@ -45,4 +51,7 @@
 			self.pressure_melt_coefficient = 7.5e-8; %K/Pa (See table 1 in Erder et al. 2013)
 			self.cavity_spacing = 2.; %m
+			self.sheet_alpha = 5.0/4.0;
+			self.sheet_beta = 3.0/2.0;
+			self.omega = 1./2000.; 
 
 			%Channel parameters
@@ -50,9 +59,12 @@
 			self.channel_conductivity = 5.e-2; %Dow's default, Table uses 0.1
 			self.channel_sheet_width = 2.; %m
+			self.channel_alpha = 5.0/4.0;
+			self.channel_beta = 3.0/2.0;
 
-			%Other
+			%Otherself.omega = 1./2000.; 
 			self.englacial_void_ratio = 1.e-5;% Dow's default, Table from Werder et al. uses 1e-3;
 			self.requested_outputs={'default'};
 			self.melt_flag=0;
+			self.istransition = 0; %by default use GlaDS default turbulent code
 		end % }}}
 		function md = checkconsistency(self,md,solution,analyses) % {{{
@@ -64,13 +76,18 @@
 
 			%Sheet
-			md = checkfield(md,'fieldname','hydrology.pressure_melt_coefficient','numel',[1],'>=',0);	
-			md = checkfield(md,'fieldname','hydrology.sheet_conductivity','size',[md.mesh.numberofvertices 1],'>',0,'NaN',1,'Inf',1);	
-			md = checkfield(md,'fieldname','hydrology.cavity_spacing','numel',[1],'>',0);	
+			md = checkfield(md,'fieldname','hydrology.pressure_melt_coefficient','numel',[1],'>=',0);
+			md = checkfield(md,'fieldname','hydrology.sheet_conductivity','size',[md.mesh.numberofvertices 1],'>',0,'NaN',1,'Inf',1);
+			md = checkfield(md,'fieldname','hydrology.cavity_spacing','numel',[1],'>',0);
 			md = checkfield(md,'fieldname','hydrology.bump_height','size',[md.mesh.numberofvertices 1],'>=',0,'NaN',1,'Inf',1);
+			md = checkfield(md,'fieldname','hydrology.omega', 'numel', [1], '>=', 0); 
+			md = checkfield(md,'fieldname','hydrology.sheet_alpha', 'numel', [1], '>', 0); 
+			md = checkfield(md,'fieldname','hydrology.sheet_beta', 'numel', [1], '>', 0); 
 
 			%Channels
 			md = checkfield(md,'fieldname','hydrology.ischannels','numel',[1],'values',[0 1]);
-			md = checkfield(md,'fieldname','hydrology.channel_conductivity','numel',[1],'>',0);	
-			md = checkfield(md,'fieldname','hydrology.channel_sheet_width','numel',[1],'>=',0);	
+			md = checkfield(md,'fieldname','hydrology.channel_conductivity','size',[md.mesh.numberofvertices 1],'>=',0,'NaN',1,'Inf',1);
+			md = checkfield(md,'fieldname','hydrology.channel_sheet_width','numel',[1],'>=',0);
+			md = checkfield(md,'fieldname','hydrology.channel_alpha', 'numel', [1], '>', 0); 
+			md = checkfield(md,'fieldname','hydrology.channel_beta', 'numel', [1], '>', 0); 
 
 			%Other
@@ -80,6 +97,7 @@
 			md = checkfield(md,'fieldname','hydrology.neumannflux','timeseries',1,'NaN',1,'Inf',1);
 			md = checkfield(md,'fieldname','hydrology.requested_outputs','stringrow',1);
-			md = checkfield(md,'fieldname','hydrology.melt_flag','numel',[1],'values',[0 1]);
-			if self.melt_flag
+			md = checkfield(md,'fieldname','hydrology.melt_flag','numel',[1],'values',[0 1 2]);
+			md = checkfield(md,'fieldname','hydrology.istransition','numel',[1],'values',[0 1]);
+			if self.melt_flag==1 || self.melt_flag==2
 				md = checkfield(md,'fieldname','basalforcings.groundedice_melting_rate','NaN',1,'Inf',1,'timeseries',1);
 			end
@@ -90,9 +108,15 @@
 			fielddisplay(self,'pressure_melt_coefficient','Pressure melt coefficient (c_t) [K Pa^-1]');
 			fielddisplay(self,'sheet_conductivity','sheet conductivity (k) [m^(7/4) kg^(-1/2)]');
+			fielddisplay(self,'sheet_alpha','First sheet-flow exponent (alpha_s) []'); 
+			fielddisplay(self,'sheet_beta','Second sheet-flow exponent (beta_s) []'); 
 			fielddisplay(self,'cavity_spacing','cavity spacing (l_r) [m]');
 			fielddisplay(self,'bump_height','typical bump height (h_r) [m]');
+			fielddisplay(self,'omega','transition parameter (omega) []'); 
 			disp(sprintf('      CHANNELS'));
 			fielddisplay(self,'ischannels','Do we allow for channels? 1: yes, 0: no');
 			fielddisplay(self,'channel_conductivity','channel conductivity (k_c) [m^(3/2) kg^(-1/2)]');
+			fielddisplay(self,'channel_alpha','First channel-flow exponent (alpha_s) []'); 
+			fielddisplay(self,'channel_beta','Second channel-flow exponent (beta_s) []'); 
+			fielddisplay(self,'channel_sheet_width','channel sheet width [m]');
 			disp(sprintf('      OTHER'));
 			fielddisplay(self,'spcphi','Hydraulic potential Dirichlet constraints [Pa]');
@@ -102,4 +126,5 @@
 			fielddisplay(self,'requested_outputs','additional outputs requested');
 			fielddisplay(self,'melt_flag','User specified basal melt? 0: no (default), 1: use md.basalforcings.groundedice_melting_rate');
+			fielddisplay(self,'istransition','do we use standard [0, default] or transition model [1]');
 		end % }}}
 		function marshall(self,prefix,md,fid) % {{{
@@ -110,14 +135,19 @@
 			WriteData(fid,prefix,'name','md.hydrology.model','data',5,'format','Integer');
 
-			%Sheet
+						%Sheet
 			WriteData(fid,prefix,'object',self,'class','hydrology','fieldname','pressure_melt_coefficient','format','Double');
 			WriteData(fid,prefix,'object',self,'class','hydrology','fieldname','sheet_conductivity','format','DoubleMat','mattype',1);
 			WriteData(fid,prefix,'object',self,'class','hydrology','fieldname','cavity_spacing','format','Double');
 			WriteData(fid,prefix,'object',self,'class','hydrology','fieldname','bump_height','format','DoubleMat','mattype',1);
+			WriteData(fid,prefix,'object',self,'class','hydrology','fieldname','omega','format','Double'); 
+			WriteData(fid,prefix,'object',self,'class','hydrology','fieldname','sheet_alpha','format','Double'); 
+			WriteData(fid,prefix,'object',self,'class','hydrology','fieldname','sheet_beta','format','Double'); 
 
 			%Channels
 			WriteData(fid,prefix,'object',self,'class','hydrology','fieldname','ischannels','format','Boolean');
-			WriteData(fid,prefix,'object',self,'class','hydrology','fieldname','channel_conductivity','format','Double');
+			WriteData(fid,prefix,'object',self,'class','hydrology','fieldname','channel_conductivity','format','DoubleMat','mattype',1);
 			WriteData(fid,prefix,'object',self,'class','hydrology','fieldname','channel_sheet_width','format','Double');
+			WriteData(fid,prefix,'object',self,'class','hydrology','fieldname','channel_alpha','format','Double'); 
+			WriteData(fid,prefix,'object',self,'class','hydrology','fieldname','channel_beta','format','Double'); 
 
 			%Others
@@ -126,5 +156,6 @@
 			WriteData(fid,prefix,'object',self,'class','hydrology','fieldname','moulin_input','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'yts',md.constants.yts);
 			WriteData(fid,prefix,'object',self,'class','hydrology','fieldname','englacial_void_ratio','format','Double');
-			WriteData(fid,prefix,'object',self,'class','hydrology','fieldname','melt_flag','format','Boolean');
+			WriteData(fid,prefix,'object',self,'class','hydrology','fieldname','melt_flag','format','Integer');
+			WriteData(fid,prefix,'object',self,'class','hydrology','fieldname','istransition','format','Boolean');
 			outputs = self.requested_outputs;
 			pos  = find(ismember(outputs,'default'));
Index: /issm/trunk/src/m/classes/hydrologyglads.py
===================================================================
--- /issm/trunk/src/m/classes/hydrologyglads.py	(revision 28012)
+++ /issm/trunk/src/m/classes/hydrologyglads.py	(revision 28013)
@@ -1,11 +1,11 @@
 import numpy as np
-
 from checkfield import checkfield
 from fielddisplay import fielddisplay
+from project3d import project3d
 from WriteData import WriteData
 
 
 class hydrologyglads(object):
-    """HYDROLOGYGLADS class definition
+    """hydrologyglads class definition
 
     Usage:
@@ -13,17 +13,22 @@
     """
 
-    def __init__(self):  # {{{
-        #Sheet
+    def __init__(self, *args):  # {{{
+       # Sheet
         self.pressure_melt_coefficient = 0.
         self.sheet_conductivity = np.nan
         self.cavity_spacing = 0.
         self.bump_height = np.nan
+        self.omega = 0.;
+        self.sheet_alpha = np.nan; 
+        self.sheet_beta = np.nan; 
 
-        #Channels
+        # Channels
         self.ischannels = 0
-        self.channel_conductivity = 0.
+        self.channel_conductivity = np.nan
         self.channel_sheet_width = 0.
+        self.channel_alpha = np.nan; 
+        self.channel_beta = np.nan; 
 
-        #Other
+        # Other
         self.spcphi = np.nan
         self.moulin_input = np.nan
@@ -32,26 +37,40 @@
         self.requested_outputs = []
         self.melt_flag = 0
+        self.istransition = 0
 
-        # set defaults
-        self.setdefaultparameters()
+        nargs = len(args)
+        if nargs == 0:
+            self.setdefaultparameters()
+        elif nargs == 1:
+            # TODO: Replace the following with constructor
+            self.setdefaultparameters()
+        else:
+            raise Exception('constructor not supported')
         # }}}
 
     def __repr__(self):  # {{{
-        string = '   GlaDS (hydrologyglads) solution parameters:'
-        string = "%s\n\t%s" % (string, '--SHEET')
-        string = "%s\n%s" % (string, fielddisplay(self, 'pressure_melt_coefficient', 'Pressure melt coefficient (c_t) [K Pa^ - 1]'))
-        string = "%s\n%s" % (string, fielddisplay(self, 'sheet_conductivity', 'sheet conductivity (k) [m^(7 / 4) kg^(- 1 / 2)]'))
-        string = "%s\n%s" % (string, fielddisplay(self, 'cavity_spacing', 'cavity spacing (l_r) [m]'))
-        string = "%s\n%s" % (string, fielddisplay(self, 'bump_height', 'typical bump height (h_r) [m]'))
-        string = "%s\n\t%s" % (string, '--CHANNELS')
-        string = "%s\n%s" % (string, fielddisplay(self, 'ischannels', 'Do we allow for channels? 1: yes, 0: no'))
-        string = "%s\n%s" % (string, fielddisplay(self, 'channel_conductivity', 'channel conductivity (k_c) [m^(3 / 2) kg^(- 1 / 2)]'))
-        string = "%s\n\t%s" % (string, '--OTHER')
-        string = "%s\n%s" % (string, fielddisplay(self, 'spcphi', 'Hydraulic potential Dirichlet constraints [Pa]'))
-        string = "%s\n%s" % (string, fielddisplay(self, 'neumannflux', 'water flux applied along the model boundary (m^2 / s)'))
-        string = "%s\n%s" % (string, fielddisplay(self, 'moulin_input', 'moulin input (Q_s) [m^3 / s]'))
-        string = "%s\n%s" % (string, fielddisplay(self, 'englacial_void_ratio', 'englacial void ratio (e_v)'))
-        string = "%s\n%s" % (string, fielddisplay(self, 'requested_outputs', 'additional outputs requested'))
-        string = "%s\n%s" % (string, fielddisplay(self, 'melt_flag', 'User specified basal melt? 0: no (default), 1: use md.basalforcings.groundedice_melting_rate'))
+        s = '   GlaDS (hydrologyglads) solution parameters:\n'
+        s = '\t--SHEET\n'
+        s += '{}\n'.format(fielddisplay(self, 'pressure_melt_coefficient', 'Pressure melt coefficient (c_t) [K Pa^ - 1]'))
+        s += '{}\n'.format(fielddisplay(self, 'sheet_conductivity', 'sheet conductivity (k) [m^(7 / 4) kg^(- 1 / 2)]'))
+        s += '{}\n'.format(fielddisplay(self, 'sheet_alpha', 'First sheet-flow exponent (alpha_s) []')) #TH
+        s += '{}\n'.format(fielddisplay(self, 'sheet_beta', 'Second sheet-flow exponent (beta_s) []')) #TH
+        s += '{}\n'.format(fielddisplay(self, 'cavity_spacing', 'cavity spacing (l_r) [m]'))
+        s += '{}\n'.format(fielddisplay(self, 'bump_height', 'typical bump height (h_r) [m]'))
+        s += '{}\n'.format(fielddisplay(self, 'omega', 'transition parameter (omega) []')) #TH
+        s = '\t--CHANNELS\n'
+        s += '{}\n'.format(fielddisplay(self, 'ischannels', 'Do we allow for channels? 1: yes, 0: no'))
+        s += '{}\n'.format(fielddisplay(self, 'channel_conductivity', 'channel conductivity (k_c) [m^(3 / 2) kg^(- 1 / 2)]'))
+        s += '{}\n'.format(fielddisplay(self, 'channel_sheet_width', 'channel sheet width [m]'))
+        s += '{}\n'.format(fielddisplay(self, 'channel_alpha', 'First channel-flow exponent (alpha_s) []')) #TH
+        s += '{}\n'.format(fielddisplay(self, 'channel_beta', 'Second channel-flow exponent (beta_s) []')) #TH
+        s = '\t--OTHER\n'
+        s += '{}\n'.format(fielddisplay(self, 'spcphi', 'Hydraulic potential Dirichlet constraints [Pa]'))
+        s += '{}\n'.format(fielddisplay(self, 'neumannflux', 'water flux applied along the model boundary (m^2 / s)'))
+        s += '{}\n'.format(fielddisplay(self, 'moulin_input', 'moulin input (Q_s) [m^3 / s]'))
+        s += '{}\n'.format(fielddisplay(self, 'englacial_void_ratio', 'englacial void ratio (e_v)'))
+        s += '{}\n'.format(fielddisplay(self, 'requested_outputs', 'additional outputs requested'))
+        s += '{}\n'.format(fielddisplay(self, 'melt_flag', 'User specified basal melt? 0: no (default), 1: use md.basalforcings.groundedice_melting_rate'))
+        s += '{}\n'.format(fielddisplay(self, 'istransition','do we use standard [0, default] or transition model [1]'))
         return string
     # }}}
@@ -62,19 +81,35 @@
     # }}}
 
+    def extrude(self, md):  # {{{
+        self.sheet_conductivity = project3d(md, 'vector', self.sheet_conductivity, 'type', 'node', 'layer', 1)
+        self.bump_height = project3d(md, 'vector', self.bump_height, 'type', 'node', 'layer', 1)
+
+        # Other
+        self.spcphi = project3d(md, 'vector', self.spcphi, 'type', 'node', 'layer', 1)
+        self.moulin_input = project3d(md, 'vector', self.moulin_input, 'type', 'node', 'layer', 1)
+        self.neumannflux = project3d(md, 'vector', self.neumannflux, 'type', 'node', 'layer', 1)
+        return self
+    # }}}
+
     def setdefaultparameters(self):  # {{{
-
-        #Sheet parameters
+        # Sheet parameters
         self.pressure_melt_coefficient = 7.5e-8  #K / Pa (See table 1 in Erder et al. 2013)
         self.cavity_spacing = 2.  #m
+        self.sheet_alpha = 5.0/4.0;
+        self.sheet_beta = 3.0/2.0;
+        self.omega = 1./2000.; 
 
-        #Channel parameters
+        # Channel parameters
         self.ischannels = False
         self.channel_conductivity = 5.e-2  #Dow's default, Table uses 0.1
         self.channel_sheet_width = 2.  #m
+        self.channel_alpha = 5.0/4.0;
+        self.channel_beta = 3.0/2.0;
 
-        #Other
+        # Other
         self.englacial_void_ratio = 1.e-5  #Dow's default, Table from Werder et al. uses 1e-3
         self.requested_outputs = ['default']
         self.melt_flag = 0
+        self.istransition = 0  #by default use turbulent physics
 
         return self
@@ -82,21 +117,25 @@
 
     def checkconsistency(self, md, solution, analyses):  # {{{
-
-        #Early return
+        # Early return
         if 'HydrologyGladsAnalysis' not in analyses:
             return md
 
-        #Sheet
+        # Sheet
         md = checkfield(md, 'fieldname', 'hydrology.pressure_melt_coefficient', 'numel', [1], '>=', 0)
         md = checkfield(md, 'fieldname', 'hydrology.sheet_conductivity', 'size', [md.mesh.numberofvertices], '>', 0, 'np.nan', 1, 'Inf', 1)
         md = checkfield(md, 'fieldname', 'hydrology.cavity_spacing', 'numel', [1], '>', 0)
         md = checkfield(md, 'fieldname', 'hydrology.bump_height', 'size', [md.mesh.numberofvertices], '>=', 0, 'np.nan', 1, 'Inf', 1)
+        md = checkfield(md,'fieldname','hydrology.omega', 'numel', [1], '>=', 0); 
+        md = checkfield(md,'fieldname','hydrology.sheet_alpha', 'numel', [1], '>', 0); 
+        md = checkfield(md,'fieldname','hydrology.sheet_beta', 'numel', [1], '>', 0); 
 
-        #Channels
+        # Channels
         md = checkfield(md, 'fieldname', 'hydrology.ischannels', 'numel', [1], 'values', [0, 1])
-        md = checkfield(md, 'fieldname', 'hydrology.channel_conductivity', 'numel', [1], '>', 0)
+        md = checkfield(md, 'fieldname', 'hydrology.channel_conductivity', 'size', [md.mesh.numberofvertices], '>', 0)
         md = checkfield(md, 'fieldname', 'hydrology.channel_sheet_width', 'numel', [1], '>=', 0)
+        md = checkfield(md,'fieldname','hydrology.channel_alpha', 'numel', [1], '>', 0); 
+        md = checkfield(md,'fieldname','hydrology.channel_beta', 'numel', [1], '>', 0); 
 
-        #Other
+        # Other
         md = checkfield(md, 'fieldname', 'hydrology.spcphi', 'Inf', 1, 'timeseries', 1)
         md = checkfield(md, 'fieldname', 'hydrology.englacial_void_ratio', 'numel', [1], '>=', 0)
@@ -105,5 +144,6 @@
         md = checkfield(md, 'fieldname', 'hydrology.requested_outputs', 'stringrow', 1)
         md = checkfield(md, 'fieldname', 'hydrology.melt_flag', 'numel', [1], 'values', [0, 1])
-        if self.melt_flag:
+        md = checkfield(md, 'fieldname', 'hydrology.istransition', 'numel', [1], 'values', [0, 1])
+        if self.melt_flag == 1 or self.melt_flag == 2:
             md = checkfield(md, 'fieldname', 'basalforcings.groundedice_melting_rate', 'NaN', 1, 'Inf', 1, 'timeseries', 1)
     # }}}
@@ -111,24 +151,30 @@
     def marshall(self, prefix, md, fid):  # {{{
         yts = md.constants.yts
-        #Marshall model code first
+        # Marshall model code first
         WriteData(fid, prefix, 'name', 'md.hydrology.model', 'data', 5, 'format', 'Integer')
 
-        #Sheet
+       # Sheet
         WriteData(fid, prefix, 'object', self, 'class', 'hydrology', 'fieldname', 'pressure_melt_coefficient', 'format', 'Double')
         WriteData(fid, prefix, 'object', self, 'class', 'hydrology', 'fieldname', 'sheet_conductivity', 'format', 'DoubleMat', 'mattype', 1)
         WriteData(fid, prefix, 'object', self, 'class', 'hydrology', 'fieldname', 'cavity_spacing', 'format', 'Double')
         WriteData(fid, prefix, 'object', self, 'class', 'hydrology', 'fieldname', 'bump_height', 'format', 'DoubleMat', 'mattype', 1)
+        WriteData(fid,prefix,'object',self,'class','hydrology','fieldname','omega','format','Double'); 
+        WriteData(fid,prefix,'object',self,'class','hydrology','fieldname','sheet_alpha','format','Double'); 
+        WriteData(fid,prefix,'object',self,'class','hydrology','fieldname','sheet_beta','format','Double'); 
 
-        #Channels
+        # Channels
         WriteData(fid, prefix, 'object', self, 'class', 'hydrology', 'fieldname', 'ischannels', 'format', 'Boolean')
-        WriteData(fid, prefix, 'object', self, 'class', 'hydrology', 'fieldname', 'channel_conductivity', 'format', 'Double')
+        WriteData(fid, prefix, 'object', self, 'class', 'hydrology', 'fieldname', 'channel_conductivity', 'format', 'DoubleMat', 'mattype', 1)
         WriteData(fid, prefix, 'object', self, 'class', 'hydrology', 'fieldname', 'channel_sheet_width', 'format', 'Double')
+        WriteData(fid,prefix,'object',self,'class','hydrology','fieldname','channel_alpha','format','Double'); 
+        WriteData(fid,prefix,'object',self,'class','hydrology','fieldname','channel_beta','format','Double'); 
 
-        #Others
+        # Others
         WriteData(fid, prefix, 'object', self, 'class', 'hydrology', 'fieldname', 'spcphi', 'format', 'DoubleMat', 'mattype', 1, 'timeserieslength', md.mesh.numberofvertices + 1, 'yts', yts)
         WriteData(fid, prefix, 'object', self, 'class', 'hydrology', 'fieldname', 'neumannflux', 'format', 'DoubleMat', 'mattype', 2, 'timeserieslength', md.mesh.numberofelements + 1, 'yts', yts)
         WriteData(fid, prefix, 'object', self, 'class', 'hydrology', 'fieldname', 'moulin_input', 'format', 'DoubleMat', 'mattype', 1, 'timeserieslength', md.mesh.numberofvertices + 1, 'yts', md.constants.yts)
         WriteData(fid, prefix, 'object', self, 'class', 'hydrology', 'fieldname', 'englacial_void_ratio', 'format', 'Double')
-        WriteData(fid, prefix, 'object', self, 'class', 'hydrology', 'fieldname', 'melt_flag', 'format', 'Boolean')
+        WriteData(fid, prefix, 'object', self, 'class', 'hydrology', 'fieldname', 'melt_flag', 'format', 'Integer')
+        WriteData(fid, prefix, 'object', self, 'class', 'hydrology', 'fieldname', 'istransition', 'format', 'Boolean')
 
         outputs = self.requested_outputs
@@ -138,4 +184,3 @@
             outputs = outputscopy
         WriteData(fid, prefix, 'data', outputs, 'name', 'md.hydrology.requested_outputs', 'format', 'StringArray')
-
     # }}}
Index: /issm/trunk/src/m/classes/hydrologyshakti.m
===================================================================
--- /issm/trunk/src/m/classes/hydrologyshakti.m	(revision 28012)
+++ /issm/trunk/src/m/classes/hydrologyshakti.m	(revision 28013)
@@ -21,4 +21,13 @@
 	methods
 		function self = extrude(self,md) % {{{
+			self.head = project3d(md, 'vector', self.head , 'type', 'node');
+			self.gap_height = project3d(md, 'vector', self.gap_height, 'type', 'element');
+			self.bump_spacing = project3d(md, 'vector', self.bump_spacing, 'type', 'element');
+			self.bump_height = project3d(md, 'vector', self.bump_height, 'type', 'element');
+			self.englacial_input = project3d(md, 'vector', self.englacial_input, 'type', 'node');
+			self.moulin_input = project3d(md, 'vector', self.moulin_input, 'type', 'node');
+			self.reynolds = project3d(md, 'vector', self.reynolds, 'type', 'element');
+			self.neumannflux = project3d(md, 'vector', self.neumannflux, 'type', 'element');
+			self.spchead = project3d(md, 'vector', self.spchead, 'type', 'node');
 		end % }}}
 		function self = hydrologyshakti(varargin) % {{{
Index: /issm/trunk/src/m/classes/hydrologyshakti.py
===================================================================
--- /issm/trunk/src/m/classes/hydrologyshakti.py	(revision 28012)
+++ /issm/trunk/src/m/classes/hydrologyshakti.py	(revision 28013)
@@ -29,5 +29,5 @@
         self.setdefaultparameters()
 
-    #}}}
+    # }}}
     def __repr__(self):  # {{{
         string = '   hydrologyshakti solution parameters:'
@@ -45,9 +45,9 @@
         string = "%s\n%s" % (string, fielddisplay(self, 'requested_outputs', 'additional outputs requested'))
         return string
-    #}}}
+    # }}}
 
     def extrude(self, md):  # {{{
         return self
-    #}}}
+    # }}}
 
     def setdefaultparameters(self):  # {{{
@@ -57,10 +57,10 @@
         self.requested_outputs = ['default']
         return self
-    #}}}
+    # }}}
 
     def defaultoutputs(self, md):  # {{{
         list = ['HydrologyHead', 'HydrologyGapHeight', 'EffectivePressure', 'HydrologyBasalFlux', 'DegreeOfChannelization']
         return list
-    #}}}
+    # }}}
 
     def checkconsistency(self, md, solution, analyses):  # {{{
Index: /issm/trunk/src/m/classes/hydrologyshreve.py
===================================================================
--- /issm/trunk/src/m/classes/hydrologyshreve.py	(revision 28012)
+++ /issm/trunk/src/m/classes/hydrologyshreve.py	(revision 28013)
@@ -13,5 +13,5 @@
     """
 
-    def __init__(self, *args): #{{{
+    def __init__(self, *args):  # {{{
         self.spcwatercolumn = np.nan
         self.stabilization = 0
@@ -25,7 +25,7 @@
         else:
             raise RuntimeError('constructor not supported')
-    #}}}
+    # }}}
 
-    def __repr__(self): #{{{
+    def __repr__(self):  # {{{
         s = '   hydrologyshreve solution parameters:\n'
         s += '{}\n'.format(fielddisplay(self, 'spcwatercolumn', 'water thickness constraints (NaN means no constraint) [m]'))
@@ -33,13 +33,13 @@
         s += '{}\n'.format(fielddisplay(self, 'requested_outputs', 'additional outputs requested'))
         return s
-    #}}}
+    # }}}
 
-    def setdefaultparameters(self): #{{{
+    def setdefaultparameters(self):  # {{{
         # Type of stabilization to use 0:nothing 1:artificial_diffusivity
         self.stabilization = 1
         self.requested_outputs = ['default']
-    #}}}
+    # }}}
 
-    def checkconsistency(self, md, solution, analyses): #{{{
+    def checkconsistency(self, md, solution, analyses):  # {{{
         #Early return
         if 'HydrologyShreveAnalysis' not in analyses or (solution == 'TransientSolution' and not md.transient.ishydrology):
@@ -51,9 +51,9 @@
     # }}}
 
-    def defaultoutputs(self, md): #{{{
+    def defaultoutputs(self, md):  # {{{
         return ['Watercolumn', 'HydrologyWaterVx', 'HydrologyWaterVy']
-    #}}}
+    # }}}
 
-    def marshall(self, prefix, md, fid): #{{{
+    def marshall(self, prefix, md, fid):  # {{{
         WriteData(fid, prefix, 'name', 'md.hydrology.model', 'data', 2, 'format', 'Integer')
         WriteData(fid, prefix, 'object', self, 'fieldname', 'spcwatercolumn', 'format', 'DoubleMat', 'mattype', 1, 'timeserieslength', md.mesh.numberofvertices + 1, 'yts', md.constants.yts)
@@ -67,5 +67,5 @@
     # }}}
 
-    def extrude(self, md): #{{{
+    def extrude(self, md):  # {{{
         return self
-    #}}}
+    # }}}
Index: /issm/trunk/src/m/classes/hydrologytws.py
===================================================================
--- /issm/trunk/src/m/classes/hydrologytws.py	(revision 28012)
+++ /issm/trunk/src/m/classes/hydrologytws.py	(revision 28013)
@@ -21,5 +21,5 @@
         else:
             raise RuntimeError('constructor not supported')
-    #}}}
+    # }}}
 
     def __repr__(self):  # {{{
@@ -28,18 +28,18 @@
         s += '{}\n'.format(fielddisplay(self, 'requested_outputs', 'additional outputs requested'))
         return s
-    #}}}
+    # }}}
 
-    def defaultoutputs(self, md): # {{{
+    def defaultoutputs(self, md):  # {{{
         return ['']
-    #}}}
+    # }}}
 
     def setdefaultparameters(self):  # {{{
         self.requested_outputs = ['defualt']
         return self
-    #}}}
+    # }}}
 
     def extrude(self, md):  # {{{
         return self
-    #}}}
+    # }}}
 
     def checkconsistency(self, md, solution, analyses):  # {{{
@@ -48,5 +48,5 @@
             return
         md = checkfield(md, 'fieldname', 'hydrology.spcwatercolumn', 'Inf', 1, 'timeseries', 1)
-    #}}}
+    # }}}
 
     def marshall(self, prefix, md, fid):  # {{{
Index: /issm/trunk/src/m/classes/independent.m
===================================================================
--- /issm/trunk/src/m/classes/independent.m	(revision 28012)
+++ /issm/trunk/src/m/classes/independent.m	(revision 28013)
@@ -66,5 +66,5 @@
 			fielddisplay(self,'name','variable name (must match corresponding String)');
 			fielddisplay(self,'type','type of variable (''vertex'' or ''scalar'')');
-			fielddisplay(self,'nods','size of dependent variables');
+			fielddisplay(self,'nods','size of independent variables');
 			fielddisplay(self,'control_size','number of timesteps');
 			fielddisplay(self,'min_parameters','absolute minimum acceptable value of the inversed parameter on each vertex');
Index: /issm/trunk/src/m/classes/independent.py
===================================================================
--- /issm/trunk/src/m/classes/independent.py	(revision 28012)
+++ /issm/trunk/src/m/classes/independent.py	(revision 28013)
@@ -7,9 +7,8 @@
 
 class independent(object):
-    """
-    INDEPENDENT class definition
+    """independent class definition
 
-       Usage:
-          independent = independent()
+    Usage:
+        independent = independent()
     """
 
@@ -17,27 +16,39 @@
         self.name = ''
         self.type = ''
-        self.fos_forward_index = float('NaN')
+        self.fos_forward_index = np.nan
         self.fov_forward_indices = np.array([])
         self.nods = 0
+        self.min_parameters = np.nan
+        self.max_parameters = np.nan
+        self.control_scaling_factor = np.nan
+        self.control_size = 0
 
-    #set defaults
+        # Set defaults
         self.setdefaultparameters()
 
-    #use provided options to change fields
+        # Use provided options to change fields
         options = pairoptions(*args)
 
-    #OK get other fields
+        # Get other fields
         self = options.AssignObjectFields(self)
+
+        if self.control_size == 0:
+            self.control_size = 1
     # }}}
 
     def __repr__(self):  # {{{
-        s = "   independent variable:\n"
+        s = '   independent variable:\n'
 
-        s += "%s\n" % fielddisplay(self, 'name', "variable name (must match corresponding String)")
-        s += "%s\n" % fielddisplay(self, 'type', "type of variable ('vertex' or 'scalar')")
+        s += '{}\n'.format(fielddisplay(self, 'name', 'variable name (must match corresponding String)'))
+        s += '{}\n'.format(fielddisplay(self, 'type', 'type of variable (\'vertex\' or \'scalar\')'))
+        s += '{}\n'.format(fielddisplay(self, 'nods', 'size of independent variables'))
+        s += '{}\n'.format(fielddisplay(self, 'control_size', 'number of timesteps'))
+        s += '{}\n'.format(fielddisplay(self, 'min_parameters', 'absolute minimum acceptable value of the inversed parameter on each vertex'))
+        s += '{}\n'.format(fielddisplay(self, 'max_parameters', 'absolute maximum acceptable value of the inversed parameter on each vertex'))
+        s += '{}\n'.format(fielddisplay(self, 'control_scaling_factor', 'order of magnitude of each control (useful for multi-parameter optimization)'))
         if not np.isnan(self.fos_forward_index):
-            s += "%s\n" % fielddisplay(self, 'fos_forward_index', "index for fos_foward driver of ADOLC")
+            s += '{}\n'.format(fielddisplay(self, 'fos_forward_index', 'index for fos_foward driver of ADOLC'))
         if np.any(np.logical_not(np.isnan(self.fov_forward_indices))):
-            s += "%s\n" % fielddisplay(self, 'fov_forward_indices', "indices for fov_foward driver of ADOLC")
+            s += '{}\n'.format(fielddisplay(self, 'fov_forward_indices', 'indices for fov_foward driver of ADOLC'))
 
         return s
@@ -45,5 +56,5 @@
 
     def setdefaultparameters(self):  # {{{
-        #do nothing
+        # Do nothing
         return self
     # }}}
@@ -52,14 +63,14 @@
         if not np.isnan(self.fos_forward_index):
             if not strcmpi(driver, 'fos_forward'):
-                raise TypeError("cannot declare an independent with a fos_forward_index when the driver is not fos_forward!")
+                raise TypeError('cannot declare an independent with a fos_forward_index when the driver is not fos_forward!')
             if self.nods == 0:
-                raise TypeError("independent checkconsistency error: nods should be set to the size of the independent variable")
+                raise TypeError('independent checkconsistency error: nods should be set to the size of the independent variable')
 
         if len(self.fov_forward_indices) > 0:
             if not strcmpi(driver, 'fov_forward'):
-                raise TypeError("cannot declare an independent with fov_forward_indices when the driver is not fov_forward!")
+                raise TypeError('cannot declare an independent with fov_forward_indices when the driver is not fov_forward!')
             if self.nods == 0:
-                raise TypeError("independent checkconsistency error: nods should be set to the size of the independent variable")
-            md = checkfield(md, 'fieldname', "autodiff.independents[%d].fov_forward_indices" % i, '>=', 1, '<=', self.nods)
+                raise TypeError('independent checkconsistency error: nods should be set to the size of the independent variable')
+            md = checkfield(md, 'fieldname', 'autodiff.independents[%d].fov_forward_indices' % i, '>=', 1, '<=', self.nods)
 
         return md
@@ -68,8 +79,12 @@
     def typetoscalar(self):  # {{{
         if strcmpi(self.type, 'scalar'):
-            scalar = 0
+            scalartype = 0
         elif strcmpi(self.type, 'vertex'):
-            scalar = 1
+            scalartype = 1
+        elif strcmpi(self.type, 'matrix'):
+            scalartype = 1
+        else:
+            raise TypeError('{} not supported yet!'.format(self.type))
 
-        return scalar
+        return scalartype
     # }}}
Index: /issm/trunk/src/m/classes/initialization.m
===================================================================
--- /issm/trunk/src/m/classes/initialization.m	(revision 28012)
+++ /issm/trunk/src/m/classes/initialization.m	(revision 28013)
@@ -25,5 +25,6 @@
 		str                 = NaN;
 		sample              = NaN;
-                debris              = NaN;
+		debris              = NaN;
+		age                 = NaN;
 	end
 	methods
@@ -127,4 +128,9 @@
 				if ~isnan(md.initialization.debris)
 					md = checkfield(md,'fieldname','initialization.debris','NaN',1,'Inf',1,'size',[md.mesh.numberofvertices 1]);
+				end
+			end
+			if ismember('AgeAnalysis',analyses),
+				if ~isnan(md.initialization.age)
+					md = checkfield(md,'fieldname','initialization.age','NaN',1,'Inf',1,'size',[md.mesh.numberofvertices 1]);
 				end
 			end
@@ -152,4 +158,5 @@
 			fielddisplay(self,'str','Steric sea level.');
 			fielddisplay(self,'debris','Surface debris layer [m]');
+			fielddisplay(self,'age','Initial age [yr]');
 		end % }}}
 		function marshall(self,prefix,md,fid) % {{{
@@ -157,9 +164,9 @@
 			yts=md.constants.yts;
 
-			WriteData(fid,prefix,'object',self,'fieldname','vx','format','DoubleMat','mattype',1,'scale',1./yts,'timeserieslength',md.mesh.numberofvertices+1,'yts',md.constants.yts);
-			WriteData(fid,prefix,'object',self,'fieldname','vy','format','DoubleMat','mattype',1,'scale',1./yts,'timeserieslength',md.mesh.numberofvertices+1,'yts',md.constants.yts);
+			WriteData(fid,prefix,'object',self,'fieldname','vx','format','DoubleMat','mattype',1,'scale',1./yts,'timeserieslength',md.mesh.numberofvertices+1,'yts',yts);
+			WriteData(fid,prefix,'object',self,'fieldname','vy','format','DoubleMat','mattype',1,'scale',1./yts,'timeserieslength',md.mesh.numberofvertices+1,'yts',yts);
 			WriteData(fid,prefix,'object',self,'fieldname','vz','format','DoubleMat','mattype',1,'scale',1./yts);
 			WriteData(fid,prefix,'object',self,'fieldname','pressure','format','DoubleMat','mattype',1);
-			WriteData(fid,prefix,'object',self,'fieldname','sealevel','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'yts',md.constants.yts);
+			WriteData(fid,prefix,'object',self,'fieldname','sealevel','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'yts',yts);
 			WriteData(fid,prefix,'object',self,'fieldname','bottompressure','format','DoubleMat','mattype',1);
 			WriteData(fid,prefix,'object',self,'fieldname','str','format','DoubleMat','mattype',1);
@@ -175,4 +182,5 @@
 			WriteData(fid,prefix,'object',self,'fieldname','sample','format','DoubleMat','mattype',1);
 			WriteData(fid,prefix,'object',self,'fieldname','debris','format','DoubleMat','mattype',1);
+			WriteData(fid,prefix,'object',self,'fieldname','age','format','DoubleMat','mattype',1,'scale',yts);
 
 			if md.thermal.isenthalpy,
@@ -206,4 +214,5 @@
 			self.str=project3d(md,'vector',self.str,'type','node','layer',1);
 			self.str=project3d(md,'vector',self.debris,'type','node','layer',1);
+			self.str=project3d(md,'vector',self.age,'type','node','layer',1);
 
 			%Lithostatic pressure by default
@@ -228,4 +237,5 @@
 			writejs1Darray(fid,[modelname '.initialization.sample'],self.sample);
 			writejs1Darray(fid,[modelname '.initialization.debris'],self.debris);
+			writejs1Darray(fid,[modelname '.initialization.age'],self.age);
 
 		end % }}}
Index: /issm/trunk/src/m/classes/initialization.py
===================================================================
--- /issm/trunk/src/m/classes/initialization.py	(revision 28012)
+++ /issm/trunk/src/m/classes/initialization.py	(revision 28013)
@@ -8,5 +8,5 @@
 
 class initialization(object):
-    """INITIALIZATION class definition
+    """initialization class definition
 
     Usage:
@@ -15,26 +15,28 @@
 
     def __init__(self):  #{{{
-        self.vx = np.nan
-        self.vy = np.nan
-        self.vz = np.nan
-        self.vel = np.nan
-        self.pressure = np.nan
-        self.temperature = np.nan
-        self.enthalpy = np.nan
-        self.waterfraction = np.nan
-        self.sediment_head = np.nan
-        self.epl_head = np.nan
-        self.epl_thickness = np.nan
-        self.watercolumn = np.nan
+        self.vx                  = np.nan
+        self.vy                  = np.nan
+        self.vz                  = np.nan
+        self.vel                 = np.nan
+        self.pressure            = np.nan
+        self.temperature         = np.nan
+        self.enthalpy            = np.nan
+        self.waterfraction       = np.nan
+        self.sediment_head       = np.nan
+        self.epl_head            = np.nan
+        self.epl_thickness       = np.nan
+        self.watercolumn         = np.nan
         self.hydraulic_potential = np.nan
-        self.channelarea = np.nan
-        self.sealevel = np.nan
-        self.bottompressure = np.nan
-        self.dsl = np.nan
-        self.str = np.nan
-        self.sample = np.nan
+        self.channelarea         = np.nan
+        self.sealevel            = np.nan
+        self.bottompressure      = np.nan
+        self.dsl                 = np.nan
+        self.str                 = np.nan
+        self.sample              = np.nan
+        self.debris              = np.nan
+        self.age                 = np.nan
 
         self.setdefaultparameters()
-    #}}}
+    # }}}
 
     def __repr__(self):  #{{{
@@ -55,16 +57,19 @@
         s += '{}\n'.format(fielddisplay(self, 'channelarea', 'subglaciale water channel area (for GlaDS) [m2]'))
         s += '{}\n'.format(fielddisplay(self, 'sample', 'Realization of a Gaussian random field'))
+        s += '{}\n'.format(fielddisplay(self, 'debris', 'Surface debris layer [m]'))
+        s += '{}\n'.format(fielddisplay(self, 'age', 'Initial age [yr]'))
         return s
-    #}}}
+    # }}}
 
     def setdefaultparameters(self):  #{{{
         return
-    #}}}
+    # }}}
 
     def checkconsistency(self, md, solution, analyses):  #{{{
         if 'StressbalanceAnalysis' in analyses and not solution == 'TransientSolution' and not md.transient.isstressbalance:
             if not np.any(np.logical_or(np.isnan(md.initialization.vx), np.isnan(md.initialization.vy))):
-                md = checkfield(md, 'fieldname', 'initialization.vx', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofvertices])
-                md = checkfield(md, 'fieldname', 'initialization.vy', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofvertices])
+                if np.size(md.initialization.vx) > 1 or np.size(md.initialization.vy) > 1:
+                    md = checkfield(md, 'fieldname', 'initialization.vx', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofvertices])
+                    md = checkfield(md, 'fieldname', 'initialization.vy', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofvertices])
         if 'MasstransportAnalysis' in analyses and not solution == 'TransientSolution' and not md.transient.ismasstransport:
             md = checkfield(md, 'fieldname', 'initialization.vx', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofvertices])
@@ -79,7 +84,6 @@
             md = checkfield(md, 'fieldname', 'initialization.vy', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofvertices])
             # Triangle with zero velocity
-            if np.any(np.logical_and(np.sum(np.abs(md.initialization.vx[md.mesh.elements - 1]), axis=1) == 0,
-                                     np.sum(np.abs(md.initialization.vy[md.mesh.elements - 1]), axis=1) == 0)):
-                md.checkmessage("at least one triangle has all its vertices with a zero velocity")
+            if np.any(np.logical_and(np.sum(np.abs(md.initialization.vx[md.mesh.elements - 1]), axis=1).reshape(-1, 1) == 0, np.sum(np.abs(md.initialization.vy[md.mesh.elements - 1]), axis=1).reshape(-1, 1) == 0, np.min(md.mask.ice_levelset[md.mesh.elements - 1], axis=1).reshape(-1, 1) < 0)):
+                md.checkmessage('at least one triangle has all its vertices with a zero velocity')
         if 'ThermalAnalysis' in analyses and not solution == 'TransientSolution' and not md.transient.isthermal:
             md = checkfield(md, 'fieldname', 'initialization.vx', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofvertices])
@@ -99,8 +103,4 @@
                 if (solution == 'TransientSolution' and md.transient.ishydrology) or solution == 'HydrologySolution':
                     md = checkfield(md, 'fieldname', 'initialization.watercolumn', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofvertices])
-        if 'HydrologyDCAnalysis' in analyses:
-            if type(md.hydrology).__name__ == 'hydrologydc':
-                if (solution == 'TransientSolution' and md.transient.ishydrology) or solution == 'HydrologySolution':
-                    md = checkfield(md, 'fieldname', 'initialization.sediment_head', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofvertices])
         if 'HydrologyTwsAnalysis' in analyses:
             if type(md.hydrology).__name__ == 'hydrologytws':
@@ -114,15 +114,31 @@
                 md = checkfield(md, 'fieldname', 'initialization.hydraulic_potential', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofvertices])
                 md = checkfield(md, 'fieldname', 'initialization.channelarea', 'NaN', 1, 'Inf', 1, '>=', 0, 'size', [md.mesh.numberofelements])
+        if 'HydrologyDCInefficientAnalysis' in analyses:
+            if type(md.hydrology).__name__ == 'hydrologydc':
+                md = checkfield(md, 'fieldname', 'initialization.sediment_head', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofvertices])
+        if 'HydrologyDCEfficientAnalysis' in analyses:
+            if type(md.hydrology).__name__ == 'hydrologydc':
+                if md.hydrology.isefficientlayer:
+                    md = checkfield(md, 'fieldname', 'initialization.epl_head', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofvertices])
+                    md = checkfield(md, 'fieldname', 'initialization.epl_thickness', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofvertices])
         if 'SamplingAnalysis' in analyses and not solution == 'TransientSolution' and not md.transient.issampling:
             if np.any(np.isnan(md.initialization.sample)):
                 md = checkfield(md, 'fieldname', 'initialization.sample', 'NaN', 1,'Inf', 1, 'size', [md.mesh.numberofvertices])
+        if 'DebrisAnalysis' in analyses:
+            if not np.isnan(md.initialization.debris):
+                if (solution == 'TransientSolution' and md.transient.ishydrology) or solution == 'HydrologySolution':
+                    md = checkfield(md, 'fieldname', 'initialization.debris', 'NaN', 1,'Inf', 1, 'size', [md.mesh.numberofvertices])
+        if 'AgeAnalysis' in analyses:
+            if not np.isnan(md.initialization.age):
+                if (solution == 'TransientSolution' and md.transient.ishydrology) or solution == 'HydrologySolution':
+                    md = checkfield(md, 'fieldname', 'initialization.age', 'NaN', 1,'Inf', 1, 'size', [md.mesh.numberofvertices])
         return md
-    #}}}
+    # }}}
 
-    def marshall(self, prefix, md, fid): #{{{
+    def marshall(self, prefix, md, fid):  # {{{
         yts = md.constants.yts
 
-        WriteData(fid, prefix, 'object', self, 'fieldname', 'vx', 'format', 'DoubleMat', 'mattype', 1, 'scale', 1 / yts)
-        WriteData(fid, prefix, 'object', self, 'fieldname', 'vy', 'format', 'DoubleMat', 'mattype', 1, 'scale', 1 / yts)
+        WriteData(fid, prefix, 'object', self, 'fieldname', 'vx', 'format', 'DoubleMat', 'mattype', 1, 'scale', 1 / yts, 'timeserieslength', md.mesh.numberofvertices + 1, 'yts', yts)
+        WriteData(fid, prefix, 'object', self, 'fieldname', 'vy', 'format', 'DoubleMat', 'mattype', 1, 'scale', 1 / yts, 'timeserieslength', md.mesh.numberofvertices + 1, 'yts', yts)
         WriteData(fid, prefix, 'object', self, 'fieldname', 'vz', 'format', 'DoubleMat', 'mattype', 1, 'scale', 1 / yts)
         WriteData(fid, prefix, 'object', self, 'fieldname', 'pressure', 'format', 'DoubleMat', 'mattype', 1)
@@ -140,4 +156,6 @@
         WriteData(fid, prefix, 'object', self, 'fieldname', 'hydraulic_potential', 'format', 'DoubleMat', 'mattype', 1)
         WriteData(fid, prefix, 'object', self, 'fieldname', 'sample', 'format', 'DoubleMat', 'mattype', 1)
+        WriteData(fid, prefix, 'object', self, 'fieldname', 'debris', 'format', 'DoubleMat', 'mattype', 1)
+        WriteData(fid, prefix, 'object', self, 'fieldname', 'age', 'format', 'DoubleMat', 'mattype', 1, 'scale', yts)
 
         if md.thermal.isenthalpy:
@@ -150,7 +168,7 @@
 
             WriteData(fid, prefix, 'data', self.enthalpy, 'format', 'DoubleMat', 'mattype', 1, 'name', 'md.initialization.enthalpy')
-    #}}}
+    # }}}
 
-    def extrude(self, md): #{{{
+    def extrude(self, md):  # {{{
         self.vx = project3d(md, 'vector', self.vx, 'type', 'node')
         self.vy = project3d(md, 'vector', self.vy, 'type', 'node')
@@ -168,4 +186,6 @@
         self.dsl = project3d(md, 'vector', self.dsl, 'type', 'node', 'layer', 1)
         self.str = project3d(md, 'vector', self.str, 'type', 'node', 'layer', 1)
+        self.debris = project3d(md, 'vector', self.debris, 'type', 'node', 'layer', 1)
+        self.age = project3d(md, 'vector', self.age, 'type', 'node', 'layer', 1)
 
         # Lithostatic pressure by default
@@ -177,3 +197,3 @@
 
         return self
-    #}}}
+    # }}}
Index: /issm/trunk/src/m/classes/inversion.py
===================================================================
--- /issm/trunk/src/m/classes/inversion.py	(revision 28012)
+++ /issm/trunk/src/m/classes/inversion.py	(revision 28013)
@@ -38,5 +38,5 @@
 
         self.setdefaultparameters()
-    #}}}
+    # }}}
 
     def __repr__(self):  # {{{
@@ -70,5 +70,5 @@
         s += '{}\n'.format('   503: ThicknessAbsGradient')
         return s
-    #}}}
+    # }}}
 
     def setdefaultparameters(self):  # {{{
@@ -100,5 +100,5 @@
         self.cost_function_threshold = np.nan  #not activated
         return self
-    #}}}
+    # }}}
 
     def extrude(self, md):  # {{{
@@ -114,5 +114,5 @@
             self.max_parameters = project3d(md, 'vector', self.max_parameters, 'type', 'node')
         return self
-    #}}}
+    # }}}
 
     def checkconsistency(self, md, solution, analyses):  # {{{
Index: /issm/trunk/src/m/classes/issmsettings.js
===================================================================
--- /issm/trunk/src/m/classes/issmsettings.js	(revision 28012)
+++ /issm/trunk/src/m/classes/issmsettings.js	(revision 28013)
@@ -6,55 +6,55 @@
 function issmsettings (){
     //methods
-    this.setdefaultparameters = function(){// {{{
-        //are we short in memory ? (0 faster but requires more memory)
-        this.lowmem=0;
+	this.setdefaultparameters = function(){// {{{
+		//are we short in memory ? (0 faster but requires more memory)
+		this.lowmem=0;
 
-        //i/o:
-        this.io_gather=1;
+		//i/o:
+		this.io_gather=1;
 
-        //results frequency by default every step
-        this.output_frequency=1;
+		//results frequency by default every step
+		this.output_frequency=1;
 
-        //coupling frequency of the stress balance solver by default every step
-        this.sb_coupling_frequency=1;
+		//coupling frequency of the stress balance solver by default every step
+		this.sb_coupling_frequency=1;
 
-        //checkpoints frequency, by default never:
-        this.checkpoint_frequency=0;
+		//checkpoints frequency, by default never:
+		this.checkpoint_frequency=0;
 
-        //this option can be activated to load automatically the results
-        //onto the model after a parallel run by waiting for the lock file
-        //N minutes that is generated once the solution has converged
-        //0 to deactivate
-        this.waitonlock=Infinity;
+		//this option can be activated to load automatically the results
+		//onto the model after a parallel run by waiting for the lock file
+		//N minutes that is generated once the solution has converged
+		//0 to deactivate
+		this.waitonlock=Infinity;
 
-        //upload options:
-        this.upload_port         = 0;
+		//upload options:
+		this.upload_port         = 0;
 
-        //throw an error if solver residue exceeds this value
-        this.solver_residue_threshold=1e-6;
+		//throw an error if solver residue exceeds this value
+		this.solver_residue_threshold=1e-6;
 
-    }// }}}
-    this.disp= function(){// {{{
-        console.log(sprintf('   issmsettings class echo:'));
+	}// }}}
+	this.disp= function(){// {{{
+		console.log(sprintf('   issmsettings class echo:'));
 
-        fielddisplay(this,'results_on_nodes','results are output for all the nodes of each element');
-        fielddisplay(this,'io_gather','I/O gathering strategy for result outputs (default 1)');
-        fielddisplay(this,'lowmem','is the memory limited ? (0 or 1)');
-        fielddisplay(this,'output_frequency','frequency at which results are saved in all solutions with multiple time_steps');
-        fielddisplay(this,'checkpoint_frequency','frequency at which the runs are being recorded, allowing for a restart');
-        fielddisplay(this,'waitonlock','maximum number of minutes to wait for batch results (NaN to deactivate)');
-        fielddisplay(this,'upload_server','server hostname where model should be uploaded');
-        fielddisplay(this,'upload_path','path on server where model should be uploaded');
-        fielddisplay(this,'upload_login','server login');
-        fielddisplay(this,'upload_port','port login (default is 0)');
-        fielddisplay(this,'upload_filename','unique id generated when uploading the file to server');
-        fielddisplay(this,'solver_residue_threshold','throw an error if solver residue exceeds this value');
+		fielddisplay(this,'results_on_nodes','results are output for all the nodes of each element');
+		fielddisplay(this,'io_gather','I/O gathering strategy for result outputs (default 1)');
+		fielddisplay(this,'lowmem','is the memory limited ? (0 or 1)');
+		fielddisplay(this,'output_frequency','number of time steps between two saves (e.g., 5 means that results are only saved every 5 time steps)');
+		fielddisplay(this,'checkpoint_frequency','frequency at which the runs are being recorded, allowing for a restart');
+		fielddisplay(this,'waitonlock','maximum number of minutes to wait for batch results (NaN to deactivate)');
+		fielddisplay(this,'upload_server','server hostname where model should be uploaded');
+		fielddisplay(this,'upload_path','path on server where model should be uploaded');
+		fielddisplay(this,'upload_login','server login');
+		fielddisplay(this,'upload_port','port login (default is 0)');
+		fielddisplay(this,'upload_filename','unique id generated when uploading the file to server');
+		fielddisplay(this,'solver_residue_threshold','throw an error if solver residue exceeds this value');
 
 
-    }// }}}
-    this.classname= function(){// {{{
-        return "issmsettings";
+	}// }}}
+	this.classname= function(){// {{{
+		return "issmsettings";
 
-    }// }}}
+	}// }}}
 	this.checkconsistency = function(md,solution,analyses) { // {{{
 
@@ -81,19 +81,19 @@
 	}//}}}
 	//properties
-    // {{{
-    this.results_on_nodes        = '';
-    this.io_gather               = 0;
-    this.lowmem                  = 0;
-    this.output_frequency        = 0;
-    this.sb_coupling_frequency   = 0;
-    this.checkpoint_frequency    = 0;
-    this.waitonlock              = 0;
-    this.upload_server           = '';
-    this.upload_path             = '';
-    this.upload_login            = '';
-    this.upload_port             = 0;
-    this.upload_filename         = '';
-    this.solver_residue_threshold = 0;
-    this.setdefaultparameters();
-    //}}}
+	// {{{
+	this.results_on_nodes        = '';
+	this.io_gather               = 0;
+	this.lowmem                  = 0;
+	this.output_frequency        = 0;
+	this.sb_coupling_frequency   = 0;
+	this.checkpoint_frequency    = 0;
+	this.waitonlock              = 0;
+	this.upload_server           = '';
+	this.upload_path             = '';
+	this.upload_login            = '';
+	this.upload_port             = 0;
+	this.upload_filename         = '';
+	this.solver_residue_threshold = 0;
+	this.setdefaultparameters();
+	//}}}
 }
Index: /issm/trunk/src/m/classes/issmsettings.m
===================================================================
--- /issm/trunk/src/m/classes/issmsettings.m	(revision 28012)
+++ /issm/trunk/src/m/classes/issmsettings.m	(revision 28013)
@@ -92,5 +92,5 @@
 			fielddisplay(self,'io_gather','I/O gathering strategy for result outputs (default 1)');
 			fielddisplay(self,'lowmem','is the memory limited ? (0 or 1)');
-			fielddisplay(self,'output_frequency','frequency at which results are saved in all solutions with multiple time_steps');
+			fielddisplay(self,'output_frequency','number of time steps between two saves (e.g., 5 means that results are only saved every 5 time steps)');
 			fielddisplay(self,'sb_coupling_frequency','frequency at which StressBalance solver is coupled (default 1)');
 			fielddisplay(self,'checkpoint_frequency','frequency at which the runs are being recorded, allowing for a restart');
Index: /issm/trunk/src/m/classes/issmsettings.py
===================================================================
--- /issm/trunk/src/m/classes/issmsettings.py	(revision 28012)
+++ /issm/trunk/src/m/classes/issmsettings.py	(revision 28013)
@@ -23,5 +23,5 @@
         # Set defaults
         self.setdefaultparameters()
-    #}}}
+    # }}}
 
     def __repr__(self):  # {{{
@@ -30,5 +30,5 @@
         s += '{}\n'.format(fielddisplay(self, "io_gather", "I / O gathering strategy for result outputs (default 1)"))
         s += '{}\n'.format(fielddisplay(self, "lowmem", "is the memory limited ? (0 or 1)"))
-        s += '{}\n'.format(fielddisplay(self, "output_frequency", "frequency at which results are saved in all solutions with multiple time_steps"))
+        s += '{}\n'.format(fielddisplay(self, "output_frequency", "number of time steps between two saves (e.g., 5 means that results are only saved every 5 time steps)"))
         s += '{}\n'.format(fielddisplay(self, "sb_coupling_frequency", "frequency at which StressBalance solver is coupled (default 1)"))
         s += '{}\n'.format(fielddisplay(self, "checkpoint_frequency", "frequency at which the runs are being recorded, allowing for a restart"))
@@ -36,5 +36,5 @@
         s += '{}\n'.format(fielddisplay(self, "solver_residue_threshold", "throw an error if solver residue exceeds this value (NaN to deactivate)"))
         return s
-    #}}}
+    # }}}
 
     def setdefaultparameters(self):  # {{{
@@ -58,5 +58,5 @@
 
         return self
-    #}}}
+    # }}}
 
     def checkconsistency(self, md, solution, analyses):  # {{{
Index: /issm/trunk/src/m/classes/levelset.py
===================================================================
--- /issm/trunk/src/m/classes/levelset.py	(revision 28012)
+++ /issm/trunk/src/m/classes/levelset.py	(revision 28013)
@@ -24,5 +24,5 @@
         # Set defaults
         self.setdefaultparameters()
-    #}}}
+    # }}}
     def __repr__(self):  # {{{
         s = '   Level-set parameters:\n'
@@ -35,5 +35,5 @@
 
         return s
-    #}}}
+    # }}}
 
     def setdefaultparameters(self):  # {{{
@@ -48,5 +48,5 @@
 
         return self
-    #}}}
+    # }}}
 
     def checkconsistency(self, md, solution, analyses):  # {{{
@@ -56,5 +56,5 @@
 
         md = checkfield(md, 'fieldname', 'levelset.spclevelset', 'Inf', 1, 'timeseries', 1)
-        md = checkfield(md, 'fieldname', 'levelset.stabilization', 'numel', [1], 'values', [0, 1, 2, 5])
+        md = checkfield(md, 'fieldname', 'levelset.stabilization', 'numel', [1], 'values', [0, 1, 2, 5, 6])
         md = checkfield(md, 'fieldname', 'levelset.kill_icebergs', 'numel', [1], 'values', [0, 1])
         md = checkfield(md, 'fieldname', 'levelset.migration_max', 'numel', [1], 'NaN', 1, 'Inf', 1, '>', 0)
@@ -67,5 +67,5 @@
         self.spclevelset = project3d(md, 'vector', self.spclevelset, 'type', 'node')
         return self
-    #}}}
+    # }}}
 
     def marshall(self, prefix, md, fid):  # {{{
Index: /issm/trunk/src/m/classes/linearbasalforcings.py
===================================================================
--- /issm/trunk/src/m/classes/linearbasalforcings.py	(revision 28012)
+++ /issm/trunk/src/m/classes/linearbasalforcings.py	(revision 28013)
@@ -14,5 +14,5 @@
     """
 
-    def __init__(self, *args): #{{{
+    def __init__(self, *args):  # {{{
         nargs = len(args)
         if nargs == 0:
@@ -43,7 +43,7 @@
         else:
             raise Exception('constructor not supported')
-    #}}}
+    # }}}
 
-    def __repr__(self): #{{{
+    def __repr__(self):  # {{{
         s = '   linear basal forcings parameters:\n'
         s += '{}\n'.format(fielddisplay(self, "deepwater_melting_rate", "basal melting rate (positive if melting applied for floating ice whith base < deepwater_elevation) [m/yr]"))
@@ -55,19 +55,19 @@
         s += '{}\n'.format(fielddisplay(self, "geothermalflux", "geothermal heat flux [W/m^2]"))
         return s
-    #}}}
+    # }}}
 
-    def extrude(self, md): #{{{
+    def extrude(self, md):  # {{{
         self.perturbation_melting_rate = project3d(md, 'vector', self.perturbation_melting_rate, 'type', 'node', 'layer', 1)
         self.groundedice_melting_rate = project3d(md, 'vector', self.groundedice_melting_rate, 'type', 'node', 'layer', 1)
         self.geothermalflux = project3d(md, 'vector', self.geothermalflux, 'type', 'node', 'layer', 1) # Bedrock only gets geothermal flux
         return self
-    #}}}
+    # }}}
 
-    def initialize(self, md): #{{{
+    def initialize(self, md):  # {{{
         if np.all(np.isnan(self.groundedice_melting_rate)):
             self.groundedice_melting_rate = np.zeros((md.mesh.numberofvertices))
             print("      no basalforcings.groundedice_melting_rate specified: values set as zero")
         return self
-    #}}}
+    # }}}
 
     def setdefaultparameters(self):  # {{{
@@ -77,7 +77,7 @@
         self.upperwater_elevation = -400.0
         return self
-    #}}}
+    # }}}
 
-    def checkconsistency(self, md, solution, analyses): #{{{
+    def checkconsistency(self, md, solution, analyses):  # {{{
         if not np.all(np.isnan(self.perturbation_melting_rate)):
             md = checkfield(md, 'fieldname', 'basalforcings.perturbation_melting_rate', 'NaN', 1, 'Inf', 1, 'timeseries', 1)
@@ -103,7 +103,7 @@
 
         return md
-    #}}}
+    # }}}
 
-    def marshall(self, prefix, md, fid): #{{{
+    def marshall(self, prefix, md, fid):  # {{{
         yts = md.constants.yts
 
@@ -116,3 +116,3 @@
         WriteData(fid, prefix, 'object', self, 'fieldname', 'upperwater_melting_rate', 'format', 'DoubleMat', 'mattype', 3, 'timeserieslength', 2, 'name', 'md.basalforcings.upperwater_melting_rate', 'scale', 1. / yts, 'yts', yts)
         WriteData(fid, prefix, 'object', self, 'fieldname', 'upperwater_elevation', 'format', 'DoubleMat', 'mattype', 3, 'name', 'md.basalforcings.upperwater_elevation', 'yts', yts)
-    #}}}
+    # }}}
Index: /issm/trunk/src/m/classes/linearbasalforcingsarma.py
===================================================================
--- /issm/trunk/src/m/classes/linearbasalforcingsarma.py	(revision 28012)
+++ /issm/trunk/src/m/classes/linearbasalforcingsarma.py	(revision 28013)
@@ -31,6 +31,5 @@
         self.geothermalflux = np.nan
 
-        nargs = len(args)
-        if nargs == 0:
+        if len(args) == 0:
             self.setdefaultparameters()
         else:
@@ -60,5 +59,5 @@
     # }}}
 
-    def setdefaultparameters(self): #{{{
+    def setdefaultparameters(self):  # {{{
         self.ar_order = 0.0 # Autoregression model of order 0
         self.ma_order = 0.0 # Moving-average model of order 0
Index: /issm/trunk/src/m/classes/love.py
===================================================================
--- /issm/trunk/src/m/classes/love.py	(revision 28012)
+++ /issm/trunk/src/m/classes/love.py	(revision 28013)
@@ -12,5 +12,5 @@
     """
 
-    def __init__(self): #{{{
+    def __init__(self):  # {{{
         self.nfreq = 0
         self.frequencies = 0
@@ -44,7 +44,7 @@
 
         self.setdefaultparameters()
-    #}}}
-
-    def __repr__(self): #{{{
+    # }}}
+
+    def __repr__(self):  # {{{
         s = '{}\n'.format(fielddisplay(self, 'nfreq', 'number of frequencies sampled (default: 1, elastic) [Hz]'))
         s += '{}\n'.format(fielddisplay(self, 'frequencies', 'frequencies sampled (convention defaults to 0 for the elastic case) [Hz]'))
@@ -90,7 +90,7 @@
 
         return s
-    #}}}
-
-    def setdefaultparameters(self): #{{{
+    # }}}
+
+    def setdefaultparameters(self):  # {{{
         # We setup an elastic love number computation by default
         self.nfreq = 1
@@ -125,7 +125,7 @@
         self.hypergeom_nz = 1
         self.hypergeom_z = 0
-    #}}}
-
-    def checkconsistency(self, md, solution, analyses): #{{{
+    # }}}
+
+    def checkconsistency(self, md, solution, analyses):  # {{{
         if 'LoveAnalysis' not in analyses:
             return md
@@ -178,7 +178,7 @@
 
         return md
-    #}}}
-
-    def marshall(self, prefix, md, fid): #{{{
+    # }}}
+
+    def marshall(self, prefix, md, fid):  # {{{
         WriteData(fid, prefix, 'object', self, 'fieldname', 'nfreq', 'format', 'Integer')
         WriteData(fid, prefix, 'object', self, 'fieldname', 'frequencies', 'format', 'DoubleMat', 'mattype',3)
@@ -211,11 +211,11 @@
         WriteData(fid, prefix, 'object', self, 'fieldname', 'hypergeom_nz', 'format', 'Integer')
         WriteData(fid, prefix, 'object', self, 'fieldname', 'hypergeom_z', 'format', 'DoubleMat', 'mattype', 3)
-    #}}}
-
-    def extrude(self, md): #{{{
+    # }}}
+
+    def extrude(self, md):  # {{{
         return self
-    #}}}
-
-    def build_frequencies_from_time(self): #{{{
+    # }}}
+
+    def build_frequencies_from_time(self):  # {{{
         if not self.istemporal:
             raise RuntimeError('cannot build frequencies for temporal love numbers if love.istemporal==0')
@@ -230,3 +230,3 @@
                     self.frequencies[(i - 1) * 2 * self.n_temporal_iterations + j] = j * np.log(2) / self.time[i] / 2 / np.pi
         return self
-    #}}}
+    # }}}
Index: /issm/trunk/src/m/classes/lovenumbers.m
===================================================================
--- /issm/trunk/src/m/classes/lovenumbers.m	(revision 28012)
+++ /issm/trunk/src/m/classes/lovenumbers.m	(revision 28013)
@@ -11,7 +11,7 @@
 
 		%loading love numbers:
-		h           	= []; %provided by PREM model
-		k           	= []; %idem
-		l           	= []; %idem
+		h             = []; %provided by PREM model
+		k             = []; %idem
+		l             = []; %idem
 		
 		%tidal love numbers for computing rotational feedback:
@@ -24,6 +24,6 @@
 
 		%time/frequency for visco-elastic love numbers
-		timefreq    = [];
-		istime      = 1;
+		timefreq      = [];
+		istime        = 1;
 
 	end
@@ -65,11 +65,11 @@
 
 			%secular fluid love number: 
-			self.tk2secular=0.942; 
+			self.tk2secular=0.942;
 
 			self.pmtf_colinear=0.0;
 			self.pmtf_ortho=0.0;
 			if maxdeg>=2
-				self.pmtf_colinear= (1.0+self.k(3,:))/(1.0-self.tk(3,:)/self.tk2secular); %valid only for elastic regime, not viscous. Also neglects chandler wobble
-				self.pmtf_ortho= 0.0;
+				self.pmtf_colinear=(1.0+self.k(3,:))/(1.0-self.tk(3,:)/self.tk2secular); %valid only for elastic regime, not viscous. Also neglects chandler wobble
+				self.pmtf_ortho=0.0;
 			end
 			%time: 
Index: /issm/trunk/src/m/classes/lovenumbers.py
===================================================================
--- /issm/trunk/src/m/classes/lovenumbers.py	(revision 28012)
+++ /issm/trunk/src/m/classes/lovenumbers.py	(revision 28013)
@@ -8,5 +8,5 @@
 
 class lovenumbers(object):  #{{{
-    """LOVENUMBERS class definition
+    """lovenumbers class definition
 
     Usage:
@@ -31,6 +31,4 @@
         self.pmtf_colinear = []
         self.pmtf_ortho = []
-        pmtf_colinear   = []
-        pmtf_ortho      = []
 
         # Time/frequency for visco-elastic love numbers
@@ -42,5 +40,5 @@
         referenceframe = options.getfieldvalue('referenceframe', 'CM')
         self.setdefaultparameters(maxdeg, referenceframe)
-    #}}}
+    # }}}
 
     def __repr__(self):  #{{{
@@ -60,5 +58,5 @@
         s += '{}\n'.format(fielddisplay(self, 'pmtf_ortho', 'Orthogonal component of the Polar Motion Transfer Function (couples x and y components, only used for Chandler Wobble)'))
         return s
-    #}}}
+    # }}}
 
     def setdefaultparameters(self, maxdeg, referenceframe):  #{{{
@@ -73,9 +71,4 @@
         # Secular fluid love number
         self.tk2secular = 0.942
-        self.pmtf_colinear=0.0
-        self.pmtf_ortho=0.0
-        if maxdeg>=2:
-            self.pmtf_colinear= (1.0+self.k[3-1,:])/(1.0-self.tk[3-1,:]/self.tk2secular) #valid only for elastic regime, not viscous. Also neglects chandler wobble
-            self.pmtf_ortho= 0.0
 
         self.pmtf_colinear = np.array([0.0]).reshape(-1, 1)
@@ -88,5 +81,5 @@
         self.timefreq = np.zeros(1) # Elastic case by default
         return self
-    #}}}
+    # }}}
 
     def checkconsistency(self, md, solution, analyses):  #{{{
@@ -117,9 +110,9 @@
             raise ValueError('temporal love numbers must start with elastic response, i.e. timefreq[0] = 0')
         return md
-    #}}}
+    # }}}
 
     def defaultoutputs(self, md):  #{{{
         return[]
-    #}}}
+    # }}}
 
     def marshall(self, prefix, md, fid):  #{{{
@@ -131,9 +124,7 @@
         WriteData(fid, prefix, 'object', self, 'fieldname', 'tk', 'name', 'md.solidearth.lovenumbers.tk', 'format', 'DoubleMat', 'mattype', 1)
         WriteData(fid, prefix, 'object', self, 'fieldname', 'tl', 'name', 'md.solidearth.lovenumbers.tl', 'format', 'DoubleMat', 'mattype', 1)
-        WriteData(fid, prefix, 'object', self, 'fieldname', 'pmtf_colinear', 'name', 'md.solidearth.lovenumbers.pmtf_colinear', 'format', 'DoubleMat', 'mattype', 1)
-        WriteData(fid, prefix, 'object', self, 'fieldname', 'pmtf_ortho', 'name', 'md.solidearth.lovenumbers.pmtf_ortho', 'format', 'DoubleMat', 'mattype', 1)
+        WriteData(fid, prefix, 'object', self, 'fieldname', 'pmtf_colinear','name','md.solidearth.lovenumbers.pmtf_colinear','format','DoubleMat','mattype',1)
+        WriteData(fid, prefix, 'object', self, 'fieldname', 'pmtf_ortho','name','md.solidearth.lovenumbers.pmtf_ortho','format','DoubleMat','mattype',1)
         WriteData(fid, prefix, 'object', self, 'data', self.tk2secular, 'fieldname', 'lovenumbers.tk2secular', 'format', 'Double')
-        WriteData(fid, prefix, 'object', self, 'fieldname', 'pmtf_colinear','name','md.solidearth.lovenumbers.pmtf_colinear','format','DoubleMat','mattype',1);
-        WriteData(fid, prefix, 'object', self, 'fieldname', 'pmtf_ortho','name','md.solidearth.lovenumbers.pmtf_ortho','format','DoubleMat','mattype',1);
 
         if (self.istime):
@@ -143,7 +134,7 @@
         WriteData(fid, prefix, 'object', self, 'fieldname', 'istime', 'name', 'md.solidearth.lovenumbers.istime', 'format', 'Boolean')
         WriteData(fid, prefix, 'object', self, 'fieldname', 'timefreq', 'name', 'md.solidearth.lovenumbers.timefreq', 'format', 'DoubleMat', 'mattype', 1, 'scale', scale);
-    #}}}
- 
+    # }}}
+
     def extrude(self, md):  #{{{
         return
-#}}}
+    # }}}
Index: /issm/trunk/src/m/classes/m1qn3inversion.m
===================================================================
--- /issm/trunk/src/m/classes/m1qn3inversion.m	(revision 28012)
+++ /issm/trunk/src/m/classes/m1qn3inversion.m	(revision 28013)
@@ -13,4 +13,5 @@
 		maxiter                     = 0
 		dxmin                       = 0
+		dfmin_frac                  = 0
 		gttol                       = 0
 		cost_functions              = NaN
@@ -66,6 +67,7 @@
 
 			%m1qn3 parameters
-			self.dxmin  = 0.1;
-			self.gttol = 1e-4;
+			self.dxmin      = 0.1;
+			self.dfmin_frac = 1;
+			self.gttol      = 1e-4;
 
 		end % }}}
@@ -88,4 +90,5 @@
 			md = checkfield(md,'fieldname','inversion.maxiter','numel',1,'>=',0);
 			md = checkfield(md,'fieldname','inversion.dxmin','numel',1,'>',0);
+         md = checkfield(md,'fieldname','inversion.dfmin_frac','numel',1,'>=',0,'<=',1);
 			md = checkfield(md,'fieldname','inversion.gttol','numel',1,'>',0);
 			md = checkfield(md,'fieldname','inversion.cost_functions','size',[1 num_costfunc],'values',supportedcostfunctions());
@@ -115,4 +118,5 @@
 			fielddisplay(self,'maxiter','maximum number of Function evaluation (forward run)');
 			fielddisplay(self,'dxmin','convergence criterion: two points less than dxmin from eachother (sup-norm) are considered identical');
+         fielddisplay(self,'dfmin_frac','expected reduction of J during the first step (e.g., 0.3=30% reduction in cost function)');
 			fielddisplay(self,'gttol','convergence criterion: ||g(X)||/||g(X0)|| (g(X0): gradient at initial guess X0)');
 			fielddisplay(self,'cost_functions','indicate the type of response for each optimization step');
@@ -148,4 +152,5 @@
 			WriteData(fid,prefix,'object',self,'class','inversion','fieldname','maxiter','format','Integer');
 			WriteData(fid,prefix,'object',self,'class','inversion','fieldname','dxmin','format','Double');
+         WriteData(fid,prefix,'object',self,'class','inversion','fieldname','dfmin_frac','format','Double');
 			WriteData(fid,prefix,'object',self,'class','inversion','fieldname','gttol','format','Double');
 			WriteData(fid,prefix,'object',self,'class','inversion','fieldname','cost_functions_coefficients','format','DoubleMat','mattype',1);
@@ -184,4 +189,5 @@
 			writejsdouble(fid,[modelname '.inversion.maxiter'],self.maxiter);
 			writejsdouble(fid,[modelname '.inversion.dxmin'],self.dxmin);
+         writejsdouble(fid,[modelname '.inversion.dfmin_frac'],self.dfmin_frac);
 			writejsdouble(fid,[modelname '.inversion.gttol'],self.gttol);
 			writejs2Darray(fid,[modelname '.inversion.cost_functions'],self.cost_functions);
Index: /issm/trunk/src/m/classes/m1qn3inversion.py
===================================================================
--- /issm/trunk/src/m/classes/m1qn3inversion.py	(revision 28012)
+++ /issm/trunk/src/m/classes/m1qn3inversion.py	(revision 28013)
@@ -19,21 +19,22 @@
         if not len(args):
             print('empty init')
-            self.iscontrol = 0
-            self.incomplete_adjoint = 0
-            self.control_parameters = np.nan
-            self.control_scaling_factors = np.nan
-            self.maxsteps = 0
-            self.maxiter = 0
-            self.dxmin = 0.
-            self.gttol = 0.
-            self.cost_functions = np.nan
+            self.iscontrol                   = 0
+            self.incomplete_adjoint          = 0
+            self.control_parameters          = np.nan
+            self.control_scaling_factors     = np.nan
+            self.maxsteps                    = 0
+            self.maxiter                     = 0
+            self.dxmin                       = 0.
+            self.dfmin_frac                  = 0.
+            self.gttol                       = 0.
+            self.cost_functions              = np.nan
             self.cost_functions_coefficients = np.nan
-            self.min_parameters = np.nan
-            self.max_parameters = np.nan
-            self.vx_obs = np.nan
-            self.vy_obs = np.nan
-            self.vz_obs = np.nan
-            self.vel_obs = np.nan
-            self.thickness_obs = np.nan
+            self.min_parameters              = np.nan
+            self.max_parameters              = np.nan
+            self.vx_obs                      = np.nan
+            self.vy_obs                      = np.nan
+            self.vz_obs                      = np.nan
+            self.vel_obs                     = np.nan
+            self.thickness_obs               = np.nan
 
             self.setdefaultparameters()
@@ -60,5 +61,5 @@
         else:
             raise Exception('constructor not supported')
-    #}}}
+    # }}}
 
     def __repr__(self):  # {{{
@@ -71,4 +72,5 @@
         s += '{}\n'.format(fielddisplay(self, 'maxiter', 'maximum number of Function evaluation (forward run)'))
         s += '{}\n'.format(fielddisplay(self, 'dxmin', 'convergence criterion: two points less than dxmin from eachother (sup - norm) are considered identical'))
+        s += '{}\n'.format(fielddisplay(self, 'dfmin_frac', 'expected reduction of J during the first step (e.g., 0.3=30% reduction in cost function)'))
         s += '{}\n'.format(fielddisplay(self, 'gttol', '||g(X)||/||g(X0)|| (g(X0): gradient at initial guess X0)'))
         s += '{}\n'.format(fielddisplay(self, 'cost_functions', 'indicate the type of response for each optimization step'))
@@ -91,5 +93,5 @@
         s += '{}\n'.format('   503: ThicknessAbsGradient')
         return s
-    #}}}
+    # }}}
 
     def setdefaultparameters(self):  # {{{
@@ -108,8 +110,9 @@
         #m1qn3 parameters
         self.dxmin = 0.1
+        self.dfmin_frac = 1.
         self.gttol = 1e-4
 
         return self
-    #}}}
+    # }}}
 
     def extrude(self, md):  # {{{
@@ -125,5 +128,5 @@
             self.max_parameters = project3d(md, 'vector', self.max_parameters, 'type', 'node')
         return self
-    #}}}
+    # }}}
 
     def checkconsistency(self, md, solution, analyses):  # {{{
@@ -142,4 +145,5 @@
         md = checkfield(md, 'fieldname', 'inversion.maxiter', 'numel', [1], '>=', 0)
         md = checkfield(md, 'fieldname', 'inversion.dxmin', 'numel', [1], '>', 0.)
+        md = checkfield(md, 'fieldname', 'inversion.dfmin_frac', 'numel', [1], '>=', 0., '<=', 1.)
         md = checkfield(md, 'fieldname', 'inversion.gttol', 'numel', [1], '>', 0.)
         md = checkfield(md, 'fieldname', 'inversion.cost_functions', 'size', [num_costfunc], 'values', supportedcostfunctions())
@@ -169,4 +173,5 @@
         WriteData(fid, prefix, 'object', self, 'class', 'inversion', 'fieldname', 'maxiter', 'format', 'Integer')
         WriteData(fid, prefix, 'object', self, 'class', 'inversion', 'fieldname', 'dxmin', 'format', 'Double')
+        WriteData(fid, prefix, 'object', self, 'class', 'inversion', 'fieldname', 'dfmin_frac', 'format', 'Double')
         WriteData(fid, prefix, 'object', self, 'class', 'inversion', 'fieldname', 'gttol', 'format', 'Double')
         WriteData(fid, prefix, 'object', self, 'class', 'inversion', 'fieldname', 'cost_functions_coefficients', 'format', 'DoubleMat', 'mattype', 1)
Index: /issm/trunk/src/m/classes/massfluxatgate.py
===================================================================
--- /issm/trunk/src/m/classes/massfluxatgate.py	(revision 28012)
+++ /issm/trunk/src/m/classes/massfluxatgate.py	(revision 28013)
@@ -32,5 +32,5 @@
         self = options.AssignObjectFields(self)
 
-    #}}}
+    # }}}
 
     def __repr__(self):  # {{{
@@ -41,13 +41,13 @@
         string = "%s\n%s" % (string, fielddisplay(self, 'profilename', 'name of file (shapefile or argus file) defining a profile (or gate)'))
         return string
-    #}}}
+    # }}}
 
     def extrude(self, md):  # {{{
         return self
-    #}}}
+    # }}}
 
     def setdefaultparameters(self):  # {{{
         return self
-    #}}}
+    # }}}
 
     def checkconsistency(self, md, solution, analyses):  # {{{
Index: /issm/trunk/src/m/classes/masstransport.py
===================================================================
--- /issm/trunk/src/m/classes/masstransport.py	(revision 28012)
+++ /issm/trunk/src/m/classes/masstransport.py	(revision 28013)
@@ -27,5 +27,5 @@
         self.setdefaultparameters()
 
-    #}}}
+    # }}}
 
     def __repr__(self):  # {{{
@@ -38,15 +38,16 @@
         s += '{}\n'.format(fielddisplay(self, 'requested_outputs', 'additional outputs requested'))
         return s
-    #}}}
+    # }}}
 
     def extrude(self, md):  # {{{
         self.spcthickness = project3d(md, 'vector', self.spcthickness, 'type', 'node')
         return self
-    #}}}
+    # }}}
 
     def defaultoutputs(self, md):  # {{{
         return ['Thickness', 'Surface', 'Base']
 
-    #}}}
+    # }}}
+
     def setdefaultparameters(self):  # {{{
         # Type of stabilization to use 0:nothing 1:artificial_diffusivity 3:Discontinuous Galerkin
@@ -61,5 +62,5 @@
         self.requested_outputs = ['default']
         return self
-    #}}}
+    # }}}
 
     def checkconsistency(self, md, solution, analyses):  # {{{
Index: /issm/trunk/src/m/classes/matdamageice.py
===================================================================
--- /issm/trunk/src/m/classes/matdamageice.py	(revision 28012)
+++ /issm/trunk/src/m/classes/matdamageice.py	(revision 28013)
@@ -36,5 +36,5 @@
 
         self.setdefaultparameters()
-    #}}}
+    # }}}
 
     def __repr__(self):  # {{{
@@ -58,5 +58,5 @@
         s += '{}\n'.format(fielddisplay(self, 'earth_density', 'Mantle density [kg m^-3]'))
         return s
-    #}}}
+    # }}}
 
     def extrude(self, md):  # {{{
@@ -64,5 +64,5 @@
         self.rheology_n = project3d(md, 'vector', self.rheology_n, 'type', 'element')
         return self
-    #}}}
+    # }}}
 
     def setdefaultparameters(self):  # {{{
@@ -100,5 +100,5 @@
         self.earth_density = 5512  # average density of the Earth (kg/m^3)
         return self
-    #}}}
+    # }}}
 
     def checkconsistency(self, md, solution, analyses):  # {{{
Index: /issm/trunk/src/m/classes/materials.py
===================================================================
--- /issm/trunk/src/m/classes/materials.py	(revision 28012)
+++ /issm/trunk/src/m/classes/materials.py	(revision 28013)
@@ -13,5 +13,5 @@
     """
 
-    def __init__(self, *args): #{{{
+    def __init__(self, *args):  # {{{
         self.nature = []
         if len(args) == 0:
@@ -68,7 +68,7 @@
 
         self.setdefaultparameters()
-    #}}}
-
-    def __repr__(self): #{{{
+    # }}}
+
+    def __repr__(self):  # {{{
         s = '   Materials:\n'
         for i in range(len(self.nature)):
@@ -119,7 +119,7 @@
                 raise RuntimeError('materials constructor error message: nature of the material not supported yet! (\'ice\' or \'litho\' or \'hydro\')')
         return s
-    #}}}
-
-    def setdefaultparameters(self): #{{{
+    # }}}
+
+    def setdefaultparameters(self):  # {{{
         for i in range(len(self.nature)):
             nat = self.nature[i]
@@ -208,7 +208,7 @@
             # Average density of the Earth (kg/m^3)
             self.earth_density = 5512
-    #}}}
-
-    def checkconsistency(self, md, solution, analyses): #{{{
+    # }}}
+
+    def checkconsistency(self, md, solution, analyses):  # {{{
         for i in range(len(self.nature)):
             nat = self.nature[i]
@@ -261,7 +261,7 @@
 
         return md
-    #}}}
-
-    def marshall(self, prefix, md, fid): #{{{
+    # }}}
+
+    def marshall(self, prefix, md, fid):  # {{{
         #1: MatdamageiceEnum 2: MatestarEnum 3: MaticeEnum 4: MatenhancediceEnum 5: MaterialsEnum
         WriteData(fid, prefix, 'name', 'md.materials.nature', 'data', naturetointeger(self.nature), 'format', 'IntMat', 'mattype', 3)
@@ -314,7 +314,7 @@
                 raise RuntimeError('materials constructor error message: nature of the material not supported yet! (\'ice\' or \'litho\' or \'hydro\')')
         WriteData(fid, prefix, 'data', self.earth_density, 'name', 'md.materials.earth_density', 'format', 'Double')
-    #}}}
-
-    def extrude(self, md): #{{{
+    # }}}
+
+    def extrude(self, md):  # {{{
         for i in range(len(self.nature)):
             nat = self.nature[i]
@@ -323,8 +323,8 @@
                 self.rheology_n = project3d(md, 'vector', self.rheology_n, 'type', 'element')
             return self
-    #}}}
-#}}}
-
-def naturetointeger(strnat): #{{{
+    # }}}
+# }}}
+
+def naturetointeger(strnat):  # {{{
     intnat = np.zeros(len(strnat))
 
@@ -349,3 +349,3 @@
 
     return intnat
-#}}}
+# }}}
Index: /issm/trunk/src/m/classes/mesh2d.m
===================================================================
--- /issm/trunk/src/m/classes/mesh2d.m	(revision 28012)
+++ /issm/trunk/src/m/classes/mesh2d.m	(revision 28013)
@@ -76,9 +76,9 @@
 		function self = setdefaultparameters(self) % {{{
 
-			%the connectivity is the averaged number of nodes linked to a
-			%given node through an edge. This connectivity is used to initially
-			%allocate memory to the stiffness matrix. A value of 16 seems to
-			%give a good memory/time ration. This value can be checked in
-			%trunk/test/Miscellaneous/runme.m
+			%The connectivity is the average number of nodes linked to a given 
+			%node through an edge. This connectivity is used to initially allocate 
+			%memory to the stiffness matrix. A value of 16 seems to give a good 
+			%memory/time ratio. This value can be checked in
+			%test/Miscellaneous/runme.m
 			self.average_vertex_connectivity=25;
 		end % }}}
Index: /issm/trunk/src/m/classes/mesh2d.py
===================================================================
--- /issm/trunk/src/m/classes/mesh2d.py	(revision 28012)
+++ /issm/trunk/src/m/classes/mesh2d.py	(revision 28013)
@@ -1,5 +1,5 @@
 import numpy as np
+from checkfield import checkfield
 from fielddisplay import fielddisplay
-from checkfield import checkfield
 import MatlabFuncs as m
 from WriteData import WriteData
@@ -7,82 +7,82 @@
 
 class mesh2d(object):
-    """
-    MESH2D class definition
+    """mesh2d class definition
 
-       Usage:
-          mesh2d = mesh2d()
+    Usage:
+        mesh2d = mesh2d()
     """
 
     def __init__(self):  # {{{
-        self.x = float('NaN')
-        self.y = float('NaN')
-        self.elements = float('NaN')
+        self.x = np.nan
+        self.y = np.nan
+        self.elements = np.nan
         self.numberofelements = 0
         self.numberofvertices = 0
         self.numberofedges = 0
 
-        self.lat = float('NaN')
-        self.long = float('NaN')
+        self.lat = np.nan
+        self.long = np.nan
         self.epsg = 0
-        self.scale_factor = float('NaN')
+        self.scale_factor = np.nan
 
-        self.vertexonboundary = float('NaN')
-        self.edges = float('NaN')
-        self.segments = float('NaN')
-        self.segmentmarkers = float('NaN')
-        self.vertexconnectivity = float('NaN')
-        self.elementconnectivity = float('NaN')
+        self.vertexonboundary = np.nan
+        self.edges = np.nan
+        self.segments = np.nan
+        self.segmentmarkers = np.nan
+        self.vertexconnectivity = np.nan
+        self.elementconnectivity = np.nan
         self.average_vertex_connectivity = 0
 
-        self.extractedvertices = float('NaN')
-        self.extractedelements = float('NaN')
+        self.extractedvertices = np.nan
+        self.extractedelements = np.nan
 
-    #set defaults
+        # Set defaults
         self.setdefaultparameters()
 
-    #}}}
+    # }}}
 
     def __repr__(self):  # {{{
-        string = "   2D tria Mesh (horizontal):"
+        s = '   2D tria Mesh (horizontal):\n'
 
-        string = "%s\n%s" % (string, "\n      Elements and vertices:")
-        string = "%s\n%s" % (string, fielddisplay(self, "numberofelements", "number of elements"))
-        string = "%s\n%s" % (string, fielddisplay(self, "numberofvertices", "number of vertices"))
-        string = "%s\n%s" % (string, fielddisplay(self, "elements", "vertex indices of the mesh elements"))
-        string = "%s\n%s" % (string, fielddisplay(self, "x", "vertices x coordinate [m]"))
-        string = "%s\n%s" % (string, fielddisplay(self, "y", "vertices y coordinate [m]"))
-        string = "%s\n%s" % (string, fielddisplay(self, "edges", "edges of the 2d mesh (vertex1 vertex2 element1 element2)"))
-        string = "%s\n%s" % (string, fielddisplay(self, "numberofedges", "number of edges of the 2d mesh"))
+        s += '{}\n'.format('      Elements and vertices:')
+        s += '{}\n'.format(fielddisplay(self, 'numberofelements', 'number of elements'))
+        s += '{}\n'.format(fielddisplay(self, 'numberofvertices', 'number of vertices'))
+        s += '{}\n'.format(fielddisplay(self, 'elements', 'vertex indices of the mesh elements'))
+        s += '{}\n'.format(fielddisplay(self, 'x', 'vertices x coordinate [m]'))
+        s += '{}\n'.format(fielddisplay(self, 'y', 'vertices y coordinate [m]'))
+        s += '{}\n'.format(fielddisplay(self, 'edges', 'edges of the 2d mesh (vertex1 vertex2 element1 element2)'))
+        s += '{}\n'.format(fielddisplay(self, 'numberofedges', 'number of edges of the 2d mesh'))
+        s += '\n'
+        s += '{}\n'.format('      Properties:')
+        s += '{}\n'.format(fielddisplay(self, 'vertexonboundary', 'vertices on the boundary of the domain flag list'))
+        s += '{}\n'.format(fielddisplay(self, 'segments', 'edges on domain boundary (vertex1 vertex2 element)'))
+        s += '{}\n'.format(fielddisplay(self, 'segmentmarkers', 'number associated to each segment'))
+        s += '{}\n'.format(fielddisplay(self, 'vertexconnectivity', 'list of elements connected to vertex_i'))
+        s += '{}\n'.format(fielddisplay(self, 'elementconnectivity', 'list of elements adjacent to element_i'))
+        s += '{}\n'.format(fielddisplay(self, 'average_vertex_connectivity', 'average number of vertices connected to one vertex'))
+        s += '\n'
+        s += '{}\n'.format('      Extracted model:')
+        s += '{}\n'.format(fielddisplay(self, 'extractedvertices', 'vertices extracted from the model'))
+        s += '{}\n'.format(fielddisplay(self, 'extractedelements', 'elements extracted from the model'))
+        s += '\n'
+        s += '{}\n'.format('      Projection:')
+        s += '{}\n'.format(fielddisplay(self, 'lat', 'vertices latitude [degrees]'))
+        s += '{}\n'.format(fielddisplay(self, 'long', 'vertices longitude [degrees]'))
+        s += '{}\n'.format(fielddisplay(self, 'epsg', 'EPSG code (ex: 3413 for UPS Greenland, 3031 for UPS Antarctica)'))
+        s += '{}\n'.format(fielddisplay(self, 'scale_factor', 'Projection correction for volume, area, etc. computation'))
 
-        string = "%s%s" % (string, "\n\n      Properties:")
-        string = "%s\n%s" % (string, fielddisplay(self, "vertexonboundary", "vertices on the boundary of the domain flag list"))
-        string = "%s\n%s" % (string, fielddisplay(self, "segments", "edges on domain boundary (vertex1 vertex2 element)"))
-        string = "%s\n%s" % (string, fielddisplay(self, "segmentmarkers", "number associated to each segment"))
-        string = "%s\n%s" % (string, fielddisplay(self, "vertexconnectivity", "list of elements connected to vertex_i"))
-        string = "%s\n%s" % (string, fielddisplay(self, "elementconnectivity", "list of elements adjacent to element_i"))
-        string = "%s\n%s" % (string, fielddisplay(self, "average_vertex_connectivity", "average number of vertices connected to one vertex"))
-
-        string = "%s%s" % (string, "\n\n      Extracted model:")
-        string = "%s\n%s" % (string, fielddisplay(self, "extractedvertices", "vertices extracted from the model"))
-        string = "%s\n%s" % (string, fielddisplay(self, "extractedelements", "elements extracted from the model"))
-
-        string = "%s%s" % (string, "\n\n      Projection:")
-        string = "%s\n%s" % (string, fielddisplay(self, "lat", "vertices latitude [degrees]"))
-        string = "%s\n%s" % (string, fielddisplay(self, "long", "vertices longitude [degrees]"))
-        string = "%s\n%s" % (string, fielddisplay(self, "epsg", "EPSG code (ex: 3413 for UPS Greenland, 3031 for UPS Antarctica)"))
-        string = "%s\n%s" % (string, fielddisplay(self, "scale_factor", "Projection correction for volume, area, etc. computation"))
-        return string
-    #}}}
+        return s
+    # }}}
 
     def setdefaultparameters(self):  # {{{
-        #the connectivity is the averaged number of nodes linked to a
-        #given node through an edge. This connectivity is used to initially
-        #allocate memory to the stiffness matrix. A value of 16 seems to
-        #give a good memory / time ration. This value can be checked in
-        #trunk / test / Miscellaneous / runme.m
+        # The connectivity is the average number of nodes linked to a given 
+        # node through an edge. This connectivity is used to initially allocate 
+        # memory to the stiffness matrix. A value of 16 seems to give a good 
+        # memory/time ratio. This value can be checked in 
+        # test/Miscellaneous/runme.m
         self.average_vertex_connectivity = 25
 
         return self
-    #}}}
+    # }}}
 
     def checkconsistency(self, md, solution, analyses):  # {{{
@@ -95,8 +95,8 @@
         md = checkfield(md, 'fieldname', 'mesh.elements', 'size', [md.mesh.numberofelements, 3])
         if np.any(np.logical_not(m.ismember(np.arange(1, md.mesh.numberofvertices + 1), md.mesh.elements))):
-            md.checkmessage("orphan nodes have been found. Check the mesh outline")
+            md.checkmessage('orphan nodes have been found. Check the mesh outline')
         md = checkfield(md, 'fieldname', 'mesh.numberofelements', '>', 0)
         md = checkfield(md, 'fieldname', 'mesh.numberofvertices', '>', 0)
-        md = checkfield(md, 'fieldname', 'mesh.average_vertex_connectivity', '>=', 9, 'message', "'mesh.average_vertex_connectivity' should be at least 9 in 2d")
+        md = checkfield(md, 'fieldname', 'mesh.average_vertex_connectivity', '>=', 9, 'message', '\'mesh.average_vertex_connectivity\' should be at least 9 in 2d')
         md = checkfield(md, 'fieldname', 'mesh.segments', 'NaN', 1, 'Inf', 1, '>', 0, 'size', [np.nan, 3])
         if(np.size(self.scale_factor) > 1):
@@ -104,5 +104,5 @@
 
         if solution == 'ThermalSolution':
-            md.checkmessage("thermal not supported for 2d mesh")
+            md.checkmessage('thermal not supported for 2d mesh')
 
         return md
@@ -110,17 +110,17 @@
 
     def domaintype(self):  # {{{
-        return "2Dhorizontal"
-    #}}}
+        return '2Dhorizontal'
+    # }}}
 
     def dimension(self):  # {{{
         return 2
-    #}}}
+    # }}}
 
     def elementtype(self):  # {{{
-        return "Tria"
-    #}}}
+        return 'Tria'
+    # }}}
 
     def marshall(self, prefix, md, fid):  # {{{
-        WriteData(fid, prefix, 'name', 'md.mesh.domain_type', 'data', "Domain" + self.domaintype(), 'format', 'String')
+        WriteData(fid, prefix, 'name', 'md.mesh.domain_type', 'data', 'Domain' + self.domaintype(), 'format', 'String')
         WriteData(fid, prefix, 'name', 'md.mesh.domain_dimension', 'data', self.dimension(), 'format', 'Integer')
         WriteData(fid, prefix, 'name', 'md.mesh.elementtype', 'data', self.elementtype(), 'format', 'String')
Index: /issm/trunk/src/m/classes/mesh2dvertical.py
===================================================================
--- /issm/trunk/src/m/classes/mesh2dvertical.py	(revision 28012)
+++ /issm/trunk/src/m/classes/mesh2dvertical.py	(revision 28013)
@@ -41,5 +41,5 @@
         self.setdefaultparameters()
 
-    #}}}
+    # }}}
     def __repr__(self):  # {{{
         string = "   2D tria Mesh (vertical):"
@@ -70,5 +70,5 @@
         string = "%s\n%s" % (string, fielddisplay(self, "scale_factor", "Projection correction for volume, area, etc. computation"))
         return string
-    #}}}
+    # }}}
 
     def setdefaultparameters(self):  # {{{
@@ -81,5 +81,5 @@
 
         return self
-    #}}}
+    # }}}
 
     def checkconsistency(self, md, solution, analyses):  # {{{
@@ -109,13 +109,13 @@
     def domaintype(self):  # {{{
         return "2Dvertical"
-    #}}}
+    # }}}
 
     def dimension(self):  # {{{
         return 2
-    #}}}
+    # }}}
 
     def elementtype(self):  # {{{
         return "Tria"
-    #}}}
+    # }}}
 
     def vertexflags(self, value):  # {{{
@@ -124,5 +124,5 @@
         flags[pos] = 1
         return flags
-    #}}}
+    # }}}
 
     def marshall(self, prefix, md, fid):  # {{{
Index: /issm/trunk/src/m/classes/mesh3dprisms.m
===================================================================
--- /issm/trunk/src/m/classes/mesh3dprisms.m	(revision 28012)
+++ /issm/trunk/src/m/classes/mesh3dprisms.m	(revision 28013)
@@ -35,4 +35,5 @@
 		y2d                         = NaN;
 		elements2d                  = NaN;
+		segments2d                  = NaN;
 		numberofvertices2d          = 0;
 		numberofelements2d          = 0;
@@ -136,4 +137,5 @@
 			fielddisplay(self,'numberofvertices2d','number of vertices');
 			fielddisplay(self,'elements2d','vertex indices of the mesh elements');
+			fielddisplay(self,'segments2d','edges on 2d domain boundary (vertex1 vertex2 element)');
 			fielddisplay(self,'x2d','vertices x coordinate [m]');
 			fielddisplay(self,'y2d','vertices y coordinate [m]');
@@ -190,4 +192,5 @@
 			WriteData(fid,prefix,'object',self,'class','mesh','fieldname','average_vertex_connectivity','format','Integer');
 			WriteData(fid,prefix,'object',self,'class','mesh','fieldname','elements2d','format','DoubleMat','mattype',3);
+			WriteData(fid,prefix,'object',self,'class','mesh','fieldname','segments2d','format','DoubleMat','mattype',3);
 			WriteData(fid,prefix,'object',self,'class','mesh','fieldname','numberofvertices2d','format','Integer');
 			WriteData(fid,prefix,'object',self,'class','mesh','fieldname','numberofelements2d','format','Integer');
@@ -235,4 +238,5 @@
 			writejs1Darray(fid,[modelname '.mesh.y2d'],self.y2d);
 			writejs2Darray(fid,[modelname '.mesh.elements2d'],self.elements2d);
+			writejs2Darray(fid,[modelname '.mesh.segments2d'],self.segments2d);
 			writejsdouble(fid,[modelname '.mesh.numberofvertices2d'],self.numberofvertices2d);
 			writejsdouble(fid,[modelname '.mesh.numberofelements2d'],self.numberofelements2d);
Index: /issm/trunk/src/m/classes/mesh3dprisms.py
===================================================================
--- /issm/trunk/src/m/classes/mesh3dprisms.py	(revision 28012)
+++ /issm/trunk/src/m/classes/mesh3dprisms.py	(revision 28013)
@@ -51,5 +51,5 @@
     #set defaults
         self.setdefaultparameters()
-    #}}}
+    # }}}
 
     def __repr__(self):  # {{{
@@ -95,5 +95,5 @@
         string = "%s\n%s" % (string, fielddisplay(self, "scale_factor", "Projection correction for volume, area, etc. computation"))
         return string
-    #}}}
+    # }}}
 
     def setdefaultparameters(self):  # {{{
@@ -106,5 +106,5 @@
 
         return self
-    #}}}
+    # }}}
 
     def checkconsistency(self, md, solution, analyses):  # {{{
@@ -130,13 +130,13 @@
     def domaintype(self):  # {{{
         return "3D"
-    #}}}
+    # }}}
 
     def dimension(self):  # {{{
         return 3
-    #}}}
+    # }}}
 
     def elementtype(self):  # {{{
         return "Penta"
-    #}}}
+    # }}}
 
     def marshall(self, prefix, md, fid):  # {{{
Index: /issm/trunk/src/m/classes/mesh3dsurface.py
===================================================================
--- /issm/trunk/src/m/classes/mesh3dsurface.py	(revision 28012)
+++ /issm/trunk/src/m/classes/mesh3dsurface.py	(revision 28013)
@@ -15,5 +15,5 @@
     """
 
-    def __init__(self, *args): #{{{
+    def __init__(self, *args):  # {{{
         self.x = np.nan
         self.y = np.nan
@@ -40,5 +40,5 @@
 
         nargs = len(args)
-        if not nargs:
+        if nargs == 0:
             self.setdefaultparameters()
         elif nargs == 1:
@@ -52,7 +52,7 @@
         else:
             raise RuntimeError('constructor not supported')
-    #}}}
-
-    def __repr__(self): #{{{
+    # }}}
+
+    def __repr__(self):  # {{{
         s = '   2D tria Mesh (surface):'
 
@@ -84,7 +84,7 @@
 
         return s
-    #}}}
-
-    def setdefaultparameters(self): #{{{
+    # }}}
+
+    def setdefaultparameters(self):  # {{{
         #The connectivity is the average number of nodes linked to a given node 
         #through an edge. This connectivity is used to initially allocate 
@@ -93,7 +93,7 @@
         #test/NightlyRun/runme.py.
         self.average_vertex_connectivity = 25
-    #}}}
-
-    def checkconsistency(self, md, solution, analyses): #{{{
+    # }}}
+
+    def checkconsistency(self, md, solution, analyses):  # {{{
         md = checkfield(md, 'fieldname', 'mesh.x', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofvertices])
         md = checkfield(md, 'fieldname', 'mesh.y', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofvertices])
@@ -115,7 +115,7 @@
 
         return md
-    #}}}
-
-    def marshall(self, prefix, md, fid): #{{{
+    # }}}
+
+    def marshall(self, prefix, md, fid):  # {{{
         WriteData(fid, prefix, 'name', 'md.mesh.domain_type', 'data', 'Domain' + self.domaintype(), 'format', 'String')
         WriteData(fid, prefix, 'name', 'md.mesh.domain_dimension', 'data', self.dimension(), 'format', 'Integer')
@@ -133,19 +133,19 @@
         WriteData(fid, prefix, 'object', self, 'fieldname', 'average_vertex_connectivity', 'format', 'Integer')
         WriteData(fid, prefix, 'object', self, 'fieldname', 'vertexonboundary', 'format', 'DoubleMat', 'mattype', 1)
-    #}}}
-
-    def domaintype(self): #{{{
+    # }}}
+
+    def domaintype(self):  # {{{
         return '3Dsurface'
-    #}}}
-
-    def dimension(self): #{{{
+    # }}}
+
+    def dimension(self):  # {{{
         return 2
-    #}}}
-
-    def elementtype(self): #{{{
+    # }}}
+
+    def elementtype(self):  # {{{
         return 'Tria'
-    #}}}
-
-    def processmesh(self, options): #{{{
+    # }}}
+
+    def processmesh(self, options):  # {{{
         isplanet = 1
         is2d = 0
@@ -157,7 +157,7 @@
 
         return x, y, z, elements, is2d, isplanet
-    #}}}
-
-    def savemodeljs(self, fid, modelname): #{{{
+    # }}}
+
+    def savemodeljs(self, fid, modelname):  # {{{
         fid.write('  #s.mesh = new mesh3dsurface()\n' % modelname)
         writejs1Darray(fid, [modelname, '.mesh.x'], self.x)
@@ -180,7 +180,7 @@
         writejs1Darray(fid, [modelname, '.mesh.extractedvertices'], self.extractedvertices)
         writejs1Darray(fid, [modelname, '.mesh.extractedelements'], self.extractedelements)
-    #}}}
-
-    def export(self, *args): #{{{
+    # }}}
+
+    def export(self, *args):  # {{{
         options = pairoptions(*args)
 
@@ -269,3 +269,3 @@
         #write style file:
         applyqgisstyle(filename, 'mesh')
-    #}}}
+    # }}}
Index: /issm/trunk/src/m/classes/miscellaneous.m
===================================================================
--- /issm/trunk/src/m/classes/miscellaneous.m	(revision 28012)
+++ /issm/trunk/src/m/classes/miscellaneous.m	(revision 28013)
@@ -25,4 +25,13 @@
 		end % }}}
 		function self = setdefaultparameters(self) % {{{
+
+			%Add some information about the model for future reference
+			username = getenv('USER');
+			issmver = num2str(issmversion());
+			today   = date();
+			host    =  oshostname();
+			self.notes = [' Model created on ' today ' by ' username ' on ' host sprintf('\n') ...
+				' ISSM version: ' issmver sprintf('\n') ...
+				' (path: ' pwd() ')'];
 
 		end % }}}
Index: /issm/trunk/src/m/classes/miscellaneous.py
===================================================================
--- /issm/trunk/src/m/classes/miscellaneous.py	(revision 28012)
+++ /issm/trunk/src/m/classes/miscellaneous.py	(revision 28013)
@@ -21,5 +21,5 @@
         self.setdefaultparameters()
 
-    #}}}
+    # }}}
     def __repr__(self):  # {{{
         string = '   miscellaneous parameters:'
@@ -29,9 +29,9 @@
         string = "%s\n%s" % (string, fielddisplay(self, 'dummy', 'empty field to store some data'))
         return string
-    #}}}
+    # }}}
 
     def setdefaultparameters(self):  # {{{
         return self
-    #}}}
+    # }}}
 
     def checkconsistency(self, md, solution, analyses):  # {{{
Index: /issm/trunk/src/m/classes/misfit.m
===================================================================
--- /issm/trunk/src/m/classes/misfit.m	(revision 28012)
+++ /issm/trunk/src/m/classes/misfit.m	(revision 28013)
@@ -27,5 +27,4 @@
 		weights            = NaN; %weight coefficients for every vertex
 		weights_string     = ''; %string to identify this particular set of weights
-		cumulated          = NaN; %do we cumulate misfit through time?
 	end
 	
Index: /issm/trunk/src/m/classes/misfit.py
===================================================================
--- /issm/trunk/src/m/classes/misfit.py	(revision 28012)
+++ /issm/trunk/src/m/classes/misfit.py	(revision 28013)
@@ -41,5 +41,5 @@
         #do we cumulate misfit through time?
         self.cumulated = cumulated if cumulated is not None else float('NaN')
-    #}}}
+    # }}}
 
     def __repr__(self):  # {{{
@@ -56,5 +56,5 @@
         string = "%s\n%s" % (string, fielddisplay(self, 'weights_string', 'string for weights for identification purposes'))
         return string
-    #}}}
+    # }}}
 
     def extrude(self, md):  # {{{
@@ -64,5 +64,5 @@
             self.observation = project3d(md, 'vector', self.observation, 'type', 'node')
         return self
-    #}}}
+    # }}}
 
     def checkconsistency(self, md, solution, analyses):  # {{{
Index: /issm/trunk/src/m/classes/mismipbasalforcings.py
===================================================================
--- /issm/trunk/src/m/classes/mismipbasalforcings.py	(revision 28012)
+++ /issm/trunk/src/m/classes/mismipbasalforcings.py	(revision 28013)
@@ -21,5 +21,5 @@
         self.geothermalflux           = np.nan
         self.setdefaultparameters()
-    #}}}
+    # }}}
     def __repr__(self):  # {{{
         s = '   MISMIP + basal melt parameterization\n'
@@ -30,10 +30,10 @@
         s += '{}\n'.format(fielddisplay(self, "geothermalflux", "Geothermal heat flux [W / m^2]"))
         return s
-    #}}}
+    # }}}
     def extrude(self, md):  # {{{
         self.groundedice_melting_rate = project3d(md, 'vector', self.groundedice_melting_rate, 'type', 'node', 'layer', 1)
         self.geothermalflux = project3d(md, 'vector', self.geothermalflux, 'type', 'node', 'layer', 1)  #bedrock only gets geothermal flux
         return self
-    #}}}
+    # }}}
     def initialize(self, md):  # {{{
         if np.all(np.isnan(self.groundedice_melting_rate)):
@@ -44,5 +44,5 @@
             print("      no basalforcings.geothermalflux specified: values set as zero")
         return self
-    #}}}
+    # }}}
     def setdefaultparameters(self):  # {{{
         # default values for melting parameterization
@@ -51,5 +51,5 @@
         self.upperdepth_melt = -100.
         return self
-    #}}}
+    # }}}
     def checkconsistency(self, md, solution, analyses):  # {{{
         # Early return
Index: /issm/trunk/src/m/classes/model.m
===================================================================
--- /issm/trunk/src/m/classes/model.m	(revision 28012)
+++ /issm/trunk/src/m/classes/model.m	(revision 28013)
@@ -35,5 +35,5 @@
 		groundingline    = 0;
 		hydrology        = 0;
-		debris		 = 0;
+		debris           = 0;
 		masstransport    = 0;
 		thermal          = 0;
@@ -214,48 +214,48 @@
 		%}}}
 		function disp(self) % {{{
-			disp(sprintf('%19s: %-22s -- %s','mesh'            ,['[1x1 ' class(self.mesh) ']'],'mesh properties'));
-			disp(sprintf('%19s: %-22s -- %s','mask'            ,['[1x1 ' class(self.mask) ']'],'defines grounded and floating elements'));
-			disp(sprintf('%19s: %-22s -- %s','geometry'        ,['[1x1 ' class(self.geometry) ']'],'surface elevation, bedrock topography, ice thickness,...'));
-			disp(sprintf('%19s: %-22s -- %s','constants'       ,['[1x1 ' class(self.constants) ']'],'physical constants'));
-			disp(sprintf('%19s: %-22s -- %s','smb'             ,['[1x1 ' class(self.smb) ']'],'surface mass balance'));
-			disp(sprintf('%19s: %-22s -- %s','basalforcings'   ,['[1x1 ' class(self.basalforcings) ']'],'bed forcings'));
-			disp(sprintf('%19s: %-22s -- %s','materials'       ,['[1x1 ' class(self.materials) ']'],'material properties'));
-			disp(sprintf('%19s: %-22s -- %s','damage'          ,['[1x1 ' class(self.damage) ']'],'parameters for damage evolution solution'));
-			disp(sprintf('%19s: %-22s -- %s','friction'        ,['[1x1 ' class(self.friction) ']'],'basal friction/drag properties'));
-			disp(sprintf('%19s: %-22s -- %s','flowequation'    ,['[1x1 ' class(self.flowequation) ']'],'flow equations'));
-			disp(sprintf('%19s: %-22s -- %s','timestepping'    ,['[1x1 ' class(self.timestepping) ']'],'time stepping for transient models'));
-			disp(sprintf('%19s: %-22s -- %s','initialization'  ,['[1x1 ' class(self.initialization) ']'],'initial guess/state'));
-			disp(sprintf('%19s: %-22s -- %s','rifts'           ,['[1x1 ' class(self.rifts) ']'],'rifts properties'));
-			disp(sprintf('%19s: %-22s -- %s','solidearth'      ,['[1x1 ' class(self.solidearth) ']'],'solidearth inputs and settings'));
-			disp(sprintf('%19s: %-22s -- %s','dsl'             ,['[1x1 ' class(self.dsl) ']'],'dynamic sea-level '));
-			disp(sprintf('%19s: %-22s -- %s','debug'           ,['[1x1 ' class(self.debug) ']'],'debugging tools (valgrind, gprof)'));
-			disp(sprintf('%19s: %-22s -- %s','verbose'         ,['[1x1 ' class(self.verbose) ']'],'verbosity level in solve'));
-			disp(sprintf('%19s: %-22s -- %s','settings'        ,['[1x1 ' class(self.settings) ']'],'settings properties'));
-			disp(sprintf('%19s: %-22s -- %s','toolkits'        ,['[1x1 ' class(self.toolkits) ']'],'PETSc options for each solution'));
-			disp(sprintf('%19s: %-22s -- %s','cluster'         ,['[1x1 ' class(self.cluster) ']'],'cluster parameters (number of CPUs...)'));
-			disp(sprintf('%19s: %-22s -- %s','balancethickness',['[1x1 ' class(self.balancethickness) ']'],'parameters for balancethickness solution'));
-			disp(sprintf('%19s: %-22s -- %s','stressbalance'   ,['[1x1 ' class(self.stressbalance) ']'],'parameters for stressbalance solution'));
-			disp(sprintf('%19s: %-22s -- %s','groundingline'   ,['[1x1 ' class(self.groundingline) ']'],'parameters for groundingline solution'));
-			disp(sprintf('%19s: %-22s -- %s','hydrology'       ,['[1x1 ' class(self.hydrology) ']'],'parameters for hydrology solution'));
-			disp(sprintf('%19s: %-22s -- %s','debris' 	   ,['[1x1 ' class(self.debris) ']'],'parameters for debris solution'));
-			disp(sprintf('%19s: %-22s -- %s','masstransport'   ,['[1x1 ' class(self.masstransport) ']'],'parameters for masstransport solution'));
-			disp(sprintf('%19s: %-22s -- %s','thermal'         ,['[1x1 ' class(self.thermal) ']'],'parameters for thermal solution'));
-			disp(sprintf('%19s: %-22s -- %s','steadystate'     ,['[1x1 ' class(self.steadystate) ']'],'parameters for steadystate solution'));
-			disp(sprintf('%19s: %-22s -- %s','transient'       ,['[1x1 ' class(self.transient) ']'],'parameters for transient solution'));
-			disp(sprintf('%19s: %-22s -- %s','levelset'        ,['[1x1 ' class(self.levelset) ']'],'parameters for moving boundaries (level-set method)'));
-			disp(sprintf('%19s: %-22s -- %s','calving'         ,['[1x1 ' class(self.calving) ']'],'parameters for calving'));
-			disp(sprintf('%19s: %-22s -- %s','frontalforcings' ,['[1x1 ' class(self.frontalforcings) ']'],'parameters for frontalforcings'));
-			disp(sprintf('%19s: %-22s -- %s','esa'             ,['[1x1 ' class(self.esa) ']'],'parameters for elastic adjustment solution'));
-			disp(sprintf('%19s: %-22s -- %s','love'            ,['[1x1 ' class(self.love) ']'],'parameters for love solution'));
-			disp(sprintf('%19s: %-22s -- %s','sampling'        ,['[1x1 ' class(self.sampling) ']'],'parameters for stochastic sampler'));
-			disp(sprintf('%19s: %-22s -- %s','autodiff'        ,['[1x1 ' class(self.autodiff) ']'],'automatic differentiation parameters'));
-			disp(sprintf('%19s: %-22s -- %s','inversion'       ,['[1x1 ' class(self.inversion) ']'],'parameters for inverse methods'));
-			disp(sprintf('%19s: %-22s -- %s','qmu'             ,['[1x1 ' class(self.qmu) ']'],'Dakota properties'));
-			disp(sprintf('%19s: %-22s -- %s','amr'             ,['[1x1 ' class(self.amr) ']'],'adaptive mesh refinement properties'));
-			disp(sprintf('%19s: %-22s -- %s','outputdefinition',['[1x1 ' class(self.outputdefinition) ']'],'output definition'));
-			disp(sprintf('%19s: %-22s -- %s','results'         ,['[1x1 ' class(self.results) ']'],'model results'));
-			disp(sprintf('%19s: %-22s -- %s','radaroverlay'    ,['[1x1 ' class(self.radaroverlay) ']'],'radar image for plot overlay'));
-			disp(sprintf('%19s: %-22s -- %s','miscellaneous'   ,['[1x1 ' class(self.miscellaneous) ']'],'miscellaneous fields'));
-			disp(sprintf('%19s: %-22s -- %s','stochasticforcing',['[1x1 ' class(self.stochasticforcing) ']'],'stochasticity applied to model forcings'));
+			disp(sprintf('%19s: %-23s -- %s','mesh'            ,['[1x1 ' class(self.mesh) ']'],'mesh properties'));
+			disp(sprintf('%19s: %-23s -- %s','mask'            ,['[1x1 ' class(self.mask) ']'],'defines grounded and floating elements'));
+			disp(sprintf('%19s: %-23s -- %s','geometry'        ,['[1x1 ' class(self.geometry) ']'],'surface elevation, bedrock topography, ice thickness,...'));
+			disp(sprintf('%19s: %-23s -- %s','constants'       ,['[1x1 ' class(self.constants) ']'],'physical constants'));
+			disp(sprintf('%19s: %-23s -- %s','smb'             ,['[1x1 ' class(self.smb) ']'],'surface mass balance'));
+			disp(sprintf('%19s: %-23s -- %s','basalforcings'   ,['[1x1 ' class(self.basalforcings) ']'],'bed forcings'));
+			disp(sprintf('%19s: %-23s -- %s','materials'       ,['[1x1 ' class(self.materials) ']'],'material properties'));
+			disp(sprintf('%19s: %-23s -- %s','damage'          ,['[1x1 ' class(self.damage) ']'],'parameters for damage evolution solution'));
+			disp(sprintf('%19s: %-23s -- %s','friction'        ,['[1x1 ' class(self.friction) ']'],'basal friction/drag properties'));
+			disp(sprintf('%19s: %-23s -- %s','flowequation'    ,['[1x1 ' class(self.flowequation) ']'],'flow equations'));
+			disp(sprintf('%19s: %-23s -- %s','timestepping'    ,['[1x1 ' class(self.timestepping) ']'],'time stepping for transient models'));
+			disp(sprintf('%19s: %-23s -- %s','initialization'  ,['[1x1 ' class(self.initialization) ']'],'initial guess/state'));
+			disp(sprintf('%19s: %-23s -- %s','rifts'           ,['[1x1 ' class(self.rifts) ']'],'rifts properties'));
+			disp(sprintf('%19s: %-23s -- %s','solidearth'      ,['[1x1 ' class(self.solidearth) ']'],'solidearth inputs and settings'));
+			disp(sprintf('%19s: %-23s -- %s','dsl'             ,['[1x1 ' class(self.dsl) ']'],'dynamic sea-level '));
+			disp(sprintf('%19s: %-23s -- %s','debug'           ,['[1x1 ' class(self.debug) ']'],'debugging tools (valgrind, gprof)'));
+			disp(sprintf('%19s: %-23s -- %s','verbose'         ,['[1x1 ' class(self.verbose) ']'],'verbosity level in solve'));
+			disp(sprintf('%19s: %-23s -- %s','settings'        ,['[1x1 ' class(self.settings) ']'],'settings properties'));
+			disp(sprintf('%19s: %-23s -- %s','toolkits'        ,['[1x1 ' class(self.toolkits) ']'],'PETSc options for each solution'));
+			disp(sprintf('%19s: %-23s -- %s','cluster'         ,['[1x1 ' class(self.cluster) ']'],'cluster parameters (number of CPUs...)'));
+			disp(sprintf('%19s: %-23s -- %s','balancethickness',['[1x1 ' class(self.balancethickness) ']'],'parameters for balancethickness solution'));
+			disp(sprintf('%19s: %-23s -- %s','stressbalance'   ,['[1x1 ' class(self.stressbalance) ']'],'parameters for stressbalance solution'));
+			disp(sprintf('%19s: %-23s -- %s','groundingline'   ,['[1x1 ' class(self.groundingline) ']'],'parameters for groundingline solution'));
+			disp(sprintf('%19s: %-23s -- %s','hydrology'       ,['[1x1 ' class(self.hydrology) ']'],'parameters for hydrology solution'));
+			disp(sprintf('%19s: %-23s -- %s','debris' 	   ,['[1x1 ' class(self.debris) ']'],'parameters for debris solution'));
+			disp(sprintf('%19s: %-23s -- %s','masstransport'   ,['[1x1 ' class(self.masstransport) ']'],'parameters for masstransport solution'));
+			disp(sprintf('%19s: %-23s -- %s','thermal'         ,['[1x1 ' class(self.thermal) ']'],'parameters for thermal solution'));
+			disp(sprintf('%19s: %-23s -- %s','steadystate'     ,['[1x1 ' class(self.steadystate) ']'],'parameters for steadystate solution'));
+			disp(sprintf('%19s: %-23s -- %s','transient'       ,['[1x1 ' class(self.transient) ']'],'parameters for transient solution'));
+			disp(sprintf('%19s: %-23s -- %s','levelset'        ,['[1x1 ' class(self.levelset) ']'],'parameters for moving boundaries (level-set method)'));
+			disp(sprintf('%19s: %-23s -- %s','calving'         ,['[1x1 ' class(self.calving) ']'],'parameters for calving'));
+			disp(sprintf('%19s: %-23s -- %s','frontalforcings' ,['[1x1 ' class(self.frontalforcings) ']'],'parameters for frontalforcings'));
+			disp(sprintf('%19s: %-23s -- %s','esa'             ,['[1x1 ' class(self.esa) ']'],'parameters for elastic adjustment solution'));
+			disp(sprintf('%19s: %-23s -- %s','love'            ,['[1x1 ' class(self.love) ']'],'parameters for love solution'));
+			disp(sprintf('%19s: %-23s -- %s','sampling'        ,['[1x1 ' class(self.sampling) ']'],'parameters for stochastic sampler'));
+			disp(sprintf('%19s: %-23s -- %s','autodiff'        ,['[1x1 ' class(self.autodiff) ']'],'automatic differentiation parameters'));
+			disp(sprintf('%19s: %-23s -- %s','inversion'       ,['[1x1 ' class(self.inversion) ']'],'parameters for inverse methods'));
+			disp(sprintf('%19s: %-23s -- %s','qmu'             ,['[1x1 ' class(self.qmu) ']'],'Dakota properties'));
+			disp(sprintf('%19s: %-23s -- %s','amr'             ,['[1x1 ' class(self.amr) ']'],'adaptive mesh refinement properties'));
+			disp(sprintf('%19s: %-23s -- %s','outputdefinition',['[1x1 ' class(self.outputdefinition) ']'],'output definition'));
+			disp(sprintf('%19s: %-23s -- %s','results'         ,['[1x1 ' class(self.results) ']'],'model results'));
+			disp(sprintf('%19s: %-23s -- %s','radaroverlay'    ,['[1x1 ' class(self.radaroverlay) ']'],'radar image for plot overlay'));
+			disp(sprintf('%19s: %-23s -- %s','miscellaneous'   ,['[1x1 ' class(self.miscellaneous) ']'],'miscellaneous fields'));
+			disp(sprintf('%19s: %-23s -- %s','stochasticforcing',['[1x1 ' class(self.stochasticforcing) ']'],'stochasticity applied to model forcings'));
 		end % }}}
 		function md = setdefaultparameters(md,planet) % {{{
@@ -286,5 +286,5 @@
 			md.stressbalance    = stressbalance();
 			md.hydrology        = hydrologyshreve();
-			md.debris	    = debris();
+			md.debris           = debris();
 			md.masstransport    = masstransport();
 			md.thermal          = thermal();
@@ -543,5 +543,5 @@
 			end
 
-			%Initialize the 2d mesh
+			%Initialize 2d mesh
 			mesh=mesh2d();
 			mesh.x=md.mesh.x2d;
@@ -721,5 +721,5 @@
 			end
 
-			%Initial 2d mesh 
+			%Initial 2d mesh
 			if isa(md1.mesh,'mesh3dprisms'),
 				flag_elem_2d=flag_elem(1:md1.mesh.numberofelements2d);
@@ -810,5 +810,5 @@
 			nodestoflag2=Pnode(nodestoflag1);
 			if numel(md1.stressbalance.spcvx)>1 & numel(md1.stressbalance.spcvy)>1 & numel(md1.stressbalance.spcvz)>1,
-				if numel(md1.inversion.vx_obs)>1 & numel(md1.inversion.vy_obs)>1
+				if isprop(md1.inversion,'vx_obs') & numel(md1.inversion.vx_obs)>1 & numel(md1.inversion.vy_obs)>1
 					md2.stressbalance.spcvx(nodestoflag2)=md2.inversion.vx_obs(nodestoflag2); 
 					md2.stressbalance.spcvy(nodestoflag2)=md2.inversion.vy_obs(nodestoflag2);
@@ -873,5 +873,20 @@
 						elseif length(field)==numberofelements1,
 							md2.outputdefinition.definitions{i}.(solutionsubfields{j})=field(pos_elem);
+						elseif size(field,1)==numberofvertices1+1
+							md2.outputdefinition.definitions{i}.(solutionsubfields{j})=[field(pos_node,:); field(end,:)];
 						end
+					end
+				end
+			end
+			
+			%independents
+			for i=1:length(md1.autodiff.independents)
+				independentfield=fields(md1.autodiff.independents{i});
+				for j=1:length(independentfield)
+					field=md1.autodiff.independents{i}.(independentfield{j});
+					if length(field)==numberofvertices1
+						md2.autodiff.independents{i}.(independentfield{j})=field(pos_node);
+					elseif length(field)==numberofelements1
+						md2.autodiff.independents{i}.(independentfield{j})=field(pos_elem);
 					end
 				end
@@ -959,5 +974,5 @@
 			md2.mesh.vertexonboundary = zeros(md2.mesh.numberofvertices,1); md2.mesh.vertexonboundary(md2.mesh.segments(:,1:2)) = 1;
 
-			%Deal with boudary
+			%Deal with boundary
 			md2.mesh.vertexonboundary = [md.mesh.vertexonboundary;sum(md.mesh.vertexonboundary(edges),2)==2];
 			md2.mesh.elementconnectivity=bamgmesh_out.ElementConnectivity;
@@ -1091,5 +1106,5 @@
 			end
 
-			%Initialize with the 2d mesh
+			%Initialize with 2d mesh
 			mesh2d = md.mesh;
 			md.mesh=mesh3dprisms();
@@ -1112,4 +1127,6 @@
 			md.mesh.extractedvertices           = mesh2d.extractedvertices;
 			md.mesh.extractedelements           = mesh2d.extractedelements;
+
+			md.mesh.segments2d                  = mesh2d.segments;
 
 			x3d=[]; 
@@ -1196,4 +1213,5 @@
 			md.solidearth = extrude(md.solidearth,md);
 			md.dsl = extrude(md.dsl,md);
+			md.stochasticforcing = extrude(md.stochasticforcing,md);
 
 			%connectivity
@@ -1286,4 +1304,5 @@
 			if isfield(structmd,'cfl_coefficient'), md.timestepping.cfl_coefficient=structmd.cfl_coefficient; end
 			if isfield(structmd,'spcthickness'), md.masstransport.spcthickness=structmd.spcthickness; end
+			if isfield(structmd,'spcthickness'), md.debris.spcthickness=structmd.spcthickness; end
 			if isfield(structmd,'artificial_diffusivity'), md.masstransport.stabilization=structmd.artificial_diffusivity; end
 			if isfield(structmd,'hydrostatic_adjustment'), md.masstransport.hydrostatic_adjustment=structmd.hydrostatic_adjustment; end
Index: /issm/trunk/src/m/classes/model.py
===================================================================
--- /issm/trunk/src/m/classes/model.py	(revision 28012)
+++ /issm/trunk/src/m/classes/model.py	(revision 28013)
@@ -16,4 +16,5 @@
 from SMBmeltcomponents import SMBmeltcomponents
 from basalforcings import basalforcings
+from linearbasalforcings import linearbasalforcings
 from matice import matice
 from levelset import levelset
@@ -45,4 +46,5 @@
 from hydrologypism import hydrologypism
 from hydrologyshakti import hydrologyshakti
+from debris import debris
 from masstransport import masstransport
 from thermal import thermal
@@ -70,9 +72,9 @@
 from sampling import sampling
 from stochasticforcing import stochasticforcing
-#}}}
+# }}}
 
 
 class model(object):
-    """MODEL - class definition
+    """model class definition
 
     Usage:
@@ -97,5 +99,4 @@
         self.dsl = None
         self.solidearth = None
-
         self.debug = None
         self.verbose = None
@@ -103,9 +104,9 @@
         self.toolkits = None
         self.cluster = None
-
         self.balancethickness = None
         self.stressbalance = None
         self.groundingline = None
         self.hydrology = None
+        self.debris = None
         self.masstransport = None
         self.thermal = None
@@ -118,5 +119,4 @@
         self.esa = None
         self.sampling = None
-
         self.autodiff = None
         self.inversion = None
@@ -136,5 +136,5 @@
             planet = options.getfieldvalue('planet', 'earth')
             self.setdefaultparameters(planet)
-    #}}}
+    # }}}
 
     def __repr__(obj):  #{{{
@@ -143,49 +143,49 @@
         #   already converted <class>.__repr__ method for examples)
         #
-        s = "%19s: %-22s -- %s" % ("mesh", "[%s %s]" % ("1x1", obj.mesh.__class__.__name__), "mesh properties")
-        s = "%s\n%s" % (s, "%19s: %-22s -- %s" % ("mask", "[%s %s]" % ("1x1", obj.mask.__class__.__name__), "defines grounded and floating elements"))
-        s = "%s\n%s" % (s, "%19s: %-22s -- %s" % ("geometry", "[%s %s]" % ("1x1", obj.geometry.__class__.__name__), "surface elevation, bedrock topography, ice thickness, ..."))
-        s = "%s\n%s" % (s, "%19s: %-22s -- %s" % ("constants", "[%s %s]" % ("1x1", obj.constants.__class__.__name__), "physical constants"))
-        s = "%s\n%s" % (s, "%19s: %-22s -- %s" % ("smb", "[%s %s]" % ("1x1", obj.smb.__class__.__name__), "surface mass balance"))
-        s = "%s\n%s" % (s, "%19s: %-22s -- %s" % ("basalforcings", "[%s %s]" % ("1x1", obj.basalforcings.__class__.__name__), "bed forcings"))
-        s = "%s\n%s" % (s, "%19s: %-22s -- %s" % ("materials", "[%s %s]" % ("1x1", obj.materials.__class__.__name__), "material properties"))
-        s = "%s\n%s" % (s, "%19s: %-22s -- %s" % ("damage", "[%s %s]" % ("1x1", obj.damage.__class__.__name__), "damage propagation laws"))
-        s = "%s\n%s" % (s, "%19s: %-22s -- %s" % ("friction", "[%s %s]" % ("1x1", obj.friction.__class__.__name__), "basal friction / drag properties"))
-        s = "%s\n%s" % (s, "%19s: %-22s -- %s" % ("flowequation", "[%s %s]" % ("1x1", obj.flowequation.__class__.__name__), "flow equations"))
-        s = "%s\n%s" % (s, "%19s: %-22s -- %s" % ("timestepping", "[%s %s]" % ("1x1", obj.timestepping.__class__.__name__), "time stepping for transient models"))
-        s = "%s\n%s" % (s, "%19s: %-22s -- %s" % ("initialization", "[%s %s]" % ("1x1", obj.initialization.__class__.__name__), "initial guess / state"))
-        s = "%s\n%s" % (s, "%19s: %-22s -- %s" % ("rifts", "[%s %s]" % ("1x1", obj.rifts.__class__.__name__), "rifts properties"))
-        s = "%s\n%s" % (s, "%19s: %-22s -- %s" % ("solidearth", "[%s %s]" % ("1x1", obj.solidearth.__class__.__name__), "solidearth inputs and settings"))
-        s = "%s\n%s" % (s, "%19s: %-22s -- %s" % ("dsl", "[%s %s]" % ("1x1", obj.dsl.__class__.__name__), "dynamic sea level"))
-        s = "%s\n%s" % (s, "%19s: %-22s -- %s" % ("debug", "[%s %s]" % ("1x1", obj.debug.__class__.__name__), "debugging tools (valgrind, gprof)"))
-        s = "%s\n%s" % (s, "%19s: %-22s -- %s" % ("verbose", "[%s %s]" % ("1x1", obj.verbose.__class__.__name__), "verbosity level in solve"))
-        s = "%s\n%s" % (s, "%19s: %-22s -- %s" % ("settings", "[%s %s]" % ("1x1", obj.settings.__class__.__name__), "settings properties"))
-        s = "%s\n%s" % (s, "%19s: %-22s -- %s" % ("toolkits", "[%s %s]" % ("1x1", obj.toolkits.__class__.__name__), "PETSc options for each solution"))
-        s = "%s\n%s" % (s, "%19s: %-22s -- %s" % ("cluster", "[%s %s]" % ("1x1", obj.cluster.__class__.__name__), "cluster parameters (number of CPUs...)"))
-        s = "%s\n%s" % (s, "%19s: %-22s -- %s" % ("balancethickness", "[%s %s]" % ("1x1", obj.balancethickness.__class__.__name__), "parameters for balancethickness solution"))
-        s = "%s\n%s" % (s, "%19s: %-22s -- %s" % ("stressbalance", "[%s %s]" % ("1x1", obj.stressbalance.__class__.__name__), "parameters for stressbalance solution"))
-        s = "%s\n%s" % (s, "%19s: %-22s -- %s" % ("groundingline", "[%s %s]" % ("1x1", obj.groundingline.__class__.__name__), "parameters for groundingline solution"))
-        s = "%s\n%s" % (s, "%19s: %-22s -- %s" % ("hydrology", "[%s %s]" % ("1x1", obj.hydrology.__class__.__name__), "parameters for hydrology solution"))
-        s = "%s\n%s" % (s, "%19s: %-22s -- %s" % ("masstransport", "[%s %s]" % ("1x1", obj.masstransport.__class__.__name__), "parameters for masstransport solution"))
-        s = "%s\n%s" % (s, "%19s: %-22s -- %s" % ("thermal", "[%s %s]" % ("1x1", obj.thermal.__class__.__name__), "parameters for thermal solution"))
-        s = "%s\n%s" % (s, "%19s: %-22s -- %s" % ("steadystate", "[%s %s]" % ("1x1", obj.steadystate.__class__.__name__), "parameters for steadystate solution"))
-        s = "%s\n%s" % (s, "%19s: %-22s -- %s" % ("transient", "[%s %s]" % ("1x1", obj.transient.__class__.__name__), "parameters for transient solution"))
-        s = "%s\n%s" % (s, "%19s: %-22s -- %s" % ("levelset", "[%s %s]" % ("1x1", obj.levelset.__class__.__name__), "parameters for moving boundaries (level-set method)"))
-        s = "%s\n%s" % (s, "%19s: %-22s -- %s" % ("calving", "[%s %s]" % ("1x1", obj.calving.__class__.__name__), "parameters for calving"))
-        s = "%s\n%s" % (s, "%19s: %-22s -- %s" % ("frontalforcings", "[%s %s]" % ("1x1", obj.frontalforcings.__class__.__name__), "parameters for frontalforcings"))
-        s = "%s\n%s" % (s, '%19s: %-22s -- %s' % ("esa", "[%s %s]" % ("1x1", obj.esa.__class__.__name__), "parameters for elastic adjustment solution"))
-        s = "%s\n%s" % (s, '%19s: %-22s -- %s' % ("sampling", "[%s %s]" % ("1x1", obj.sampling.__class__.__name__), "parameters for stochastic sampler"))
-        s = "%s\n%s" % (s, '%19s: %-22s -- %s' % ("love", "[%s %s]" % ("1x1", obj.love.__class__.__name__), "parameters for love solution"))
-        s = "%s\n%s" % (s, "%19s: %-22s -- %s" % ("autodiff", "[%s %s]" % ("1x1", obj.autodiff.__class__.__name__), "automatic differentiation parameters"))
-        s = "%s\n%s" % (s, "%19s: %-22s -- %s" % ("inversion", "[%s %s]" % ("1x1", obj.inversion.__class__.__name__), "parameters for inverse methods"))
-        s = "%s\n%s" % (s, "%19s: %-22s -- %s" % ("qmu", "[%s %s]" % ("1x1", obj.qmu.__class__.__name__), "Dakota properties"))
-        s = "%s\n%s" % (s, "%19s: %-22s -- %s" % ("amr", "[%s %s]" % ("1x1", obj.amr.__class__.__name__), "adaptive mesh refinement properties"))
-        s = "%s\n%s" % (s, "%19s: %-22s -- %s" % ("outputdefinition", "[%s %s]" % ("1x1", obj.outputdefinition.__class__.__name__), "output definition"))
-        s = "%s\n%s" % (s, "%19s: %-22s -- %s" % ("results", "[%s %s]" % ("1x1", obj.results.__class__.__name__), "model results"))
-        s = "%s\n%s" % (s, "%19s: %-22s -- %s" % ("radaroverlay", "[%s %s]" % ("1x1", obj.radaroverlay.__class__.__name__), "radar image for plot overlay"))
-        s = "%s\n%s" % (s, "%19s: %-22s -- %s" % ("miscellaneous", "[%s %s]" % ("1x1", obj.miscellaneous.__class__.__name__), "miscellaneous fields"))
-        s = "%s\n%s" % (s, "%19s: %-22s -- %s" % ("stochasticforcing", "[%s %s]" % ("1x1", obj.stochasticforcing.__class__.__name__), "stochasticity applied to model forcings"))
+        s = '%19s: %-23s -- %s' % ('mesh', '[%s %s]' % ('1x1', obj.mesh.__class__.__name__), 'mesh properties')
+        s = '%s\n%s' % (s, '%19s: %-23s -- %s' % ('mask', '[%s %s]' % ('1x1', obj.mask.__class__.__name__), 'defines grounded and floating elements'))
+        s = '%s\n%s' % (s, '%19s: %-23s -- %s' % ('geometry', '[%s %s]' % ('1x1', obj.geometry.__class__.__name__), 'surface elevation, bedrock topography, ice thickness, ...'))
+        s = '%s\n%s' % (s, '%19s: %-23s -- %s' % ('constants', '[%s %s]' % ('1x1', obj.constants.__class__.__name__), 'physical constants'))
+        s = '%s\n%s' % (s, '%19s: %-23s -- %s' % ('smb', '[%s %s]' % ('1x1', obj.smb.__class__.__name__), 'surface mass balance'))
+        s = '%s\n%s' % (s, '%19s: %-23s -- %s' % ('basalforcings', '[%s %s]' % ('1x1', obj.basalforcings.__class__.__name__), 'bed forcings'))
+        s = '%s\n%s' % (s, '%19s: %-23s -- %s' % ('materials', '[%s %s]' % ('1x1', obj.materials.__class__.__name__), 'material properties'))
+        s = '%s\n%s' % (s, '%19s: %-23s -- %s' % ('damage', '[%s %s]' % ('1x1', obj.damage.__class__.__name__), 'damage propagation laws'))
+        s = '%s\n%s' % (s, '%19s: %-23s -- %s' % ('friction', '[%s %s]' % ('1x1', obj.friction.__class__.__name__), 'basal friction / drag properties'))
+        s = '%s\n%s' % (s, '%19s: %-23s -- %s' % ('flowequation', '[%s %s]' % ('1x1', obj.flowequation.__class__.__name__), 'flow equations'))
+        s = '%s\n%s' % (s, '%19s: %-23s -- %s' % ('timestepping', '[%s %s]' % ('1x1', obj.timestepping.__class__.__name__), 'time stepping for transient models'))
+        s = '%s\n%s' % (s, '%19s: %-23s -- %s' % ('initialization', '[%s %s]' % ('1x1', obj.initialization.__class__.__name__), 'initial guess / state'))
+        s = '%s\n%s' % (s, '%19s: %-23s -- %s' % ('rifts', '[%s %s]' % ('1x1', obj.rifts.__class__.__name__), 'rifts properties'))
+        s = '%s\n%s' % (s, '%19s: %-23s -- %s' % ('solidearth', '[%s %s]' % ('1x1', obj.solidearth.__class__.__name__), 'solidearth inputs and settings'))
+        s = '%s\n%s' % (s, '%19s: %-23s -- %s' % ('dsl', '[%s %s]' % ('1x1', obj.dsl.__class__.__name__), 'dynamic sea level'))
+        s = '%s\n%s' % (s, '%19s: %-23s -- %s' % ('debug', '[%s %s]' % ('1x1', obj.debug.__class__.__name__), 'debugging tools (valgrind, gprof)'))
+        s = '%s\n%s' % (s, '%19s: %-23s -- %s' % ('verbose', '[%s %s]' % ('1x1', obj.verbose.__class__.__name__), 'verbosity level in solve'))
+        s = '%s\n%s' % (s, '%19s: %-23s -- %s' % ('settings', '[%s %s]' % ('1x1', obj.settings.__class__.__name__), 'settings properties'))
+        s = '%s\n%s' % (s, '%19s: %-23s -- %s' % ('toolkits', '[%s %s]' % ('1x1', obj.toolkits.__class__.__name__), 'PETSc options for each solution'))
+        s = '%s\n%s' % (s, '%19s: %-23s -- %s' % ('cluster', '[%s %s]' % ('1x1', obj.cluster.__class__.__name__), 'cluster parameters (number of CPUs...)'))
+        s = '%s\n%s' % (s, '%19s: %-23s -- %s' % ('balancethickness', '[%s %s]' % ('1x1', obj.balancethickness.__class__.__name__), 'parameters for balancethickness solution'))
+        s = '%s\n%s' % (s, '%19s: %-23s -- %s' % ('stressbalance', '[%s %s]' % ('1x1', obj.stressbalance.__class__.__name__), 'parameters for stressbalance solution'))
+        s = '%s\n%s' % (s, '%19s: %-23s -- %s' % ('groundingline', '[%s %s]' % ('1x1', obj.groundingline.__class__.__name__), 'parameters for groundingline solution'))
+        s = '%s\n%s' % (s, '%19s: %-23s -- %s' % ('hydrology', '[%s %s]' % ('1x1', obj.hydrology.__class__.__name__), 'parameters for hydrology solution'))
+        s = '%s\n%s' % (s, '%19s: %-23s -- %s' % ('masstransport', '[%s %s]' % ('1x1', obj.masstransport.__class__.__name__), 'parameters for masstransport solution'))
+        s = '%s\n%s' % (s, '%19s: %-23s -- %s' % ('thermal', '[%s %s]' % ('1x1', obj.thermal.__class__.__name__), 'parameters for thermal solution'))
+        s = '%s\n%s' % (s, '%19s: %-23s -- %s' % ('steadystate', '[%s %s]' % ('1x1', obj.steadystate.__class__.__name__), 'parameters for steadystate solution'))
+        s = '%s\n%s' % (s, '%19s: %-23s -- %s' % ('transient', '[%s %s]' % ('1x1', obj.transient.__class__.__name__), 'parameters for transient solution'))
+        s = '%s\n%s' % (s, '%19s: %-23s -- %s' % ('levelset', '[%s %s]' % ('1x1', obj.levelset.__class__.__name__), 'parameters for moving boundaries (level-set method)'))
+        s = '%s\n%s' % (s, '%19s: %-23s -- %s' % ('calving', '[%s %s]' % ('1x1', obj.calving.__class__.__name__), 'parameters for calving'))
+        s = '%s\n%s' % (s, '%19s: %-23s -- %s' % ('frontalforcings', '[%s %s]' % ('1x1', obj.frontalforcings.__class__.__name__), 'parameters for frontalforcings'))
+        s = '%s\n%s' % (s, '%19s: %-23s -- %s' % ('esa', '[%s %s]' % ('1x1', obj.esa.__class__.__name__), 'parameters for elastic adjustment solution'))
+        s = '%s\n%s' % (s, '%19s: %-23s -- %s' % ('sampling', '[%s %s]' % ('1x1', obj.sampling.__class__.__name__), 'parameters for stochastic sampler'))
+        s = '%s\n%s' % (s, '%19s: %-23s -- %s' % ('love', '[%s %s]' % ('1x1', obj.love.__class__.__name__), 'parameters for love solution'))
+        s = '%s\n%s' % (s, '%19s: %-23s -- %s' % ('autodiff', '[%s %s]' % ('1x1', obj.autodiff.__class__.__name__), 'automatic differentiation parameters'))
+        s = '%s\n%s' % (s, '%19s: %-23s -- %s' % ('inversion', '[%s %s]' % ('1x1', obj.inversion.__class__.__name__), 'parameters for inverse methods'))
+        s = '%s\n%s' % (s, '%19s: %-23s -- %s' % ('qmu', '[%s %s]' % ('1x1', obj.qmu.__class__.__name__), 'Dakota properties'))
+        s = '%s\n%s' % (s, '%19s: %-23s -- %s' % ('amr', '[%s %s]' % ('1x1', obj.amr.__class__.__name__), 'adaptive mesh refinement properties'))
+        s = '%s\n%s' % (s, '%19s: %-23s -- %s' % ('outputdefinition', '[%s %s]' % ('1x1', obj.outputdefinition.__class__.__name__), 'output definition'))
+        s = '%s\n%s' % (s, '%19s: %-23s -- %s' % ('results', '[%s %s]' % ('1x1', obj.results.__class__.__name__), 'model results'))
+        s = '%s\n%s' % (s, '%19s: %-23s -- %s' % ('radaroverlay', '[%s %s]' % ('1x1', obj.radaroverlay.__class__.__name__), 'radar image for plot overlay'))
+        s = '%s\n%s' % (s, '%19s: %-23s -- %s' % ('miscellaneous', '[%s %s]' % ('1x1', obj.miscellaneous.__class__.__name__), 'miscellaneous fields'))
+        s = '%s\n%s' % (s, '%19s: %-23s -- %s' % ('stochasticforcing', '[%s %s]' % ('1x1', obj.stochasticforcing.__class__.__name__), 'stochasticity applied to model forcings'))
         return s
-    #}}}
+    # }}}
 
     def properties(self):  #{{{
@@ -216,4 +216,5 @@
             'groundingline',
             'hydrology',
+            'debris',
             'masstransport',
             'thermal',
@@ -237,5 +238,5 @@
             'stochasticforcing'
         ]
-    #}}}
+    # }}}
 
     def setdefaultparameters(self, planet):  #{{{
@@ -264,4 +265,5 @@
         self.stressbalance = stressbalance()
         self.hydrology = hydrologyshreve()
+        self.debris = debris()
         self.masstransport = masstransport()
         self.thermal = thermal()
@@ -284,5 +286,5 @@
         self.private = private()
         self.stochasticforcing = stochasticforcing()
-    #}}}
+    # }}}
 
     def checkmessage(self, string):  #{{{
@@ -290,5 +292,5 @@
         self.private.isconsistent = False
         return self
-    #}}}
+    # }}}
     #@staticmethod
 
@@ -570,5 +572,5 @@
 
         return md2
-    #}}}
+    # }}}
 
     def extrude(md, *args):  #{{{
@@ -597,5 +599,5 @@
         #some checks on list of arguments
         if len(args) > 3 or len(args) < 1:
-            raise RuntimeError("extrude error message")
+            raise RuntimeError('extrude error message')
 
         #Extrude the mesh
@@ -603,5 +605,5 @@
             clist = args[0]
             if any(clist < 0) or any(clist > 1):
-                raise TypeError("extrusioncoefficients must be between 0 and 1")
+                raise TypeError('extrusioncoefficients must be between 0 and 1')
             clist.extend([0., 1.])
             clist.sort()
@@ -611,5 +613,5 @@
         elif len(args) == 2:  #one polynomial law
             if args[1] <= 0:
-                raise TypeError("extrusionexponent must be >= 0")
+                raise TypeError('extrusionexponent must be >= 0')
             numlayers = args[0]
             extrusionlist = (np.arange(0., float(numlayers - 1) + 1., 1.) / float(numlayers - 1))**args[1]
@@ -621,5 +623,5 @@
 
             if args[1] <= 0 or args[2] <= 0:
-                raise TypeError("lower and upper extrusionexponents must be >= 0")
+                raise TypeError('lower and upper extrusionexponents must be >= 0')
 
             lowerextrusionlist = (np.arange(0., 1. + 2. / float(numlayers - 1), 2. / float(numlayers - 1)))**lowerexp / 2.
@@ -628,9 +630,9 @@
 
         if numlayers < 2:
-            raise TypeError("number of layers should be at least 2")
+            raise TypeError('number of layers should be at least 2')
         if md.mesh.__class__.__name__ == 'mesh3dprisms':
-            raise TypeError("Cannot extrude a 3d mesh (extrude cannot be called more than once)")
-
-        #Initialize with the 2d mesh
+            raise TypeError('Cannot extrude a 3d mesh (extrude cannot be called more than once)')
+
+        #Initialize with 2d mesh
         mesh2d = md.mesh
         md.mesh = mesh3dprisms()
@@ -732,6 +734,8 @@
         md.frontalforcings.extrude(md)
         md.hydrology.extrude(md)
+        md.debris.extrude(md)
         md.solidearth.extrude(md)
         md.dsl.extrude(md)
+        md.stochasticforcing.extrude(md)
 
         #connectivity
@@ -757,5 +761,5 @@
 
         return md
-    #}}}
+    # }}}
 
     def collapse(md):  #{{{
@@ -773,5 +777,5 @@
         # Check that the model is really a 3d model
         if md.mesh.elementtype() != 'Penta':
-            raise Exception("collapse error message: only a 3d mesh can be collapsed")
+            raise Exception('collapse error message: only a 3d mesh can be collapsed')
 
         # Start with changing all the fields from the 3d mesh
@@ -850,4 +854,6 @@
         if not np.isnan(md.initialization.watercolumn).all():
             md.initialization.watercolumn = project2d(md, md.initialization.watercolumn, 1)
+        if not np.isnan(md.initialization.debris).all():
+            md.initialization.debris = project2d(md, md.initialization.debris, 1)
 
         # elementstype
@@ -902,4 +908,10 @@
         md.materials.rheology_n = project2d(md, md.materials.rheology_n, 1)
 
+        # dsl
+        if np.size(md.dsl.sea_surface_height_above_geoid) > 1:
+            md.dsl.sea_surface_height_above_geoid = project2d(md, md.dsl.sea_surface_height_above_geoid, 1)
+        if np.size(md.dsl.sea_water_pressure_at_sea_floor) > 1:
+            md.dsl.sea_water_pressure_at_sea_floor = project2d(md, md.dsl.sea_water_pressure_at_sea_floor, 1)
+
         # Damage
         if md.damage.isdamage:
@@ -957,5 +969,5 @@
                                     setattr(fieldr, solutionsubfield, project2d(md, subfield, 1))
 
-        # Initialize he 2d mesh
+        # Initialize 2d mesh
         mesh = mesh2d()
         mesh.x = md.mesh.x2d
@@ -985,3 +997,3 @@
 
         return md
-    #}}}
+    # }}}
Index: /issm/trunk/src/m/classes/modellist.m
===================================================================
--- /issm/trunk/src/m/classes/modellist.m	(revision 28012)
+++ /issm/trunk/src/m/classes/modellist.m	(revision 28013)
@@ -298,5 +298,5 @@
 %      BuildMultipleQueueingScript(executionpath,codepath)
 
-disp('building queueing script');
+disp('building queuing script');
 
 %First try and figure out if there is a special script for this particular cluster
@@ -363,12 +363,12 @@
 end% }}}
 function md=LaunchMultipleQueueJobgemini(cluster,name,executionpath)% {{{
-%LAUNCHMULTIPLEQUEUEJOBGEMINI - Launch multiple queueing script on Gemini cluster
+%LAUNCHMULTIPLEQUEUEJOBGEMINI - Launch multiple queuing script on Gemini cluster
 %
 %   Usage:
 %      LaunchMultipleQueueJobgemini(cluster,name,executionpath)
 
-%first, check we have the binary file and the queueing script
+%first, check we have the binary file and the queuing script
 if ~exist([ name '.queue'],'file'),
-	error('LaunchMultipleQueueJobgemini error message: queueing script issing, cannot go forward');
+	error('LaunchMultipleQueueJobgemini error message: queuing script issing, cannot go forward');
 end
 
@@ -378,5 +378,5 @@
 
 %upload both files to cluster
-disp('uploading input file,  queueing script and variables script');
+disp('uploading input file,  queuing script and variables script');
 eval(['!scp ModelList.tar.gz ' name '.queue '  cluster ':' executionpath]);
 
Index: /issm/trunk/src/m/classes/nodalvalue.py
===================================================================
--- /issm/trunk/src/m/classes/nodalvalue.py	(revision 28012)
+++ /issm/trunk/src/m/classes/nodalvalue.py	(revision 28013)
@@ -33,5 +33,5 @@
         self.model_string = options.getfieldvalue('model_string', '')
         self.node = options.getfieldvalue('node', '')
-    #}}}
+    # }}}
 
     def __repr__(self):  # {{{
@@ -42,9 +42,9 @@
         s += '{}\n'.format(fielddisplay(self, 'node', 'vertex index at which we retrieve the value'))
         return s
-    #}}}
+    # }}}
 
     def setdefaultparameters(self):  # {{{
         return self
-    #}}}
+    # }}}
 
     def checkconsistency(self, md, solution, analyses):  # {{{
@@ -57,5 +57,5 @@
         md = checkfield(md, 'fieldname', 'self.node', 'field', self.node, 'values', range(md.mesh.numberofvertices))
         return md
-    #}}}
+    # }}}
 
     def marshall(self, prefix, md, fid):  #{{{
@@ -64,3 +64,3 @@
         WriteData(fid, prefix, 'data', self.model_string, 'name', 'md.nodalvalue.model_enum', 'format', 'String')
         WriteData(fid, prefix, 'data', self.node, 'name', 'md.nodalvalue.node', 'format', 'Integer')
-    #}}}
+    # }}}
Index: /issm/trunk/src/m/classes/offlinesolidearthsolution.py
===================================================================
--- /issm/trunk/src/m/classes/offlinesolidearthsolution.py	(revision 28012)
+++ /issm/trunk/src/m/classes/offlinesolidearthsolution.py	(revision 28013)
@@ -13,5 +13,5 @@
     """
 
-    def __init__(self, *args): #{{{
+    def __init__(self, *args):  # {{{
         self.displacementeast = None
         self.displacementnorth = None
@@ -23,7 +23,7 @@
         else:
             raise RuntimeError('constructor not supported')
-    #}}}
+    # }}}
 
-    def __repr__(self): #{{{
+    def __repr__(self):  # {{{
         s = '         units for time series is (yr)\n       external: offlinesolidearth solution\n'
         s += '{}\n'.format(fielddisplay(self, 'displacementeast', 'solid-Earth Eastwards bedrock displacement series (m)'))
@@ -33,14 +33,14 @@
 
         return s
-    #}}}
+    # }}}
 
-    def setdefaultparameters(self): #{{{
+    def setdefaultparameters(self):  # {{{
         self.displacementeast = []
         self.displacementnorth = []
         self.displacementup = []
         self.geoid = []
-    #}}}
+    # }}}
 
-    def checkconsistency(self, md, solution, analyses): #{{{
+    def checkconsistency(self, md, solution, analyses):  # {{{
         if ('SealevelchangeAnalysis' not in analyses) or ((solution=='TransientSolution') and (md.solidearth.settings.isgrd==1)): 
             print('offlinesolidearthsolution checkconsistency error message: trying to run GRD patterns while supplying an offline solution for those patterns!')
@@ -50,7 +50,7 @@
         md = checkfield(md, 'fieldname', 'solidearth.external.displacementup', 'Inf', 1, 'timeseries', 1)
         md = checkfield(md, 'fieldname', 'solidearth.external.geoid', 'Inf', 1, 'timeseries', 1)
-    #}}}
+    # }}}
 
-    def marshall(self, prefix, md, fid): #{{{
+    def marshall(self, prefix, md, fid):  # {{{
         yts = md.constants.yts
 
@@ -79,7 +79,7 @@
         WriteData(fid, prefix, 'object', self, 'fieldname', 'displacementnorth', 'data', displacementnorth_rate,'format', 'DoubleMat', 'name', 'md.solidearth.external.displacementnorth', 'mattype', 1, 'scale', 1 / yts,'timeserieslength', md.mesh.numberofvertices + 1, 'yts', yts);
         WriteData(fid, prefix, 'object', self, 'fieldname', 'geoid', 'data', geoid_rate,'format', 'DoubleMat', 'name', 'md.solidearth.external.geoid', 'mattype', 1, 'scale', 1 / yts,'timeserieslength', md.mesh.numberofvertices + 1, 'yts', yts);
-    #}}}
+    # }}}
 
-    def extrude(self, md): #{{{
+    def extrude(self, md):  # {{{
         return self
-    #}}}
+    # }}}
Index: /issm/trunk/src/m/classes/organizer.py
===================================================================
--- /issm/trunk/src/m/classes/organizer.py	(revision 28012)
+++ /issm/trunk/src/m/classes/organizer.py	(revision 28013)
@@ -72,5 +72,5 @@
                 raise TypeError("trunkprefix should not have any white space")
             self.trunkprefix = trunkprefix
-    #}}}
+    # }}}
 
     def __repr__(self):  # {{{
@@ -83,5 +83,5 @@
             for step in self.steps:
                 s += "%s\n" % "   step  #%2i: '%s'", step['id'], step['string']
-    #}}}
+    # }}}
 
     def load(self, string):  # {{{
@@ -100,5 +100,5 @@
 
         return md
-    #}}}
+    # }}}
 
     def loadmodel(self, string):  # {{{
@@ -128,5 +128,5 @@
         else:
             raise IOError("Could not find '%s'" % path1)
-    #}}}
+    # }}}
 
     def perform(self, string):  # {{{
@@ -165,5 +165,5 @@
 
         return bool
-    #}}}
+    # }}}
 
     def savemodel(self, md, name='default'):  # {{{
@@ -188,3 +188,3 @@
     #save model
         savevars(name, 'md', md)
-    #}}}
+    # }}}
Index: /issm/trunk/src/m/classes/outputdefinition.py
===================================================================
--- /issm/trunk/src/m/classes/outputdefinition.py	(revision 28012)
+++ /issm/trunk/src/m/classes/outputdefinition.py	(revision 28013)
@@ -15,5 +15,5 @@
     def __init__(self):  # {{{
         self.definitions = []
-    #}}}
+    # }}}
 
     def __repr__(self):  # {{{
@@ -23,5 +23,5 @@
 
         return string
-    #}}}
+    # }}}
 
     def extrude(self, md):  # {{{
@@ -30,9 +30,9 @@
 
         return self
-    #}}}
+    # }}}
 
     def setdefaultparameters(self):  # {{{
         return self
-    #}}}
+    # }}}
 
     def checkconsistency(self, md, solution, analyses):  # {{{
Index: /issm/trunk/src/m/classes/pairoptions.m
===================================================================
--- /issm/trunk/src/m/classes/pairoptions.m	(revision 28012)
+++ /issm/trunk/src/m/classes/pairoptions.m	(revision 28013)
@@ -5,5 +5,5 @@
 %      pairoptions=pairoptions('module',true,'solver',false);
 
-classdef pairoptions < handle
+classdef pairoptions < matlab.mixin.Copyable
 	properties (SetAccess = private,GetAccess = private) 
 		functionname = '';
Index: /issm/trunk/src/m/classes/pairoptions.py
===================================================================
--- /issm/trunk/src/m/classes/pairoptions.py	(revision 28012)
+++ /issm/trunk/src/m/classes/pairoptions.py	(revision 28013)
@@ -99,5 +99,5 @@
     # }}}
 
-    def displayunused(self): #{{{
+    def displayunused(self):  # {{{
         """DISPLAYUNUSED - display unused options
         """
Index: /issm/trunk/src/m/classes/plotoptions.py
===================================================================
--- /issm/trunk/src/m/classes/plotoptions.py	(revision 28012)
+++ /issm/trunk/src/m/classes/plotoptions.py	(revision 28013)
@@ -18,5 +18,5 @@
 
         self.buildlist(*arg)
-    #}}}
+    # }}}
 
     def __repr__(self):  #{{{
@@ -32,5 +32,5 @@
             s += "    list: empty\n"
         return s
-    #}}}
+    # }}}
 
     def buildlist(self, *arg):  #{{{
@@ -125,3 +125,3 @@
                 if j + 1 > numberofplots:
                     print(("WARNING: too many instances of '%s' in options" % rawlist[i][0]))
-    #}}}
+    # }}}
Index: /issm/trunk/src/m/classes/private.py
===================================================================
--- /issm/trunk/src/m/classes/private.py	(revision 28012)
+++ /issm/trunk/src/m/classes/private.py	(revision 28013)
@@ -20,5 +20,5 @@
         self.setdefaultparameters()
 
-    #}}}
+    # }}}
 
     def __repr__(self):  # {{{
@@ -30,9 +30,9 @@
         string = "%s\n%s" % (string, fielddisplay(self, 'solution', 'type of solution launched'))
         return string
-    #}}}
+    # }}}
 
     def setdefaultparameters(self):  # {{{
         return self
-    #}}}
+    # }}}
 
     def checkconsistency(self, md, solution, analyses):  # {{{
Index: /issm/trunk/src/m/classes/qmu.py
===================================================================
--- /issm/trunk/src/m/classes/qmu.py	(revision 28012)
+++ /issm/trunk/src/m/classes/qmu.py	(revision 28013)
@@ -44,5 +44,5 @@
 
         self.setdefaultparameters()
-    #}}}
+    # }}}
     def __repr__(self):  # {{{
         s = '   qmu parameters:\n'
@@ -128,9 +128,9 @@
     def extrude(self, md):  # {{{
         return self
-    #}}}
+    # }}}
 
     def setdefaultparameters(self):  # {{{
         return self
-    #}}}
+    # }}}
 
     def checkconsistency(self, md, solution, analyses):  # {{{
Index: /issm/trunk/src/m/classes/qmu/histogram_bin_uncertain.py
===================================================================
--- /issm/trunk/src/m/classes/qmu/histogram_bin_uncertain.py	(revision 28012)
+++ /issm/trunk/src/m/classes/qmu/histogram_bin_uncertain.py	(revision 28013)
@@ -27,13 +27,13 @@
     '''
 
-    def __init__(self): #{{{
+    def __init__(self):  # {{{
         self.descriptor = ''
         self.pairs_per_variable = []
         self.abscissas = []
         self.counts = []
-    #}}}
-
-    @staticmethod
-    def histogram_bin_uncertain(*args): #{{{
+    # }}}
+
+    @staticmethod
+    def histogram_bin_uncertain(*args):  # {{{
         nargin = len(args)
 
@@ -69,5 +69,5 @@
 
     @staticmethod
-    def __repr__(hbu): #{{{
+    def __repr__(hbu):  # {{{
         s = ""
         for i in range(len(hbu)):
@@ -79,9 +79,9 @@
 
         return s
-    #}}}
-
-    def checkconsistency(self, md, solution, analyses): #{{{
+    # }}}
+
+    def checkconsistency(self, md, solution, analyses):  # {{{
         return
-    #}}}
+    # }}}
 
     #virtual functions needed by qmu processing algorithms
@@ -89,5 +89,5 @@
 
     @staticmethod
-    def prop_desc(hbu, dstr): #{{{
+    def prop_desc(hbu, dstr):  # {{{
         desc = ['' for i in range(np.size(hbu))]
         for i in range(np.size(hbu)):
@@ -102,37 +102,37 @@
 
         return desc
-    #}}}
-
-    @staticmethod
-    def prop_mean(hbu): #{{{
+    # }}}
+
+    @staticmethod
+    def prop_mean(hbu):  # {{{
         mean = np.zeros(np.size(hbu))
         for i in range(np.size(hbu)):
             mean[i] = hbu[i].mean
         return mean
-    #}}}
-
-    @staticmethod
-    def prop_stddev(hbu): #{{{
+    # }}}
+
+    @staticmethod
+    def prop_stddev(hbu):  # {{{
         stddev = np.zeros(np.size(hbu))
         for i in range(np.size(hbu)):
             stddev[i] = hbu[i].stddev
         return stddev
-    #}}}
-
-    @staticmethod
-    def prop_lower(hbu): #{{{
+    # }}}
+
+    @staticmethod
+    def prop_lower(hbu):  # {{{
         lower = []
         return
-    #}}}
-
-    @staticmethod
-    def prop_upper(hbu): #{{{
+    # }}}
+
+    @staticmethod
+    def prop_upper(hbu):  # {{{
         upper = []
         return upper
-    #}}}
+    # }}}
 
     #default
     @staticmethod
-    def prop_abscissas(hbu): #{{{
+    def prop_abscissas(hbu):  # {{{
         abscissas = []
         for i in range(len(hbu)):
@@ -140,8 +140,8 @@
         abscissas = allequal(abscissas, -np.inf)
         return abscissas
-    #}}}
-
-    @staticmethod
-    def prop_pairs_per_variable(hbu): #{{{
+    # }}}
+
+    @staticmethod
+    def prop_pairs_per_variable(hbu):  # {{{
         pairs_per_variable = np.zeros((1, len(hbu)))
         for i in range(len(hbu)):
@@ -149,8 +149,8 @@
         abscissas = allequal(pairs_per_variable, -np.inf)
         return pairs_per_variable
-    #}}}
-
-    @staticmethod
-    def prop_counts(hbu): #{{{
+    # }}}
+
+    @staticmethod
+    def prop_counts(hbu):  # {{{
         counts = []
         for i in range(len(hbu)):
@@ -158,37 +158,37 @@
         counts = allequal(counts, -np.inf)
         return counts
-    #}}}
-
-    @staticmethod
-    def prop_initpt(hbu): #{{{
+    # }}}
+
+    @staticmethod
+    def prop_initpt(hbu):  # {{{
         initpt = []
         return initpt
-    #}}}
-
-    @staticmethod
-    def prop_initst(hbu): #{{{
+    # }}}
+
+    @staticmethod
+    def prop_initst(hbu):  # {{{
         inist = []
         return inist
-    #}}}
-
-    @staticmethod
-    def prop_stype(hbu): #{{{
+    # }}}
+
+    @staticmethod
+    def prop_stype(hbu):  # {{{
         stype = []
         return stype
-    #}}}
-
-    @staticmethod
-    def prop_scale(hbu): #{{{
+    # }}}
+
+    @staticmethod
+    def prop_scale(hbu):  # {{{
         scale = []
         return scale
-    #}}}
+    # }}}
 
     #new methods:
-    def isscaled(self): #{{{
+    def isscaled(self):  # {{{
         if strncmp(self.descriptor, 'scaled_', 7):
             return True
         else:
             return False
-    #}}}
+    # }}}
 
     @staticmethod
@@ -204,3 +204,3 @@
         if len(hbu) > 0:
             vlist_write(fidi, 'histogram_bin_uncertain', 'hbu', hbu)
-    #}}}
+    # }}}
Index: /issm/trunk/src/m/classes/qmu/normal_uncertain.py
===================================================================
--- /issm/trunk/src/m/classes/qmu/normal_uncertain.py	(revision 28012)
+++ /issm/trunk/src/m/classes/qmu/normal_uncertain.py	(revision 28013)
@@ -39,5 +39,5 @@
     """
 
-    def __init__(self): #{{{
+    def __init__(self):  # {{{
         self.descriptor = ''
         self.mean       = np.nan
@@ -45,8 +45,8 @@
         self.partition  = []
         self.nsteps     = 0
-    #}}}
-
-    @staticmethod
-    def normal_uncertain(*args): #{{{
+    # }}}
+
+    @staticmethod
+    def normal_uncertain(*args):  # {{{
         nargin = len(args)
 
@@ -91,7 +91,7 @@
 
         return [nuv] # Always return a list, so we have something akin to a MATLAB single row matrix
-    #}}}
-
-    def __repr__(self): #{{{
+    # }}}
+
+    def __repr__(self):  # {{{
         string = '   normal uncertain variable: '
         string = "%s\n%s" % (string, fielddisplay(self, 'descriptor', 'name tag'))
@@ -103,14 +103,14 @@
 
         return string
-    #}}}
-
-    def __len__(self): #{{{
+    # }}}
+
+    def __len__(self):  # {{{
         if type(self.mean) in [list, np.ndarray]:
             return len(self.mean)
         else:
             return 1
-    #}}}
-
-    def checkconsistency(self, md, solution, analyses): #{{{
+    # }}}
+
+    def checkconsistency(self, md, solution, analyses):  # {{{
         md = checkfield(md, 'field', self.mean, 'fieldname', 'normal_uncertain.mean', 'NaN', 1, 'Inf', 1, '>=', 0)
         md = checkfield(md, 'field', self.stddev, 'fieldname', 'normal_uncertain.stddev', 'NaN', 1, 'Inf', 1, '>=', 0)
@@ -139,5 +139,5 @@
             if partmax > nmax:
                 raise Exception("normal_uncertain error message: partition vector's values cannot go over the number of vertices or elements")
-    #}}}
+    # }}}
 
     #virtual functions needed by qmu processing algorithms
@@ -145,5 +145,5 @@
 
     @staticmethod
-    def prop_desc(nuv, dstr): #{{{
+    def prop_desc(nuv, dstr):  # {{{
         desc = ['' for i in range(np.size(nuv))]
         for i in range(np.size(nuv)):
@@ -158,90 +158,90 @@
 
         return desc
-    #}}}
-
-    @staticmethod
-    def prop_mean(nuv): #{{{
+    # }}}
+
+    @staticmethod
+    def prop_mean(nuv):  # {{{
         mean = np.zeros(np.size(nuv))
         for i in range(np.size(nuv)):
             mean[i] = nuv[i].mean
         return mean
-    #}}}
-
-    @staticmethod
-    def prop_stddev(nuv): #{{{
+    # }}}
+
+    @staticmethod
+    def prop_stddev(nuv):  # {{{
         stddev = np.zeros(np.size(nuv))
         for i in range(np.size(nuv)):
             stddev[i] = nuv[i].stddev
         return stddev
-    #}}}
-
-    @staticmethod
-    def prop_lower(nuv): #{{{
+    # }}}
+
+    @staticmethod
+    def prop_lower(nuv):  # {{{
         lower = []
         return lower
-    #}}}
-
-    @staticmethod
-    def prop_upper(nuv): #{{{
+    # }}}
+
+    @staticmethod
+    def prop_upper(nuv):  # {{{
         upper = []
         return upper
-    #}}}
+    # }}}
 
     #default
     @staticmethod
-    def prop_abscissas(hbu): #{{{
+    def prop_abscissas(hbu):  # {{{
         abscissas = []
         return abscissas
-    #}}}
-
-    @staticmethod
-    def prop_pairs_per_variable(hbu): #{{{
+    # }}}
+
+    @staticmethod
+    def prop_pairs_per_variable(hbu):  # {{{
         pairs_per_variable = []
         return pairs_per_variable
-    #}}}
-
-    @staticmethod
-    def prop_counts(hbu): #{{{
+    # }}}
+
+    @staticmethod
+    def prop_counts(hbu):  # {{{
         counts = []
         return counts
-    #}}}
-    @staticmethod
-    def prop_initpt(nuv): #{{{
+    # }}}
+    @staticmethod
+    def prop_initpt(nuv):  # {{{
         initpt = []
         return initpt
-    #}}}
-
-    @staticmethod
-    def prop_initst(nuv): #{{{
+    # }}}
+
+    @staticmethod
+    def prop_initst(nuv):  # {{{
         inist = []
         return inist
-    #}}}
-
-    @staticmethod
-    def prop_stype(nuv): #{{{
+    # }}}
+
+    @staticmethod
+    def prop_stype(nuv):  # {{{
         stype = []
         return stype
-    #}}}
-
-    @staticmethod
-    def prop_scale(nuv): #{{{
+    # }}}
+
+    @staticmethod
+    def prop_scale(nuv):  # {{{
         scale = []
         return scale
-    #}}}
+    # }}}
 
     #new methods:
-    def isdistributed(self): #{{{
+    def isdistributed(self):  # {{{
         if strncmp(self.descriptor, 'distributed_', 12):
             return True
         else:
             return False
-    #}}}
+    # }}}
     
-    def isscaled(self): #{{{
+    def isscaled(self):  # {{{
         if strncmp(self.descriptor, 'scaled_', 7):
             return True
         else:
             return False
-    #}}}
+    # }}}
 
     @staticmethod
@@ -257,4 +257,4 @@
         if len(nuv) > 0:
             vlist_write(fidi, 'normal_uncertain', 'nuv', nuv)
-    #}}}
-
+    # }}}
+
Index: /issm/trunk/src/m/classes/qmu/response_function.py
===================================================================
--- /issm/trunk/src/m/classes/qmu/response_function.py	(revision 28012)
+++ /issm/trunk/src/m/classes/qmu/response_function.py	(revision 28013)
@@ -103,5 +103,5 @@
         return [rf] # Always return a list, so we have something akin to a MATLAB single row matrix
 
-    def __repr__(rf): #{{{
+    def __repr__(rf):  # {{{
         # display the object
         string = 'class "response_function" object = \n'
@@ -116,12 +116,12 @@
 
         return string
-    #}}}
-
-    def __len__(self): #{{{
+    # }}}
+
+    def __len__(self):  # {{{
         return max(len(self.respl), len(self.probl), len(self.rell), len(self.grell))
-    #}}}
-
-    @staticmethod
-    def prop_desc(rf, dstr): #{{{
+    # }}}
+
+    @staticmethod
+    def prop_desc(rf, dstr):  # {{{
         desc = ['' for i in range(np.size(rf))]
         for i in range(np.size(rf)):
@@ -135,41 +135,41 @@
         desc = allempty(desc)
         return desc
-    #}}}
-
-    @staticmethod
-    def prop_stype(rf): #{{{
+    # }}}
+
+    @staticmethod
+    def prop_stype(rf):  # {{{
         stype = []
         return stype
-    #}}}
-
-    @staticmethod
-    def prop_scale(rf): #{{{
+    # }}}
+
+    @staticmethod
+    def prop_scale(rf):  # {{{
         scale = []
         return scale
-    #}}}
-
-    @staticmethod
-    def prop_weight(rf): #{{{
+    # }}}
+
+    @staticmethod
+    def prop_weight(rf):  # {{{
         weight = []
         return weight
-    #}}}
-
-    @staticmethod
-    def prop_lower(rf): #{{{
+    # }}}
+
+    @staticmethod
+    def prop_lower(rf):  # {{{
         lower = []
         return lower
-    #}}}
-
-    @staticmethod
-    def prop_upper(rf): #{{{
+    # }}}
+
+    @staticmethod
+    def prop_upper(rf):  # {{{
         upper = []
         return upper
-    #}}}
-
-    @staticmethod
-    def prop_target(rf): #{{{
+    # }}}
+
+    @staticmethod
+    def prop_target(rf):  # {{{
         target = []
         return target
-    #}}}
+    # }}}
 
     @staticmethod
@@ -191,10 +191,10 @@
 
     #new methods:
-    def isscaled(self): #{{{
+    def isscaled(self):  # {{{
         if strncmpi(self.descriptor, 'scaled_', 7):
             return True
         else:
             return False
-    #}}}
+    # }}}
 
     @staticmethod
Index: /issm/trunk/src/m/classes/qmu/uniform_uncertain.py
===================================================================
--- /issm/trunk/src/m/classes/qmu/uniform_uncertain.py	(revision 28012)
+++ /issm/trunk/src/m/classes/qmu/uniform_uncertain.py	(revision 28013)
@@ -39,5 +39,5 @@
             )
     '''
-    def __init__(self): #{{{
+    def __init__(self):  # {{{
         self.descriptor = ''
         self.lower      = -np.inf
@@ -45,8 +45,8 @@
         self.partition  = []
         self.nsteps     = 0
-    #}}}
-
-    @staticmethod
-    def uniform_uncertain(*args): #{{{
+    # }}}
+
+    @staticmethod
+    def uniform_uncertain(*args):  # {{{
         nargin = len(args)
 
@@ -91,7 +91,7 @@
 
         return [uuv] # Always return a list, so we have something akin to a MATLAB single row matrix
-    #}}}
-
-    def __repr__(self): #{{{
+    # }}}
+
+    def __repr__(self):  # {{{
         string = '   uniform uncertain variable: '
         string = "%s\n%s" % (string, fielddisplay(self, 'descriptor', 'name tag'))
@@ -103,14 +103,14 @@
 
         return string
-    #}}}
-
-    def __len__(self): #{{{
+    # }}}
+
+    def __len__(self):  # {{{
         if type(self.lower) in [list, np.ndarray]:
             return len(self.lower)
         else:
             return 1
-    #}}}
+    # }}}
     
-    def checkconsistency(self, md, solution, analyses): #{{{
+    def checkconsistency(self, md, solution, analyses):  # {{{
         md = checkfield(md, 'field', self.upper, 'fieldname', 'uniform_uncertain.upper', 'NaN', 1, 'Inf', 1, '>', self.lower, 'numel', len(self.lower))
         md = checkfield(md, 'field', self.lower, 'fieldname', 'uniform_uncertain.upper', 'NaN', 1, 'Inf', 1, '<', self.upper, 'numel', len(self.upper))
@@ -140,5 +140,5 @@
             if partmax > nmax:
                 raise Exception("uniform_uncertain error message: partition vector's values cannot go over the number of vertices or elements")
-    #}}}
+    # }}}
 
     #virtual functions needed by qmu processing algorithms:
@@ -146,5 +146,5 @@
 
     @staticmethod
-    def prop_desc(uuv, dstr): #{{{
+    def prop_desc(uuv, dstr):  # {{{
         desc = ['' for i in range(np.size(uuv))]
         for i in range(np.size(uuv)):
@@ -159,20 +159,20 @@
 
         return desc
-    #}}}
-
-    @staticmethod
-    def prop_stddev(uuv): #{{{
+    # }}}
+
+    @staticmethod
+    def prop_stddev(uuv):  # {{{
         stddev = []
         return stddev
-    #}}}
-
-    @staticmethod
-    def prop_mean(uuv): #{{{
+    # }}}
+
+    @staticmethod
+    def prop_mean(uuv):  # {{{
         mean = []
         return mean
-    #}}}
-
-    @staticmethod
-    def prop_lower(uuv): #{{{
+    # }}}
+
+    @staticmethod
+    def prop_lower(uuv):  # {{{
         lower = np.zeros(np.size(uuv))
         for i in range(np.size(uuv)):
@@ -182,8 +182,8 @@
 
         return lower
-    #}}}
-
-    @staticmethod
-    def prop_upper(uuv): #{{{
+    # }}}
+
+    @staticmethod
+    def prop_upper(uuv):  # {{{
         upper = np.zeros(np.size(uuv))
         for i in range(np.size(uuv)):
@@ -193,58 +193,58 @@
 
         return upper
-    #}}}
-
-    @staticmethod
-    def prop_abscissas(hbu): #{{{
+    # }}}
+
+    @staticmethod
+    def prop_abscissas(hbu):  # {{{
         abscissas = []
         return abscissas
-    #}}}
-
-    @staticmethod
-    def prop_pairs_per_variable(hbu): #{{{
+    # }}}
+
+    @staticmethod
+    def prop_pairs_per_variable(hbu):  # {{{
         pairs_per_variable = []
         return pairs_per_variable
-    #}}}
-
-    @staticmethod
-    def prop_counts(hbu): #{{{
+    # }}}
+
+    @staticmethod
+    def prop_counts(hbu):  # {{{
         counts = []
         return counts
-    #}}}
-
-    @staticmethod
-    def prop_initpt(uuv): #{{{
+    # }}}
+
+    @staticmethod
+    def prop_initpt(uuv):  # {{{
         initpt = []
         return initpt
-    #}}}
-
-    @staticmethod
-    def prop_initst(uuv): #{{{
+    # }}}
+
+    @staticmethod
+    def prop_initst(uuv):  # {{{
         initst = []
         return initst
-    #}}}
-
-    @staticmethod
-    def prop_stype(uuv): #{{{
+    # }}}
+
+    @staticmethod
+    def prop_stype(uuv):  # {{{
         stype = []
         return stype
-    #}}}
-
-    @staticmethod
-    def prop_scale(uuv): #{{{
+    # }}}
+
+    @staticmethod
+    def prop_scale(uuv):  # {{{
         scale = []
         return scale
-    #}}}
+    # }}}
 
     #new methods:
-    def isscaled(self): #{{{
+    def isscaled(self):  # {{{
         if strncmp(self.descriptor, 'scaled_', 7):
             return True
         else:
             return False
-    #}}}
-
-    @staticmethod
-    def dakota_write(fidi, dvar): #{{{
+    # }}}
+
+    @staticmethod
+    def dakota_write(fidi, dvar):  # {{{
         # possible namespace pollution, the above import seems not to work
         from vlist_write import vlist_write
@@ -257,3 +257,3 @@
         if len(uuv) > 0:
             vlist_write(fidi, 'uniform_uncertain', 'uuv', uuv)
-    #}}}
+    # }}}
Index: /issm/trunk/src/m/classes/qmustatistics.py
===================================================================
--- /issm/trunk/src/m/classes/qmustatistics.py	(revision 28012)
+++ /issm/trunk/src/m/classes/qmustatistics.py	(revision 28013)
@@ -44,5 +44,5 @@
         else:
             raise Exception('constructor not supported')
-    #}}}
+    # }}}
 
     def __repr__(self):  # {{{
@@ -56,5 +56,5 @@
             s += '{}\n'.format(self.method[i])
         return s
-    #}}}
+    # }}}
 
     def setdefaultparameters(self):  # {{{
@@ -63,5 +63,5 @@
         self.ndirectories = 50  # Number of output directories; should be < numcpus
         return self
-    #}}}
+    # }}}
 
     @staticmethod
@@ -91,12 +91,12 @@
                 if m['steps'][s] > md.mesh.numberofvertices:
                     raise Exception('qmustatistics consistency check error: qmu.statistics.method[{}][\'steps\'][{}] should be < md.mesh.numberofvertices!'.format(i, s))
-    #}}}
+    # }}}
 
-    def defaultoutputs(self, md): #{{{
+    def defaultoutputs(self, md):  # {{{
         outputs = []
         return outputs
-    #}}}
+    # }}}
 
-    def marshall(self, prefix, md, fid): #{{{
+    def marshall(self, prefix, md, fid):  # {{{
         if self.method[0]['name'] == 'None':
             WriteData(fid, prefix, 'name', 'md.qmu.statistics', 'data', 0, 'format', 'Boolean')
@@ -124,11 +124,11 @@
                 else:
                     raise Exception('qmustatistics marshall error message: unknown type ''{}'' for qmu.statistics.method[{}]'.format(m['name'], i))
-    #}}}
+    # }}}
 
-    def extrude(self, md): #{{{
+    def extrude(self, md):  # {{{
         return self
-    #}}}
+    # }}}
 
-    def addmethod(self, *args): #{{{
+    def addmethod(self, *args):  # {{{
         """ADDMETHOD - Add new, empty method or passed dict to self.method
         """
@@ -140,3 +140,3 @@
         else:
             raise Exception('Number of args should be 0 (appends empty dict to methods member) or 1 (appends passed dict to methods member)')
-    #}}}
+    # }}}
Index: /issm/trunk/src/m/classes/radaroverlay.py
===================================================================
--- /issm/trunk/src/m/classes/radaroverlay.py	(revision 28012)
+++ /issm/trunk/src/m/classes/radaroverlay.py	(revision 28013)
@@ -18,5 +18,5 @@
         self.setdefaultparameters()
 
-    #}}}
+    # }}}
 
     def __repr__(self):  # {{{
@@ -26,7 +26,7 @@
         string = "%s\n%s" % (string, fielddisplay(self, 'y', 'corresponding y coordinates [m]'))
         return string
-    #}}}
+    # }}}
 
     def setdefaultparameters(self):  # {{{
         return self
-    #}}}
+    # }}}
Index: /issm/trunk/src/m/classes/regionaloutput.py
===================================================================
--- /issm/trunk/src/m/classes/regionaloutput.py	(revision 28012)
+++ /issm/trunk/src/m/classes/regionaloutput.py	(revision 28013)
@@ -50,5 +50,5 @@
         #     raise IOError('regionaloutput error message: ''mask'' field or ''maskexpstring'' and ''model'' fields should be defined!')
 
-    #}}}
+    # }}}
 
     def __repr__(self):  # {{{
@@ -60,14 +60,14 @@
         string = "%s\n%s" % (string, fielddisplay(self, 'maskexpstring', 'name of Argus file that can be passed in to define the regional mask'))
         return string
-    #}}}
+    # }}}
 
     def extrude(self, md):  # {{{
         self.mask = project3d(md, 'vector', self.mask, 'type', 'node')
         return self
-    #}}}
+    # }}}
 
     def setdefaultparameters(self):  # {{{
         return self
-    #}}}
+    # }}}
 
     def setmaskfromexp(self, md):  # {{{
Index: /issm/trunk/src/m/classes/results.py
===================================================================
--- /issm/trunk/src/m/classes/results.py	(revision 28012)
+++ /issm/trunk/src/m/classes/results.py	(revision 28013)
@@ -15,5 +15,5 @@
     def __init__(self):  #{{{
         pass
-    #}}}
+    # }}}
 
     def __repr__(self):  #{{{
@@ -30,19 +30,19 @@
 
         return s
-    #}}}
+    # }}}
 
     def setdefaultparameters(self):  #{{{
         #do nothing
         return self
-    #}}}
-
-    def checkconsistency(self, md, solution, analyses):  #{{{
-        return md
-    #}}}
-
-    def marshall(self, prefix, md, fid):  #{{{
-        pass
-    #}}}
-#}}}
+    # }}}
+
+    def checkconsistency(self, md, solution, analyses):  #{{{
+        return md
+    # }}}
+
+    def marshall(self, prefix, md, fid):  #{{{
+        pass
+    # }}}
+# }}}
 
 
@@ -59,5 +59,5 @@
     def __init__(self):  #{{{
         pass
-    #}}}
+    # }}}
 
     def __repr__(self):  #{{{
@@ -71,23 +71,23 @@
             s += '\n'
         return s
-    #}}}
+    # }}}
 
     def __len__(self):  #{{{
         return len(self.__dict__.keys())
-    #}}}
+    # }}}
 
     def setdefaultparameters(self):  #{{{
         #do nothing
         return self
-    #}}}
-
-    def checkconsistency(self, md, solution, analyses):  #{{{
-        return md
-    #}}}
-
-    def marshall(self, prefix, md, fid):  #{{{
-        pass
-    #}}}
-#}}}
+    # }}}
+
+    def checkconsistency(self, md, solution, analyses):  #{{{
+        return md
+    # }}}
+
+    def marshall(self, prefix, md, fid):  #{{{
+        pass
+    # }}}
+# }}}
 
 
@@ -117,9 +117,9 @@
         else:
             self.steps = [solutionstep()]
-    #}}}
+    # }}}
 
     def __deepcopy__(self, memo):  #{{{
         return solution(deepcopy(self.steps, memo))
-    #}}}
+    # }}}
 
     def __repr__(self):  #{{{
@@ -136,9 +136,9 @@
 
         return s
-    #}}}
+    # }}}
 
     def __len__(self):  #{{{
         return len(self.steps)
-    #}}}
+    # }}}
 
     def __getattr__(self, key):  #{{{
@@ -151,5 +151,5 @@
         # else:
         #     raise Exception('<results>.<solution> error: Currently, can only get a field if we are not working with a transient solution.')
-    #}}}
+    # }}}
 
     def __getitem__(self, index):  #{{{
@@ -161,18 +161,18 @@
         else:
             raise Exception('<results>.<solution>: either request a specific result by index or make sure that there is only a single result for this solution (cannot be a transient solution)')
-    #}}}
-
-    def setdefaultparameters(self):  #{{{
-        return self
-    #}}}
-
-    def checkconsistency(self, md, solution, analyses):  #{{{
-        return md
-    #}}}
-
-    def marshall(self, prefix, md, fid):  #{{{
-        pass
-    #}}}
-#}}}
+    # }}}
+
+    def setdefaultparameters(self):  #{{{
+        return self
+    # }}}
+
+    def checkconsistency(self, md, solution, analyses):  #{{{
+        return md
+    # }}}
+
+    def marshall(self, prefix, md, fid):  #{{{
+        pass
+    # }}}
+# }}}
 
 
@@ -186,5 +186,5 @@
     def __init__(self, *args):  #{{{
         pass
-    #}}}
+    # }}}
 
     def __repr__(self):  #{{{
@@ -195,9 +195,9 @@
 
         return s
-    #}}}
+    # }}}
 
     def getfieldnames(self):  #{{{
         return self.__dict__.keys()
-    #}}}
+    # }}}
 
     def getlongestfieldname(self):  #{{{
@@ -209,16 +209,16 @@
 
         return maxlength
-    #}}}
-
-    def setdefaultparameters(self):  #{{{
-        return self
-    #}}}
-
-    def checkconsistency(self, md, solution, analyses):  #{{{
-        return md
-    #}}}
-
-    def marshall(self, prefix, md, fid):  #{{{
-        pass
-    #}}}
-#}}}
+    # }}}
+
+    def setdefaultparameters(self):  #{{{
+        return self
+    # }}}
+
+    def checkconsistency(self, md, solution, analyses):  #{{{
+        return md
+    # }}}
+
+    def marshall(self, prefix, md, fid):  #{{{
+        pass
+    # }}}
+# }}}
Index: /issm/trunk/src/m/classes/rifts.py
===================================================================
--- /issm/trunk/src/m/classes/rifts.py	(revision 28012)
+++ /issm/trunk/src/m/classes/rifts.py	(revision 28013)
@@ -22,5 +22,5 @@
         self.setdefaultparameters()
 
-    #}}}
+    # }}}
 
     def __repr__(self):  # {{{
@@ -30,9 +30,9 @@
         string = "%s\n%s" % (string, fielddisplay(self, 'riftproperties', ''))
         return string
-    #}}}
+    # }}}
 
     def setdefaultparameters(self):  # {{{
         return self
-    #}}}
+    # }}}
 
     def checkconsistency(self, md, solution, analyses):  # {{{
Index: /issm/trunk/src/m/classes/rotational.py
===================================================================
--- /issm/trunk/src/m/classes/rotational.py	(revision 28012)
+++ /issm/trunk/src/m/classes/rotational.py	(revision 28013)
@@ -21,5 +21,5 @@
         else:
             raise Exception('constructor not supported')
-    #}}}
+    # }}}
 
     def __repr__(self):  # {{{
@@ -29,5 +29,5 @@
         s += '{}\n'.format(fielddisplay(self, 'angularvelocity', 'mean rotational velocity of earth [per second]'))
         return s
-    #}}}
+    # }}}
 
     def setdefaultparameters(self):  # {{{
@@ -39,5 +39,5 @@
         self.angularvelocity = 7.2921 * pow(10, -5) # [s^-1]
         return self
-    #}}}
+    # }}}
 
     def checkconsistency(self, md, solution, analyses):  # {{{
@@ -48,9 +48,9 @@
         md = checkfield(md, 'fieldname', 'solidearth.rotational.angularvelocity', 'NaN', 1, 'Inf', 1)
         return md
-    #}}}
+    # }}}
 
     def defaultoutputs(self, md):  #{{{
         return []
-    #}}}
+    # }}}
 
     def marshall(self, prefix, md, fid):  #{{{
@@ -58,7 +58,7 @@
         WriteData(fid, prefix, 'object', self, 'fieldname', 'polarmoi', 'name', 'md.solidearth.rotational.polarmoi', 'format', 'Double')
         WriteData(fid, prefix, 'object', self, 'fieldname', 'angularvelocity', 'name', 'md.solidearth.rotational.angularvelocity', 'format', 'Double')
-    #}}}
+    # }}}
 
     def extrude(self, md):  #{{{
         return self
-    #}}}
+    # }}}
Index: /issm/trunk/src/m/classes/sampling.m
===================================================================
--- /issm/trunk/src/m/classes/sampling.m	(revision 28012)
+++ /issm/trunk/src/m/classes/sampling.m	(revision 28013)
@@ -75,5 +75,5 @@
 			if(md.sampling.robin)
 				md = checkfield(md,'fieldname','sampling.beta','NaN',1,'Inf',1,'size',[md.mesh.numberofvertices 1],'>',0);
-            end
+			end
 			md = checkfield(md,'fieldname','sampling.alpha','NaN',1,'Inf',1,'numel',1,'>',0);
 			md = checkfield(md,'fieldname','sampling.seed','NaN',1,'Inf',1,'numel',1);
Index: /issm/trunk/src/m/classes/sampling.py
===================================================================
--- /issm/trunk/src/m/classes/sampling.py	(revision 28012)
+++ /issm/trunk/src/m/classes/sampling.py	(revision 28013)
@@ -10,5 +10,5 @@
 
 class sampling(object):
-    """SAMPLING class definition
+    """sampling class definition
 
     Usage:
@@ -16,9 +16,9 @@
     """
 
-    def __init__(self, *args): #{{{
+    def __init__(self, *args):  # {{{
         self.kappa = np.nan
         self.tau = 0
         self.beta = np.nan
-        self.phi = 0
+        self.phi = np.nan
         self.alpha = 0
         self.robin = 0
@@ -30,7 +30,7 @@
         else:
             raise RuntimeError('constructor not supported')
-    #}}}
+    # }}}
 
-    def __repr__(self): #{{{
+    def __repr__(self):  # {{{
         s = '   Sampling parameters::\n'
         s += '      Parameters of PDE operator (kappa^2 I-Laplacian)^(alpha/2)(tau):\n'
@@ -51,18 +51,13 @@
 
         return s
-    #}}}
+    # }}}
 
-    def setdefaultparameters(self): #{{{
-        # Scaling coefficient
-        self.tau = 1
+    def setdefaultparameters(self):  # {{{
 
         # Apply Robin boundary conditions
         self.robin = 0
 
-        # Temporal correlation factor
-        self.phi = 0
-
         # Exponent in fraction SPDE (default: 2, biLaplacian covariance operator)
-        self.alpha = 2
+        self.alpha = 2 # Default
 
         # Seed for pseudorandom number generator (default: -1, for random seed)
@@ -73,11 +68,11 @@
 
         return self
-    #}}}
+    # }}}
 
-    def defaultoutputs(self, md): #{{{
+    def defaultoutputs(self, md):  # {{{
         return []
-    #}}}
+    # }}}
 
-    def checkconsistency(self, md, solution, analyses): #{{{
+    def checkconsistency(self, md, solution, analyses):  # {{{
         if ('SamplingAnalysis' not in analyses):
             return md
@@ -89,5 +84,4 @@
             md = checkfield(md, 'fieldname', 'sampling.beta', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofvertices], '>', 0)
         end
-        md = checkfield(md, 'fieldname', 'sampling.phi', 'NaN', 1, 'Inf', 1, 'numel', 1, '>=', 0)
         md = checkfield(md, 'fieldname', 'sampling.alpha', 'NaN', 1, 'Inf', 1, 'numel', 1, '>', 0)
         md = checkfield(md, 'fieldname', 'sampling.seed', 'NaN', 1, 'Inf', 1, 'numel', 1)
@@ -95,11 +89,11 @@
 
         return md
-    #}}}
+    # }}}
 
-    def marshall(self, prefix, md, fid): #{{{
+    def marshall(self, prefix, md, fid):  # {{{
         WriteData(fid, prefix, 'object', self, 'fieldname', 'kappa', 'format', 'DoubleMat', 'mattype', 1)
-        WriteData(fid, prefix, 'object', self, 'fieldname', 'tau', 'format', 'Double')
+        WriteData(fid, prefix, 'object', self, 'fieldname', 'tau', 'format', 'DoubleMat', 'mattype', 1)
         WriteData(fid, prefix, 'object', self, 'fieldname', 'beta', 'format', 'DoubleMat', 'mattype', 1)
-        WriteData(fid, prefix, 'object', self, 'fieldname', 'phi', 'format', 'Double')
+        WriteData(fid, prefix, 'object', self, 'fieldname', 'phi', 'format', 'DoubleMat', 'mattype', 1)
         WriteData(fid, prefix, 'object', self, 'fieldname', 'alpha', 'format', 'Integer')
         WriteData(fid, prefix, 'object', self, 'fieldname', 'robin', 'format', 'Boolean')
@@ -113,7 +107,7 @@
             outputs = outputscopy
         WriteData(fid, prefix, 'data', outputs, 'name', 'md.sampling.requested_outputs', 'format', 'StringArray')
-    #}}}
+    # }}}
 
-    def setparameters(self, md, lc, sigma): #{{{
+    def setparameters(self, md, lc, sigma):  # {{{
         nu = self.alpha - 1
         KAPPA = pow((8 * nu), 0.5) / lc
@@ -123,3 +117,3 @@
 
         return md
-    #}}}
+    # }}}
Index: /issm/trunk/src/m/classes/sealevelmodel.py
===================================================================
--- /issm/trunk/src/m/classes/sealevelmodel.py	(revision 28012)
+++ /issm/trunk/src/m/classes/sealevelmodel.py	(revision 28013)
@@ -163,5 +163,5 @@
     # }}}
 
-    def ncaps(self): #{{{
+    def ncaps(self):  # {{{
         return len(self.icecaps)
     # }}}
@@ -188,5 +188,5 @@
     # }}}
 
-    def intersections2d(self, *args): #{{{
+    def intersections2d(self, *args):  # {{{
         options = pairoptions(*args)
         force = options.getfieldvalue('force', 0)
@@ -241,5 +241,5 @@
             self.eltransitions.append(meshintersect3d(xe, ye, ze, xei, yei, zei, 'force', force))
 
-            self.earth.solidearth.transfercount[self.transitions[i]] = self.earth.solidearth/transfercount[self.transitions[i]] + 1
+            self.earth.solidearth.transfercount[self.transitions[i]] = self.earth.solidearth.transfercount[self.transitions[i]] + 1
 
         for i in range(len(self.icecaps)):
Index: /issm/trunk/src/m/classes/slr.py
===================================================================
--- /issm/trunk/src/m/classes/slr.py	(revision 28012)
+++ /issm/trunk/src/m/classes/slr.py	(revision 28013)
@@ -47,5 +47,5 @@
         #set defaults
         self.setdefaultparameters()
-    #}}}
+    # }}}
 
     def __repr__(self):  # {{{
@@ -116,5 +116,5 @@
         self.planetradius = planetradius('earth')
         return self
-    #}}}
+    # }}}
 
     def checkconsistency(self, md, solution, analyses):  # {{{
Index: /issm/trunk/src/m/classes/solidearthsettings.m
===================================================================
--- /issm/trunk/src/m/classes/solidearthsettings.m	(revision 28012)
+++ /issm/trunk/src/m/classes/solidearthsettings.m	(revision 28013)
@@ -88,5 +88,5 @@
 			self.cross_section_shape=1; %square as default (see iedge in GiaDeflectionCorex)
 
-			%no grd model by default:
+			%grd model by default
 			self.grdmodel=1;
 
@@ -171,5 +171,4 @@
 			WriteData(fid,prefix,'object',self,'fieldname','viscous','name','md.solidearth.settings.viscous','format','Boolean');
 			WriteData(fid,prefix,'object',self,'fieldname','rotation','name','md.solidearth.settings.rotation','format','Boolean');
-			WriteData(fid,prefix,'object',self,'fieldname','rotation','name','md.solidearth.settings.satellitegravity','format','Boolean');
 			WriteData(fid,prefix,'object',self,'fieldname','grdocean','name','md.solidearth.settings.grdocean','format','Boolean');
 			WriteData(fid,prefix,'object',self,'fieldname','ocean_area_scaling','name','md.solidearth.settings.ocean_area_scaling','format','Boolean');
Index: /issm/trunk/src/m/classes/solidearthsettings.py
===================================================================
--- /issm/trunk/src/m/classes/solidearthsettings.py	(revision 28012)
+++ /issm/trunk/src/m/classes/solidearthsettings.py	(revision 28013)
@@ -7,5 +7,5 @@
 
 class solidearthsettings(object):
-    """SOLIDEARTHSETTINGS class definition
+    """solidearthsettings class definition
 
     Usage:
@@ -93,6 +93,6 @@
         self.cross_section_shape = 1 # Square as default (see iedge in GiaDeflectionCorex)
 
-        # No GRD model by default
-        self.grdmodel = 0
+        # GRD model by default
+        self.grdmodel = 1
     # }}}
 
Index: /issm/trunk/src/m/classes/spatiallinearbasalforcings.m
===================================================================
--- /issm/trunk/src/m/classes/spatiallinearbasalforcings.m	(revision 28012)
+++ /issm/trunk/src/m/classes/spatiallinearbasalforcings.m	(revision 28013)
@@ -21,13 +21,15 @@
 				case 1
 					lb=varargin{1};
-					if strcmpi(class(lb),'linearbasalforcings'),
+					if isa(lb,'linearbasalforcings');
 						nvertices=length(lb.groundedice_melting_rate);
 						self.groundedice_melting_rate=lb.groundedice_melting_rate;
 						self.geothermalflux=lb.geothermalflux;
-						self.deepwater_elevation=lb.deepwater_elevation*ones(nvertices,1);
-						self.deepwater_melting_rate=lb.deepwater_melting_rate*ones(nvertices,1);
-						self.upperwater_melting_rate=lb.upperwater_melting_rate*ones(nvertices,1);
-						self.upperwater_elevation=lb.upperwater_elevation*ones(nvertices,1);
-						self.perturbation_melting_rate=lb.perturbation_melting_rate*ones(nvertices,1);
+						self.deepwater_elevation       = lb.deepwater_elevation*ones(nvertices,1);
+						self.deepwater_melting_rate    = lb.deepwater_melting_rate*ones(nvertices,1);
+						self.upperwater_melting_rate   = lb.upperwater_melting_rate*ones(nvertices,1);
+						self.upperwater_elevation      = lb.upperwater_elevation*ones(nvertices,1);
+						if ~isnan(lb.perturbation_melting_rate)
+							self.perturbation_melting_rate = lb.perturbation_melting_rate*ones(nvertices,1);
+						end
 					else 
 						self=structtoobj(spatiallinearbasalforcings(),varargin{1});
Index: /issm/trunk/src/m/classes/spatiallinearbasalforcings.py
===================================================================
--- /issm/trunk/src/m/classes/spatiallinearbasalforcings.py	(revision 28012)
+++ /issm/trunk/src/m/classes/spatiallinearbasalforcings.py	(revision 28013)
@@ -15,5 +15,5 @@
     """
 
-    def __init__(self, *args): #{{{
+    def __init__(self, *args):  # {{{
         nargs = len(args)
         if nargs == 0:
@@ -43,7 +43,7 @@
         else:
             raise Exception('constructor not supported')
-    #}}}
+    # }}}
 
-    def __repr__(self): #{{{
+    def __repr__(self):  # {{{
         s = '   spatial linear basal forcings parameters:\n'
         s += '{}\n'.format(fielddisplay(self, 'groundedice_melting_rate', 'basal melting rate (positive if melting) [m/yr]'))
@@ -55,7 +55,7 @@
         s += '{}\n'.format(fielddisplay(self, 'geothermalflux', 'geothermal heat flux [W/m^2]'))
         return s
-    #}}}
+    # }}}
 
-    def extrude(self, md): #{{{
+    def extrude(self, md):  # {{{
         self.groundedice_melting_rate = project3d(md, 'vector', self.groundedice_melting_rate, 'type', 'node', 'layer', 1) 
         self.deepwater_melting_rate = project3d(md, 'vector', self.deepwater_melting_rate, 'type', 'node', 'layer', 1) 
@@ -66,18 +66,18 @@
         self.perturbation_melting_rate = project3d(md, 'vector', self.upperwater_melting_rate, 'type', 'node', 'layer', 1) 
         return self
-    #}}}
+    # }}}
 
-    def initialize(self, md): #{{{
+    def initialize(self, md):  # {{{
         if np.all(np.isnan(self.groundedice_melting_rate)):
             self.groundedice_melting_rate = np.zeros((md.mesh.numberofvertices))
             print('      no basalforcings.groundedice_melting_rate specified: values set as zero')
         return self
-    #}}}
+    # }}}
 
-    def setdefaultparameters(self): #{{{
+    def setdefaultparameters(self):  # {{{
         return self
-    #}}}
+    # }}}
 
-    def checkconsistency(self, md, solution, analyses): #{{{
+    def checkconsistency(self, md, solution, analyses):  # {{{
         if not np.all(np.isnan(self.perturbation_melting_rate)):
             md = checkfield(md, 'fieldname', 'basalforcings.perturbation_melting_rate', 'NaN', 1, 'Inf', 1, 'timeseries', 1)
@@ -106,5 +106,5 @@
     # }}}
 
-    def marshall(self, prefix, md, fid): #{{{
+    def marshall(self, prefix, md, fid):  # {{{
         yts = md.constants.yts
 
@@ -117,3 +117,3 @@
         WriteData(fid, prefix, 'object', self, 'fieldname', 'upperwater_elevation', 'format', 'DoubleMat', 'name', 'md.basalforcings.upperwater_elevation', 'mattype', 1)
         WriteData(fid, prefix, 'object', self, 'fieldname', 'perturbation_melting_rate', 'format', 'DoubleMat', 'name', 'md.basalforcings.perturbation_melting_rate', 'scale', 1. / yts, 'mattype', 1)
-    #}}}
+    # }}}
Index: /issm/trunk/src/m/classes/steadystate.py
===================================================================
--- /issm/trunk/src/m/classes/steadystate.py	(revision 28012)
+++ /issm/trunk/src/m/classes/steadystate.py	(revision 28013)
@@ -21,5 +21,5 @@
         self.setdefaultparameters()
 
-    #}}}
+    # }}}
     def __repr__(self):  # {{{
         string = '   steadystate solution parameters:'
@@ -28,10 +28,10 @@
         string = "%s\n%s" % (string, fielddisplay(self, 'requested_outputs', 'additional requested outputs'))
         return string
-    #}}}
+    # }}}
 
     def defaultoutputs(self, md):  # {{{
         return md.stressbalance.defaultoutputs(md) + md.thermal.defaultoutputs(md)
 
-    #}}}
+    # }}}
     def setdefaultparameters(self):  # {{{
         #maximum of steady state iterations
@@ -42,5 +42,5 @@
         self.requested_outputs = ['default']
         return self
-    #}}}
+    # }}}
 
     def checkconsistency(self, md, solution, analyses):  # {{{
Index: /issm/trunk/src/m/classes/stochasticforcing.m
===================================================================
--- /issm/trunk/src/m/classes/stochasticforcing.m	(revision 28012)
+++ /issm/trunk/src/m/classes/stochasticforcing.m	(revision 28013)
@@ -11,4 +11,5 @@
 		default_id				= NaN;
 		covariance				= NaN;
+		timecovariance			= NaN;
 		stochastictimestep   = 0;
 		randomflag				= 1;
@@ -24,5 +25,5 @@
 		end % }}}
 		function self = extrude(self,md) % {{{
-			%Nothing for now
+			self.default_id = project3d(md,'vector',self.default_id,'type','element');
 		end % }}}
 		function self = setdefaultparameters(self) % {{{
@@ -40,9 +41,26 @@
 			end
 
-			%Check that covariance matrix is positive definite
-			try
-				chol(self.covariance);
-			catch
-				error('md.stochasticforcing.covariance is not positive definite');
+
+			if(numel(size(self.covariance)==3))
+				numtcovmat = numel(self.covariance(1,1,:)); %number of covariance matrices in time
+				lsCovmats = {};
+				for ii=[1:numtcovmat] %loop over 3rd dimension
+					lsCovmats{ii} = self.covariance(:,:,ii);
+      			%Check that covariance matrix is positive definite
+      			try
+      				chol(self.covariance(:,:,ii));
+      			catch
+      				error('an entry in md.stochasticforcing.covariance is not positive definite');
+					end
+				end
+			elseif(numel(size(self.covariance)==2))
+				numtcovmat = 1; %number of covariance matrices in time
+				lsCovmats = {self.covariance};
+   			%Check that covariance matrix is positive definite
+   			try
+   				chol(self.covariance);
+   			catch
+   				error('md.stochasticforcing.covariance is not positive definite');
+   			end
 			end
 
@@ -50,4 +68,10 @@
 			checkdefaults	= false; %need to check defaults only if one of the fields does not have its own dimensionality
 			structstoch		= structstochforcing();
+			% Check if hydrolgyarmapw is used
+			if(strcmp(class(md.hydrology),'hydrologyarmapw') && md.transient.ishydrology==1)
+				ispwHydroarma = 1;
+			else
+				ispwHydroarma = 0;
+			end
 			for field=self.fields
 				%Checking agreement of classes
@@ -95,30 +119,38 @@
 				end
 				if(contains(field,'WaterPressure'))
-					mdname = structstoch.mdnames(find(strcmp(structstoch.fields,char(field))));
-					if~(isequal(class(md.friction),char(mdname)))
-                  error('md.friction does not agree with stochasticforcing field %s', char(field));
-               end
+
+					mdnames = structstoch.mdnames(find(strcmp(structstoch.fields,char(field))));
+					found   = 0;
+					for(ii=[1:numel(mdnames)])
+						if(isequal(class(md.friction),char(mdnames{ii}))) found=1; end
+					end
+					if(found==0)
+						error('md.friction does not agree with stochasticforcing field %s', char(field));
+					end
 					if(strcmp(class(md.friction),'friction') || strcmp(class(md.friction),'frictionschoof') || strcmp(class(md.friction),'frictioncoulomb'))
-                  if(md.friction.coupling~=0 && md.friction.coupling~=1 && md.friction.coupling~=2)
-                     error('stochasticforcing field %s is only implemented for cases md.friction.coupling 0 or 1 or 2', char(field));
-                  end
+						if(md.friction.coupling~=0 && md.friction.coupling~=1 && md.friction.coupling~=2)
+							error('stochasticforcing field %s is only implemented for cases md.friction.coupling 0 or 1 or 2', char(field));
+						end
 					end
 					if(strcmp(class(md.friction),'friction'))
-                  if(any(md.friction.q==0))
-                     error('stochasticforcing field %s requires non-zero q exponent',char(field));
-                  end
-					end
-            end
+						if(any(md.friction.q==0))
+							error('stochasticforcing field %s requires non-zero q exponent',char(field));
+						end
+					end
+				end
 				%Checking for specific dimensions
-				if ~(strcmp(field,'SMBarma') || strcmp(field,'FrontalForcingsRignotarma') || strcmp(field,'BasalforcingsDeepwaterMeltingRatearma'))
+				if ~(strcmp(field,'SMBarma') || strcmp(field,'FrontalForcingsRignotarma') || strcmp(field,'BasalforcingsDeepwaterMeltingRatearma') || strcmp(field,'FrontalForcingsSubglacialDischargearma') || ((strcmp(field,'FrictionWaterPressure') && ispwHydroarma)))
 					checkdefaults = true; %field with non-specific dimensionality
 				end
 			end
-
 			%Retrieve all the field dimensionalities
 			dimensions = self.defaultdimension*ones(1,num_fields);
 			indSMBarma = -1; %about to check for index of SMBarma
 			indTFarma  = -1; %about to check for index of FrontalForcingsRignotarma
-			indBDWarma  = -1; %about to check for index of BasalforcingsDeepwaterMeltingRatearma
+			indSdarma  = -1; %about to check for index of SubglacialDischargearma
+			indBDWarma = -1; %about to check for index of BasalforcingsDeepwaterMeltingRatearma
+			indPwarma  = -1; %about to check for index of hydrologyarmapw
+			
+
 			if any(contains(self.fields,'SMBarma'))
 				indSMBarma = find(contains(self.fields,'SMBarma')); %index of SMBarma, now check for consistency with other arma timesteps 
@@ -135,4 +167,11 @@
 				end
 			end
+			if any(contains(self.fields,'FrontalForcingsSubglacialDischargearma'))
+				indSdarma	= find(contains(self.fields,'FrontalForcingsSubglacialDischargearma')); %index of Sdarma, now check for consistency with other arma timesteps 
+				dimensions(indSdarma) = md.frontalforcings.num_basins;
+				if(md.frontalforcings.sd_arma_timestep<self.stochastictimestep)
+					error('FrontalForcingsSubglacialDischargearma cannot have a timestep shorter than stochastictimestep');
+				end
+			end
 			if any(contains(self.fields,'BasalforcingsDeepwaterMeltingRatearma'))
 				indBDWarma	= find(contains(self.fields,'BasalforcingsDeepwaterMeltingRatearma')); %index of BDWarma, now check for consistency with other arma timesteps 
@@ -142,35 +181,133 @@
 				end
 			end
+			if (any(contains(self.fields,'FrictionWaterPressure')) && ispwHydroarma)
+				indPwarma	= find(contains(self.fields,'FrictionWaterPressure')); %index of Pwarma, now check for consistency with other arma timesteps 
+				dimensions(indPwarma) = md.hydrology.num_basins;
+				if(md.hydrology.arma_timestep<self.stochastictimestep)
+					error('hydrologyarmapw cannot have a timestep shorter than stochastictimestep');
+				end
+			end
 			size_tot = sum(dimensions);
 
-			if(indSMBarma~=-1 && indTFarma~=-1) %both ARMA models are used: check ARMA time step consistency
-				if(md.smb.arma_timestep~=md.frontalforcings.arma_timestep)
-					crossentries = reshape(self.covariance(1+sum(dimensions(1:indSMBarma-1)):sum(dimensions(1:indSMBarma)),1+sum(dimensions(1:indTFarma-1)):sum(dimensions(1:indTFarma))),1,[]);
-					if any(crossentries~=0)
-						error('SMBarma and FrontalForcingsRignotarma have different arma_timestep and non-zero covariance');
-					end
-				end
-			end
-			if(indSMBarma~=-1 && indBDWarma~=-1) %both ARMA models are used: check ARMA time step consistency
-				if(md.smb.arma_timestep~=md.basalforcings.arma_timestep)
-					crossentries = reshape(self.covariance(1+sum(dimensions(1:indSMBarma-1)):sum(dimensions(1:indSMBarma)),1+sum(dimensions(1:indBDWarma-1)):sum(dimensions(1:indBDWarma))),1,[]);
-					if any(crossentries~=0)
-						error('SMBarma and BasalforcingsDeepwaterMeltingRatearma have different arma_timestep and non-zero covariance');
-					end
-				end
-			end
-			if(indTFarma~=-1 && indBDWarma~=-1) %both ARMA models are used: check ARMA time step consistency
-				if(md.frontalforcings.arma_timestep~=md.basalforcings.arma_timestep)
-					crossentries = reshape(self.covariance(1+sum(dimensions(1:indTFarma-1)):sum(dimensions(1:indTFarma)),1+sum(dimensions(1:indBDWarma-1)):sum(dimensions(1:indBDWarma))),1,[]);
-					if any(crossentries~=0)
-						error('FrontalForcingsRignotarma and BasalforcingsDeepwaterMeltingRatearma have different arma_timestep and non-zero covariance');
-					end
-				end
-			end
+			%%% Check consistency between ARMA models %%%
+			if(indBDWarma~=-1)
+				if(indPwarma~=-1)
+					if(md.basalforcings.arma_timestep~=md.hydrology.arma_timestep)
+						for(ii=[1:numel(lsCovmats)])
+							covm = lsCovmats{ii};
+							crossentries = reshape(covm(1+sum(dimensions(1:indBDWarma-1)):sum(dimensions(1:indBDWarma)),1+sum(dimensions(1:indPwarma-1)):sum(dimensions(1:indPwarma))),1,[]);
+							if any(crossentries~=0)
+								error('BasalforcingsDeepwaterMeltingRatearma and hydrologyarmapw have different arma_timestep and non-zero covariance');
+							end
+						end
+					end
+				elseif(indSdarma~=-1)
+					if(md.frontalforcings.sd_arma_timestep~=md.basalforcings.arma_timestep)
+						for(ii=[1:numel(lsCovmats)])
+							covm = lsCovmats{ii};
+							crossentries = reshape(covm(1+sum(dimensions(1:indSdarma-1)):sum(dimensions(1:indSdarma)),1+sum(dimensions(1:indBDWarma-1)):sum(dimensions(1:indBDWarma))),1,[]);
+							if any(crossentries~=0)
+								error('FrontalForcingsSubglacialDischargearma and BasalforcingsDeepwaterMeltingRatearma have different arma_timestep and non-zero covariance');
+							end
+						end
+					end
+				elseif(indSMBarma~=-1)
+					if(md.smb.arma_timestep~=md.basalforcings.arma_timestep)
+						for(ii=[1:numel(lsCovmats)])
+							covm = lsCovmats{ii};
+							crossentries = reshape(covm(1+sum(dimensions(1:indSMBarma-1)):sum(dimensions(1:indSMBarma)),1+sum(dimensions(1:indBDWarma-1)):sum(dimensions(1:indBDWarma))),1,[]);
+							if any(crossentries~=0)
+								error('SMBarma and BasalforcingsDeepwaterMeltingRatearma have different arma_timestep and non-zero covariance');
+							end
+						end
+					end
+				elseif(indTFarma~=-1)
+					if(md.frontalforcings.arma_timestep~=md.basalforcings.arma_timestep)
+						for(ii=[1:numel(lsCovmats)])
+							covm = lsCovmats{ii};
+							crossentries = reshape(covm(1+sum(dimensions(1:indTFarma-1)):sum(dimensions(1:indTFarma)),1+sum(dimensions(1:indBDWarma-1)):sum(dimensions(1:indBDWarma))),1,[]);
+							if any(crossentries~=0)
+								error('FrontalForcingsRignotarma and BasalforcingsDeepwaterMeltingRatearma have different arma_timestep and non-zero covariance');
+							end
+						end
+					end
+				end
+			elseif(indPwarma~=-1)
+				if(indSdarma~=-1)
+					if(md.frontalforcings.sd_arma_timestep~=md.hydrology.arma_timestep)
+						for(ii=[1:numel(lsCovmats)])
+							covm = lsCovmats{ii};
+							crossentries = reshape(covm(1+sum(dimensions(1:indSdarma-1)):sum(dimensions(1:indSdarma)),1+sum(dimensions(1:indPwarma-1)):sum(dimensions(1:indPwarma))),1,[]);
+							if any(crossentries~=0)
+								error('FrontalForcingsSubglacialDischargearma and hydrologyarmapw have different arma_timestep and non-zero covariance');
+							end
+						end
+					end
+				elseif(indSMBarma~=-1)
+					if(md.smb.arma_timestep~=md.hydrology.arma_timestep)
+						for(ii=[1:numel(lsCovmats)])
+							covm = lsCovmats{ii};
+							crossentries = reshape(covm(1+sum(dimensions(1:indSMBarma-1)):sum(dimensions(1:indSMBarma)),1+sum(dimensions(1:indPwarma-1)):sum(dimensions(1:indPwarma))),1,[]);
+							if any(crossentries~=0)
+								error('SMBarma and hydrologyarmapw have different arma_timestep and non-zero covariance');
+							end
+						end
+					end
+				elseif(indTFarma~=-1)
+					if(md.frontalforcings.arma_timestep~=md.hydrology.arma_timestep)
+						for(ii=[1:numel(lsCovmats)])
+							covm = lsCovmats{ii};
+							crossentries = reshape(covm(1+sum(dimensions(1:indTFarma-1)):sum(dimensions(1:indTFarma)),1+sum(dimensions(1:indPwarma-1)):sum(dimensions(1:indPwarma))),1,[]);
+							if any(crossentries~=0)
+								error('FrontalForcingsRignotarma and hydrologyarmapw have different arma_timestep and non-zero covariance');
+							end
+						end
+					end
+				end
+			elseif(indSdarma~=-1)
+				if(indSMBarma~=-1)
+					if(md.smb.arma_timestep~=md.frontalforcings.sd_arma_timestep)
+						for(ii=[1:numel(lsCovmats)])
+							covm = lsCovmats{ii};
+							crossentries = reshape(covm(1+sum(dimensions(1:indSMBarma-1)):sum(dimensions(1:indSMBarma)),1+sum(dimensions(1:indSdarma-1)):sum(dimensions(1:indSdarma))),1,[]);
+							if any(crossentries~=0)
+								error('SMBarma and FrontalForcingsSubglacialDischargearma have different arma_timestep and non-zero covariance');
+							end
+						end
+					end
+				elseif(indTFarma~=-1)
+					if(md.frontalforcings.sd_arma_timestep~=md.frontalforcings.arma_timestep)
+						for(ii=[1:numel(lsCovmats)])
+							covm = lsCovmats{ii};
+							crossentries = reshape(covm(1+sum(dimensions(1:indSdarma-1)):sum(dimensions(1:indSdarma)),1+sum(dimensions(1:indTFarma-1)):sum(dimensions(1:indTFarma))),1,[]);
+							if any(crossentries~=0)
+								error('FrontalForcingsRignotarma and FrontalForcingsSubglacialDischargearma have different arma_timestep and non-zero covariance');
+							end
+						end
+					end
+				end
+			elseif(indSMBarma~=-1)
+				if(indTFarma~=-1)
+					if(md.smb.arma_timestep~=md.frontalforcings.arma_timestep)
+						for(ii=[1:numel(lsCovmats)])
+							covm = lsCovmats{ii};
+							crossentries = reshape(covm(1+sum(dimensions(1:indSMBarma-1)):sum(dimensions(1:indSMBarma)),1+sum(dimensions(1:indTFarma-1)):sum(dimensions(1:indTFarma))),1,[]);
+							if any(crossentries~=0)
+								error('SMBarma and FrontalForcingsRignotarma have different arma_timestep and non-zero covariance');
+							end
+						end
+					end
+				end
+			end
+			%%% End of consistency checks between ARMA models %%%
+
 			md = checkfield(md,'fieldname','stochasticforcing.isstochasticforcing','values',[0 1]);
 			md = checkfield(md,'fieldname','stochasticforcing.fields','numel',num_fields,'cell',1,'values',supportedstochforcings());
-			md = checkfield(md,'fieldname','stochasticforcing.covariance','NaN',1,'Inf',1,'size',[size_tot,size_tot]); %global covariance matrix
+			md = checkfield(md,'fieldname','stochasticforcing.covariance','NaN',1,'Inf',1,'size',[size_tot,size_tot,numtcovmat]); %global covariance matrix
 			md = checkfield(md,'fieldname','stochasticforcing.stochastictimestep','NaN',1,'Inf',1,'>=',md.timestepping.time_step);
 			md = checkfield(md,'fieldname','stochasticforcing.randomflag','numel',[1],'values',[0 1]);
+			if(numtcovmat>1) %check the time steps at which each covariance matrix starts to be applied
+				md = checkfield(md,'fieldname','stochasticforcing.timecovariance','NaN',1,'Inf',1,'>=',md.timestepping.start_time,'<=',md.timestepping.final_time,'size',[1,numtcovmat]);
+			end
 			if(checkdefaults) %need to check the defaults
 				md = checkfield(md,'fieldname','stochasticforcing.defaultdimension','numel',1,'NaN',1,'Inf',1,'>',0);
@@ -184,5 +321,6 @@
 			fielddisplay(self,'defaultdimension','dimensionality of the noise terms (does not apply to fields with their specific dimension)');
 			fielddisplay(self,'default_id','id of each element for partitioning of the noise terms (does not apply to fields with their specific partition)');
-			fielddisplay(self,'covariance','covariance matrix for within- and between-fields covariance (units must be squared field units)');
+			fielddisplay(self,'covariance',{'covariance matrix for within- and between-fields covariance (units must be squared field units)','multiple matrices can be concatenated along 3rd dimension to apply different covariances in time'}); 
+			fielddisplay(self,'timecovariance','starting dates at which covariances apply (only applicabe if multiple covariance matrices are prescribed)'); 
 			fielddisplay(self,'stochastictimestep','timestep at which new stochastic noise terms are generated (default: md.timestepping.time_step)');
 			fielddisplay(self,'randomflag','whether to apply real randomness (true) or pseudo-randomness with fixed seed (false)');
@@ -207,4 +345,10 @@
 				end
 
+				% Check if hydrolgyarmapw is used
+				if(strcmp(class(md.hydrology),'hydrologyarmapw') && md.transient.ishydrology==1)
+         	   ispwHydroarma = 1;
+         	else
+         	   ispwHydroarma = 0;
+         	end
 				%Retrieve dimensionality of each field
 				dimensions = self.defaultdimension*ones(1,num_fields);
@@ -218,22 +362,57 @@
 						dimensions(ind) = md.frontalforcings.num_basins;
 					end
+					if(strcmp(field,'FrontalForcingsSubglacialDischargearma'))
+						dimensions(ind) = md.frontalforcings.num_basins;
+					end
 					if(strcmp(field,'BasalforcingsDeepwaterMeltingRatearma'))
 						dimensions(ind) = md.basalforcings.num_basins;
 					end
+					if(strcmp(field,'BasalforcingsDeepwaterMeltingRatearma'))
+						dimensions(ind) = md.basalforcings.num_basins;
+					end
+					if(strcmp(field,'FrictionWaterPressure') && ispwHydroarma)
+						dimensions(ind) = md.hydrology.num_basins;
+				   end
 					ind = ind+1;
 				end
 
+   			if(numel(size(self.covariance))==3)
+   				[nrow,ncol,numtcovmat] = size(self.covariance);
+   				lsCovmats = {};
+   				for ii=[1:numtcovmat] %loop over 3rd dimension
+   					lsCovmats{ii} = self.covariance(:,:,ii);
+   				end
+					if(md.timestepping.interp_forcing==1)
+					   disp('WARNING: md.timestepping.interp_forcing is 1, but be aware that there is no interpolation between covariance matrices');
+					   disp('         the changes between covariance matrices occur at the time steps specified in md.stochasticforcing.timecovariance');
+					end
+				elseif(numel(size(self.covariance)==2))
+   				[nrow,ncol] = size(self.covariance);
+   				numtcovmat = 1; %number of covariance matrices in time
+   				lsCovmats = {self.covariance};
+   			end
+   
 				%Scaling covariance matrix (scale column-by-column and row-by-row)
 				scaledfields = {'BasalforcingsDeepwaterMeltingRatearma','BasalforcingsSpatialDeepwaterMeltingRate','DefaultCalving','FloatingMeltRate','SMBarma','SMBforcing'}; %list of fields that need scaling *1/yts
-				tempcovariance = self.covariance; %copy of covariance to avoid writing back in member variable
-				for i=1:num_fields
-					if any(strcmp(scaledfields,self.fields(i)))
-						inds = [1+sum(dimensions(1:i-1)):1:sum(dimensions(1:i))];
-						for row=inds %scale rows corresponding to scaled field
-							tempcovariance(row,:) = 1./yts*tempcovariance(row,:);
-						end
-						for col=inds %scale columns corresponding to scaled field
-							tempcovariance(:,col) = 1./yts*tempcovariance(:,col);
-						end
+				tempcovariance2d = zeros(numtcovmat,sum(nrow*ncol)); %covariance matrices in 2d array
+				% Loop over covariance matrices %
+				for kk=[1:numtcovmat]
+					kkcov = self.covariance(:,:,kk); %extract covariance at index kk
+					% Loop over the fields %
+					for i=1:num_fields
+						if any(strcmp(scaledfields,self.fields(i)))
+							inds = [1+sum(dimensions(1:i-1)):1:sum(dimensions(1:i))];
+							for row=inds %scale rows corresponding to scaled field
+								kkcov(row,:) = 1./yts*kkcov(row,:);
+							end
+							for col=inds %scale columns corresponding to scaled field
+								kkcov(:,col) = 1./yts*kkcov(:,col);
+							end
+						end
+					end
+					% Save scaled covariance %
+					for rr=[1:nrow]
+						ind0 = 1+(rr-1)*ncol;
+						tempcovariance2d(kk,ind0:ind0+ncol-1) = kkcov(rr,:);
 					end
 				end
@@ -241,4 +420,8 @@
 				if isnan(self.default_id)
 					self.default_id = zeros(md.mesh.numberofelements,1);
+				end
+				%Set dummy timecovariance vector if a single covariance matrix is used
+				if(numtcovmat==1)
+					self.timecovariance = [md.timestepping.start_time];
 				end
 
@@ -248,5 +431,7 @@
 				WriteData(fid,prefix,'object',self,'fieldname','default_id','data',self.default_id-1,'format','IntMat','mattype',2); %0-indexed
 				WriteData(fid,prefix,'object',self,'fieldname','defaultdimension','format','Integer');
-				WriteData(fid,prefix,'data',tempcovariance,'name','md.stochasticforcing.covariance','format','DoubleMat');
+				WriteData(fid,prefix,'data',numtcovmat,'name','md.stochasticforcing.num_timescovariance','format','Integer');
+				WriteData(fid,prefix,'data',tempcovariance2d,'name','md.stochasticforcing.covariance','format','DoubleMat');
+				WriteData(fid,prefix,'object',self,'fieldname','timecovariance','format','DoubleMat','scale',yts);
 				WriteData(fid,prefix,'object',self,'fieldname','stochastictimestep','format','Double','scale',yts);
 				WriteData(fid,prefix,'object',self,'fieldname','randomflag','format','Boolean');
@@ -271,7 +456,8 @@
 		'FloatingMeltRate',...
 		'FrictionWaterPressure',...
-		'FrictionCoulombWaterPressure',...
-		'FrictionSchoofWaterPressure',...
+		'FrictionWaterPressure',...
+		'FrictionWaterPressure',...
 		'FrontalForcingsRignotarma',...
+		'FrontalForcingsSubglacialDischargearma',...
 		'SMBarma',...
 		'SMBforcing'
@@ -286,4 +472,5 @@
 		'frictionschoof',...
 		'frontalforcingsrignotarma',...
+		'frontalforcingsrignotarma',...
 		'SMBarma',...
 		'SMBforcing'
Index: /issm/trunk/src/m/classes/stochasticforcing.py
===================================================================
--- /issm/trunk/src/m/classes/stochasticforcing.py	(revision 28012)
+++ /issm/trunk/src/m/classes/stochasticforcing.py	(revision 28013)
@@ -2,9 +2,10 @@
 from checkfield import checkfield
 from fielddisplay import fielddisplay
+from project3d import project3d
 from WriteData import WriteData
 
 
 class stochasticforcing(object):
-    """STOCHASTICFORCING class definition
+    """stochasticforcing class definition
 
     Usage:
@@ -18,4 +19,5 @@
         self.default_id = np.nan
         self.covariance = np.nan
+        self.timecovariance = np.nan
         self.stochastictimestep = 0
         self.randomflag = 1
@@ -32,17 +34,22 @@
         s += '{}\n'.format(fielddisplay(self, 'defaultdimension', 'dimensionality of the noise terms (does not apply to fields with their specific dimension)'))
         s += '{}\n'.format(fielddisplay(self, 'default_id', 'id of each element for partitioning of the noise terms (does not apply to fields with their specific partition)'))
-        s += '{}\n'.format(fielddisplay(self, 'covariance', 'covariance matrix for within- and between-fields covariance (units must be squared field units)'))
+        s += '{}\n'.format(fielddisplay(self, 'covariance', 'covariance matrix for within- and between-fields covariance (units must be squared field units),multiple matrices can be concatenated along 3rd dimension to apply different covariances in time'))
+        s += '{}\n'.format(fielddisplay(self, 'timecovariance', 'starting dates at which covariances apply (only applicabe if multiple covariance matrices are prescribed)'))
         s += '{}\n'.format(fielddisplay(self, 'stochastictimestep', 'timestep at which new stochastic noise terms are generated (default: md.timestepping.time_step)'))
         s += '{}\n'.format(fielddisplay(self, 'randomflag', 'whether to apply real randomness (true) or pseudo-randomness with fixed seed (false)'))
         s += 'Available fields:\n'
+        s += '   BasalforcingsDeepwaterMeltingRatearma\n'
         s += '   BasalforcingsSpatialDeepwaterMeltingRate\n'
         s += '   DefaultCalving\n'
         s += '   FloatingMeltRate\n'
         s += '   FrictionWaterPressure\n'
+        s += '   FrictionCoulombWaterPressure\n'
+        s += '   FrictionSchoofWaterPressure\n'
         s += '   FrontalForcingsRignotarma (thermal forcing)\n'
+        s += '   FrontalForcingsSubglacialDischargearma\n'
         s += '   SMBarma\n'
         s += '   SMBforcing\n'
         return s
-    #}}}
+    # }}}
 
     def setdefaultparameters(self):  # {{{
@@ -52,5 +59,5 @@
         self.randomflag = 1  # true randomness is implemented by default
         return self
-    #}}}
+    # }}}
 
     def checkconsistency(self, md, solution, analyses):  # {{{
@@ -60,103 +67,193 @@
 
         num_fields = len(self.fields)
-        if(self.stochastictimestep==0):
+        if self.stochastictimestep == 0:
             md.stochasticforcing.stochastictimestep = md.timestepping.time_step #by default: stochastictimestep set to ISSM time step
             print('      stochasticforcing.stocahstictimestep not specified: set to md.timestepping.time_step')
 
-        # Check that covariance matrix is positive definite (this is done internally by linalg)
-        try:
-            np.linalg.cholesky(self.covariance)
-        except:
-            raise TypeError('md.stochasticforcing.covariance is not positive definite')
+        if(len(np.shape(self.covariance))==3):
+           numtcovmat = np.shape(self.covariance)[2] #number of covariance matrices in time
+           lsCovmats = []
+           for ii in range(numtcovmat):
+               lsCovmats.append(self.covariance[:,:,ii])
+               try:
+                   np.linalg.cholesky(self.covariance[:,:,ii])
+               except:
+                   raise TypeError('an entry in md.stochasticforcing.covariance is not positive definite')
+        elif(len(np.shape(self.covariance))==2):
+            numtcovmat = 1
+            lsCovmats = [self.covariance]
+            # Check that covariance matrix is positive definite (this is done internally by linalg)
+            try:
+                np.linalg.cholesky(self.covariance)
+            except:
+                raise TypeError('md.stochasticforcing.covariance is not positive definite')
 
         # Check that all fields agree with the corresponding md class and if any field needs the default params
         checkdefaults = False  # Need to check defaults only if one of the fields does not have its own dimensionality
         structstoch = self.structstochforcing()
+        # Check if hydrologyarmapw is used
+        if((type(md.hydrology).__name__ == 'hydrologyarmapw') and md.transient.ishydrology==1):
+            ispwHydroarma = 1
+        else:
+            ispwHydroarma = 0
         for field in self.fields:
             # Checking agreement of classes
             if 'SMBarma' in field:
                 mdname = structstoch[field]
-                if (type(md.smb).__name__ != mdname):
+                if type(md.smb).__name__ != mdname:
                     raise TypeError('md.smb does not agree with stochasticforcing field {}'.format(field))
             if 'SMBforcing' in field:
                 mdname = structstoch[field]
-                if (type(md.smb).__name__ != mdname):
+                if type(md.smb).__name__ != mdname:
                     raise TypeError('md.smb does not agree with stochasticforcing field {}'.format(field))
             if 'FrontalForcings' in field:
                 mdname = structstoch[field]
-                if (type(md.frontalforcings).__name__ != mdname):
+                if type(md.frontalforcings).__name__ != mdname:
                     raise TypeError('md.frontalforcings does not agree with stochasticforcing field {}'.format(field))
             if 'Calving' in field:
                 mdname = structstoch[field]
-                if (type(md.calving).__name__ != mdname):
+                if type(md.calving).__name__ != mdname:
                     raise TypeError('md.calving does not agree with stochasticforcing field {}'.format(field))
             if 'BasalforcingsFloatingice' in field:
                 mdname = structstoch[field]
-                if (type(md.basalforcings).__name__ != mdname):
+                if type(md.basalforcings).__name__ != mdname:
                     raise TypeError('md.basalforcings does not agree with stochasticforcing field {}'.format(field))
             if 'BasalforcingsSpatialDeepwaterMeltingRate' in field:
                 mdname = structstoch[field]
-                if (type(md.basalforcings).__name__ != mdname):
+                if type(md.basalforcings).__name__ != mdname:
                     raise TypeError('md.basalforcings does not agree with stochasticforcing field {}'.format(field))
             if 'BasalforcingsDeepwaterMeltingRatearma' in field:
                 mdname = structstoch[field]
-                if (type(md.basalforcings).__name__ != mdname):
+                if type(md.basalforcings).__name__ != mdname:
                     raise TypeError('md.basalforcings does not agree with stochasticforcing field {}'.format(field))
             if 'WaterPressure' in field:
-                mdname = structstoch[field]
-                if (type(md.friction).__name__ != mdname):
+                #mdname = structstoch[field]
+                mdnames = ['friction','frictioncoulomb','frictionschoof']
+                found   = 0
+                for ii in range(len(mdnames)):
+                    if type(md.friction).__name__ == mdnames[ii]:
+                        found = 1
+                if not found:
                     raise TypeError('md.friction does not agree with stochasticforcing field {}'.format(field))
-                if (type(md.friction).__name__=='friction' or type(md.friction).__name__=='frictionschoof' or type(md.friction).__name__=='frictioncoulomb'):
+                #if (type(md.friction).__name__ != mdname):
+                #    raise TypeError('md.friction does not agree with stochasticforcing field {}'.format(field))
+                if type(md.friction).__name__ == 'friction' or type(md.friction).__name__ == 'frictionschoof' or type(md.friction).__name__=='frictioncoulomb':
                     if md.friction.coupling not in[0, 1, 2]:
                         raise TypeError('stochasticforcing field {} is only implemented for cases md.friction.coupling 0 or 1 or 2'.format(field))
-                if (type(md.friction).__name__=='friction'):
+                if type(md.friction).__name__ == 'friction':
                     if (np.any(md.friction.q == 0)):
                         raise TypeError('stochasticforcing field {} requires non-zero q exponent'.format(field))
 
             # Checking for specific dimensions
-            if field not in['SMBarma', 'FrontalForcingsRignotarma','BasalforcingsDeepwaterMeltingRatearma']:
+            if field not in ['SMBarma', 'FrontalForcingsRignotarma','BasalforcingsDeepwaterMeltingRatearma']  and not (field == 'FrictionWaterPressure' and ispwHydroarma == True):
                 checkdefaults = True  # field with non-specific dimensionality
 
         # Retrieve sum of all the field dimensionalities
-        dimensions = self.defaultdimension*np.ones((num_fields))
+        dimensions = self.defaultdimension * np.ones((num_fields))
         indSMBarma   = -1  # About to check for index of SMBarma
         indTFarma    = -1  # About to check for index of FrontalForcingsRignotarma
+        indSdarma    = -1  # About to check for index of FrontalForcingsSubglacialDischargearma
         indBDWarma   = -1  # About to check for index of BasalforcingsDeepwaterMeltingRatearma
-        if ('SMBarma' in self.fields):
+        indPwarma    = -1  # About to check for index of hydrologyarmapw
+        if 'SMBarma' in self.fields:
             indSMBarma = self.fields.index('SMBarma')  # Index of SMBarma, now check for consistency with other timesteps
             dimensions[indSMBarma] = md.smb.num_basins
             if(md.smb.arma_timestep<self.stochastictimestep):
                 raise TypeError('SMBarma cannot have a timestep shorter than stochastictimestep')
-        if ('FrontalForcingsRignotarma' in self.fields):
+        if 'FrontalForcingsRignotarma' in self.fields:
             indTFarma = self.fields.index('FrontalForcingsRignotarma')  # Index of TFarma, now check for consistency with other timesteps
             dimensions[indTFarma] = md.frontalforcings.num_basins
-            if(md.frontalforcings.arma_timestep<self.stochastictimestep):
+            if md.frontalforcings.arma_timestep < self.stochastictimestep:
                 raise TypeError('FrontalForcingsRignotarma cannot have a timestep shorter than stochastictimestep')
-        if ('BasalforcingsDeepwaterMeltingRatearma' in self.fields):
+        if 'FrontalForcingsSubglacialDischargearma' in self.fields:
+            indSdarma = self.fields.index('FrontalForcingsSubglacialDischargearma')  # Index of Sdarma, now check for consistency with other timesteps
+            dimensions[indSdarma] = md.frontalforcings.num_basins
+            if md.frontalforcings.sd_arma_timestep < self.stochastictimestep:
+                raise TypeError('FrontalForcingsSubglacialDischargearma cannot have a timestep shorter than stochastictimestep')
+        if 'BasalforcingsDeepwaterMeltingRatearma' in self.fields:
             indBDWarma = self.fields.index('BasalforcingsDeepwaterMeltingRatearma')  # Index of BDWarma, now check for consistency with other timesteps
             dimensions[indTFarma] = md.basalforcings.num_basins
-            if(md.basalforcings.arma_timestep<self.stochastictimestep):
+            if md.basalforcings.arma_timestep < self.stochastictimestep:
                 raise TypeError('BasalforcingsDeepwaterMeltingRatearma cannot have a timestep shorter than stochastictimestep')
+        if 'FrictionWaterPressure' in self.fields and ispwHydroarma:
+            indPwarma = self.fields.index('FrictionWaterPressure')  # Index of Pwarma, now check for consistency with other timesteps
+            dimensions[indPwarma] = md.hydrology.num_basins
+            if md.hydrology.arma_timestep < self.stochastictimestep:
+                raise TypeError('hydrologyarmapw cannot have a timestep shorter than stochastictimestep')
         size_tot = np.sum(dimensions)
 
-        if (indSMBarma != -1 and indTFarma != -1):  # Both ARMA models are used: check ARMA time step consistency
-            covsum = self.covariance[np.sum(dimensions[0:indSMBarma]).astype(int):np.sum(dimensions[0:indSMBarma + 1]).astype(int), np.sum(dimensions[0:indTFarma]).astype(int):np.sum(dimensions[0:indTFarma + 1]).astype(int)]
-            if((md.smb.arma_timestep != md.frontalforcings.arma_timestep) and np.any(covsum != 0)):
-                raise IOError('SMBarma and FrontalForcingsRignotarma have different arma_timestep and non-zero covariance')
-        if (indSMBarma != -1 and indBDWarma != -1):  # Both ARMA models are used: check ARMA time step consistency
-            covsum = self.covariance[np.sum(dimensions[0:indSMBarma]).astype(int):np.sum(dimensions[0:indSMBarma + 1]).astype(int), np.sum(dimensions[0:indBDWarma]).astype(int):np.sum(dimensions[0:indBDWarma + 1]).astype(int)]
-            if((md.smb.arma_timestep != md.basalforcings.arma_timestep) and np.any(covsum != 0)):
-                raise IOError('SMBarma and BasalforcingsDeepwaterMeltingRatearma have different arma_timestep and non-zero covariance')
-        if (indTFarma != -1 and indBDWarma != -1):  # Both ARMA models are used: check ARMA time step consistency
-            covsum = self.covariance[np.sum(dimensions[0:indTFarma]).astype(int):np.sum(dimensions[0:indTFarma + 1]).astype(int), np.sum(dimensions[0:indBDWarma]).astype(int):np.sum(dimensions[0:indBDWarma + 1]).astype(int)]
-            if((md.frontalforcings.arma_timestep != md.basalforcings.arma_timestep) and np.any(covsum != 0)):
-                raise IOError('FrontalForcingsRignotarma and BasalforcingsDeepwaterMeltingRatearma have different arma_timestep and non-zero covariance')
+        if indBDWarma != -1:
+            if indPwarma != -1:
+                for ii in range(len(lsCovmats)):
+                    covm = lsCovmats[ii]
+                    covsum = covm[np.sum(dimensions[0:indBDWarma]).astype(int):np.sum(dimensions[0:indBDWarma + 1]).astype(int), np.sum(dimensions[0:indPwarma]).astype(int):np.sum(dimensions[0:indPwarma + 1]).astype(int)]
+                    if md.smb.arma_timestep != md.hydrology.arma_timestep and np.any(covsum != 0):
+                        raise IOError('BasalforcingsDeepwaterMeltingRatarma and hydrologyarmapw have different arma_timestep and non-zero covariance')
+            elif indSdarma != -1:
+                for ii in range(len(lsCovmats)):
+                    covm = lsCovmats[ii]
+                    covsum = covm[np.sum(dimensions[0:indSdarma]).astype(int):np.sum(dimensions[0:indSdarma + 1]).astype(int), np.sum(dimensions[0:indBDWarma]).astype(int):np.sum(dimensions[0:indBDWarma + 1]).astype(int)]
+                    if md.frontalforcings.sd_arma_timestep != md.basalforcings.arma_timestep and np.any(covsum != 0):
+                        raise IOError('FrontalForcingsSubglacialDischargearma and BasalforcingsDeepwaterMeltingRatearma have different arma_timestep and non-zero covariance')
+            elif indSMBarma != -1:
+                for ii in range(len(lsCovmats)):
+                    covm = lsCovmats[ii]
+                    covsum = covm[np.sum(dimensions[0:indSMBarma]).astype(int):np.sum(dimensions[0:indSMBarma + 1]).astype(int), np.sum(dimensions[0:indBDWarma]).astype(int):np.sum(dimensions[0:indBDWarma + 1]).astype(int)]
+                    if md.smb.arma_timestep != md.basalforcings.arma_timestep and np.any(covsum != 0):
+                        raise IOError('SMBarma and BasalforcingsDeepwaterMeltingRatearma have different arma_timestep and non-zero covariance')
+            elif indTFarma != -1:
+                for ii in range(len(lsCovmats)):
+                    covm = lsCovmats[ii]
+                    covsum = covm[np.sum(dimensions[0:indTFarma]).astype(int):np.sum(dimensions[0:indTFarma + 1]).astype(int), np.sum(dimensions[0:indBDWarma]).astype(int):np.sum(dimensions[0:indBDWarma + 1]).astype(int)]
+                    if md.frontalforcings.arma_timestep != md.basalforcings.arma_timestep and np.any(covsum != 0):
+                        raise IOError('FrontalForcingsRignotarma and BasalforcingsDeepwaterMeltingRatearma have different arma_timestep and non-zero covariance')
+        elif indPwarma != -1:
+            if indSdarma != -1:
+                for ii in range(len(lsCovmats)):
+                    covm = lsCovmats[ii]
+                    covsum = covm[np.sum(dimensions[0:indSdarma]).astype(int):np.sum(dimensions[0:indSdarma + 1]).astype(int), np.sum(dimensions[0:indPwarma]).astype(int):np.sum(dimensions[0:indPwarma + 1]).astype(int)]
+                    if md.frontalforcings.sd_arma_timestep != md.hydrology.arma_timestep and np.any(covsum != 0):
+                        raise IOError('FrontalForingsSubglacialDischargearma and hydrologyarmapw have different arma_timestep and non-zero covariance')
+            elif indSMBarma != -1:
+                for ii in range(len(lsCovmats)):
+                    covm = lsCovmats[ii]
+                    covsum = covm[np.sum(dimensions[0:indSMBarma]).astype(int):np.sum(dimensions[0:indSMBarma + 1]).astype(int), np.sum(dimensions[0:indPwarma]).astype(int):np.sum(dimensions[0:indPwarma + 1]).astype(int)]
+                    if md.smb.arma_timestep != md.hydrology.arma_timestep and np.any(covsum != 0):
+                        raise IOError('SMBarma and hydrologyarmapw have different arma_timestep and non-zero covariance')
+            elif indTFarma != -1:
+                for ii in range(len(lsCovmats)):
+                    covm = lsCovmats[ii]
+                    covsum = covm[np.sum(dimensions[0:indTFarma]).astype(int):np.sum(dimensions[0:indTFarma + 1]).astype(int), np.sum(dimensions[0:indPwarma]).astype(int):np.sum(dimensions[0:indPwarma + 1]).astype(int)]
+                    if md.frontalforcings.arma_timestep != md.hydrology.arma_timestep and np.any(covsum != 0):
+                        raise IOError('FrontalForcingsRignotarma and hydrologyarmapw have different arma_timestep and non-zero covariance')
+        elif indSdarma != -1:
+            if indSMBarma != -1:
+                for ii in range(len(lsCovmats)):
+                    covm = lsCovmats[ii]
+                    covsum = covm[np.sum(dimensions[0:indSMBarma]).astype(int):np.sum(dimensions[0:indSMBarma + 1]).astype(int), np.sum(dimensions[0:indSdarma]).astype(int):np.sum(dimensions[0:indSdarma + 1]).astype(int)]
+                    if md.smb.arma_timestep != md.frontalforcings.sd_arma_timestep and np.any(covsum != 0):
+                        raise IOError('SMBarma and FrontalForcingsSubglacialDischargearma have different arma_timestep and non-zero covariance')
+            elif indTFarma != -1:
+                for ii in range(len(lsCovmats)):
+                    covm = lsCovmats[ii]
+                    covsum = covm[np.sum(dimensions[0:indSdarma]).astype(int):np.sum(dimensions[0:indSdarma + 1]).astype(int), np.sum(dimensions[0:indTFarma]).astype(int):np.sum(dimensions[0:indTFarma + 1]).astype(int)]
+                    if md.frontalforcings.sd_arma_timestep != md.frontalforcings.arma_timestep and np.any(covsum != 0):
+                        raise IOError('FrontalForcingsSubglacialDischargearma and FrontalForcingsRignotarma have different arma_timestep and non-zero covariance')
+        elif indSMBarma != -1:
+            if indTFarma != -1:
+                for ii in range(len(lsCovmats)):
+                    covm = lsCovmats[ii]
+                    covsum = covm[np.sum(dimensions[0:indSMBarma]).astype(int):np.sum(dimensions[0:indSMBarma + 1]).astype(int), np.sum(dimensions[0:indTFarma]).astype(int):np.sum(dimensions[0:indTFarma + 1]).astype(int)]
+                    if md.smb.arma_timestep != md.frontalforcings.arma_timestep and np.any(covsum != 0):
+                        raise IOError('SMBarma and FrontalForcingsRignotarma have different arma_timestep and non-zero covariance')
 
         md = checkfield(md, 'fieldname', 'stochasticforcing.isstochasticforcing', 'values', [0, 1])
         md = checkfield(md, 'fieldname', 'stochasticforcing.fields', 'numel', num_fields, 'cell', 1, 'values', self.supportedstochforcings())
-        md = checkfield(md, 'fieldname', 'stochasticforcing.covariance', 'NaN', 1, 'Inf', 1, 'size', [size_tot, size_tot])  # global covariance matrix
+        md = checkfield(md, 'fieldname', 'stochasticforcing.covariance', 'NaN', 1, 'Inf', 1, 'size', [size_tot, size_tot, numtcovmat])  # global covariance matrix
         md = checkfield(md, 'fieldname', 'stochasticforcing.stochastictimestep', 'NaN', 1,'Inf', 1, '>=', md.timestepping.time_step)
         md = checkfield(md, 'fieldname', 'stochasticforcing.randomflag', 'numel', [1], 'values', [0, 1])
-        if (checkdefaults):
+        if(numtcovmat>1): #check the time steps at which each covariance matrix starts to be applie
+            md = checkfield(md, 'fieldname', 'stochasticforcing.timecovariance', 'NaN', 1, 'Inf', 1, '>=',md.timestepping.start_time,'<=',md.timestepping.final_time,'size',[1,numtcovmat])  # global covariance matrix
+        if checkdefaults:
             md = checkfield(md, 'fieldname', 'stochasticforcing.defaultdimension', 'numel', 1, 'NaN', 1, 'Inf', 1, '>', 0)
             md = checkfield(md, 'fieldname', 'stochasticforcing.default_id', 'Inf', 1, 'NaN', 1, '>=', 0, '<=', self.defaultdimension, 'size', [md.mesh.numberofelements])
@@ -165,5 +262,5 @@
 
     def extrude(self, md):  # {{{
-        # Nothing for now
+        self.default_id = project3d(md,'vector',self.default_id,'type','element')
         return self
     # }}}
@@ -175,33 +272,67 @@
         if not self.isstochasticforcing:
             return md
-
         else:
             num_fields = len(self.fields)
-            if(self.stochastictimestep==0):
+            if self.stochastictimestep == 0:
                 md.stochasticforcing.stochastictimestep = md.timestepping.time_step #by default: stochastictimestep set to ISSM time step
+            # Check if hydroarmapw is used
+            if((type(md.hydrology).__name__ == 'hydrologyarmapw') and md.transient.ishydrology==1):
+                ispwHydroarma = 1
+            else:
+                ispwHydroarma = 0
+
             # Retrieve dimensionality of each field
             dimensions = self.defaultdimension * np.ones((num_fields))
             for ind, field in enumerate(self.fields):
                 # Checking for specific dimensions
-                if (field == 'SMBarma'):
+                if field == 'SMBarma':
                     dimensions[ind] = md.smb.num_basins
-                if (field == 'FrontalForcingsRignotarma'):
+                elif field == 'FrontalForcingsRignotarma':
                     dimensions[ind] = md.frontalforcings.num_basins
-                if (field == 'BasalforcingsDeepwaterMeltingRatearma'):
+                elif field == 'FrontalForcingsSubglacialDischargearma':
+                    dimensions[ind] = md.frontalforcings.num_basins
+                elif field == 'BasalforcingsDeepwaterMeltingRatearma':
                     dimensions[ind] = md.basalforcings.num_basins
+                elif field == 'FrictionWaterPressure' and ispwHydroarma:
+                    dimensions[ind] = md.hydrology.num_basins
+            
+            if(len(np.shape(self.covariance))==3):
+                nrow,ncol,numtcovmat = np.shape(self.covariance)
+                lsCovmats = []
+                for ii in range(numtcovmat):
+                    lsCovmats.append(self.covariance[:,:,ii])
+                if(md.timestepping.interp_forcing==1):
+                    print('WARNING: md.timestepping.interp_forcing is 1, but be aware that there is no interpolation between covariance matrices')
+                    print('         the changes between covariance matrices occur at the time steps specified in md.stochasticforcing.timecovariance')
+            elif(len(np.shape(self.covariance))==2):
+                nrow,ncol = np.shape(self.covariance)
+                numtcovmat = 1
+                lsCovmats = [self.covariance]
 
             # Scaling covariance matrix (scale column-by-column and row-by-row)
-            scaledfields = ['BasalforcingsDeepwaterMeltingRatearma','BasalforcingsSpatialDeepwaterMeltingRate','DefaultCalving', 'FloatingMeltRate', 'SMBarma', 'SMBforcing']  # list of fields that need scaling * 1/yts
-            tempcovariance = np.copy(self.covariance)
-            for i in range(num_fields):
-                if self.fields[i] in scaledfields:
-                    inds = range(int(np.sum(dimensions[0:i])), int(np.sum(dimensions[0:i + 1])))
-                    for row in inds:  # scale rows corresponding to scaled field
-                        tempcovariance[row, :] = 1 / yts * tempcovariance[row, :]
-                    for col in inds:  # scale columns corresponding to scaled field
-                        tempcovariance[:, col] = 1 / yts * tempcovariance[:, col]
+            scaledfields = ['BasalforcingsDeepwaterMeltingRatearma','BasalforcingsSpatialDeepwaterMeltingRate','DefaultCalving', 'FloatingMeltRate', 'SMBarma', 'SMBforcing']  # list of fields that need scaling * 1 / yts
+            tempcovariance2d = np.zeros((numtcovmat,nrow*ncol))
+            # Loop over covariance matrices #
+            for kk in range(numtcovmat):
+                kkcov = lsCovmats[kk]
+                # Loop over the fields #
+                for i in range(num_fields):
+                    if self.fields[i] in scaledfields:
+                        inds = range(int(np.sum(dimensions[0:i])), int(np.sum(dimensions[0:i + 1])))
+                        for row in inds:  # scale rows corresponding to scaled field
+                            kkcov[row, :] = 1 / yts * kkcov[row, :]
+                        for col in inds:  # scale columns corresponding to scaled field
+                            kkcov[:, col] = 1 / yts * kkcov[:, col]
+                # Save scaled covariance #
+                for rr in range(nrow):
+                    ind0 = rr*ncol
+                    tempcovariance2d[kk,ind0:ind0+ncol] = np.copy(kkcov[rr,:])
+                    
             # Set dummy default_id vector if defaults not used
             if np.any(np.isnan(self.default_id)):
                 self.default_id = np.zeros(md.mesh.numberofelements)
+            # Set dummy timecovariance vector if a single covariance matrix is used
+            if(numtcovmat==1):
+                self.timecovariance = np.array([md.timestepping.start_time])
             # Reshape dimensions as column array for marshalling
             dimensions = dimensions.reshape(1, len(dimensions))
@@ -212,5 +343,7 @@
             WriteData(fid, prefix, 'object', self, 'fieldname', 'default_id', 'data', self.default_id - 1, 'format', 'IntMat', 'mattype', 2)  #12Nov2021 make sure this is zero-indexed!
             WriteData(fid, prefix, 'object', self, 'fieldname', 'defaultdimension', 'format', 'Integer')
-            WriteData(fid, prefix, 'data', tempcovariance, 'name', 'md.stochasticforcing.covariance', 'format', 'DoubleMat')
+            WriteData(fid, prefix, 'data', numtcovmat, 'name', 'md.stochasticforcing.num_timescovariance', 'format', 'Integer')
+            WriteData(fid, prefix, 'data', tempcovariance2d, 'name', 'md.stochasticforcing.covariance', 'format', 'DoubleMat')
+            WriteData(fid, prefix, 'object', self, 'fieldname', 'timecovariance', 'format', 'DoubleMat', 'scale', yts)
             WriteData(fid, prefix, 'object', self, 'fieldname', 'stochastictimestep', 'format', 'Double', 'scale', yts)
             WriteData(fid, prefix, 'object', self, 'fieldname', 'randomflag', 'format', 'Boolean')
@@ -223,5 +356,5 @@
         list1 = list1.keys()
         return list(list1)
-    #}}}
+    # }}}
 
     def structstochforcing(self):  # {{{
@@ -234,7 +367,8 @@
                      'FloatingMeltRate': 'basalforcings',
                      'FrictionWaterPressure': 'friction',
-                     'FrictionCoulombWaterPressure': 'frictioncoulomb',
-                     'FrictionSchoofWaterPressure': 'frictionschoof',
+                     'FrictionWaterPressure': 'frictioncoulomb',
+                     'FrictionWaterPressure': 'frictionschoof',
                      'FrontalForcingsRignotarma': 'frontalforcingsrignotarma',
+                     'FrontalForcingsSubglacialDischargearma': 'frontalforcingsrignotarma',
                      'SMBarma': 'SMBarma',
                      'SMBforcing': 'SMBforcing'}
Index: /issm/trunk/src/m/classes/stressbalance.m
===================================================================
--- /issm/trunk/src/m/classes/stressbalance.m	(revision 28012)
+++ /issm/trunk/src/m/classes/stressbalance.m	(revision 28013)
@@ -115,5 +115,5 @@
 				disp(sprintf('\n !!! Warning: no spc applied, model might not be well posed if no basal friction is applied, check for solution crash\n'));
 			end
-			%CHECK THAT EACH LINES CONTAINS ONLY NAN VALUES OR NO NAN VALUES
+			%CHECK THAT EACH LINE CONTAINS ONLY NAN VALUES OR NO NAN VALUES
 			if any(sum(isnan(md.stressbalance.referential),2)~=0 & sum(isnan(md.stressbalance.referential),2)~=6),
 				md = checkmessage(md,['Each line of stressbalance.referential should contain either only NaN values or no NaN values']);
Index: /issm/trunk/src/m/classes/stressbalance.py
===================================================================
--- /issm/trunk/src/m/classes/stressbalance.py	(revision 28012)
+++ /issm/trunk/src/m/classes/stressbalance.py	(revision 28013)
@@ -154,26 +154,20 @@
             md = checkfield(md, 'fieldname', 'stressbalance.vertex_pairing', '>', 0)
         # Singular solution
-        #        if ~any((~isnan(md.stressbalance.spcvx) + ~isnan(md.stressbalance.spcvy)) == 2),
         if (not np.any(np.logical_or(np.logical_not(np.isnan(md.stressbalance.spcvx)), np.logical_not(np.isnan(md.stressbalance.spcvy))))) & (not np.any(md.mask.ocean_levelset>0)):
-            print("\n !!! Warning: no spc applied, model might not be well posed if no basal friction is applied, check for solution crash\n")
-        # CHECK THAT EACH LINES CONTAINS ONLY NAN VALUES OR NO NAN VALUES
-        #        if any(sum(isnan(md.stressbalance.referential), 2)~=0 & sum(isnan(md.stressbalance.referential), 2)~=6),
+            print('\n !!! Warning: no spc applied, model might not be well posed if no basal friction is applied, check for solution crash\n')
+        # CHECK THAT EACH LINES CONTAIN ONLY NAN VALUES OR NO NAN VALUES
         if np.any(np.logical_and(np.sum(np.isnan(md.stressbalance.referential), axis=1) != 0, np.sum(np.isnan(md.stressbalance.referential), axis=1) != 6)):
-            md.checkmessage("Each line of stressbalance.referential should contain either only NaN values or no NaN values")
+            md.checkmessage('Each line of stressbalance.referential should contain either only NaN values or no NaN values')
         # CHECK THAT THE TWO VECTORS PROVIDED ARE ORTHOGONAL
-        #        if any(sum(isnan(md.stressbalance.referential), 2) == 0),
         if np.any(np.sum(np.isnan(md.stressbalance.referential), axis=1) == 0):
             pos = [i for i, item in enumerate(np.sum(np.isnan(md.stressbalance.referential), axis=1)) if item == 0]
-        #            np.inner (and np.dot) calculate all the dot product permutations, resulting in a full matrix multiply
-        #            if np.any(np.abs(np.inner(md.stressbalance.referential[pos, 0:2], md.stressbalance.referential[pos, 3:5]).diagonal()) > sys.float_info.epsilon):
-        #                md.checkmessage("Vectors in stressbalance.referential (columns 1 to 3 and 4 to 6) must be orthogonal")
             for item in md.stressbalance.referential[pos, :]:
                 if np.abs(np.inner(item[0:2], item[3:5])) > sys.float_info.epsilon:
-                    md.checkmessage("Vectors in stressbalance.referential (columns 1 to 3 and 4 to 6) must be orthogonal")
+                    md.checkmessage('Vectors in stressbalance.referential (columns 1 to 3 and 4 to 6) must be orthogonal')
         # CHECK THAT NO rotation specified for FS Grounded ice at base
         if m.strcmp(md.mesh.domaintype(), '3D') and md.flowequation.isFS:
             pos = np.nonzero(np.logical_and(md.mask.ocean_levelset, md.mesh.vertexonbase))
             if np.any(np.logical_not(np.isnan(md.stressbalance.referential[pos, :]))):
-                md.checkmessage("no referential should be specified for basal vertices of grounded ice")
+                md.checkmessage('no referential should be specified for basal vertices of grounded ice')
         if md.flowequation.isMOLHO:
             md = checkfield(md, 'fieldname', 'stressbalance.spcvx_base', 'Inf', 1, 'timeseries', 1)
Index: /issm/trunk/src/m/classes/surfaceload.py
===================================================================
--- /issm/trunk/src/m/classes/surfaceload.py	(revision 28012)
+++ /issm/trunk/src/m/classes/surfaceload.py	(revision 28013)
@@ -24,5 +24,5 @@
         else:
             raise Exception('constructor not supported')
-    #}}}
+    # }}}
 
     def __repr__(self):  #{{{
@@ -32,9 +32,9 @@
         s += '{}\n'.format(fielddisplay(self, 'other', 'other loads (sediments) [kg/m^2/yr]'))
         return s
-    #}}}
+    # }}}
 
     def setdefaultparameters(self):  # {{{
         return self
-    #}}}
+    # }}}
 
     def checkconsistency(self, md, solution, analyses):  # {{{
@@ -48,5 +48,5 @@
             md = checkfield(md, 'fieldname', 'solidearth.surfaceload.other', 'timeseries', 1, 'NaN', 1, 'Inf', 1)
         return md
-    #}}}
+    # }}}
 
     def marshall(self, prefix, md, fid):  #{{{
@@ -75,7 +75,7 @@
 
         WriteData(fid, prefix, 'object', self, 'fieldname', 'otherchange', 'name', 'md.solidearth.surfaceload.otherchange', 'format', 'MatArray', 'timeserieslength', md.mesh.numberofelements + 1, 'yts', yts, 'scale', 1 / yts)
-    #}}}
+    # }}}
 
     def extrude(self, md):  #{{{
         return self
-    #}}}
+    # }}}
Index: /issm/trunk/src/m/classes/taoinversion.py
===================================================================
--- /issm/trunk/src/m/classes/taoinversion.py	(revision 28012)
+++ /issm/trunk/src/m/classes/taoinversion.py	(revision 28013)
@@ -10,5 +10,5 @@
 
 
-class taoinversion(object): #{{{
+class taoinversion(object):  # {{{
     """TAOINVERSION class definition
 
@@ -41,5 +41,5 @@
 
         self.setdefaultparameters()
-    #}}}
+    # }}}
 
     def __repr__(self):
Index: /issm/trunk/src/m/classes/thermal.py
===================================================================
--- /issm/trunk/src/m/classes/thermal.py	(revision 28012)
+++ /issm/trunk/src/m/classes/thermal.py	(revision 28013)
@@ -29,5 +29,5 @@
         self.requested_outputs = []
         self.setdefaultparameters()
-    #}}}
+    # }}}
 
     def __repr__(self):  # {{{
@@ -43,7 +43,8 @@
         s += '{}\n'.format(fielddisplay(self, 'isdrainicecolumn', 'wether waterfraction drainage is enabled for enthalpy formulation (default is 1)'))
         s += '{}\n'.format(fielddisplay(self, 'watercolumn_upperlimit', 'upper limit of basal watercolumn for enthalpy formulation (default is 1000m)'))
+        s += '{}\n'.format(fielddisplay(self, 'fe', 'Finite Element type: ''P1'' (default), ''P1xP2'''))
         s += '{}\n'.format(fielddisplay(self, 'requested_outputs', 'additional outputs requested'))
         return s
-    #}}}
+    # }}}
 
     def extrude(self, md):  # {{{
@@ -54,5 +55,5 @@
             self.spctemperature[pos] = md.initialization.temperature[pos]  #impose observed temperature on surface
         return self
-    #}}}
+    # }}}
 
     def defaultoutputs(self, md):  # {{{
@@ -61,5 +62,5 @@
         else:
             return ['Temperature', 'BasalforcingsGroundediceMeltingRate']
-    #}}}
+    # }}}
 
     def setdefaultparameters(self):  # {{{
@@ -87,5 +88,5 @@
         self.requested_outputs = ['default']
         return self
-    #}}}
+    # }}}
 
     def checkconsistency(self, md, solution, analyses):  # {{{
@@ -93,6 +94,7 @@
         if ('ThermalAnalysis' not in analyses and 'EnthalpyAnalysis' not in analyses) or (solution == 'TransientSolution' and not md.transient.isthermal):
             return md
-        md = checkfield(md, 'fieldname', 'thermal.stabilization', 'numel', [1], 'values', [0, 1, 2])
+        md = checkfield(md, 'fieldname', 'thermal.stabilization', 'numel', [1], 'values', [0, 1, 2, 3])
         md = checkfield(md, 'fieldname', 'thermal.spctemperature', 'Inf', 1, 'timeseries', 1)
+        md = checkfield(md,'fieldname', 'thermal.fe', 'values', ['P1', 'P1xP2', 'P1xP3'])
         md = checkfield(md, 'fieldname', 'thermal.requested_outputs', 'stringrow', 1)
         if 'EnthalpyAnalysis' in analyses and md.thermal.isenthalpy and md.mesh.dimension() == 3:
Index: /issm/trunk/src/m/classes/timestepping.py
===================================================================
--- /issm/trunk/src/m/classes/timestepping.py	(revision 28012)
+++ /issm/trunk/src/m/classes/timestepping.py	(revision 28013)
@@ -24,5 +24,5 @@
         else:
             raise RuntimeError('constructor not supported')
-    #}}}
+    # }}}
 
     def __repr__(self):  #{{{
@@ -37,5 +37,5 @@
         s += '{}\n'.format(fielddisplay(self, 'coupling_time', 'length of coupling time steps with ocean model [' + unit + ']'))
         return s
-    #}}}
+    # }}}
 
     def setdefaultparameters(self):  #{{{
@@ -52,5 +52,5 @@
 
         return self
-    #}}}
+    # }}}
 
     def checkconsistency(self, md, solution, analyses):  #{{{
@@ -67,5 +67,5 @@
 
         return md
-    #}}}
+    # }}}
 
     def marshall(self, prefix, md, fid):  #{{{
@@ -79,3 +79,3 @@
         WriteData(fid, prefix, 'object', self, 'fieldname', 'cycle_forcing', 'format', 'Boolean')
         WriteData(fid, prefix, 'object', self, 'fieldname', 'coupling_time', 'format', 'Double', 'scale', scale)
-    #}}}
+    # }}}
Index: /issm/trunk/src/m/classes/timesteppingadaptive.py
===================================================================
--- /issm/trunk/src/m/classes/timesteppingadaptive.py	(revision 28012)
+++ /issm/trunk/src/m/classes/timesteppingadaptive.py	(revision 28013)
@@ -40,5 +40,5 @@
         else:
             raise Exception('constructor not supported')
-    #}}}
+    # }}}
 
     def __repr__(self):  # {{{
@@ -69,5 +69,5 @@
         self.cycle_forcing   = 0
         return self
-    #}}}
+    # }}}
 
     def checkconsistency(self, md, solution, analyses):  # {{{
Index: /issm/trunk/src/m/classes/toolkits.m
===================================================================
--- /issm/trunk/src/m/classes/toolkits.m	(revision 28012)
+++ /issm/trunk/src/m/classes/toolkits.m	(revision 28013)
@@ -68,5 +68,5 @@
 					self.DefaultAnalysis           = issmgslsolver(); 
 				else 
-					disp('WARNING: Need at least Mumps or Gsl to define an issm solver type, no default solver assigned');
+					disp('WARNING: Need at least MUMPS or GSL to define an ISSM solver type, no default solver assigned');
 				end
 			end
@@ -106,4 +106,11 @@
 					case 'EsaAnalysis'
 					case 'SealevelchangeAnalysis'
+					case 'FreeSurfaceBaseAnalysis'
+					case 'FreeSurfaceTopAnalysis'
+					case 'LevelsetAnalysis'
+					case 'DebrisAnalysis'
+					case 'L2ProjectionBaseAnalysis'
+					case 'ExtrudeFromBaseAnalysis'
+					case 'ExtrudeFromTopAnalysis'
 					otherwise
 						md = checkmessage(md,['md.toolkits.' analyses{i} ' not supported yet']);
@@ -117,6 +124,6 @@
 			%TOOLKITSFILE - build toolkits file
 			%
-			%   Build a Petsc compatible options file, from the toolkits model field  + return options string. 
-			%   This file will also be used when the toolkit used is 'issm' instead of 'petsc'
+			%   Build a Petsc compatible options file, from the toolkits model field and return options string.
+			%   This file will also be used when the toolkit used is 'issm' instead of 'petsc'.
 			%
 			%   Usage:     ToolkitsFile(toolkits,filename);
Index: /issm/trunk/src/m/classes/toolkits.py
===================================================================
--- /issm/trunk/src/m/classes/toolkits.py	(revision 28012)
+++ /issm/trunk/src/m/classes/toolkits.py	(revision 28013)
@@ -1,22 +1,64 @@
+from fielddisplay import fielddisplay
+from iluasmoptions import iluasmoptions
 from IssmConfig import IssmConfig
-from mumpsoptions import mumpsoptions
-from iluasmoptions import iluasmoptions
-from fielddisplay import fielddisplay
 from issmgslsolver import issmgslsolver
 from issmmumpssolver import issmmumpssolver
+from mumpsoptions import mumpsoptions
 
 
 class toolkits(object):
-    '''
-    TOOLKITS class definition
+    """toolkits class definition
 
+    Usage:
+        self = toolkits()
+    """
+
+    def __init__(self, *args):  # {{{
+        self.DefaultAnalysis = None
+        self.RecoveryAnalysis = None
+
+        nargs = len(args)
+        if nargs == 0:
+            self.setdefaultparameters()
+        elif nargs == 1:
+            # TODO: Replace the following with constructor
+            self.setdefaultparameters()
+        else:
+            raise Exception('constructor not supported')
+    # }}}
+
+    def __repr__(self):  # {{{
+        s = "List of toolkits options per analysis:\n\n"
+        for analysis in list(vars(self).keys()):
+            s += "{}\n".format(fielddisplay(self, analysis, ''))
+
+        return s
+    # }}}
+
+    def addoptions(self, analysis, *args):  # {{{
+        """addoptions - add analysis to md.toolkits.analysis
+
+        Optional third parameter adds toolkits options to analysis.
+        
         Usage:
-            self = toolkits()
-    '''
+            md.toolkits = addoptions(md.toolkits, 'StressbalanceAnalysis', FSoptions())
+            md.toolkits = addoptions(md.toolkits, 'StressbalanceAnalysis')
+        """
 
-    def __init__(self): #{{{
-        #default toolkits
+        # Create dynamic property if property does not exist yet
+        if not hasattr(self, analysis):
+            setattr(self, analysis, None)
+
+        # Add toolkits options to analysis
+        if len(args) == 1:
+            setattr(self, analysis, args[0])
+
+        return self
+    # }}}
+
+    def setdefaultparameters(self):  # {{{
+        # Default toolkits
         if IssmConfig('_HAVE_PETSC_')[0]:
-            #MUMPS is the default toolkits
+            # MUMPS is the default toolkits
             if IssmConfig('_HAVE_MUMPS_')[0]:
                 self.DefaultAnalysis = mumpsoptions()
@@ -29,91 +71,90 @@
                 self.DefaultAnalysis = issmgslsolver()
             else:
-                raise IOError("ToolkitsFile error: need at least Mumps or Gsl to define issm solver type")
+                raise IOError('ToolkitsFile error: need at least MUMPS or GSL to define ISSM solver type, no default solver assigned')
 
-        #Use same solver for Recovery mode
+        # Use same solver for Recovery mode
         self.RecoveryAnalysis = self.DefaultAnalysis
 
-        #The other properties are dynamic
-    #}}}
+        return self
+    # }}}
 
-    def __repr__(self): #{{{
-        s = "List of toolkits options per analysis:\n\n"
-        for analysis in list(vars(self).keys()):
-            s += "{}\n".format(fielddisplay(self, analysis, ''))
+    def checkconsistency(self, md, solution, analyses):  # {{{
+        supported_analyses = [
+            'DefaultAnalysis',
+            'RecoveryAnalysis',
+            'StressbalanceAnalysis',
+            'GLheightadvectionAnalysis',
+            'MasstransportAnalysis',
+            'ThermalAnalysis',
+            'EnthalpyAnalysis',
+            'AdjointBalancethicknessAnalysis',
+            'BalancethicknessAnalysis',
+            'Balancethickness2Analysis',
+            'BalancethicknessSoftAnalysis',
+            'BalancevelocityAnalysis',
+            'DamageEvolutionAnalysis',
+            'LoveAnalysis',
+            'EsaAnalysis',
+            'SealevelchangeAnalysis',
+            'FreeSurfaceBaseAnalysis',
+            'FreeSurfaceTopAnalysis',
+            'LevelsetAnalysis',
+            'DebrisAnalysis',
+            'L2ProjectionBaseAnalysis',
+            'ExtrudeFromBaseAnalysis',
+            'ExtrudeFromTopAnalysis'
+        ]
+        analyses = list(vars(self).keys())
+        for analysis in analyses:
+            if analysis not in supported_analyses:
+                md.checkmessage('md.toolkits.{} not supported yet'.format(analysis))
 
-        return s
-    #}}}
-
-    def addoptions(self, analysis, *args): #{{{
-        # Usage example:
-        #    md.toolkits = addoptions(md.toolkits, 'StressbalanceAnalysis', FSoptions())
-        #    md.toolkits = addoptions(md.toolkits, 'StressbalanceAnalysis')
-
-        #Create dynamic property if property does not exist yet
-        if not hasattr(self, analysis):
-            setattr(self, analysis, None)
-
-        #Add toolkits options to analysis
-        if len(args) == 1:
-            setattr(self, analysis, args[0])
-
-        return self
-    #}}}
-
-    def checkconsistency(self, md, solution, analyses): #{{{
-        # TODO
-        # - Implement something closer to a switch as in 
-        # src/m/classes/toolkits.m?
-        #
-        for analysis in list(vars(self).keys()):
             if not getattr(self, analysis):
-                md.checkmessage("md.toolkits.{} is empty".format(analysis))
+                md.checkmessage('md.toolkits.{} is empty'.format(analysis))
 
         return md
-    #}}}
+    # }}}
 
-    def ToolkitsFile(self, filename): #{{{
-        '''
-        TOOLKITSFILE - build toolkits file
+    def ToolkitsFile(self, filename):  # {{{
+        """ToolkitsFile - build toolkits file
 
-            Build a Petsc compatible options file, from the toolkits model 
-            field + return options string.
-            This file will also be used when the toolkit used is 'issm' instead 
-            of 'petsc'.s
+        Build a PETSc compatible options file, from the toolkits model field and return options string.
+        This file will also be used when the toolkit used is 'issm' instead of 'petsc'.
 
-            Usage:
-                ToolkitsFile(toolkits, filename)
-        '''
+        Usage:
+            ToolkitsFile(toolkits, filename)
+        """
 
-        #open file for writing
+        # Open file for writing
         try:
             fid = open(filename, 'w')
         except IOError as e:
-            raise IOError("ToolkitsFile error: could not open {}' for writing due to".format(filename), e)
+            raise IOError('ToolkitsFile error: could not open {} for writing due to {}'.format(filename), e)
 
-        #write header
-        fid.write("%s%s%s\n" % ('%Toolkits options file: ', filename, ' written from Python toolkits array'))
+        # Write header
+        fid.write('{}{}{}\n'.format('%Toolkits options file: ', filename, ' written from Python toolkits array'))
 
-        #start writing options
+        # Start writing options
         for analysis in list(vars(self).keys()):
             options = getattr(self, analysis)
 
-            #first write analysis:
-            fid.write("\n+{}\n".format(analysis))  #append a + to recognize it's an analysis enum
-            #now, write options
+            # First write analysis
+            fid.write('\n+{}\n'.format(analysis))  # Append a + to recognize it's an analysis enum
+
+            # Now, write options
             for optionname, optionvalue in list(options.items()):
 
                 if not optionvalue:
-                    #this option has only one argument
-                    fid.write("-{}\n".format(optionname))
+                    # This option has only one argument
+                    fid.write('-{}\n'.format(optionname))
                 else:
-                    #option with value. value can be string or scalar
+                    # Option with value. Value can be string or scalar.
                     if isinstance(optionvalue, (bool, int, float)):
-                        fid.write("-{} {}\n".format(optionname, optionvalue))
+                        fid.write('-{} {}\n'.format(optionname, optionvalue))
                     elif isinstance(optionvalue, str):
-                        fid.write("-{} {}\n".format(optionname, optionvalue))
+                        fid.write('-{} {}\n'.format(optionname, optionvalue))
                     else:
-                        raise TypeError("ToolkitsFile error: option '{}' is not well formatted.".format(optionname))
+                        raise TypeError('ToolkitsFile error: option {} is not well formatted'.format(optionname))
 
         fid.close()
-    #}}}
+    # }}}
Index: /issm/trunk/src/m/classes/transient.js
===================================================================
--- /issm/trunk/src/m/classes/transient.js	(revision 28012)
+++ /issm/trunk/src/m/classes/transient.js	(revision 28013)
@@ -111,6 +111,5 @@
 		}//}}}
 		this.defaultoutputs = function(md) { //{{{
-			if(this.issmb)return ['SmbMassBalance'];
-			else return [];
+			return [];
 		}//}}}
 		this.fix=function() { //{{{
Index: /issm/trunk/src/m/classes/transient.m
===================================================================
--- /issm/trunk/src/m/classes/transient.m	(revision 28012)
+++ /issm/trunk/src/m/classes/transient.m	(revision 28013)
@@ -80,9 +80,5 @@
 		end % }}}
 		function list = defaultoutputs(self,md) % {{{
-			if(self.issmb)
-				list = {'SmbMassBalance'};
-			else
-				list = {};
-			end
+			list = {};
 		end % }}}
 		function md = checkconsistency(self,md,solution,analyses) % {{{
@@ -105,5 +101,5 @@
 			md = checkfield(md,'fieldname','transient.requested_outputs','stringrow',1);
 			md = checkfield(md,'fieldname','transient.isslc','numel',[1],'values',[0 1]);
-			md = checkfield(md,'fieldname','transient.isoceancoupling','numel',[1],'values',[0 1]);
+			md = checkfield(md,'fieldname','transient.isoceancoupling','numel',[1],'values',[0 1 2]);
 			md = checkfield(md,'fieldname','transient.issampling','numel',[1],'values',[0 1]);  
 			md = checkfield(md,'fieldname','transient.amr_frequency','numel',[1],'>=',0,'NaN',1,'Inf',1);
@@ -133,5 +129,5 @@
 			fielddisplay(self,'issampling','indicates whether sampling is used in the transient')
 			fielddisplay(self,'isslc','indicates whether a sea-level change solution is used in the transient');
-			fielddisplay(self,'isoceancoupling','indicates whether a coupling with an ocean model is used in the transient');
+			fielddisplay(self,'isoceancoupling','indicates whether a coupling with an ocean model is used in the transient (1 for cartesian coordinates, 2 for lat/long coordinates');
 			fielddisplay(self,'amr_frequency','frequency at which mesh is refined in simulations with multiple time_steps');
 			fielddisplay(self,'requested_outputs','list of additional outputs requested');
@@ -148,10 +144,10 @@
 			WriteData(fid,prefix,'object',self,'fieldname','isesa','format','Boolean');
 			WriteData(fid,prefix,'object',self,'fieldname','isdamageevolution','format','Boolean');
+			WriteData(fid,prefix,'object',self,'fieldname','ismovingfront','format','Boolean');
 			WriteData(fid,prefix,'object',self,'fieldname','ishydrology','format','Boolean');
 			WriteData(fid,prefix,'object',self,'fieldname','isdebris','format','Boolean');
-			WriteData(fid,prefix,'object',self,'fieldname','ismovingfront','format','Boolean');
 			WriteData(fid,prefix,'object',self,'fieldname','issampling','format','Boolean'); 
 			WriteData(fid,prefix,'object',self,'fieldname','isslc','format','Boolean');
-			WriteData(fid,prefix,'object',self,'fieldname','isoceancoupling','format','Boolean');
+			WriteData(fid,prefix,'object',self,'fieldname','isoceancoupling','format','Integer');
 			WriteData(fid,prefix,'object',self,'fieldname','amr_frequency','format','Integer');
 
Index: /issm/trunk/src/m/classes/transient.py
===================================================================
--- /issm/trunk/src/m/classes/transient.py	(revision 28012)
+++ /issm/trunk/src/m/classes/transient.py	(revision 28013)
@@ -11,5 +11,5 @@
     """
 
-    def __init__(self):  # {{{
+    def __init__(self, *args):  # {{{
         self.isage = 0
         self.issmb = 0
@@ -30,5 +30,8 @@
         self.requested_outputs = []
 
-        self.setdefaultparameters()
+        if len(args) == 0:
+            self.setdefaultparameters()
+        else:
+            raise Exception('constructor not supported')
     # }}}
 
@@ -49,5 +52,5 @@
         s += '{}\n'.format(fielddisplay(self, 'issampling', 'indicates whether sampling is used in the transient'))
         s += '{}\n'.format(fielddisplay(self, 'isslc', 'indicates if a sea level change solution is used in the transient'))
-        s += '{}\n'.format(fielddisplay(self, 'isoceancoupling', 'indicates whether coupling with an ocean model is used in the transient'))
+        s += '{}\n'.format(fielddisplay(self, 'isoceancoupling', 'indicates whether coupling with an ocean model is used in the transient (1 for cartesian coordinates, 2 for lat/long coordinates'))
         s += '{}\n'.format(fielddisplay(self, 'amr_frequency', 'frequency at which mesh is refined in simulations with multiple time_steps'))
         s += '{}\n'.format(fielddisplay(self, 'requested_outputs', 'list of additional outputs requested'))
@@ -56,8 +59,5 @@
 
     def defaultoutputs(self, md):  # {{{
-        if self.issmb:
-            return ['SmbMassBalance']
-        else:
-            return []
+        return []
     # }}}
 
@@ -129,5 +129,5 @@
         md = checkfield(md, 'fieldname', 'transient.ismovingfront', 'numel', [1], 'values', [0, 1])
         md = checkfield(md, 'fieldname', 'transient.isslc', 'numel', [1], 'values', [0, 1])
-        md = checkfield(md, 'fieldname', 'transient.isoceancoupling', 'numel', [1], 'values', [0, 1])
+        md = checkfield(md, 'fieldname', 'transient.isoceancoupling', 'numel', [1], 'values', [0, 1, 2])
         md = checkfield(md, 'fieldname', 'transient.amr_frequency', 'numel', [1], '>=', 0, 'NaN', 1, 'Inf', 1)
 
@@ -149,10 +149,10 @@
         WriteData(fid, prefix, 'object', self, 'fieldname', 'isesa', 'format', 'Boolean')
         WriteData(fid, prefix, 'object', self, 'fieldname', 'isdamageevolution', 'format', 'Boolean')
+        WriteData(fid, prefix, 'object', self, 'fieldname', 'ismovingfront', 'format', 'Boolean')
         WriteData(fid, prefix, 'object', self, 'fieldname', 'ishydrology', 'format', 'Boolean')
-        WriteData(fid, prefix, 'object', self, 'fieldname', 'ismovingfront', 'format', 'Boolean')
         WriteData(fid, prefix, 'object', self, 'fieldname', 'isdebris', 'format', 'Boolean')
         WriteData(fid, prefix, 'object', self, 'fieldname', 'issampling', 'format', 'Boolean')
         WriteData(fid, prefix, 'object', self, 'fieldname', 'isslc', 'format', 'Boolean')
-        WriteData(fid, prefix, 'object', self, 'fieldname', 'isoceancoupling', 'format', 'Boolean')
+        WriteData(fid, prefix, 'object', self, 'fieldname', 'isoceancoupling', 'format', 'Integer')
         WriteData(fid, prefix, 'object', self, 'fieldname', 'amr_frequency', 'format', 'Integer')
 
Index: /issm/trunk/src/m/classes/verbose.m
===================================================================
--- /issm/trunk/src/m/classes/verbose.m	(revision 28012)
+++ /issm/trunk/src/m/classes/verbose.m	(revision 28013)
@@ -114,4 +114,11 @@
 		function md = checkconsistency(self,md,solution,analyses) % {{{
 
+			if md.inversion.iscontrol
+				temp = verbose('control',1);
+				if(VerboseToBinary(self) ~= VerboseToBinary(temp))
+					disp('INFO: the outlog will look better if only md.verbose.control is turned on');
+				end
+			end
+
 		end % }}}
 		function disp(verbose) % {{{
Index: /issm/trunk/src/m/consistency/ismodelselfconsistent.m
===================================================================
--- /issm/trunk/src/m/consistency/ismodelselfconsistent.m	(revision 28012)
+++ /issm/trunk/src/m/consistency/ismodelselfconsistent.m	(revision 28013)
@@ -45,7 +45,7 @@
 
 	if strcmp(solutiontype,'StressbalanceSolution')
-		analyses={'StressbalanceAnalysis','StressbalanceVerticalAnalysis','StressbalanceSIAAnalysis','L2ProjectionBaseAnalysis'};
+		analyses={'StressbalanceAnalysis','StressbalanceVerticalAnalysis','StressbalanceSIAAnalysis','L2ProjectionBaseAnalysis','AgeAnalysis'};
 	elseif strcmp(solutiontype,'SteadystateSolution')
-		analyses={'StressbalanceAnalysis','StressbalanceVerticalAnalysis','StressbalanceSIAAnalysis','L2ProjectionBaseAnalysis','ThermalAnalysis','MeltingAnalysis','EnthalpyAnalysis'};
+		analyses={'StressbalanceAnalysis','StressbalanceVerticalAnalysis','StressbalanceSIAAnalysis','L2ProjectionBaseAnalysis','ThermalAnalysis','MeltingAnalysis','EnthalpyAnalysis','AgeAnalysis'};
 	elseif strcmp(solutiontype,'ThermalSolution')
 		analyses={'EnthalpyAnalysis','ThermalAnalysis','MeltingAnalysis'};
@@ -73,9 +73,9 @@
 		analyses={'EsaAnalysis'};
 	elseif strcmp(solutiontype,'TransientSolution')
-		analyses={'StressbalanceAnalysis','StressbalanceVerticalAnalysis','StressbalanceSIAAnalysis','L2ProjectionBaseAnalysis','ThermalAnalysis','MeltingAnalysis','EnthalpyAnalysis','MasstransportAnalysis','OceantransportAnalysis','HydrologyShaktiAnalysis','HydrologyGladsAnalysis','HydrologyShreveAnalysis','HydrologyTwsAnalysis','HydrologyDCInefficientAnalysis','HydrologyDCEfficientAnalysis','SealevelchangeAnalysis'};
+		analyses={'StressbalanceAnalysis','StressbalanceVerticalAnalysis','StressbalanceSIAAnalysis','L2ProjectionBaseAnalysis','ThermalAnalysis','MeltingAnalysis','EnthalpyAnalysis','MasstransportAnalysis','OceantransportAnalysis','HydrologyShaktiAnalysis','HydrologyGladsAnalysis','HydrologyShreveAnalysis','HydrologyTwsAnalysis','HydrologyDCInefficientAnalysis','HydrologyDCEfficientAnalysis','SealevelchangeAnalysis','AgeAnalysis','HydrologyArmapwAnalysis','AgeAnalysis','DebrisAnalysis'};
 	elseif strcmp(solutiontype,'SealevelchangeSolution')
 		analyses={'SealevelchangeAnalysis'};
 	elseif strcmp(solutiontype,'HydrologySolution')
-		analyses={'L2ProjectionBaseAnalysis','HydrologyShreveAnalysis','HydrologyDCInefficientAnalysis','HydrologyDCEfficientAnalysis','HydrologyGladsAnalysis','HydrologyShaktiAnalysis','HydrologyTwsAnalysis'};
+		analyses={'L2ProjectionBaseAnalysis','HydrologyShreveAnalysis','HydrologyDCInefficientAnalysis','HydrologyDCEfficientAnalysis','HydrologyGladsAnalysis','HydrologyShaktiAnalysis','HydrologyTwsAnalysis','HydrologyArmapwAnalysis'};
 	elseif strcmp(solutiontype,'DamageEvolutionSolution')
 		analyses={'DamageEvolutionAnalysis'};
Index: /issm/trunk/src/m/consistency/ismodelselfconsistent.py
===================================================================
--- /issm/trunk/src/m/consistency/ismodelselfconsistent.py	(revision 28012)
+++ /issm/trunk/src/m/consistency/ismodelselfconsistent.py	(revision 28013)
@@ -35,5 +35,4 @@
 # }}}
 
-
 def AnalysisConfiguration(solutiontype):  #{{{
     """ANALYSISCONFIGURATION - return type of analyses, number of analyses
@@ -46,5 +45,5 @@
         analyses = ['StressbalanceAnalysis', 'StressbalanceVerticalAnalysis', 'StressbalanceSIAAnalysis', 'L2ProjectionBaseAnalysis']
     elif solutiontype == 'SteadystateSolution':
-        analyses = ['StressbalanceAnalysis', 'StressbalanceVerticalAnalysis', 'StressbalanceSIAAnalysis', 'L2ProjectionBaseAnalysis', 'ThermalAnalysis', 'MeltingAnalysis', 'EnthalpyAnalysis']
+        analyses = ['StressbalanceAnalysis', 'StressbalanceVerticalAnalysis', 'StressbalanceSIAAnalysis', 'L2ProjectionBaseAnalysis', 'ThermalAnalysis', 'MeltingAnalysis', 'EnthalpyAnalysis','AgeAnalysis']
     elif solutiontype == 'ThermalSolution':
         analyses = ['EnthalpyAnalysis', 'ThermalAnalysis', 'MeltingAnalysis']
@@ -72,9 +71,9 @@
         analyses = ['EsaAnalysis']
     elif solutiontype == 'TransientSolution':
-        analyses = ['StressbalanceAnalysis', 'StressbalanceVerticalAnalysis', 'StressbalanceSIAAnalysis', 'L2ProjectionBaseAnalysis', 'ThermalAnalysis', 'MeltingAnalysis', 'EnthalpyAnalysis', 'MasstransportAnalysis', 'OceantransportAnalysis', 'HydrologyShaktiAnalysis', 'HydrologyGladsAnalysis', 'HydrologyShreveAnalysis', 'HydrologyTwsAnalysis', 'HydrologyDCInefficientAnalysis', 'HydrologyDCEfficientAnalysis', 'SealevelchangeAnalysis']
+        analyses = ['StressbalanceAnalysis', 'StressbalanceVerticalAnalysis', 'StressbalanceSIAAnalysis', 'L2ProjectionBaseAnalysis', 'ThermalAnalysis', 'MeltingAnalysis', 'EnthalpyAnalysis', 'MasstransportAnalysis', 'OceantransportAnalysis', 'HydrologyShaktiAnalysis', 'HydrologyGladsAnalysis', 'HydrologyShreveAnalysis', 'HydrologyTwsAnalysis', 'HydrologyDCInefficientAnalysis', 'HydrologyDCEfficientAnalysis', 'SealevelchangeAnalysis', 'AgeAnalysis', 'HydrologyArmapwAnalysis', 'DebrisAnalysis', 'AgeAnalysis']
     elif solutiontype == 'SealevelchangeSolution':
         analyses = ['SealevelchangeAnalysis']
     elif solutiontype == 'HydrologySolution':
-        analyses = ['L2ProjectionBaseAnalysis', 'HydrologyShreveAnalysis', 'HydrologyDCInefficientAnalysis', 'HydrologyDCEfficientAnalysis', 'HydrologyGladsAnalysis', 'HydrologyShaktiAnalysis', 'HydrologyTwsAnalysis']
+        analyses = ['L2ProjectionBaseAnalysis', 'HydrologyShreveAnalysis', 'HydrologyDCInefficientAnalysis', 'HydrologyDCEfficientAnalysis', 'HydrologyGladsAnalysis', 'HydrologyShaktiAnalysis', 'HydrologyTwsAnalysis', 'HydrologyArmapwAnalysis']
     elif 'DamageEvolutionSolution':
         analyses = ['DamageEvolutionAnalysis']
Index: /issm/trunk/src/m/contrib/badgeley/interpICESat2ATL1415.m
===================================================================
--- /issm/trunk/src/m/contrib/badgeley/interpICESat2ATL1415.m	(revision 28013)
+++ /issm/trunk/src/m/contrib/badgeley/interpICESat2ATL1415.m	(revision 28013)
@@ -0,0 +1,81 @@
+function hout = interpICESat2ATL1415(X,Y,ncfile14,ncfile15)
+%interpICESat2ATL1415 - interpolate ICESat2 ATL14 + ATL15 data onto X and Y
+%
+%   Input:
+%     - optional 3rd input argument: path to ATL14 dataset
+%     - optional 4th input argument: path to ATL15 dataset
+%
+%   Output: 
+%     - hout: matrix of size number_of_vertices+1 x ATL15_num_timesteps
+%             it is a P1 timeseries
+%     NOTE: The output does not include ATL14 directly. ATL14 is only used 
+%           as a reference for getting absolute surface values from ATL15.
+%
+%   Examples:
+%      surface = interpICESat2ATL1415(md.mesh.x,md.mesh.y);
+%      surface = interpICESat2ATL1415(md.mesh.x,md.mesh.y,'~/ATL14_GL_0314_100m_002_01.nc','~/ATL15_GL_0314_01km_002_01.nc');
+%
+% Version 04/27/2023 Jessica Badgeley jessica.a.badgeley@dartmouth.edu
+
+if nargin==3
+   filename_h = ncfile14;
+else
+   filename_h = '/totten_1/ModelData/Greenland/ICESat2_ATL1415/ATL14_GL_0314_100m_002_01.nc';
+end
+if nargin==4
+	filename_dh = ncfile15;
+else
+   filename_dh = '/totten_1/ModelData/Greenland/ICESat2_ATL1415/ATL15_GL_0314_01km_002_01.nc';
+end
+
+xh = ncread(filename_h,'x');
+yh = ncread(filename_h,'y');
+
+xdh = ncread(filename_dh,'delta_h/x');
+ydh = ncread(filename_dh,'delta_h/y');
+tdh = ncread(filename_dh,'delta_h/time');
+
+tref = datetime('2018-01-01-00-00-00','format','yyyy-MM-dd-hh-mm-ss');
+ths = datenum(tref + days(tdh))./365.25;
+
+offset=2;
+
+xmin=min(X(:)); xmax=max(X(:));
+
+posxh=find(xh<=xmax);
+id1xh=max(1,find(xh>=xmin,1)-offset);
+id2xh=min(numel(xh),posxh(end)+offset);
+
+posxdh=find(xdh<=xmax);
+id1xdh=max(1,find(xdh>=xmin,1)-offset);
+id2xdh=min(numel(xdh),posxdh(end)+offset);
+
+ymin=min(Y(:)); ymax=max(Y(:));
+
+posyh=find(yh>=ymin);
+id1yh=max(1,find(yh<=ymax,1)-offset);
+id2yh=min(numel(yh),posyh(end)+offset);
+
+posydh=find(ydh>=ymin);
+id1ydh=max(1,find(ydh<=ymax,1)-offset);
+id2ydh=min(numel(ydh),posydh(end)+offset);
+
+xh = xh(id1xh:id2xh);
+yh = yh(id1yh:id2yh);
+
+xdh = xdh(id1xdh:id2xdh);
+ydh = ydh(id1ydh:id2ydh);
+
+disp('   --ICESat2 ATL1415: loading surface elevations');
+h = double(ncread(filename_h,'h',[id1xh id1yh],[id2xh-id1xh+1 id2yh-id1yh+1],[1 1]));
+dh = double(ncread(filename_dh,'delta_h/delta_h',[id1xdh id1ydh 1],[id2xdh-id1xdh+1 id2ydh-id1ydh+1 length(tdh)],[1 1 1]));
+geoid = interpBedmachineGreenland(X,Y,'geoid');
+
+disp('   --ICESat2 ATL1415: interpolating');
+href = InterpFromGrid(xh,yh,h',X,Y) - geoid; %this reference DEM is for 2020.0, but it is not currently included in hout
+
+hout = ones([length(Y)+1,length(tdh)]);
+for ii = 1:length(tdh)
+	hout(1:end-1,ii) = InterpFromGrid(xdh,ydh,dh(:,:,ii)',X,Y) + href;
+	hout(end,ii) = ths(ii);
+end
Index: /issm/trunk/src/m/contrib/badgeley/interpISMIP6GreenlandSMB.m
===================================================================
--- /issm/trunk/src/m/contrib/badgeley/interpISMIP6GreenlandSMB.m	(revision 28013)
+++ /issm/trunk/src/m/contrib/badgeley/interpISMIP6GreenlandSMB.m	(revision 28013)
@@ -0,0 +1,115 @@
+function smb = interpISMIP6GreenlandSMB(md,model_name,scenario,surface_ref,path)
+%interpISMIP6GreenlandSMB - interpolate chosen ISMIP6 atmospheric forcing to model
+%
+%   Input:
+%     - md (model object)
+%     - model_name  (string): name of the climate model 
+%                             Examples: ACCESS1.3, CESM2, CNRM-CM6, CNRM-ESM2, CSIRO-Mk3.6, HadGEM2-ES,
+%                                       IPSL-CM5-MR, MIROC5, NorESM1, UKESM1-CM6
+%     - scenario    (string): name of the climate scenario
+%                             Examples: rcp26, rcp85, ssp126, ssp585
+%     - surface_ref (vector): optional 4th arg - reference surface for year 2015(?) (default is md.geometry.surface)
+%     - path        (string): optional 5th arg - directory path for model forcings - should end in "/aSMB_observed/v1/" 
+%
+%   Output:
+%     - smb: prepared to be input directly into md.smb
+%
+%   Examples:
+%      md.smb = interpISMIP6GreenlandSMB(md,'MIROC5','rcp85');
+%      md.smb = interpISMIP6GreenlandSMB(md,'MIROC5','rcp26',md.geometry.surface,'ISMIP6/Projections/GrIS/Atmosphere_Forcing/aSMB_observed/v1/');
+%
+%   Notes:
+%      1) This function currently uses RACMO as the reference climate. If you wish to use MAR instead, you will need to implement it.
+%      2) This function provides smb forcing for 2015 to 2100. If you want other years, you will need to implement this option.
+%      3) NOT YET IMPLEMENTED: If you would like to do a control run, give any string for the model_name and put 'control' as the scenario. 
+%                              Should the control climate have an elevation adjustment?
+%
+% Version 10/25/2023 Jessica Badgeley jessica.a.badgeley@dartmouth.edu
+
+if (nargin<4) | (length(surface_ref) ~= md.mesh.numberofvertices)
+	disp('Setting surface_ref to md.geometry.surface');
+   surface_ref = md.geometry.surface; 
+end
+if nargin<5
+   % Find appropriate directory
+   switch oshostname(),
+      case {'totten'}
+         path='/totten_1/ModelData/ISMIP6/Projections/GrIS/Atmosphere_Forcing/aSMB_observed/v1/';
+       otherwise
+         error('machine not supported yet, please provide your own path');
+   end
+end
+
+rootname = [path model_name '-' scenario '/'];
+if ~exist(rootname,'dir')
+   error(['this path does not exist or the ' model_name ' and ' scenario ' are not available in this combination.']);
+end
+
+% Process the aSMB and dSMBdz variables
+yrs = 2015:1:2100;
+smb_anom = zeros(md.mesh.numberofvertices+1,length(yrs))*NaN;
+smb_b    = zeros(md.mesh.numberofvertices+1,length(yrs))*NaN;
+
+X = md.mesh.x;
+Y = md.mesh.y;
+
+yr_ind = 0;
+for yr = yrs
+   yr_ind = yr_ind + 1;
+
+	ncfile_anom = [rootname 'aSMB/aSMB_MARv3.9-yearly-' model_name '-' scenario '-' num2str(yr) '.nc'];
+	ncfile_b    = [rootname 'dSMBdz/dSMBdz_MARv3.9-yearly-' model_name '-' scenario '-' num2str(yr) '.nc'];
+
+	%only need to do this once if all files are the same size
+	if yr_ind == 1
+      xdata = double(ncread(ncfile_anom,'x'));
+      ydata = double(ncread(ncfile_anom,'y'));
+
+      offset=2;
+      
+      xmin=min(X(:)); xmax=max(X(:));
+      posx=find(xdata<=xmax);
+      id1x=max(1,find(xdata>=xmin,1)-offset);
+      id2x=min(numel(xdata),posx(end)+offset);
+      
+      ymin=min(Y(:)); ymax=max(Y(:));
+      posy=find(ydata<=ymax);
+      id1y=max(1,find(ydata>=ymin,1)-offset);
+      id2y=min(numel(ydata),posy(end)+offset);
+	  
+		xdata=xdata(id1x:id2x);
+      ydata=ydata(id1y:id2y);
+	end
+
+   data_anom = double(ncread(ncfile_anom,'aSMB',[id1x id1y 1],[id2x-id1x+1 id2y-id1y+1 1],[1 1 1]))';
+   data_b = double(ncread(ncfile_b,'dSMBdz',[id1x id1y 1],[id2x-id1x+1 id2y-id1y+1 1],[1 1 1]))';
+
+   data_anom(find(data_anom==9.96921e+36))=NaN;
+	data_b(find(data_b==9.96921e+36))=NaN;
+
+	smb_anom(1:end-1,yr_ind) = InterpFromGrid(xdata,ydata,data_anom,double(X),double(Y));
+	smb_b(1:end-1,yr_ind) = InterpFromGrid(xdata,ydata,data_b,double(X),double(Y));
+
+	smb_anom(end,yr_ind) = yr;
+	smb_b(end,yr_ind) = yr;
+end
+
+% Convert units: from kg m-2 s-1 to m/yr ice eq using the # seconds/year given by ISMIP6 materials
+smb_anom(1:end-1,:) = smb_anom(1:end-1,:) * 31556926 / 1000 * (md.materials.rho_freshwater/md.materials.rho_ice);
+smb_b(1:end-1,:) = smb_b(1:end-1,:) * 31556926 / 1000 * (md.materials.rho_freshwater/md.materials.rho_ice);
+
+% Load the reference period SMB (RACMO mean 1969-1980)  
+smb_ref = interpRACMO1km(X,Y);
+
+% Calculate the total SMB
+smb_tot = smb_anom;
+smb_tot(1:end-1,:) = smb_tot(1:end-1,:) + repmat(smb_ref,[1,length(yrs)]);
+
+% Prepare the SMB output
+smb = SMBgradients();
+smb.href = surface_ref;
+smb.smbref = smb_tot;
+smb.b_pos = smb_b;
+smb.b_neg = smb_b;
+
+end
Index: /issm/trunk/src/m/contrib/badgeley/interpMouginotCatchments.m
===================================================================
--- /issm/trunk/src/m/contrib/badgeley/interpMouginotCatchments.m	(revision 28013)
+++ /issm/trunk/src/m/contrib/badgeley/interpMouginotCatchments.m	(revision 28013)
@@ -0,0 +1,66 @@
+function catchments = interpMouginotCatchments(X,Y,ncfile,main_icesheet_only)
+%%Interpolate Mouginot's catchment IDs from Chad Greene's netcdf file for ice fronts onto mesh grid
+%
+%   Input:
+%     - optional 3rd input argument: path to netcdf file from Chad Greene
+%     - optional 4th input argument: set to True if you want to ignore catchments that are not part 
+%                                    of the main ice sheet (i.e., it wraps basins 0, 226-230, 256, and
+%                                    260-261 into the nearest other basins)
+%
+%   Output: 
+%     - catchments - catchmentd IDs interpolated onto mesh grid
+%
+%   Examples:
+%      X = mean(md.mesh.x(md.mesh.elements),2);
+%      Y = mean(md.mesh.y(md.mesh.elements),2);
+%      surface = interpMouginotCatchments(X,Y);
+%      surface = interpMouginotCatchments(X,Y,'~/greenland_ice_masks_1972-2022_v1.nc');
+%
+%   NOTE: This function
+%
+% Version 10/11/2023 Jessica Badgeley jessica.a.badgeley@dartmouth.edu
+
+if nargin>=3
+   filename = ncfile;
+else
+	filename = '/totten_1/ModelData/Greenland/IceFrontsGreene/greenland_ice_masks_1972-2022_v1.nc';
+end
+if nargin<4
+	main_icesheet_only = False;
+end
+
+x = ncread(filename,'x');
+y = ncread(filename,'y');
+
+offset=2;
+
+xmin=min(X(:)); xmax=max(X(:));
+
+posx=find(x<=xmax);
+id1x=max(1,find(x>=xmin,1)-offset);
+id2x=min(numel(x),posx(end)+offset);
+
+ymin=min(Y(:)); ymax=max(Y(:));
+
+posy=find(y>=ymin);
+id1y=max(1,find(y<=ymax,1)-offset);
+id2y=min(numel(y),posy(end)+offset);
+
+x = x(id1x:id2x);
+y = y(id1y:id2y);
+
+disp('   --Mouginot Catchments: loading');
+catch_id = double(ncread(filename,'catchment',[id1x id1y],[id2x-id1x+1 id2y-id1y+1],[1 1]));
+
+disp('   --Mouginot Catchments: interpolating');
+catchments = InterpFromGrid(x,y,catch_id',X,Y,'nearest'); 
+
+if main_icesheet_only
+   pos0 = find((catchments==226) | (catchments==227) | (catchments==228) | ...
+      (catchments==229) | (catchments==230) | (catchments==256) | ...
+      (catchments==260) | (catchments==0) | (catchments==261));
+   pos = find((catchments~=226) & (catchments~=227) & (catchments~=228) & ...
+      (catchments~=229) & (catchments~=230) & (catchments~=256) & ...
+      (catchments~=260) & (catchments~=0) & (catchments~=261));
+   catchments(pos0) = griddata(X(pos), Y(pos), catchments(pos), X(pos0), Y(pos0),'nearest');
+end
Index: /issm/trunk/src/m/contrib/badgeley/interpRACMO23p2smb.m
===================================================================
--- /issm/trunk/src/m/contrib/badgeley/interpRACMO23p2smb.m	(revision 28013)
+++ /issm/trunk/src/m/contrib/badgeley/interpRACMO23p2smb.m	(revision 28013)
@@ -0,0 +1,99 @@
+function [output] = interpRACMO23p2smb(X,Y,t_start,t_end),
+%interpRACMO23p2smb - interpolate RACMO23p2 SMB data downscaled to 1km to model grid
+%
+%   Input:
+%     - X:       model x values
+%     - Y:       model y values
+%     - t_start: first decimal year time for which you want SMB data (float)
+%     - t_end:   last decimal year time for which you want SMB data (float)
+%
+%   Output:
+%     - output:  matrix of size number_of_vertices+1 x number of time steps
+%                it is a P1 timeseries
+%
+%   Examples:
+%      smb = interpRACMO21p2smb(md.mesh.x,md.mesh.y,2007.0,2022.5);
+%
+% Version 08/18/2023 Jessica Badgeley jessica.a.badgeley@dartmouth.edu
+
+% Find paths to netCDF files
+ncpath = '/totten_1/ModelData/Greenland/RACMO23p2_2022_Greenland/smb';
+filestruct = dir([ncpath '/*.nc']);
+filenames = {filestruct(:).name};
+directories = {filestruct(:).folder};
+
+% Initializat output matrix
+output = zeros([length(X)+1,1])*NaN;
+
+count = 1;
+for ii = 1:length(filenames)
+
+	% Extract information about the file
+	filename = filenames{ii};
+   filename_split = split(filename,'.');
+	yr_str = filename_split{2};
+	yr_date = datetime([yr_str '-01-01']);
+
+	% If the file is not within the requested time range, skip it
+	if (str2num(yr_str) < floor(t_start)) | (str2num(yr_str) > t_end)
+		continue
+	end
+
+	% Load x and y data from netCDF file
+	% LAT and LON in this file are actually northing and easting in km
+   xdata_mat = double(ncread([directories{ii} '/' filename],'LON')*1000);
+   x = xdata_mat(:,1)';
+   ydata_mat = double(ncread([directories{ii} '/' filename],'LAT')*1000);
+   y = ydata_mat(1,:);
+
+	% Find subset of netCDF file to use
+   offset=2;
+   
+   xmin=min(X(:)); xmax=max(X(:));
+   
+   posx=find(x<=xmax);
+   id1x=max(1,find(x>=xmin,1)-offset);
+   id2x=min(numel(x),posx(end)+offset);
+   
+   ymin=min(Y(:)); ymax=max(Y(:));
+   
+   posy=find(y>=ymin);
+   id1y=max(1,find(y<=ymax,1)-offset);
+   id2y=min(numel(y),posy(end)+offset);
+
+   x = x(id1x:id2x);
+   y = y(id1y:id2y);
+
+	% Load time and convert to decimal year
+	time_temp = double(ncread([directories{ii} '/' filename],'time'));
+	time = decyear(yr_date + days(time_temp));
+
+	% Calulate number of days in year for unit transformation
+   daysinyear = daysact(yr_date,yr_date+calyears(1));
+
+	% Load SMB data
+	disp(['   -- RACMO23p2: reading smb for year ' yr_str]);
+	data = double(ncread([directories{ii} '/' filename],'smb_rec',[id1x id1y 1],[id2x-id1x+1 id2y-id1y+1 length(time)],[1 1 1]));
+	data(data<=-1.e+20)=nan;
+
+	% Loop through months, transform units, regrid, and put into output matrix 
+	for jj = 1:size(data,3)
+
+		% Calculate number of days in month for unit transformation
+      daysinmonth = daysact(yr_date+calmonths(jj-1),yr_date+calmonths(jj));
+
+		% Regrid
+		% Transform units from mm w.e./month to m ice/year assuming ice is 917 kg/m3
+      unit_transformation = (daysinyear / daysinmonth) * (917/1000) / 1000; 
+
+		% Put data and times into output matrix
+		if count == 1
+			output(1:end-1,1) = InterpFromGrid(x,y,data(:,:,jj)'*unit_transformation,double(X),double(Y));
+		else
+	      output(1:end-1,end+1) = InterpFromGrid(x,y,data(:,:,jj)'*unit_transformation,double(X),double(Y));
+		end
+	   output(end,end) = time(jj);
+
+		count = count + 1;
+	end
+end
Index: /issm/trunk/src/m/contrib/bgetraer/createMCC.m
===================================================================
--- /issm/trunk/src/m/contrib/bgetraer/createMCC.m	(revision 28013)
+++ /issm/trunk/src/m/contrib/bgetraer/createMCC.m	(revision 28013)
@@ -0,0 +1,49 @@
+function createMCC(filename)
+%CREATEMCC takes an existing matlab script and compiles it to an executable file.
+% A number of files are produced, including 'run_MCCexecutable.sh' and 'MCCexecutable,'
+% and are saved into a directory './mccfiles' which is cleared if it already exists and is
+% created if it does not.
+%
+%USEAGE
+%   $ matlab 
+%   >> createMCC(filename);
+%   >> exit
+%   $ ./mccfiles/run_MCCexecutable.sh matlab
+%
+%INPUT
+%   filename   .m file to be turned into an executable
+%
+%OUTPUT
+%   no direct output is produced, however CREATEMCC creates a number of files in ./mccfiles
+
+% check if mccfiles directory exists in current directory
+if exist('./mccfiles','dir')
+	!rm ./mccfiles/* 
+else
+	mkdir ./mccfiles
+end
+
+%Get dependencies
+files = matlab.codetools.requiredFilesAndProducts(filename);
+
+%Creaste long string for command, ignore Matlab's statistical toolbox license
+deps = [];
+for i=1:numel(files)
+   if contains(files{i},'normfit_issm.m')
+      continue
+   elseif contains(files{i},'dakota_moments.m')
+      continue
+   elseif contains(files{i},'dakota_out_parse.m')
+      continue
+   else
+      deps = [deps ' ' files{i}];
+   end
+end
+
+%Create command
+command = ['mcc -m ' deps ' -o MCCexecutable'];
+%Create executable
+disp('Creating mccfiles');
+cd ./mccfiles
+system(command);
+cd ..
Index: /issm/trunk/src/m/contrib/buzzi/gravity/vfsa.cpp
===================================================================
--- /issm/trunk/src/m/contrib/buzzi/gravity/vfsa.cpp	(revision 28012)
+++ /issm/trunk/src/m/contrib/buzzi/gravity/vfsa.cpp	(revision 28013)
@@ -818,5 +818,5 @@
 	}
 
-	/*Free ressources:*/
+	/*Free resources:*/
 	delete threads;
 	delete handles;
Index: /issm/trunk/src/m/contrib/chenggong/dataprocessing/analyzeCalvingFront.m
===================================================================
--- /issm/trunk/src/m/contrib/chenggong/dataprocessing/analyzeCalvingFront.m	(revision 28012)
+++ /issm/trunk/src/m/contrib/chenggong/dataprocessing/analyzeCalvingFront.m	(revision 28013)
@@ -5,6 +5,4 @@
 %                   [Xmain, positionx, positiony, time], icemask, calving rate,
 %                   melting rate and velocity magnitude, sigma, thickness
-% Author: Cheng Gong
-% Last modified: 2020-09-25
 
 transient = isfield(md.results,'TransientSolution');
Index: /issm/trunk/src/m/contrib/chenggong/dataprocessing/averageOverTime.m
===================================================================
--- /issm/trunk/src/m/contrib/chenggong/dataprocessing/averageOverTime.m	(revision 28012)
+++ /issm/trunk/src/m/contrib/chenggong/dataprocessing/averageOverTime.m	(revision 28013)
@@ -13,6 +13,4 @@
 % if within the range, do linear interpolation.
 %
-%   Author: Cheng Gong
-%   Last modified: 2020-09-09
 if nargin < 4
     endP = startP;
Index: /issm/trunk/src/m/contrib/chenggong/dataprocessing/computeGrad.m
===================================================================
--- /issm/trunk/src/m/contrib/chenggong/dataprocessing/computeGrad.m	(revision 28012)
+++ /issm/trunk/src/m/contrib/chenggong/dataprocessing/computeGrad.m	(revision 28013)
@@ -1,4 +1,4 @@
 function [gradx, grady]=computeGrad(index,x,y,field)
-%COMPUTEHESSIAN - compute the gradient from a field
+%COMPUTEGRAD - compute the gradient from a field
 
 %some variables
Index: /issm/trunk/src/m/contrib/chenggong/dataprocessing/expxy2shpll.m
===================================================================
--- /issm/trunk/src/m/contrib/chenggong/dataprocessing/expxy2shpll.m	(revision 28013)
+++ /issm/trunk/src/m/contrib/chenggong/dataprocessing/expxy2shpll.m	(revision 28013)
@@ -0,0 +1,69 @@
+function expxy2shpll(expfilename,shpfilename,geometry)
+%expxy2shpll
+%  Convert .exp to .shp file
+%
+%   Usage:
+%			expxy2shpll('glacier.exp', 'glacier.shp')
+%			expxy2shpll('glacier.exp', 'glacier.shp', geometry)
+%
+%		geometry (optional)-	'MultiPoint' : point clouds
+%									'Point' : single point
+%									'Line' : two points line
+%									'polygon' : multiple points
+%
+
+%check file extensions
+[pathstr,name,ext] = fileparts(shpfilename);
+if ~strcmp(ext,'.shp'),
+   error(['Shapefile ' shpfilename ' does not have an extension .shp']);
+end
+
+[pathstr,name,ext] = fileparts(expfilename);
+if ~strcmp(ext,'.exp'),
+   error(['Exp file ' expfilename ' does not have an extension .exp']);
+end
+
+shp=expread(expfilename);
+[lat, lon] = xy2ll(shp.x, shp.y, 1, 45, 70);
+shp.x = lon;
+shp.y = lat;
+
+%initialize number of profile
+count=1;
+
+contours=struct([]);
+for i=1:length(shp),
+   if nargin < 3
+
+      %TEMP
+      %if contains(shp(i).name,'_pointcloud');
+      %  continue;
+      %end
+
+      if length(shp(i).x) == 0
+         continue;
+      elseif contains(shp(i).name,'_pointcloud');
+         geometry = 'MultiPoint';
+         shp(i).name = erase(shp(i).name,'_pointcloud');
+      elseif length(shp(i).x) == 1
+         geometry = 'Point';
+      elseif length(shp(i).x) < 3
+         geometry = 'Line';
+      else
+         if (shp(i).x(end)==shp(i).x(1) && shp(i).y(end)==shp(i).y(1)),
+            geometry = 'Polygon';
+         else
+            geometry = 'Line';
+         end
+      end
+   end
+   contours(count).Geometry=geometry;
+   contours(count).id=i;
+   contours(count).Name=shp(i).name;
+   contours(count).X=shp(i).x;
+   contours(count).Y=shp(i).y;
+   count = count+1;
+end
+
+%Make sure it is one single geometry otherwise it will yell at you
+shapewrite(contours,shpfilename);
Index: /issm/trunk/src/m/contrib/chenggong/dataprocessing/extractTransientSolutions.m
===================================================================
--- /issm/trunk/src/m/contrib/chenggong/dataprocessing/extractTransientSolutions.m	(revision 28012)
+++ /issm/trunk/src/m/contrib/chenggong/dataprocessing/extractTransientSolutions.m	(revision 28013)
@@ -9,5 +9,7 @@
 transientSolutions.thickness = cell2mat({md.results.TransientSolution(:).Thickness});
 transientSolutions.SigmaVM = cell2mat({md.results.TransientSolution(:).SigmaVM});
-transientSolutions.smb = cell2mat({md.results.TransientSolution(:).SmbMassBalance});
+if (isfield(md.results.TransientSolution, 'SmbMassBalance'))
+	transientSolutions.smb = cell2mat({md.results.TransientSolution(:).SmbMassBalance});
+end
 transientSolutions.ice_levelset = cell2mat({md.results.TransientSolution(:).MaskIceLevelset});
 transientSolutions.calvingRate = cell2mat({md.results.TransientSolution(:).CalvingCalvingrate});
Index: /issm/trunk/src/m/contrib/chenggong/dataprocessing/fillInNan.m
===================================================================
--- /issm/trunk/src/m/contrib/chenggong/dataprocessing/fillInNan.m	(revision 28012)
+++ /issm/trunk/src/m/contrib/chenggong/dataprocessing/fillInNan.m	(revision 28013)
@@ -3,6 +3,4 @@
 %		data should have the same size as md.mesh.x
 %
-% Author: Cheng Gong
-% Last modified: 2021-12-08
 nanvflag = find(isnan(data));
 NNanv = length(nanvflag);
Index: /issm/trunk/src/m/contrib/chenggong/dataprocessing/integrateOverDomain.m
===================================================================
--- /issm/trunk/src/m/contrib/chenggong/dataprocessing/integrateOverDomain.m	(revision 28012)
+++ /issm/trunk/src/m/contrib/chenggong/dataprocessing/integrateOverDomain.m	(revision 28013)
@@ -8,11 +8,13 @@
 	weights = ones(size(data));
 	if nargin<3
-		masked = [];
+		masked = logical(zeros(size(data)));
 	end
 end
 
+masked = masked | isnan(data) | isnan(weights);
 % Set the area with masked=1 to nan
 data(masked) = nan;
 weights(masked) =nan;
+
 
 % get the mesh
@@ -28,5 +30,5 @@
 eleAreas = 1/3*eleAreas.*(weights(elements(:,1),:)+weights(elements(:,2),:)+weights(elements(:,3),:));
 
-intData = sum(eleData(:),'omitnan');
-areas = sum(eleAreas(:),'omitnan');
-meanData = intData / areas;
+intData = sum(eleData, 1, 'omitnan');
+areas = sum(eleAreas, 1, 'omitnan');
+meanData = intData ./ areas;
Index: /issm/trunk/src/m/contrib/chenggong/dataprocessing/interpZeroPos.m
===================================================================
--- /issm/trunk/src/m/contrib/chenggong/dataprocessing/interpZeroPos.m	(revision 28012)
+++ /issm/trunk/src/m/contrib/chenggong/dataprocessing/interpZeroPos.m	(revision 28013)
@@ -14,6 +14,4 @@
 %                x0 = -------------
 %                        y1-y2
-% Author: Cheng Gong
-% Last modified: 2020-10-20
 [m, n] = size(X);
 [my, k] = size(Y);
Index: /issm/trunk/src/m/contrib/chenggong/dataprocessing/npsd.m
===================================================================
--- /issm/trunk/src/m/contrib/chenggong/dataprocessing/npsd.m	(revision 28012)
+++ /issm/trunk/src/m/contrib/chenggong/dataprocessing/npsd.m	(revision 28013)
@@ -7,6 +7,4 @@
 %   freq: frequency(non-negative half)
 %
-%   Author: Cheng Gong
-%   Date: 2021-08-17
 
 % length of data
Index: /issm/trunk/src/m/contrib/chenggong/dataprocessing/projectToFlowlines.m
===================================================================
--- /issm/trunk/src/m/contrib/chenggong/dataprocessing/projectToFlowlines.m	(revision 28012)
+++ /issm/trunk/src/m/contrib/chenggong/dataprocessing/projectToFlowlines.m	(revision 28013)
@@ -6,6 +6,4 @@
 %	fy			-	y coordinates of the flowline
 %
-% Author: Cheng Gong
-% Last modified: 2021-01-27
 
 function valueC = projectToFlowlines(md, pValue, fx, fy)
Index: /issm/trunk/src/m/contrib/chenggong/dataprocessing/psd.m
===================================================================
--- /issm/trunk/src/m/contrib/chenggong/dataprocessing/psd.m	(revision 28012)
+++ /issm/trunk/src/m/contrib/chenggong/dataprocessing/psd.m	(revision 28013)
@@ -7,6 +7,4 @@
 %   freq: frequency(non-negative half)
 %
-%   Author: Cheng Gong
-%   Date: 2021-08-17
 
 % length of data
Index: /issm/trunk/src/m/contrib/chenggong/dataprocessing/wfDistance.m
===================================================================
--- /issm/trunk/src/m/contrib/chenggong/dataprocessing/wfDistance.m	(revision 28012)
+++ /issm/trunk/src/m/contrib/chenggong/dataprocessing/wfDistance.m	(revision 28013)
@@ -9,6 +9,4 @@
 %   w2:  Wasserstein-Fourier Distance, or W2 distance of sx and sy
 %
-%   Author: Cheng Gong
-%   Date: 2021-08-18
 N = length(x);
 Sinv=linspace(0,1,N);
Index: sm/trunk/src/m/contrib/chenggong/expxy2shpll.m
===================================================================
--- /issm/trunk/src/m/contrib/chenggong/expxy2shpll.m	(revision 28012)
+++ 	(revision )
@@ -1,69 +1,0 @@
-function expxy2shpll(expfilename,shpfilename,geometry)
-%expxy2shpll
-%  Convert .exp to .shp file
-%
-%   Usage:
-%			expxy2shpll('glacier.exp', 'glacier.shp')
-%			expxy2shpll('glacier.exp', 'glacier.shp', geometry)
-%
-%		geometry (optional)-	'MultiPoint' : point clouds
-%									'Point' : single point
-%									'Line' : two points line
-%									'polygon' : multiple points
-%
-
-%check file extensions
-[pathstr,name,ext] = fileparts(shpfilename);
-if ~strcmp(ext,'.shp'),
-   error(['Shapefile ' shpfilename ' does not have an extension .shp']);
-end
-
-[pathstr,name,ext] = fileparts(expfilename);
-if ~strcmp(ext,'.exp'),
-   error(['Exp file ' expfilename ' does not have an extension .exp']);
-end
-
-shp=expread(expfilename);
-[lat, lon] = xy2ll(shp.x, shp.y, 1, 45, 70);
-shp.x = lon;
-shp.y = lat;
-
-%initialize number of profile
-count=1;
-
-contours=struct([]);
-for i=1:length(shp),
-   if nargin < 3
-
-      %TEMP
-      %if contains(shp(i).name,'_pointcloud');
-      %  continue;
-      %end
-
-      if length(shp(i).x) == 0
-         continue;
-      elseif contains(shp(i).name,'_pointcloud');
-         geometry = 'MultiPoint';
-         shp(i).name = erase(shp(i).name,'_pointcloud');
-      elseif length(shp(i).x) == 1
-         geometry = 'Point';
-      elseif length(shp(i).x) < 3
-         geometry = 'Line';
-      else
-         if (shp(i).x(end)==shp(i).x(1) && shp(i).y(end)==shp(i).y(1)),
-            geometry = 'Polygon';
-         else
-            geometry = 'Line';
-         end
-      end
-   end
-   contours(count).Geometry=geometry;
-   contours(count).id=i;
-   contours(count).Name=shp(i).name;
-   contours(count).X=shp(i).x;
-   contours(count).Y=shp(i).y;
-   count = count+1;
-end
-
-%Make sure it is one single geometry otherwise it will yell at you
-shapewrite(contours,shpfilename);
Index: sm/trunk/src/m/contrib/chenggong/interpFromMEaSUREsGeotiff.m
===================================================================
--- /issm/trunk/src/m/contrib/chenggong/interpFromMEaSUREsGeotiff.m	(revision 28012)
+++ 	(revision )
@@ -1,64 +1,0 @@
-function dataout = interpFromMEaSUREsGeotiff(X,Y,Tstart,Tend,varargin)
-%interpFromMEaSUREsGeotiff: 
-%	This function calls src/m/contrib/morlighem/modeldata/interpFromGeotiff.m for multiple times to load all avaliable 
-%	tif data in  /totten_1/ModelData/Greenland/VelMEaSUREs/Jakobshavn_2008_2021/ within the given time period (in decimal years)
-%	For some reason, each .tif file in this folder contains two sets of data, only the first dataset is useful
-%
-%   Usage:
-%		 dataout = interpFromMEaSUREsGeotiff(X,Y,Tstart,Tend, varargin)
-%
-%	X, Y are the coordinates of the mesh 
-%	Tstart and Tend decimal year of the start and end time
-%
-%   Example:
-%			obsData = interpFromMEaSUREsGeotiff(md.mesh.x,md.mesh.y, tstart, tend);
-%
-%   Options:
-%      - 'glacier':  which glacier to look for
-options    = pairoptions(varargin{:});
-glacier    = getfieldvalue(options,'glacier','Jakobshavn');
-
-if strcmp(glacier, 'Jakobshavn')
-	foldername = '/totten_1/ModelData/Greenland/VelMEaSUREs/Jakobshavn_2008_2021/';
-elseif strcmp(glacier, 'Kangerlussuaq')
-	foldername = '/totten_1/ModelData/Greenland/VelMEaSUREs/Kangerlussuaq_2006_2021/';
-elseif strcmp(glacier, 'Store')
-	foldername = '/totten_1/ModelData/Greenland/VelMEaSUREs/Store_2008_2021/';
-elseif strcmp(glacier, 'Rink')
-	foldername = '/totten_1/ModelData/Greenland/VelMEaSUREs/Rink_2008_2022/';
-else
-	error(['The velocity data for ', glacier, ' is not available, please download from NSIDC first.']);
-end
-
-% get the time info from file names
-templist = dir([foldername,'*.meta']);
-Ndata = length(templist);
-dataTstart = zeros(Ndata,1);
-dataTend = zeros(Ndata,1);
-
-for i = 1:Ndata
-	tempConv = split(templist(i).name, '_');
-	% follow the naming convention
-	dataPrefix(i) = join(tempConv(1:5), '_');
-	dataTstart(i) = date2decyear(datenum(tempConv{3}));
-	dataTend(i) = date2decyear(datenum(tempConv{4}));
-end
-disp(['  Found ', num2str(Ndata), ' records in ', foldername]);
-disp(['    from ', datestr(decyear2date(min(dataTstart)),'yyyy-mm-dd'), ' to ', datestr(decyear2date(max(dataTend)),'yyyy-mm-dd') ]);
-
-
-% find all the data files with Tstart<=t<=Tend
-dataInd = (dataTend>=Tstart) & (dataTstart<=Tend);
-disp([' For the selected period: ', datestr(decyear2date((Tstart)),'yyyy-mm-dd'), ' to ', datestr(decyear2date((Tend)),'yyyy-mm-dd'), ', there are ', num2str(sum(dataInd)), ' records' ]);
-
-dataToLoad = dataPrefix(dataInd);
-TstartToload = dataTstart(dataInd);
-TendToload = dataTend(dataInd);
-
-for i = 1:length(dataToLoad)
-	dataout(i).vx = interpFromGeotiff([foldername, dataToLoad{i}, '_vx_v04.0.tif'], X, Y, 2e9);
-	dataout(i).vy = interpFromGeotiff([foldername, dataToLoad{i}, '_vy_v04.0.tif'], X, Y, 2e9);
-	dataout(i).vel = interpFromGeotiff([foldername, dataToLoad{i}, '_vv_v04.0.tif'], X, Y, -1);
-	dataout(i).Tstart = TstartToload(i);
-	dataout(i).Tend = TendToload(i);
-end
Index: /issm/trunk/src/m/contrib/chenggong/modeldata/interpFromAtlasDEM.m
===================================================================
--- /issm/trunk/src/m/contrib/chenggong/modeldata/interpFromAtlasDEM.m	(revision 28013)
+++ /issm/trunk/src/m/contrib/chenggong/modeldata/interpFromAtlasDEM.m	(revision 28013)
@@ -0,0 +1,106 @@
+function dataout = interpFromAtlasDEM(X,Y,Tstart,Tend,varargin)
+	%interpFromAtlasDEM: 
+	%	This function calls src/m/contrib/morlighem/modeldata/interpFromGeotiff.m for multiple times to load all avaliable 
+	%
+	%   Usage:
+	%		 dataout = interpFromAtlasDEM(X,Y,Tstart,Tend, varargin)
+	%
+	%	X, Y are the coordinates of the mesh 
+	%	Tstart and Tend decimal year of the start and end time
+	%
+	%   Example:
+	%			obsData = interpFromAtlasDEM(md.mesh.x,md.mesh.y, tstart, tend);
+	%
+	%   Options:
+	options    = pairoptions(varargin{:});
+
+	foldername = '/totten_1/ModelData/Greenland/Helheim_ATLAS/';
+
+	% get the time info from file names
+	templist = dir([foldername,'*.tif']);
+	Ndata = length(templist);
+	dataTime = zeros(Ndata,1);
+
+	for i = 1:Ndata
+		tempConv = split(templist(i).name, '-');
+		% follow the naming convention
+		dataTime(i) = date2decyear(datenum(tempConv{1}, 'yymmdd_hhMMss'));
+	end
+	disp(['  Found ', num2str(Ndata), ' records in ', foldername]);
+	disp(['    from ', datestr(decyear2date(min(dataTime)),'yyyy-mm-dd'), ' to ', datestr(decyear2date(max(dataTime)),'yyyy-mm-dd') ]);
+
+
+	% find all the data files with Tstart<=t<=Tend
+	dataInd = (dataTime>=Tstart) & (dataTime<=Tend);
+	disp([' For the selected period: ', datestr(decyear2date((Tstart)),'yyyy-mm-dd'), ' to ', datestr(decyear2date((Tend)),'yyyy-mm-dd'), ', there are ', num2str(sum(dataInd)), ' records' ]);
+
+	dataToLoad = {templist(dataInd).name};
+	timeToload = dataTime(dataInd);
+
+	for i = 1:length(dataToLoad)
+		tifdata= interpFromTif([foldername, dataToLoad{i}], X, Y, 2e9);
+		dataout(i).name = dataToLoad{i};
+		dataout(i).surface = tifdata(:,:,3);
+		dataout(i).Time = timeToload(i);
+	end
+
+end
+
+	function dataout = interpFromTif(tifname,X,Y,nanValue) % {{{
+
+		if nargin < 4
+			nanValue = 10^30;
+		end
+
+		usemap = 0;
+
+		%Get image info
+		Tinfo = imfinfo(tifname);
+		N     = Tinfo(1).Width;
+		M     = Tinfo(1).Height;
+		dx    = Tinfo(1).ModelPixelScaleTag(1);
+		dy    = Tinfo(1).ModelPixelScaleTag(2);
+		minx  = Tinfo(1).ModelTiepointTag(4);
+		maxy  = Tinfo(1).ModelTiepointTag(5);
+
+		%Generate vectors
+		xdata = minx + dx/2 + ((0:N-1).*dx);
+		ydata = maxy - dy/2 - ((M  -1:-1:0).*dy);
+
+		%Read image
+		assert(dx>0); assert(dy>0);
+		ydata = fliplr(ydata);
+
+		%Get pixels we are interested in
+		offset=2;
+		xmin=min(X(:)); xmax=max(X(:));
+		posx=find(xdata<=xmax);
+		id1x=max(1,find(xdata>=xmin,1)-offset);
+		id2x=min(numel(xdata),posx(end)+offset);
+
+		ymin=min(Y(:)); ymax=max(Y(:));
+		posy=find(ydata>=ymin);
+		id1y=max(1,find(ydata<=ymax,1)-offset);
+		id2y=min(numel(ydata),posy(end)+offset);
+
+		data  = double(imread(tifname,'PixelRegion',{[id1y,id2y],[id1x,id2x]}));
+		xdata=xdata(id1x:id2x);
+		ydata=ydata(id1y:id2y);
+
+		if nanValue > 0
+			data(find(abs(data)>=nanValue))=NaN;
+		else
+			data(find(data<=nanValue))=NaN;
+		end
+
+		if ndims(data) == 2
+			dataout = InterpFromGrid(xdata,ydata,data,X,Y);
+		elseif ndims(data) == 3
+			for i = 1:size(data, 3)
+				dataout(:,:,i) = InterpFromGrid(xdata, ydata, data(:,:, i), X, Y);
+			end
+		else
+			error(['not implemented for data of ', num2str(ndims(data)), 'dimensions!'])
+		end
+		dataout(dataout==-9999)=NaN;
+	end %}}}
Index: /issm/trunk/src/m/contrib/chenggong/modeldata/interpFromITSLIVE.m
===================================================================
--- /issm/trunk/src/m/contrib/chenggong/modeldata/interpFromITSLIVE.m	(revision 28013)
+++ /issm/trunk/src/m/contrib/chenggong/modeldata/interpFromITSLIVE.m	(revision 28013)
@@ -0,0 +1,74 @@
+function [vx_out, vy_out] = interpFromITSLIVE(X,Y,Tstart,Tend,varargin)
+	%interpFromITSLIVE: 
+	%	Interpolate ITS_LIVE velocity data to the given mesh
+	%
+	%   Usage:
+	%		 [vx_out, vy_out] = interpFromITSLIVE(X,Y,Tstart,Tend,varargin)
+	%
+	%	X, Y are the coordinates of the mesh 
+	%	Tstart and Tend decimal year of the start and end time
+	%
+	%   Example:
+	%			[vx, vy] = interpFromITSLIVE(md.mesh.x,md.mesh.y, tstart, tend);
+	%
+	%   Options:
+	options    = pairoptions(varargin{:});
+
+	foldername = '/totten_1/ModelData/Greenland/ITS_LIVE/';
+
+	% get the time info from file names
+	templist = dir([foldername,'*.nc']);
+	Ndata = length(templist);
+	dataTime = zeros(Ndata,1);
+
+	for i = 1:Ndata
+		[~, fname, ~] = fileparts(templist(i).name);
+		tempConv = split(fname, '_');
+		% follow the naming convention
+		dataTime(i) = date2decyear(datenum(tempConv{end}, 'yyyy'));
+	end
+	% find all the data files with Tstart<=t<=Tend
+	dataInd = (dataTime>=Tstart) & (dataTime<=Tend);
+	disp([' For the selected period: ', datestr(decyear2date((Tstart)),'yyyy-mm-dd'), ' to ', datestr(decyear2date((Tend)),'yyyy-mm-dd'), ', there are ', num2str(sum(dataInd)), ' records' ]);
+
+	dataToLoad = {templist(dataInd).name};
+	timeToload = dataTime(dataInd);
+
+	% Load x,y for GRE_G0240_0000.nc
+	refNF = [foldername, templist(1).name];
+	xh = ncread(refNF, 'x');
+	yh = ncread(refNF, 'y');
+
+	xmin = min(X(:)); xmax = max(X(:));
+	ymin = min(Y(:)); ymax = max(Y(:));
+	offset = max([diff(xh);diff(yh)]);
+
+	posxh = ((xh>=xmin-offset) & (xh<=xmax+offset));
+	id1xh = find(posxh, 1, 'first');
+	id2xh = find(posxh, 1, 'last');
+
+	posyh = ((yh>=ymin-offset) & (yh<=ymax+offset));
+	id1yh = find(posyh, 1, 'first');
+	id2yh = find(posyh, 1, 'last');
+
+	xh = xh(id1xh:id2xh);
+	yh = yh(id1yh:id2yh);
+
+	% loop through all the files
+	vx_out = zeros(numel(X)+1, numel(timeToload)); 
+	vy_out = zeros(numel(X)+1, numel(timeToload)); 
+	for i = 1:length(dataToLoad)
+
+		filename = [foldername, dataToLoad{i}];
+		vx = (ncread(filename,'vx',[id1xh id1yh],[id2xh-id1xh+1 id2yh-id1yh+1],[1 1]));
+		vy = (ncread(filename,'vy',[id1xh id1yh],[id2xh-id1xh+1 id2yh-id1yh+1],[1 1]));
+
+		vx(vx<-32760) = nan;
+		vy(vy<-32760) = nan;
+		vx_out(1:end-1,i) = InterpFromGrid(xh, yh, double(vx'), X, Y);
+		vx_out(end, i) = timeToload(i);
+		vy_out(1:end-1,i) = InterpFromGrid(xh, yh, double(vy'), X, Y);
+		vy_out(end, i) = timeToload(i);
+	end
+end
+
Index: /issm/trunk/src/m/contrib/chenggong/modeldata/interpFromMEaSUREsGeotiff.m
===================================================================
--- /issm/trunk/src/m/contrib/chenggong/modeldata/interpFromMEaSUREsGeotiff.m	(revision 28013)
+++ /issm/trunk/src/m/contrib/chenggong/modeldata/interpFromMEaSUREsGeotiff.m	(revision 28013)
@@ -0,0 +1,68 @@
+function dataout = interpFromMEaSUREsGeotiff(X,Y,Tstart,Tend,varargin)
+%interpFromMEaSUREsGeotiff: 
+%	This function calls src/m/contrib/morlighem/modeldata/interpFromGeotiff.m for multiple times to load all avaliable 
+%	tif data in  /totten_1/ModelData/Greenland/VelMEaSUREs/Jakobshavn_2008_2021/ within the given time period (in decimal years)
+%	For some reason, each .tif file in this folder contains two sets of data, only the first dataset is useful
+%
+%   Usage:
+%		 dataout = interpFromMEaSUREsGeotiff(X,Y,Tstart,Tend, varargin)
+%
+%	X, Y are the coordinates of the mesh 
+%	Tstart and Tend decimal year of the start and end time
+%
+%   Example:
+%			obsData = interpFromMEaSUREsGeotiff(md.mesh.x,md.mesh.y, tstart, tend);
+%
+%   Options:
+%      - 'glacier':  which glacier to look for
+options    = pairoptions(varargin{:});
+glacier    = getfieldvalue(options,'glacier','Jakobshavn');
+
+if strcmp(glacier, 'Jakobshavn')
+	foldername = '/totten_1/ModelData/Greenland/VelMEaSUREs/Jakobshavn_2008_2021/';
+elseif strcmp(glacier, 'Kangerlussuaq')
+	foldername = '/totten_1/ModelData/Greenland/VelMEaSUREs/Kangerlussuaq_2006_2021/';
+elseif strcmp(glacier, 'Store')
+	foldername = '/totten_1/ModelData/Greenland/VelMEaSUREs/Store_2008_2021/';
+elseif strcmp(glacier, 'Rink')
+	foldername = '/totten_1/ModelData/Greenland/VelMEaSUREs/Rink_2008_2022/';
+elseif strcmp(glacier, 'Upernavik')
+	foldername = '/totten_1/ModelData/Greenland/VelMEaSUREs/Upernavik_2008_2022/';
+elseif strcmp(glacier, 'Helheim')
+	foldername = '/totten_1/ModelData/Greenland/VelMEaSUREs/Helheim_2008_2023/';
+else
+	error(['The velocity data for ', glacier, ' is not available, please download from NSIDC first.']);
+end
+
+% get the time info from file names
+templist = dir([foldername,'*.meta']);
+Ndata = length(templist);
+dataTstart = zeros(Ndata,1);
+dataTend = zeros(Ndata,1);
+
+for i = 1:Ndata
+	tempConv = split(templist(i).name, '_');
+	% follow the naming convention
+	dataPrefix(i) = join(tempConv(1:5), '_');
+	dataTstart(i) = date2decyear(datenum(tempConv{3}));
+	dataTend(i) = date2decyear(datenum(tempConv{4}));
+end
+disp(['  Found ', num2str(Ndata), ' records in ', foldername]);
+disp(['    from ', datestr(decyear2date(min(dataTstart)),'yyyy-mm-dd'), ' to ', datestr(decyear2date(max(dataTend)),'yyyy-mm-dd') ]);
+
+
+% find all the data files with Tstart<=t<=Tend
+dataInd = (dataTend>=Tstart) & (dataTstart<=Tend);
+disp([' For the selected period: ', datestr(decyear2date((Tstart)),'yyyy-mm-dd'), ' to ', datestr(decyear2date((Tend)),'yyyy-mm-dd'), ', there are ', num2str(sum(dataInd)), ' records' ]);
+
+dataToLoad = dataPrefix(dataInd);
+TstartToload = dataTstart(dataInd);
+TendToload = dataTend(dataInd);
+
+for i = 1:length(dataToLoad)
+	dataout(i).vx = interpFromGeotiff([foldername, dataToLoad{i}, '_vx_v04.0.tif'], X, Y, 2e9);
+	dataout(i).vy = interpFromGeotiff([foldername, dataToLoad{i}, '_vy_v04.0.tif'], X, Y, 2e9);
+	dataout(i).vel = interpFromGeotiff([foldername, dataToLoad{i}, '_vv_v04.0.tif'], X, Y, -1);
+	dataout(i).Tstart = TstartToload(i);
+	dataout(i).Tend = TendToload(i);
+end
Index: /issm/trunk/src/m/contrib/chenggong/modeldata/interpISMIP6Temp.m
===================================================================
--- /issm/trunk/src/m/contrib/chenggong/modeldata/interpISMIP6Temp.m	(revision 28012)
+++ /issm/trunk/src/m/contrib/chenggong/modeldata/interpISMIP6Temp.m	(revision 28013)
@@ -4,7 +4,4 @@
 %	X and Y are the coordinates of the mesh
 %
-%  Author: Cheng Gong
-%  Last modified: 2021-12-06
-
 filename = '/totten_1/ModelData/Greenland/ISMIP6/GreenlandISMIP6-Morlighem-2020-10-01.nc';
 
Index: /issm/trunk/src/m/contrib/chenggong/modeldata/interpMonthlyIceMaskGreene.m
===================================================================
--- /issm/trunk/src/m/contrib/chenggong/modeldata/interpMonthlyIceMaskGreene.m	(revision 28013)
+++ /issm/trunk/src/m/contrib/chenggong/modeldata/interpMonthlyIceMaskGreene.m	(revision 28013)
@@ -0,0 +1,66 @@
+function icemask = interpMonthlyIceMaskGreene(X, Y, time, includedRockMask, ncdata)
+%INTERPMONTHLYICEMASKGREENE - interpolate monthly reconstructed ice masks onto X and Y, within the given time period
+%
+%	 Usage:
+%		distance = interpMonthlyIceMaskGreene(md.mesh.x, md.mesh.y, [md.timestepping.start_time, md.timestepping.final_time]);
+%
+%
+%   - X, Y: coordinates of the mesh or grid
+%	 - time: the starting and end point of the time series
+%   - optional 4th input argument: a flag to cover rock mask by ice and merge it into the ice mask, 
+%											so that there is no hole in the interior part of the domain, 
+%											and the mask will only represent the levelset of the ice front 
+%   - optional 5th input argument: path to the data file, by default it is the path on totten
+%
+
+% set icemask=-1 for the region with rocks
+if nargin < 4
+	includedRockMask = 1;
+end
+if nargin < 5
+	ncdata = '/totten_1/ModelData/Greenland/IceFrontsGreene/greenland_ice_masks_1972-2022_v1.nc';
+end
+
+x = ncread(ncdata, 'x');
+y = ncread(ncdata, 'y');
+d = ncread(ncdata, 'time');
+% convert t to decyear
+t = date2decyear(datenum(datetime('1900-01-01')+days(d)));
+
+offset=2;
+
+% get x-index covers the domain
+xmin=min(X(:)); xmax=max(X(:));
+idx = sort(find((x>=xmin) & (x<=xmax)));
+idx_min = max(idx(1)-offset, 1);
+idx_max = min(idx(end)+offset, length(x));
+x = x(idx_min:idx_max);
+
+% get y-index covers the domain
+ymin=min(Y(:)); ymax=max(Y(:));
+idy = sort(find((y>=ymin) & (y<=ymax)));
+idy_min = max(idy(1)-offset, 1);
+idy_max = min(idy(end)+offset, length(y));
+y = y(idy_min:idy_max);
+
+% get time index
+idt_min = max(find(t<=time(1), 1, 'last'), 1);
+idt_max = min(find(t>=time(end), 1, 'first'), length(t));
+t = t(idt_min:idt_max);
+
+% load icemask and rockmask from netCDF
+ice = ncread(ncdata, 'ice', [idx_min, idy_min, idt_min], [idx_max-idx_min+1, idy_max-idy_min+1, idt_max-idt_min+1], [1,1,1]);
+rock = ncread(ncdata, 'rock', [idx_min, idy_min], [idx_max-idx_min+1, idy_max-idy_min+1], [1,1]);
+
+% merge ice and rock
+if includedRockMask
+	iceall = ice + rock;
+else 
+	iceall = ice;
+end
+% Convert to ice_levelset values
+icemask = zeros(numel(X)+1, numel(t));
+icemask(end,:) = t;
+for i = 1:numel(t)
+	icemask(1:end-1, i) = InterpFromGrid(x, y, double(1-2*iceall(:,:,i)'), X, Y,'nearest');
+end
Index: /issm/trunk/src/m/contrib/chenggong/visualization/imageNonUni.m
===================================================================
--- /issm/trunk/src/m/contrib/chenggong/visualization/imageNonUni.m	(revision 28012)
+++ /issm/trunk/src/m/contrib/chenggong/visualization/imageNonUni.m	(revision 28013)
@@ -2,6 +2,4 @@
 %imageNonUni - draw an image with non uniform grid
 %
-% Author: Cheng Gong
-% Last modified: 2020-08-24
 x = varargin{1};
 y = varargin{2};
Index: /issm/trunk/src/m/contrib/chenggong/visualization/plotCompareTransientFlowline.m
===================================================================
--- /issm/trunk/src/m/contrib/chenggong/visualization/plotCompareTransientFlowline.m	(revision 28012)
+++ /issm/trunk/src/m/contrib/chenggong/visualization/plotCompareTransientFlowline.m	(revision 28013)
@@ -15,6 +15,4 @@
 %   yl:			ylim value
 %
-%   Author: Cheng Gong
-%   Date: 2021-12-06
 
 N = length(velList);
Index: /issm/trunk/src/m/contrib/defleurian/netCDF/export_netCDF.m
===================================================================
--- /issm/trunk/src/m/contrib/defleurian/netCDF/export_netCDF.m	(revision 28012)
+++ /issm/trunk/src/m/contrib/defleurian/netCDF/export_netCDF.m	(revision 28013)
@@ -88,6 +88,10 @@
 			disp(sprintf('===Now treating %s===',groups{i}));
 		end
-		if strcmp(groups{i}, 'qmu'),
+		if any(strcmp(groups{i}, {'qmu'})),
 			disp('qmu is skipped until it is more stable');
+			continue
+		end
+		if any(strcmp(groups{i},{'radaroverlay'})),
+			disp(sprintf('%s is skipped.',groups{i}));
 			continue
 		end
@@ -171,4 +175,5 @@
 				klass=class(md.(groups{i}));
 				klasstring = strcat(klass, '.',klass);
+				netcdf.putAtt(groupID,netcdf.getConstant('NC_GLOBAL'),'classgroup',groups{i});
 				netcdf.putAtt(groupID,netcdf.getConstant('NC_GLOBAL'),'classtype',klasstring);
 				if sum(numel(Var) == size(Var)) == 0,  %this is a 2D array or more (and not a vector with dimension 2 = 1)
Index: /issm/trunk/src/m/contrib/larour/mdanalysis.m
===================================================================
--- /issm/trunk/src/m/contrib/larour/mdanalysis.m	(revision 28012)
+++ /issm/trunk/src/m/contrib/larour/mdanalysis.m	(revision 28013)
@@ -330,5 +330,5 @@
 end
 if counter==-1, 
-	error('cound not find input model name matching base workspace names!');
+	error('could not find input model name matching base workspace names!');
 end
 
Index: /issm/trunk/src/m/contrib/morlighem/sia_vz.m
===================================================================
--- /issm/trunk/src/m/contrib/morlighem/sia_vz.m	(revision 28013)
+++ /issm/trunk/src/m/contrib/morlighem/sia_vz.m	(revision 28013)
@@ -0,0 +1,18 @@
+function [velz]=sia_vz(md)
+%SIA_VZ - computation vertical speed based on Shallow Ice Approximation
+%
+%   Usage:
+%      [vz]=sia_vz(md)
+
+if md.mesh.dimension~=3
+	error('Only 3d meshes are allowed to compute vz');
+end
+
+ws = - md.surfaceforcings.mass_balance;
+n  = md.materials.rheology_n(1); %just take the first one
+z  = md.mesh.z;
+b  = md.geometry.base;
+H  = md.geometry.thickness;
+s  = md.geometry.surface;
+
+vz = ws.*(n+2)/(n+1).*((z-b)./H + 1/(n+2)*(((s-z)./H).^(n+2)-1));
Index: /issm/trunk/src/m/contrib/musselman/read_netCDF_beta.py
===================================================================
--- /issm/trunk/src/m/contrib/musselman/read_netCDF_beta.py	(revision 28013)
+++ /issm/trunk/src/m/contrib/musselman/read_netCDF_beta.py	(revision 28013)
@@ -0,0 +1,90 @@
+# imports
+from netCDF4 import Dataset
+import numpy as np
+import numpy.ma as ma
+from os import path, remove
+from model import *
+import re
+
+
+'''
+Given a NetCDF4 file, this set of functions will perform the following:
+    1. Enter each group of the file.
+    2. For each variable in each group, update an empty model with the variable's data
+'''
+
+
+# make a model framework to fill that is in the scope of this file
+model_copy = model()
+
+
+def read_netCDF(filename):
+    # check if path exists
+    if path.exists(filename):
+        print('Opening {} for reading'.format(filename))
+
+        # open the given netCDF4 file
+        global NCData   
+        NCData = Dataset(filename, 'r')
+        # remove masks from numpy arrays for easy conversion
+        NCData.set_auto_mask(False)
+    
+
+    # read the contents of the groups
+
+    '''
+    this function navigates like: 
+
+    filename.groups.keys() -> filename.groups['group1'] -> 
+    filename.groups['group1'].groups.keys() -> filename.groups['group1'].groups['group1.1'] ->
+    filename.groups['group1'].groups['group1.1'].groups.keys() ->
+    filename.groups['group1'].groups['group1.1'].groups['group1.1.1'] etc. etc.
+    '''
+    # walk through each group looking for subgroups and variables
+    for group in NCData.groups.keys():
+        print('walking ' + str(group))
+        # have to send a custom name to this function: filename.groups['group']
+        name = "NCData.groups['" + str(group) + "']"
+        print('name sent to walker is: ' + name)
+        walk_nested_groups(name)
+    
+    return model_copy
+
+
+def walk_nested_groups(group_location_in_file):
+    # first, we enter the group by: filename.groups['group_name']
+    # second we search the current level for variables: filename.groups['group_name'].variables.keys()
+    # third we get nested group keys by: filename.groups['group_name'].groups.keys()
+    # if the variables exist, copy the data to the model framework by calling a custom function
+    # if the nested groups exist, repeat. 
+
+    for variable in eval(group_location_in_file + '.variables.keys()'):
+        print('got a variable: ' + str(variable))
+        location_of_variable_in_file = group_location_in_file + ".variables['" + str(variable) + "']"
+        # group_location_in_file is like filename.groups['group1'].groups['group1.1'].groups['group1.1.1']
+        # Define the regex pattern to match the groups within brackets
+        pattern = r"\['(.*?)'\]"
+        # Use regex to find all matches and return something like 'group1.group1.1.group1.1.1 ...' where the last value is the name of the variable
+        matches = re.findall(pattern, location_of_variable_in_file)
+        variable_name = matches[-1]
+        location_of_variable_in_model = '.'.join(matches[:-1])
+        copy_variable_data_to_new_model(location_of_variable_in_file, location_of_variable_in_model, variable_name)
+
+    for nested_group in eval(group_location_in_file + '.groups.keys()'):
+        print('got a nested group: ' + nested_group)
+        new_nested_group = group_location_in_file + ".groups['" + str(nested_group) + "']"
+        print('the location of this nested group in the file is: ' + new_nested_group)
+        walk_nested_groups(new_nested_group)
+
+
+
+def copy_variable_data_to_new_model(location_of_variable_in_file, location_of_variable_in_model, variable_name):
+    # this should be as simple as navigating to the location_of_variable_in_model and setting it equal to the location_of_variable_in_file
+    print('adress in file: ' + location_of_variable_in_file)
+    print('adress in model: ' + location_of_variable_in_model)
+    print('the value of the variable is: ')
+    print(eval(location_of_variable_in_file + '[:]'))
+    print('the name of the varialbe is: ' + variable_name)
+    print('the type of the variable is: ' + str(type(eval(location_of_variable_in_file + '[:]'))))
+    setattr(eval('model_copy.' + location_of_variable_in_model), variable_name, eval(location_of_variable_in_file + '[:]'))
+    print('successfully saved var to model')
Index: /issm/trunk/src/m/contrib/musselman/read_netCDF_commit.py
===================================================================
--- /issm/trunk/src/m/contrib/musselman/read_netCDF_commit.py	(revision 28013)
+++ /issm/trunk/src/m/contrib/musselman/read_netCDF_commit.py	(revision 28013)
@@ -0,0 +1,86 @@
+# imports
+from netCDF4 import Dataset
+import numpy as np
+import numpy.ma as ma
+from os import path, remove
+from model import *
+import re
+
+
+'''
+Given a NetCDF4 file, this set of functions will perform the following:
+    1. Enter each group of the file.
+    2. For each variable in each group, update an empty model with the variable's data
+    3. Enter nested groups and repeat
+'''
+
+
+# make a model framework to fill that is in the scope of this file
+model_copy = model()
+
+
+def read_netCDF(filename):
+    # check if path exists
+    if path.exists(filename):
+        print('Opening {} for reading'.format(filename))
+
+        # open the given netCDF4 file
+        global NCData   
+        NCData = Dataset(filename, 'r')
+        # remove masks from numpy arrays for easy conversion
+        NCData.set_auto_mask(False)
+    
+
+    # read the contents of the groups
+
+    '''
+    this function navigates like: 
+
+    filename.groups.keys() -> filename.groups['group1'] -> 
+    filename.groups['group1'].groups.keys() -> filename.groups['group1'].groups['group1.1'] ->
+    filename.groups['group1'].groups['group1.1'].groups.keys() ->
+    filename.groups['group1'].groups['group1.1'].groups['group1.1.1'] etc. etc.
+    '''
+    # walk through each group looking for subgroups and variables
+    for group in NCData.groups.keys():
+        # have to send a custom name to this function: filename.groups['group']
+        name = "NCData.groups['" + str(group) + "']"
+        walk_nested_groups(name)
+    
+    return model_copy
+
+
+def walk_nested_groups(group_location_in_file):
+    # first, we enter the group by: filename.groups['group_name']
+    # second we search the current level for variables: filename.groups['group_name'].variables.keys()
+    # third we get nested group keys by: filename.groups['group_name'].groups.keys()
+    # if a variables exists, copy the data to the model framework by calling copy function
+    # if a nested groups exist, repeat all
+
+    for variable in eval(group_location_in_file + '.variables.keys()'):
+        location_of_variable_in_file = group_location_in_file + ".variables['" + str(variable) + "']"
+        # group_location_in_file is like filename.groups['group1'].groups['group1.1'].groups['group1.1.1']
+        # Define the regex pattern to match the groups within brackets
+        pattern = r"\['(.*?)'\]"
+        # Use regex to find all matches and return something like 'group1.group1.1.group1.1.1 ...' where the last value is the name of the variable
+        matches = re.findall(pattern, location_of_variable_in_file)
+        variable_name = matches[-1]
+        location_of_variable_in_model = '.'.join(matches[:-1])
+        copy_variable_data_to_new_model(location_of_variable_in_file, location_of_variable_in_model, variable_name)
+
+    for nested_group in eval(group_location_in_file + '.groups.keys()'):
+        new_nested_group = group_location_in_file + ".groups['" + str(nested_group) + "']"
+        walk_nested_groups(new_nested_group)
+
+
+
+def copy_variable_data_to_new_model(location_of_variable_in_file, location_of_variable_in_model, variable_name):
+    # as simple as navigating to the location_of_variable_in_model and setting it equal to the location_of_variable_in_file
+    
+    # but there are a couple of cases we need to compensate for, like an arrary of a single integer should just be an integer and not an array
+    if len(eval(location_of_variable_in_file))>1:
+        setattr(eval('model_copy.' + location_of_variable_in_model), variable_name, eval(location_of_variable_in_file + '[:]'))
+    else:
+        setattr(eval('model_copy.' + location_of_variable_in_model), variable_name, eval(location_of_variable_in_file + '[:][0]')) # note the [0] on the end
+        
+    print('Successfully saved ' + location_of_variable_in_model + '.' + variable_name + ' to model.')
Index: /issm/trunk/src/m/contrib/musselman/write_netCDF_beta.py
===================================================================
--- /issm/trunk/src/m/contrib/musselman/write_netCDF_beta.py	(revision 28013)
+++ /issm/trunk/src/m/contrib/musselman/write_netCDF_beta.py	(revision 28013)
@@ -0,0 +1,226 @@
+# imports
+from netCDF4 import Dataset
+import numpy as np
+import numpy.ma as ma
+import time
+from os import path, remove
+from model import *
+
+
+'''
+this is like my todo list
+
+Given a model, this set of functions will perform the following:
+    1. Enter each nested class of the model.
+    2. View each attribute of each nested class.
+    3. Compare state of attribute in the model to an empty model class.
+    4. If states are identical, pass.
+    5. Else, create group named after original class.
+    6. Create variable named after nested class attribute and assign value to it.
+    7. 
+'''
+
+'''
+need to add cases for nested arrays, dicts, etc...
+To do this I need to: 
+    know exactly the data types that are generating problems
+'''
+
+
+def write_netCDF(model_var, model_name: str, filename: str):
+    '''
+    model_var = class object to be saved
+    model_name = name of class instance variable but inside quotation marks: ie if md = model(), then model_name = 'md'
+    filename = path and name to save file under
+    '''
+      
+    globals()[model_name] = model_var
+
+    # a sanity check
+    print('sanity check to make sure names are defined: ' + str(model_var == eval(model_name)))
+    
+    # Create a NetCDF file to write to
+    make_NetCDF(filename)
+    
+    # Create an instance of an empty model class to compare model_var against
+    global empty_model
+    empty_model = model()
+
+    # Walk through the model_var class and compare subclass states to empty_model
+    walk_through_model(model_var, model_name)
+    
+    NetCDF.close()
+    
+
+    
+def make_NetCDF(filename: str):
+    # Check if file already exists
+    if path.exists(filename):
+        print('File {} allready exist'.format(filename))
+    
+        # If so, inqure for a new name or to do delete existing file
+        newname = input('Give a new name or "delete" to replace: ')
+
+        if newname == 'delete':
+            remove(filename)
+        else:
+            print(('New file name is {}'.format(newname)))
+            filename = newname
+
+    # Create file and define it globally (global variables are stored in memory/global namespace)
+    global NetCDF
+    NetCDF = Dataset(filename, 'w', format='NETCDF4')
+    NetCDF.history = 'Created ' + time.ctime(time.time())
+    NetCDF.createDimension('Unlim', None)  # unlimited dimension
+    NetCDF.createDimension('float', 1)     # single integer dimension
+    NetCDF.createDimension('int', 1)       # single float dimension
+    
+    print('Successfully created ' + filename)
+
+
+    
+def walk_through_model(model_var, model_name: str):
+    # Iterate over first layer of model_var attributes and assume this first layer is only classes
+    for group in model_var.__dict__.keys():
+        print(str(group))
+        adress = str(model_name + '.' + str(group))
+        print(adress)
+        # Recursively walk through subclasses
+        walk_through_subclasses(model_var, adress, model_name)       
+        
+
+        
+def walk_through_subclasses(model_var, adress: str, model_name: str):
+    # Iterate over each subclass' attributes
+    # Use try/except since it's either a class or it's not, no unknown exceptions
+    try:
+        # enter the subclass, see if it has nested classes and/or attributes
+        # then compare attributes between models and write to netCDF if they differ
+        # if subclass found, walk through it and repeat
+        for child in eval(adress + '.__dict__.keys()'):
+            # make a string variable so we can send thru this func again
+            adress_of_child = str(adress + '.' + str(child))
+            print('adress_of_child: ' + adress_of_child)
+            # If the attribute is unchanged, move onto the next layer
+            adress_of_child_in_empty_class = 'empty_model' + adress_of_child.removeprefix(str(model_name))
+            print('adress_of_child_in_empty_class: '+ adress_of_child_in_empty_class + '\n')
+            # using try/except here because sometimes a model can have class instances/attributes that are not 
+            # in the framework of an empty model. If this is the case, we move to the except statement
+            try:
+                if type(child) == type(eval(adress_of_child_in_empty_class)):
+                    print('passed a non-variable\n')
+                    walk_through_subclasses(model_var, adress_of_child, model_name)
+                # If it has been modified, record it in the NetCDF file
+                else:
+                    create_group(model_var, adress_of_child)
+                    walk_through_subclasses(model_var, adress_of_child, model_name)
+            except AttributeError:
+                create_group(model_var, adress_of_child)
+                walk_through_subclasses(model_var, adress_of_child, model_name)
+    except Exception as e: print(e)
+
+
+    
+def create_group(model_var, adress_of_child):
+    # start by splitting the adress_of_child into its components
+    print('entered create for: ' + adress_of_child + '\n')
+    print('the type is: ' + str(type(eval(adress_of_child))) + '\n')
+    levels_of_class = adress_of_child.split('.')
+    print(levels_of_class)
+
+    # Handle the first layer of the group(s)
+    group_name = levels_of_class[1]
+    group = NetCDF.createGroup(str(group_name))
+
+    # if the data is nested, create nested groups to match class structure
+    if len(levels_of_class) > 3:
+        for name in levels_of_class[2:-1]:
+            group = group.createGroup(str(name))
+    else: pass
+
+    # Lastly, handle the variable(s)
+    variable_name = levels_of_class[-1]
+    create_var(variable_name, adress_of_child, group)
+
+
+
+
+def create_var(variable_name, adress_of_child, group):
+    # There are lots of different variable types that we need to handle from the model class
+    
+    # This first conditional statement will catch numpy arrays of any dimension and save them
+    if isinstance(eval(adress_of_child), np.ndarray):
+        write_numpy_array_to_netcdf(variable_name, adress_of_child, group)
+        
+    elif isinstance(eval(adress_of_child), int):
+        print('caught an int!')
+        variable = group.createVariable(variable_name, int, ('int',))
+        variable[:] = eval(adress_of_child)
+        
+    elif isinstance(eval(adress_of_child), float):
+        print('caught a float!')
+        variable = group.createVariable(variable_name, float, ('float',))
+        variable[:] = eval(adress_of_child)
+        
+    else:
+        try:
+            variable = group.createVariable(variable_name, type(eval(adress_of_child)), ('Unlim',))
+            variable = eval(adress_of_child)
+        except Exception as e: print(e)
+
+    print('successfully wrote ' + adress_of_child + ' to netcdf file')
+    
+    
+    
+
+def write_numpy_array_to_netcdf(variable_name, adress_of_child, group):
+    print('entered write_numpy_array_to_netcdf for: ' + variable_name)
+    # to make a nested array in netCDF, we have to get the dimensions of the array,
+    # create corresponding dimensions in the netCDF file, then we can make a variable
+    # in the netCDF with dimensions identical to those in the original array
+    
+    # start by getting the data type at the lowest level in the array:
+    typeis = eval(adress_of_child + '.dtype')
+    print('the type of elements are: ' + str(typeis))
+    
+    # if the array is 1D, we don't need to do anything fancy
+    # sometimes an array has just 1 element in it though, so we need to account for those cases here:
+    if len(eval(adress_of_child)) == 1:
+        if typeis is np.dtype('float64'):
+            variable = group.createVariable(variable_name, typeis, ('float',))
+            variable[:] = eval(adress_of_child)            
+        elif typeis is np.dtype('int64'):
+            variable = group.createVariable(variable_name, typeis, ('int',))
+            variable[:] = eval(adress_of_child)            
+        else:
+            variable = group.createVariable(variable_name, typeis, ('Unlim',))
+            variable[:] = eval(adress_of_child)
+    
+    # this is the 1D case:
+    elif len(np.shape(eval(adress_of_child))) == 1: 
+        variable = group.createVariable(variable_name, typeis, ('Unlim',))
+        variable[:] = eval(adress_of_child)
+    
+    # But if the array is >1D, we do need to be fancy:
+    else:
+        # make the dimensions
+        dimensions = []
+        for dimension in np.shape(eval(adress_of_child)):
+            dimensions.append(str('dim' + str(dimension)))
+            # if the dimension already exists we can't have a duplicate
+            try:
+                group.createDimension(str('dim' + str(dimension)), dimension)
+            except: pass # this would mean that the dimension already exists
+
+        print('the dimensions are: ' + str(dimensions))
+
+        # create the variable:
+        variable = group.createVariable(variable_name, typeis, tuple(dimensions))
+        print('created variable OK')
+
+        # write the variable:
+        variable[:] = eval(adress_of_child)
+
+        
+        
+        
Index: /issm/trunk/src/m/contrib/musselman/write_netCDF_commit.py
===================================================================
--- /issm/trunk/src/m/contrib/musselman/write_netCDF_commit.py	(revision 28013)
+++ /issm/trunk/src/m/contrib/musselman/write_netCDF_commit.py	(revision 28013)
@@ -0,0 +1,281 @@
+# imports
+import netCDF4
+from netCDF4 import Dataset
+import numpy as np
+import numpy.ma as ma
+import time
+from os import path, remove
+from model import *
+from results import *
+
+
+'''
+Given a model, this set of functions will perform the following:
+    1. Enter each nested class of the model.
+    2. View each attribute of each nested class.
+    3. Compare state of attribute in the model to an empty model class.
+    4. If states are identical, pass.
+    5. Otherwise, create nested groups named after class structure.
+    6. Create variable named after class attribute and assign value to it.
+'''
+
+
+def write_netCDF(model_var, model_name: str, filename: str):
+    '''
+    model_var = class object to be saved
+    model_name = name of class instance variable but inside quotation marks: ie if md = model(), then model_name = 'md'
+    filename = path and name to save file under
+    '''
+    # this assigns the name model_name to the class object model_var... very important
+    globals()[model_name] = model_var
+    
+    # Create a NetCDF file to write to
+    make_NetCDF(filename)
+    
+    # Create an instance of an empty model class to compare model_var against
+    global empty_model
+    empty_model = model()
+
+    # Walk through the model_var class and compare subclass states to empty_model
+    walk_through_model(model_var, model_name)
+
+    # in order to handle some subclasses in the results class, we have to utilize this band-aid
+    # there will likely be more band-aids added unless a class name library is created with all class names that might be added to a model
+    try:
+        # if results has meaningful data, save the name of the subclass and class instance
+        NetCDF.groups['results']
+        results_subclasses_bandaid(model_var)
+        # otherwise, ignore
+    except KeyError:
+        pass
+        
+    NetCDF.close()
+    print('Model successfully saved as NetCDF4')
+    
+
+
+def results_subclasses_bandaid(model_var):
+    # since the results class may have nested classes within it, we need to record the name of the 
+    # nested class instance variable as it appears in the model that we're trying to save
+    quality_control = []
+    for class_instance_name in model_var.results.__dict__.keys():
+        # for each class instance in results, see which class its from and record that info in the netcdf to recreate structure later
+        # check to see if there is a solutionstep class instance
+        if isinstance(model_var.results.__dict__[class_instance_name],solutionstep):
+            quality_control.append(1)
+            write_string_to_netcdf(variable_name=str('solutionstep'), adress_of_child=str(class_instance_name), group=NetCDF.groups['results'])
+        # check to see if there is a solution class instance
+        if isinstance(model_var.results.__dict__[class_instance_name],solution):
+            quality_control.append(1)
+            write_string_to_netcdf(variable_name=str('solution'), adress_of_child=str(class_instance_name), group=NetCDF.groups['results'])
+        # check to see if there is a resultsdakota class instance
+        if isinstance(model_var.results.__dict__[class_instance_name],resultsdakota):
+            quality_control.append(1)
+            write_string_to_netcdf(variable_name=str('resultsdakota'), adress_of_child=str(class_instance_name), group=NetCDF.groups['results'])
+    if len(quality_control) != len(model_var.results.__dict__.keys()):
+        print('Error: The class instance within your model.results class is not currently supported by this application')
+        print(type(model_var.results.__dict__[class_instance_name]))
+    else:
+        print('The results class was successfully stored on disk')
+
+
+    
+def make_NetCDF(filename: str):
+    # If file already exists delete / rename it
+    if path.exists(filename):
+        print('File {} allready exist'.format(filename))
+    
+        # If so, inqure for a new name or to do delete the existing file
+        newname = input('Give a new name or "delete" to replace: ')
+
+        if newname == 'delete':
+            remove(filename)
+        else:
+            print(('New file name is {}'.format(newname)))
+            filename = newname
+    else:
+        # Otherwise create the file and define it globally so other functions can call it
+        global NetCDF
+        NetCDF = Dataset(filename, 'w', format='NETCDF4')
+        NetCDF.history = 'Created ' + time.ctime(time.time())
+        NetCDF.createDimension('Unlim', None)  # unlimited dimension
+        NetCDF.createDimension('float', 1)     # single integer dimension
+        NetCDF.createDimension('int', 1)       # single float dimension
+    
+    print('Successfully created ' + filename)
+
+
+    
+def walk_through_model(model_var, model_name: str):
+    # Iterate over first layer of model_var attributes and assume this first layer is only classes
+    for group in model_var.__dict__.keys():
+        adress = str(model_name + '.' + str(group))
+        # Recursively walk through subclasses
+        walk_through_subclasses(model_var, adress, model_name)       
+        
+
+def walk_through_subclasses(model_var, adress: str, model_name: str):
+    # Iterate over each subclass' attributes
+    # Use try/except since it's either a class or it's not, no unknown exceptions
+    try:
+        # enter the subclass, see if it has nested classes and/or attributes
+        # then compare attributes between models and write to netCDF if they differ
+        # if subclass found, walk through it and repeat
+        for child in eval(adress + '.__dict__.keys()'):
+            # make a string variable so we can send thru this func again
+            adress_of_child = str(adress + '.' + str(child))
+            # If the attribute is unchanged, move onto the next layer
+            adress_of_child_in_empty_class = 'empty_model' + adress_of_child.removeprefix(str(model_name))
+            # using try/except here because sometimes a model can have class instances/attributes that are not 
+            # in the framework of an empty model. If this is the case, we move to the except statement
+            try:
+                if type(child) == type(eval(adress_of_child_in_empty_class)):
+                    walk_through_subclasses(model_var, adress_of_child, model_name)
+                # If it has been modified, record it in the NetCDF file
+                else:
+                    create_group(model_var, adress_of_child)
+                    walk_through_subclasses(model_var, adress_of_child, model_name)
+            except AttributeError:
+                create_group(model_var, adress_of_child)
+                walk_through_subclasses(model_var, adress_of_child, model_name)
+    except Exception as e: print(e)
+
+
+        
+def create_group(model_var, adress_of_child):
+    # start by splitting the adress_of_child into its components
+    levels_of_class = adress_of_child.split('.')
+
+    # Handle the first layer of the group(s)
+    group_name = levels_of_class[1]
+    group = NetCDF.createGroup(str(group_name))
+
+    # if the data is nested, create nested groups to match class structure
+    if len(levels_of_class) > 3:
+        for name in levels_of_class[2:-1]:
+            group = group.createGroup(str(name))
+    else: pass
+
+    # Lastly, handle the variable(s)
+    variable_name = levels_of_class[-1]
+    create_var(variable_name, adress_of_child, group)
+
+
+def create_var(variable_name, adress_of_child, group):
+    # There are lots of different variable types that we need to handle from the model class
+    
+    # This first conditional statement will catch numpy arrays of any dimension and save them
+    if isinstance(eval(adress_of_child), np.ndarray):
+        write_numpy_array_to_netcdf(variable_name, adress_of_child, group)
+    
+    # check if it's an int
+    elif isinstance(eval(adress_of_child), int):
+        variable = group.createVariable(variable_name, int, ('int',))
+        variable[:] = eval(adress_of_child)
+    
+    # or a float
+    elif isinstance(eval(adress_of_child), float):
+        variable = group.createVariable(variable_name, float, ('float',))
+        variable[:] = eval(adress_of_child)
+
+    # or a string
+    elif isinstance(eval(adress_of_child), str):
+        write_string_to_netcdf(variable_name, adress_of_child, group)
+        
+    # or an empty list
+    elif isinstance(eval(adress_of_child), list) and len(eval(adress_of_child))==0:
+        variable = group.createVariable(variable_name, int, ('int',))
+
+    # or a list of strings -- this needs work as it can only handle a list of 1 string
+    elif isinstance(eval(adress_of_child),list) and isinstance(eval(adress_of_child)[0],str):
+        for string in eval(adress_of_child):
+            write_string_to_netcdf(variable_name, string, group)
+
+    # or a regular list
+    elif isinstance(eval(adress_of_child), list):
+        print(eval(adress_of_child))
+        variable = group.createVariable(variable_name, type(eval(adress_of_child)[0]), ('Unlim',))
+        variable[:] = eval(adress_of_child)
+
+    # anything else... (will likely need to add more cases; ie dict)
+    else:
+        try:
+            variable = group.createVariable(variable_name, type(eval(adress_of_child)), ('Unlim',))
+            variable[:] = eval(adress_of_child)
+        except Exception as e: 
+            print(e)
+            print('Datatype given: ' + str(type(eval(adress_of_child))))
+
+    print('Successfully transferred data from ' + adress_of_child + ' to the NetCDF')
+    
+
+
+
+def write_string_to_netcdf(variable_name, adress_of_child, group):
+    # netcdf and strings dont get along.. we have to do it 'custom':
+    # if we hand it an adress we need to do it this way:
+    try:
+        the_string_to_save = eval(adress_of_child)
+        length_of_the_string = len(the_string_to_save)
+        numpy_datatype = 'S' + str(length_of_the_string)
+        str_out = netCDF4.stringtochar(np.array([the_string_to_save], dtype=numpy_datatype))
+    #otherwise we need to treat it like a string:
+    except: 
+        the_string_to_save = adress_of_child
+        length_of_the_string = len(the_string_to_save)
+        numpy_datatype = 'S' + str(length_of_the_string)
+        str_out = netCDF4.stringtochar(np.array([the_string_to_save], dtype=numpy_datatype))        
+
+    # we'll need to make a new dimension for the string if it doesn't already exist
+    name_of_dimension = 'char' + str(length_of_the_string)
+    try: 
+        group.createDimension(name_of_dimension, length_of_the_string)
+    except: pass
+    # now we can make a variable in this dimension:
+    string = group.createVariable(variable_name, 'S1', (name_of_dimension))
+    #finally we can write the variable:
+    string[:] = str_out
+
+
+def write_numpy_array_to_netcdf(variable_name, adress_of_child, group):
+    # to make a nested array in netCDF, we have to get the dimensions of the array,
+    # create corresponding dimensions in the netCDF file, then we can make a variable
+    # in the netCDF with dimensions identical to those in the original array
+    
+    # start by getting the data type at the lowest level in the array:
+    typeis = eval(adress_of_child + '.dtype')
+    
+    # if the array is 1D, we don't need to do anything fancy
+    # sometimes an array has just 1 element in it though, so we need to account for those cases here:
+    if len(eval(adress_of_child)) == 1:
+        if typeis is np.dtype('float64'):
+            variable = group.createVariable(variable_name, typeis, ('float',))
+            variable[:] = eval(adress_of_child)            
+        elif typeis is np.dtype('int64'):
+            variable = group.createVariable(variable_name, typeis, ('int',))
+            variable[:] = eval(adress_of_child)            
+        else:
+            variable = group.createVariable(variable_name, typeis, ('Unlim',))
+            variable[:] = eval(adress_of_child)
+    
+    # this is the 1D case:
+    elif len(np.shape(eval(adress_of_child))) == 1: 
+        variable = group.createVariable(variable_name, typeis, ('Unlim',))
+        variable[:] = eval(adress_of_child)
+    
+    # But if the array is >1D, we do need to be fancy:
+    else:
+        # make the dimensions
+        dimensions = []
+        for dimension in np.shape(eval(adress_of_child)):
+            dimensions.append(str('dim' + str(dimension)))
+            # if the dimension already exists we can't have a duplicate
+            try:
+                group.createDimension(str('dim' + str(dimension)), dimension)
+            except: pass # this would mean that the dimension already exists
+
+        # create the variable:
+        variable = group.createVariable(variable_name, typeis, tuple(dimensions))
+
+        # write the variable:
+        variable[:] = eval(adress_of_child)
Index: /issm/trunk/src/m/coordsystems/epsg2proj.m
===================================================================
--- /issm/trunk/src/m/coordsystems/epsg2proj.m	(revision 28012)
+++ /issm/trunk/src/m/coordsystems/epsg2proj.m	(revision 28013)
@@ -10,22 +10,11 @@
 %      return proj4string='+proj=longlat +datum=wgs84 +no_defs'
 %
-	% First, get GDAL version
-	[s,r]=system(['gdalsrsinfo --version | awk ''{print $2}'' | cut -d ''.'' -f1']);
 
-	if s~=0, 
-		error(r);
-	end
+%Call PROJ library
+[status, string]=system(['projinfo -o PROJ -q epsg:' num2str(epsg)]);
 
-	version_major=str2num(r);
+%Check status
+if status~=0; error(string); end
 
-	[s,r]=system(['gdalsrsinfo epsg:' num2str(epsg) ' | command grep PROJ.4 | tr -d ''\n'' | sed ''s/PROJ.4 : //''']);
-
-	if s~=0, 
-		error(r);
-	end
-
-	if version_major==1,
-		r=r(2:end-2);
-	end
-
-	string=(r);
+%remove trailing  blanks
+string = deblank(string);
Index: /issm/trunk/src/m/coordsystems/epsg2proj.py
===================================================================
--- /issm/trunk/src/m/coordsystems/epsg2proj.py	(revision 28012)
+++ /issm/trunk/src/m/coordsystems/epsg2proj.py	(revision 28013)
@@ -20,21 +20,11 @@
 
     #First, get GDAL version
-    subproc_args = "gdalsrsinfo --version | awk '{print $2}' | cut -d '.' -f1"
+    #subproc_args = "gdalsrsinfo --version | awk '{print $2}' | cut -d '.' -f1"
+    subproc_args = "projinfo -o PROJ -q epsg:{}".format(epsg)
     subproc = subprocess.Popen(subproc_args, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
     outs, errs = subproc.communicate()
     if errs != '':
-        raise RuntimeError("epsg2proj: call to gdalsrsinfo failed: {}".format(errs))
-
-    version_major=int(outs)
-
-    subproc_args = "gdalsrsinfo epsg:{} | command grep PROJ.4 | tr -d '\n' | sed 's/PROJ.4 : //'".format(epsg)
-    subproc = subprocess.Popen(subproc_args, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
-    outs, errs = subproc.communicate()
-    if errs != '':
-        raise RuntimeError("epsg2proj: call to gdalsrsinfo failed: {}".format(errs))
-
-    if version_major == 1:
-        outs = outs[1:-1]
+        raise RuntimeError("epsg2proj: call to projinfo failed: {}".format(errs))
 
     return outs
-#}}}
+# }}}
Index: /issm/trunk/src/m/coordsystems/flaglatlongradius.py
===================================================================
--- /issm/trunk/src/m/coordsystems/flaglatlongradius.py	(revision 28012)
+++ /issm/trunk/src/m/coordsystems/flaglatlongradius.py	(revision 28013)
@@ -4,5 +4,5 @@
 
 
-def flaglatlogradius(lat, long, lat0, long0, radius): # {{{
+def flaglatlogradius(lat, long, lat0, long0, radius):  # {{{
     '''
     FLAGLATLONGRADIUS - given a vector of lat, long, and a circle of radius 
Index: /issm/trunk/src/m/coordsystems/flagradiuselements.py
===================================================================
--- /issm/trunk/src/m/coordsystems/flagradiuselements.py	(revision 28012)
+++ /issm/trunk/src/m/coordsystems/flagradiuselements.py	(revision 28013)
@@ -6,5 +6,5 @@
 
 
-def flagradiuselements(elements, x, y, z, lat0, long0, radius): # {{{
+def flagradiuselements(elements, x, y, z, lat0, long0, radius):  # {{{
     # get x0,y0,z0:
     R   = planetradius('earth')
Index: /issm/trunk/src/m/coordsystems/gdaltransform.py
===================================================================
--- /issm/trunk/src/m/coordsystems/gdaltransform.py	(revision 28012)
+++ /issm/trunk/src/m/coordsystems/gdaltransform.py	(revision 28013)
@@ -9,5 +9,5 @@
 
 
-def gdaltransform(x, y, proj_in, proj_out): #{{{
+def gdaltransform(x, y, proj_in, proj_out):  # {{{
     """GDALTRANSFORM - switch from one projection system to another
 
@@ -70,3 +70,3 @@
 
     return [xout, yout]
-#}}}
+# }}}
Index: /issm/trunk/src/m/coordsystems/gmtmask.m
===================================================================
--- /issm/trunk/src/m/coordsystems/gmtmask.m	(revision 28012)
+++ /issm/trunk/src/m/coordsystems/gmtmask.m	(revision 28013)
@@ -5,4 +5,7 @@
 %      mask.ocean = gmtmask(md.mesh.lat,md.mesh.long);
 %
+%	TODO: Standardize discovery of GMT bin path and whether or not we have GMT 
+%	modules (i.e. `gmt select`) between this file, gmtmask.py, and 
+%	gmtmaskparallel.m
 
 	%are we doing a recursive call? 
@@ -31,12 +34,23 @@
 	%First, write our lat,long file for gmt:
 	nv=length(lat);
-	filename_all = ['all_vertices-' num2str(feature('GetPid')) '.txt']; 
-	filename_oce = ['oce_vertices-' num2str(feature('GetPid')) '.txt']; 
+	filename_suffix=[num2str(feature('GetPid')) '.txt'];
+	filename_all=['all_vertices-' filename_suffix]; 
+	filename_oce=['oce_vertices-' filename_suffix];
 	dlmwrite(filename_all,[long lat (1:nv)'],'delimiter','\t','precision',10);
 
 	%figure out which vertices are on the ocean, which one on the continent:
-	[status,result] = system(['gmt select ./' filename_all ' -h0 -Df -R0/360/-90/90  -A0 -JQ180/200 -Nk/s/s/k/s > ./' filename_oce]);
+	%
+	% NOTE: Remove -Ve option to enable warnings if this method is not working 
+	%		expected
+	% 
+	gmt_select_options='-Ve -h0 -Df -R0/360/-90/90 -A0 -JQ180/200 -Nk/s/s/k/s';
+	[status,result]=system(['gmt select ./' filename_all ' ' gmt_select_options ' > ./' filename_oce]);
 	if status~=0,
-		error(result);
+		%assume we are working with GMT 6.0.0
+		gmt_select_options='-h0 -Df -R0/360/-90/90 -A0 -JQ180/200 -Nk/s/s/k/s';
+		[status,result] = system(['gmtselect ./' filename_all ' ' gmt_select_options ' > ./' filename_oce]);
+		if status~=0,
+			error(result);
+		end
 	end
 
Index: /issm/trunk/src/m/coordsystems/gmtmask.py
===================================================================
--- /issm/trunk/src/m/coordsystems/gmtmask.py	(revision 28012)
+++ /issm/trunk/src/m/coordsystems/gmtmask.py	(revision 28013)
@@ -10,10 +10,9 @@
 
 def gmtmask(lat, long, *args):
-    '''
-    GMTMASK - figure out which lat, long points are on the ocean
+    """gmtmask - figure out which lat, long points are on the ocean
 
-        Usage:
-            mask.ocean = gmtmask(md.mesh.lat, md.mesh.long)
-    '''
+    Usage:
+        mask.ocean = gmtmask(md.mesh.lat, md.mesh.long)
+    """
     lenlat = len(lat)
     mask = np.empty(lenlat)
@@ -30,5 +29,5 @@
         print(('gmtmask: num vertices ' + str(lenlat)))
 
-    #Check lat and long size is not more than 50,000. If so, recursively call gmtmask:
+    # Check lat and long size is not more than 50,000. If so, recursively call gmtmask.
     if lenlat > 50000:
         for i in range(int(ceil(lenlat / 50000))):
@@ -39,13 +38,28 @@
         return mask
 
-    #First, write our lat, long file for gmt:
+    # First, write our lat, long file for gmt
     nv = lenlat
     #print(np.transpose([int, lat, np.arange(1, nv + 1)]))
     np.savetxt('./all_vertices.txt', np.transpose([long, lat, np.arange(1, nv + 1)]), delimiter='\t', fmt='%.10f')
 
-    #figure out which vertices are on the ocean, which one on the continent:
-    subprocess.call('gmt select ./ all_vertices.txt -h0 -Df -R0/360/-90/90 -A0 -JQ180/200 -Nk/s/s/k/s > ./oce_vertices.txt', shell=True)
+    # Figure out which vertices are on the ocean, which one on the continent:
+    #
+    # NOTE: Remove -Ve option to enable warnings if this method is not working 
+    #       expected
+    #
+    gmt_select_options = '-Ve -h0 -Df -R0/360/-90/90 -A0 -JQ180/200 -Nk/s/s/k/s'
+    subproc_cmd = 'gmt select ./all_vertices.txt ' + gmt_select_options + ' > ./oce_vertices.txt'
+    subproc = subprocess.Popen(subproc_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
+    outs, errs = subproc.communicate()
+    if errs != '':
+        # Assume we are working with GMT 6.0.0
+        gmt_select_options = '-h0 -Df -R0/360/-90/90 -A0 -JQ180/200 -Nk/s/s/k/s'
+        subproc_cmd = 'gmtselect ./all_vertices.txt ' + gmt_select_options + ' > ./oce_vertices.txt'
+        subproc = subprocess.Popen(subproc_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
+        outs, errs = subproc.communicate()
+        if errs != '':
+            raise RuntimeError('gmtmask: calls to both gmt and gmtselect failed: {}'.format(errs))
 
-    #read the con_vertices.txt file and flag our mesh vertices on the continent
+    # Read the con_vertices.txt file and flag our mesh vertices on the continent
     fid = open('./oce_vertices.txt', 'r')
     line = fid.readline()
Index: /issm/trunk/src/m/coordsystems/gmtmaskparallel.m
===================================================================
--- /issm/trunk/src/m/coordsystems/gmtmaskparallel.m	(revision 28012)
+++ /issm/trunk/src/m/coordsystems/gmtmaskparallel.m	(revision 28013)
@@ -27,20 +27,27 @@
 
 	%Find path to gmt, list all possible known paths to gmt (you may need to add yours to the list)
-   paths = {[issmdir() '/bin/gmt'],[issmdir() '/externalpackages/gmt/install/bin/gmt'],'/Applications/GMT-5.4.3.app/Contents/Resources/bin/gmt'};
-   gmtpath = '';
-   for i=paths
-      if exist(i{1},'file'),
-         gmtpath = i{1};
+	paths = {[issmdir() '/bin'],[issmdir() '/externalpackages/gmt/install/bin'],'/Applications/GMT-5.4.3.app/Contents/Resources/bin'};
+	gmt_path = '';
+	for i=paths
+		if exist([i{1} '/gmt'],'file'),
+			gmt_path = i{1};
 			break;
-      end
-   end
-   if isempty(gmtpath),
+		end
+	end
+	if isempty(gmt_path),
 		error('gmt not found, make sure it is properly installed, or add its path to this file (line 26)');
-   end
+	end
+
+	%Figure out if we have gmtselect executable
+	if exist([gmt_path '/gmtselect'],'file'),
+		gmt_exec=[gmt_path '/gmtselect'];
+	else,
+		gmt_exec=[gmt_path '/gmtselect'];
+	end
 
 	%Build xjobs script:
 	fid=fopen('xjobs.script','w');
 	for i=1:length(nnv)-1,
-		fprintf(fid,'%s gmt select ./all_vertices%i.txt -h0 -Df -R0/360/-90/90  -A0 -JQ180/200 -Nk/s/s/k/s > ./oce_vertices%i.txt\n',gmtpath,i,i);
+		fprintf(fid,'%s ./all_vertices%i.txt -h0 -Df -R0/360/-90/90 -A0 -JQ180/200 -Nk/s/s/k/s > ./oce_vertices%i.txt\n',gmt_exec,i,i);
 	end
 	fclose(fid);
@@ -62,4 +69,4 @@
 	mask=zeros(nv,1);
 	mask(flags)=1;
-	
+
 	system('rm -rf ./all_vertices*.txt ./oce_vertices*.txt vertices.txt ./gmt.history ./xjobs.script');
Index: /issm/trunk/src/m/coordsystems/laea.py
===================================================================
--- /issm/trunk/src/m/coordsystems/laea.py	(revision 28012)
+++ /issm/trunk/src/m/coordsystems/laea.py	(revision 28013)
@@ -1,3 +1,3 @@
-def laea(lat, long): #{{{
+def laea(lat, long):  # {{{
     """LAEA - Lambert Azimuthal Equal Area projection at lat, long projection 
     center.
@@ -12,3 +12,3 @@
 
     return '+proj=laea +lat_0={} +lon_0={} +x_0=0 +y_0=0 +ellps=WGS84 +units=m +no_defs'.format(lat, long)
-#}}}
+# }}}
Index: /issm/trunk/src/m/coordsystems/ll2utm.m
===================================================================
--- /issm/trunk/src/m/coordsystems/ll2utm.m	(revision 28012)
+++ /issm/trunk/src/m/coordsystems/ll2utm.m	(revision 28013)
@@ -18,6 +18,6 @@
 %	meters)	and F is flattening of the user-defined ellipsoid.
 %
-%	LL2UTM(...,ZONE) forces the UTM ZONE (scalar integer) instead of
-%	automatic set.
+%	LL2UTM(...,ZONE) forces the UTM ZONE (scalar integer or same size as
+%   LAT and LON) instead of automatic set.
 %
 %	[X,Y,ZONE]=LL2UTM(...) returns also the computed UTM ZONE (negative
@@ -37,13 +37,13 @@
 %		   Notes Techniques NT/G 76, janvier 1995.
 %
-%	Acknowledgments: Mathieu.
+%	Acknowledgments: Mathieu, Frederic Christen.
 %
 %
 %	Author: Francois Beauducel, <beauducel@ipgp.fr>
 %	Created: 2003-12-02
-%	Updated: 2014-08-24
-
-
-%	Copyright (c) 2001-2014, François Beauducel, covered by BSD License.
+%	Updated: 2019-05-29
+
+
+%	Copyright (c) 2001-2019, François Beauducel, covered by BSD License.
 %	All rights reserved.
 %
@@ -93,12 +93,14 @@
 end
 
-if isnumeric(varargin{1}) & size(varargin{1},2) == 2
+if nargin > 1 && isnumeric(varargin{1}) && isnumeric(varargin{2}) ...
+		&& (all(size(varargin{1})==size(varargin{2})) ...
+		|| isscalar(varargin(1)) || isscalar(varargin{2}))
+	lat = varargin{1};
+	lon = varargin{2};
+	v = 2;
+elseif isnumeric(varargin{1}) && size(varargin{1},2) == 2
 	lat = varargin{1}(:,1);
 	lon = varargin{1}(:,2);
 	v = 1;
-elseif nargin > 1 & isnumeric(varargin{2})
-	lat = varargin{1};
-	lon = varargin{2};
-	v = 2;
 else
 	error('Single input argument must be a 2-column matrix [LAT,LON].')
@@ -109,11 +111,23 @@
 end
 
+if any(abs(lat)>90)
+	error('LAT absolute values must be lower than 90.')
+end
+
+% checks for DATUM and/or ZONE syntax
+% NOTE: the following strategy works in any case except if ZONE argument
+% has a size of 1x2 (in that case it will be interpreted as a DATUM). To
+% force the ZONE syntax with 2 elements, just use ZONE(:) to make a colum
+% vector of 2x1.
 for n = (v+1):nargin
 	% LL2UTM(...,DATUM)
-	if ischar(varargin{n}) | (isnumeric(varargin{n}) & numel(varargin{n})==2)
+	if ischar(varargin{n}) || (isnumeric(varargin{n}) ...
+			&& all(size(varargin{n})==[1,2]))
 		datum = varargin{n};
 	% LL2UTM(...,ZONE)
-	elseif isnumeric(varargin{n}) & isscalar(varargin{n})
-			zone = round(varargin{n});
+	elseif isnumeric(varargin{n}) && (isscalar(varargin{n}) ...
+			|| (isscalar(lat) || all(size(varargin{n})==size(lat))) ...
+			&& (isscalar(lon) || all(size(varargin{n})==size(lon))))
+		zone = round(varargin{n});
 	else
 		error('Unknown argument #%d. See documentation.',n)
@@ -142,5 +156,5 @@
 	F0 = round((l1*D0 + 183)/6);
 else
-	F0 = zone;
+	F0 = abs(zone);
 end
 
@@ -148,5 +162,5 @@
 E1 = sqrt((A1*A1 - B1*B1)/(A1*A1));
 P0 = 0/D0;
-L0 = (6*F0 - 183)/D0;		% UTM origin longitude (rad)
+L0 = (6*F0 - 183)/D0;	% UTM origin longitude (rad)
 Y0 = 1e7*(p1 < 0);		% UTM false northern (m)
 N = K0*A1;
@@ -166,9 +180,8 @@
 % same size as x/y in case of crossed zones
 if nargout > 2
-	fu = unique(F0.*sign(lat));
+   	f = F0.*sign(lat);
+	fu = unique(f);
 	if isscalar(fu)
 		f = fu;
-	else
-		f = F0;
 	end
 end
Index: /issm/trunk/src/m/coordsystems/ll2xy.m
===================================================================
--- /issm/trunk/src/m/coordsystems/ll2xy.m	(revision 28012)
+++ /issm/trunk/src/m/coordsystems/ll2xy.m	(revision 28013)
@@ -40,33 +40,18 @@
 end
 
-%Move to CoordTransform now...
-if exist('CoordTransform_matlab')==3
-	disp('Calling CoordTransform instead, make sure to change your MATLAB script');
-	if sgn==+1
-		if delta==45 && slat==70      %BedMachine
-			[x y]=CoordTransform(lat,lon,'EPSG:4326','EPSG:3413'); return;
-		elseif delta==0 && slat==75   %IBCAO
-			[x y]=CoordTransform(lat,lon,'EPSG:4326','EPSG:3996'); return;
-		elseif delta==39 && slat==71  %Bamber
-			Bamber_proj = '+proj=stere +lat_0=90 +lat_ts=71 +lon_0=-39 +k=1 +x_0=0 +y_0=0 +ellps=WGS84 +datum=WGS84 +units=m +no_defs';
-			[x y]=CoordTransform(lat,lon,'EPSG:4326',Bamber_proj); return;
-		else
-			error('not supported yet');
-		end
-	else
-		assert(delta==0); assert(slat ==71);
-		[x y]=CoordTransform(lat,lon,'EPSG:4326','EPSG:3031');
-		return;
-	end
+%Choose ellipsoid
+if 0
+	%Hughes ellipsoid
+	re   = 6378.273*10^3; % Radius of the earth in meters
+	ex2 = .006693883;     % Eccentricity of the Hughes ellipsoid squared
+else
+	%WGS84 ellipsoid
+	re = 6378137;         % Radius of the earth in meters
+	f  = 1./298.257223563;% Earth flattening
+	ex2 = 2*f-f^2;        % Eccentricity squared
 end
 
-% Conversion constant from degrees to radians
-cde  = 57.29577951;
-% Radius of the earth in meters
-re   = 6378.273*10^3;
-% Eccentricity of the Hughes ellipsoid squared
-ex2   = .006693883;
-% Eccentricity of the Hughes ellipsoid
-ex    =  sqrt(ex2);
+% Eccentricity 
+ex = sqrt(ex2);
 
 latitude  = abs(lat) * pi/180.;
Index: /issm/trunk/src/m/coordsystems/ll2xy.py
===================================================================
--- /issm/trunk/src/m/coordsystems/ll2xy.py	(revision 28012)
+++ /issm/trunk/src/m/coordsystems/ll2xy.py	(revision 28013)
@@ -13,32 +13,32 @@
         x, y = ll2xy(lat, lon, sgn, central_meridian, standard_parallel)
 
-    - sgn = Sign of latitude	1 : north latitude (default is mer = 45 lat = 70)
-    						   -1 : south latitude (default is mer = 0  lat = 71)
+    - sgn = Sign of latitude    1 : north latitude (default is mer = 45 lat = 70)
+                               -1 : south latitude (default is mer = 0  lat = 71)
     """
     assert sgn == 1 or sgn == -1, 'error: sgn should be either 1 or -1'
-	 
-	 # Get central_meridian and standard_parallel depending on hemisphere
+
+         # Get central_meridian and standard_parallel depending on hemisphere
     if len(args) == 2:
-       delta = args[0]
-       slat = args[1]
+        delta = args[0]
+        slat = args[1]
     elif len(args) == 0:
-       if sgn == 1:
-          delta = 45.
-          slat = 70.
-          print('        ll2xy: creating coordinates in north polar stereographic (Std Latitude: 70degN Meridian: 45deg)')
-       elif sgn == -1:
-          delta = 0.
-          slat = 71.
-          print('        ll2xy: creating coordinates in south polar stereographic (Std Latitude: 71degS Meridian: 0deg)')
-       else:
-          raise ValueError('sgn should be either 1 or -1')
+        if sgn == 1:
+            delta = 45.
+            slat = 70.
+            print('        ll2xy: creating coordinates in north polar stereographic (Std Latitude: 70degN Meridian: 45deg)')
+        elif sgn == -1:
+            delta = 0.
+            slat = 71.
+            print('        ll2xy: creating coordinates in south polar stereographic (Std Latitude: 71degS Meridian: 0deg)')
+        else:
+            raise ValueError('sgn should be either 1 or -1')
     else:
-       raise Exception('bad usage: type "help(ll2xy)" for details')
+        raise Exception('bad usage: type "help(ll2xy)" for details')
 
     # if lat, lon passed as lists, convert to np.arrays
     if type(lat) != "np.ndarray":
-       lat = np.array(lat)
+        lat = np.array(lat)
     if type(lon) != "np.ndarray":
-       lon = np.array(lon)
+        lon = np.array(lon)
 
     # Conversion constant from degrees to radians
Index: /issm/trunk/src/m/coordsystems/utm2ll.m
===================================================================
--- /issm/trunk/src/m/coordsystems/utm2ll.m	(revision 28012)
+++ /issm/trunk/src/m/coordsystems/utm2ll.m	(revision 28013)
@@ -5,5 +5,5 @@
 %	(in degrees). Default datum is WGS84.
 %
-%	X and Y can be scalars, vectors or matrix. Outputs LAT and LON will
+%	X, Y and F can be scalars, vectors or matrix. Outputs LAT and LON will
 %	have the same size as inputs.
 %
@@ -32,8 +32,12 @@
 %	Author: Francois Beauducel, <beauducel@ipgp.fr>
 %	Created: 2001-08-23
-%	Updated: 2014-04-20
-
-
-%	Copyright (c) 2001-2014, François Beauducel, covered by BSD License.
+%	Updated: 2019-05-29
+
+%	Revision history:
+%
+%	[2019-05-29]
+%	  - fix an issue when X or Y are matrices.
+
+%	Copyright (c) 2001-2019, François Beauducel, covered by BSD License.
 %	All rights reserved.
 %
@@ -74,10 +78,13 @@
 end
 
-if all([numel(x),numel(y)] > 1) && any(size(x) ~= size(y))
-	error('X and Y must be the same size or scalars.')
-end
-
-if ~isnumeric(f) || ~isscalar(f) || f ~= round(f)
-	error('ZONE must be a scalar integer.')
+% checks if input arguments have compatible sizes using unique(complex)
+sz = [size(x);size(y);size(f)];
+sz = complex(sz(:,1),sz(:,2));
+if length(unique(sz(sz~=complex(1,1)))) > 1
+	error('X, Y and ZONE must be scalar or vector/matrix of the same size.')
+end
+
+if ~isnumeric(f) || any(f ~= round(f))
+	error('ZONE must be integer value.')
 end
 
@@ -101,4 +108,9 @@
 end
 
+% calculations are made on column vectors
+x = x(:);
+y = y(:);
+f = f(:);
+
 % constants
 D0 = 180/pi;	% conversion rad to deg
@@ -133,5 +145,5 @@
 p0 = NaN;
 n = 0;
-while any(isnan(p0) | abs(p - p0) > eps) & n < maxiter
+while any(isnan(p0) | abs(p - p0) > eps) && n < maxiter
 	p0 = p;
 	es = E1*sin(p0);
@@ -141,8 +153,10 @@
 
 if nargout < 2
-	lat = D0*[p(:),l(:)];
+	lat = D0*[p,l];
 else
-	lat = p*D0;
-	lon = l*D0;
+	% reshapes vectors to x/y/f original size
+	sz = max([size(x);size(y);size(f)]);
+	lat = reshape(p*D0,sz);
+	lon = reshape(l*D0,sz);
 end
 
Index: /issm/trunk/src/m/coordsystems/xy2ll.m
===================================================================
--- /issm/trunk/src/m/coordsystems/xy2ll.m	(revision 28012)
+++ /issm/trunk/src/m/coordsystems/xy2ll.m	(revision 28013)
@@ -36,38 +36,18 @@
 end
 
-%Move to CoordTransform now...
-if exist('CoordTransform_matlab')==3
-	disp('Calling CoordTransform instead, make sure to change your MATLAB script');
-	if sgn==+1
-		if delta==45 && slat==70      %BedMachine
-			[lat lon]=CoordTransform(x, y,'EPSG:3413','EPSG:4326'); return;
-		elseif delta==0 && slat==75   %IBCAO
-			[lat lon]=CoordTransform(x, y,'EPSG:3996','EPSG:4326'); return;
-		elseif delta==39 && slat==71  %Bamber
-			Bamber_proj = '+proj=stere +lat_0=90 +lat_ts=71 +lon_0=-39 +k=1 +x_0=0 +y_0=0 +ellps=WGS84 +datum=WGS84 +units=m +no_defs';
-			[lat lon]=CoordTransform(x, y, Bamber_proj, 'EPSG:4326'); return;
-		else
-			error('not supported yet');
-		end
-	else
-		assert(delta==0); assert(slat ==71);
-		[lat lon]=CoordTransform(x, y,'EPSG:3031','EPSG:4326');
-		return;
-	end
+%Choose ellipsoid
+if 0
+	%Hughes ellipsoid
+	re   = 6378.273*10^3; % Radius of the earth in meters
+	ex2 = .006693883;     % Eccentricity of the Hughes ellipsoid squared
+else
+	%WGS84 ellipsoid
+	re = 6378137;         % Radius of the earth in meters
+	f  = 1./298.257223563;% Earth flattening
+	ex2 = 2*f-f^2;        % Eccentricity squared
 end
 
-if nargout~=3 & nargout~=2,
-	help xy2ll
-	error('bad usage');
-end
-
-% Conversion constant from degrees to radians
-cde  = 57.29577951;
-% Radius of the earth in meters
-re   = 6378.273*10^3;
-% Eccentricity of the Hughes ellipsoid squared
-ex2   = .006693883;
-% Eccentricity of the Hughes ellipsoid
-ex    =  sqrt(ex2);
+% Eccentricity 
+ex = sqrt(ex2);
 
 sl  = slat*pi/180.;
Index: /issm/trunk/src/m/dev/devpath.m
===================================================================
--- /issm/trunk/src/m/dev/devpath.m	(revision 28012)
+++ /issm/trunk/src/m/dev/devpath.m	(revision 28013)
@@ -18,6 +18,6 @@
 
 %ISSM path
-addpath([ISSM_DIR '/src/m/os/']); %load recursivepath
-addpath([ISSM_DIR '/lib']);       %load MEX files
+addpath([ISSM_DIR '/src/m/os/']);       %load recursivepath
+addpath([ISSM_DIR '/lib']);             %load MEX files
 addpath(recursivepath([ISSM_DIR '/src/m']));
 addpath(recursivepath([ISSM_DIR '/externalpackages/scotch']));
@@ -30,5 +30,4 @@
 addpath(recursivepath([ISSM_DIR '/externalpackages/mealpix']));
 addpath(recursivepath([ISSM_DIR '/externalpackages/pcatool']));
-clear ISSM_DIR;
 
 %Check on any warning messages that might indicate that the paths were not correct. 
@@ -40,4 +39,9 @@
 end
 
+warning ('off','all');
+addpath([ISSM_DIR '/lib-precompiled']); %load MEX files (precompiled; remove after MEX file compilation is supported on Silicon-based Macs)
+warning ('on','all');
+clear ISSM_DIR;
+
 %disable matlab bell!
 beep off;
Index: /issm/trunk/src/m/dev/devpath.py
===================================================================
--- /issm/trunk/src/m/dev/devpath.py	(revision 28012)
+++ /issm/trunk/src/m/dev/devpath.py	(revision 28013)
@@ -1,7 +1,7 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 import os
 import sys
 
-#Recover ISSM_DIR and USERNAME
+# Recover ISSM_DIR and USERNAME
 ISSM_DIR = os.getenv('ISSM_DIR')
 USERNAME = os.getenv('USER')
@@ -10,5 +10,5 @@
     raise NameError('"ISSM_DIR" environment variable is empty! You should define ISSM_DIR in your .cshrc or .bashrc!')
 
-#Go through src/m and append any directory that contains a *.py file to PATH
+# Go through src/m and append any directory that contains a *.py file to PATH
 for root, dirs, files in os.walk(ISSM_DIR + '/src/m'):
     if '.svn' in dirs:
@@ -20,9 +20,11 @@
                     sys.path.append(root)
 
-#Also add the Nightly run directory
+# Also add the Nightly run directory
 if ISSM_DIR + '/test/NightlyRun' not in sys.path:
     sys.path.append(ISSM_DIR + '/test/NightlyRun')
 if ISSM_DIR + '/lib' not in sys.path:
     sys.path.append(ISSM_DIR + '/lib')
+if ISSM_DIR + '/lib-precompiled' not in sys.path:
+    sys.path.append(ISSM_DIR + '/lib-precompiled') # load precompiled MEX files; remove after MEX file compilation is supported on Silicon-based Macs
 if ISSM_DIR + '/src/wrappers/python/.libs' not in sys.path:
     sys.path.append(ISSM_DIR + '/src/wrappers/python/.libs')
@@ -51,2 +53,3 @@
 # print("\n  ISSM development path correctly loaded")
 # print("Current path is {}\n\n".format(ISSM_DIR))
+
Index: /issm/trunk/src/m/dev/issmversion.m
===================================================================
--- /issm/trunk/src/m/dev/issmversion.m	(revision 28012)
+++ /issm/trunk/src/m/dev/issmversion.m	(revision 28013)
@@ -1,11 +1,17 @@
-function issmversion(),
+function vers = issmversion(),
 %ISSMVERSION - display ISSM version
 %
 %   Usage:
 %      issmversion()
+%      version = issmversion()
 
 
 if exist('IssmConfig_matlab')~=3,
 	error('ISSM not correctly installed. "IssmConfig_matlab" not found');
+end
+
+if nargout==1
+	vers = IssmConfig('PACKAGE_VERSION');
+	return;
 end
 
@@ -17,5 +23,5 @@
 disp(['Compiled on ' IssmConfig('HOST_VENDOR') ' ' IssmConfig('HOST_OS') ' ' IssmConfig('HOST_ARCH') ' by ' IssmConfig('USER_NAME')]);
 disp([' ']);
-disp(['Copyright (c) 2009-2022 California Institute of Technology']);
+disp(['Copyright (c) 2009-2023 California Institute of Technology']);
 disp([' ']);
 disp(['    to get started type: issmdoc']);
Index: /issm/trunk/src/m/dev/issmversion.py
===================================================================
--- /issm/trunk/src/m/dev/issmversion.py	(revision 28012)
+++ /issm/trunk/src/m/dev/issmversion.py	(revision 28013)
@@ -16,5 +16,5 @@
 print(' ')
 print(('Build date: ' + IssmConfig('PACKAGE_BUILD_DATE')[0]))
-print('Copyright (c) 2009-2022 California Institute of Technology')
+print('Copyright (c) 2009-2023 California Institute of Technology')
 print(' ')
 print('    to get started type: issmdoc')
Index: /issm/trunk/src/m/exp/contourlevelzero.py
===================================================================
--- /issm/trunk/src/m/exp/contourlevelzero.py	(revision 28012)
+++ /issm/trunk/src/m/exp/contourlevelzero.py	(revision 28013)
@@ -3,229 +3,222 @@
 from collections import OrderedDict
 
+
 def contourlevelzero(md,mask,level):
-    """CONTOURLEVELZERO - figure out the zero level (or offset thereof, specified by the level value)
-                       of a vectorial mask, and vectorialize it into an exp or shp compatible structure.
-    
-       Usage:
-          contours=contourlevelzero(md,mask,level)
-    
-       See also: PLOT_CONTOUR
+    """CONTOURLEVELZERO - figure out the zero level (or offset thereof, 
+    specified by the level value of a vectorial mask, and vectorialize it into 
+    an exp or shp compatible structure.
+
+    Usage:
+        contours=contourlevelzero(md,mask,level)
+
+        See also: PLOT_CONTOUR
     """
-    
-    #process data 
-    if md.mesh.dimension()==3:
+
+    # Process data 
+    if md.mesh.dimension() == 3:
         x = md.mesh.x2d
         y = md.mesh.y2d
-        z=md.mesh.z
-        index=md.mesh.elements2d-1
+        z = md.mesh.z
+        index = md.mesh.elements2d - 1
     else:
-        x=md.mesh.x
-        y=md.mesh.y
-        index=md.mesh.elements-1
-        z=np.zeros((md.mesh.numberofvertices,1))
-        
-    if len(mask)==0:
+        x = md.mesh.x
+        y = md.mesh.y
+        index = md.mesh.elements - 1
+        z = np.zeros((md.mesh.numberofvertices, 1))
+
+    if len(mask) == 0:
         raise OSError("mask provided is empty")
-    
-    if md.mesh.dimension()==3:
-        if len(mask)!=md.mesh.numberofvertices2d: 
+
+    if md.mesh.dimension() == 3:
+        if len(mask) != md.mesh.numberofvertices2d: 
             raise OSError("mask provided should be specified at the vertices of the mesh")
     else:
-        if len(mask)!=md.mesh.numberofvertices:
+        if len(mask) != md.mesh.numberofvertices:
             raise OSError("mask provided should be specified at the vertices of the mesh")
+
+    # Initialization of some variables
+    numberofelements = np.size(index, 0)
+    elementslist = np.c_[0:numberofelements]
+    c = []
+    h = []
+
+    # Get unique edges in mesh
+    # 1: list of edges
+    edges = np.vstack((np.vstack((index[:, (0, 1)], index[:, (1, 2)])), index[:, (2, 0)]))
+    # 2: find unique edges
+    [edges, J] = np.unique(np.sort(edges, 1), axis=0, return_inverse=True)
+    # 3: unique edge numbers
+    vec = J
+    # 4: unique edges numbers in each triangle (2 triangles sharing the same 
+    # edge will have the same edge number)
+    edges_tria = np.hstack((np.hstack((vec[elementslist], vec[elementslist + numberofelements])), vec[elementslist + 2 * numberofelements]))
+
+    # Segments [nodes1 nodes2]
+    Seg1 = index[:, (0, 1)]
+    Seg2 = index[:, (1, 2)]
+    Seg3 = index[:, (2, 0)]
+
+    # Segment numbers [1;4;6;...]
+    Seg1_num = edges_tria[:, 0]
+    Seg2_num = edges_tria[:, 1]
+    Seg3_num = edges_tria[:, 2]
+
+    #value of data on each tips of the segments
+    Data1 = mask[Seg1]
+    Data2 = mask[Seg2]
+    Data3 = mask[Seg3]
+
+    # Get the ranges for each segment
+    Range1 = np.sort(Data1, 1)
+    Range2 = np.sort(Data2, 1)
+    Range3 = np.sort(Data3, 1)
+
+    # Find the segments that contain this value
+    pos1 = (Range1[:, 0] < level) & (Range1[:, 1] >= level)
+    pos2 = (Range2[:, 0] < level) & (Range2[:, 1] >= level)
+    pos3 = (Range3[:, 0] < level) & (Range3[:, 1] >= level)
+
+    # Get elements
+    poselem12 = (pos1) & (pos2)
+    poselem13 = (pos1) & (pos3)
+    poselem23 = (pos2) & (pos3)
+    poselem = np.where((poselem12) | (poselem13) | (poselem23))
+    poselem = poselem[0]
+    numelems = len(poselem)
+
+    # If no element has been flagged, skip to the next level
+    if numelems == 0:
+        raise Exception('contourlevelzero warning message: no elements found with corresponding level value in mask')
+        contours = []
+        return contours
+
+    # Go through the elements and build the coordinates for each segment (1 by element)
+    x1 = np.zeros((numelems, 1))
+    x2 = np.zeros((numelems, 1))
+    y1 = np.zeros((numelems, 1))
+    y2 = np.zeros((numelems, 1))
+    z1 = np.zeros((numelems, 1))
+    z2 = np.zeros((numelems, 1))
+
+    edge_l = np.zeros((numelems, 2))
+
+    for j in range(0, numelems):
+        with np.errstate(divide='ignore', invalid='ignore'):
+            weight1 = np.divide(level - Data1[poselem[j], 0],Data1[poselem[j], 1] - Data1[poselem[j], 0])
+            weight2 = np.divide(level - Data2[poselem[j], 0],Data2[poselem[j], 1] - Data2[poselem[j], 0])
+            weight3 = np.divide(level - Data3[poselem[j], 0],Data3[poselem[j], 1] - Data3[poselem[j], 0])
+
+        if poselem12[poselem[j]] == True:
+            x1[j] = x[Seg1[poselem[j], 0]] + weight1 * [x[Seg1[poselem[j], 1]] - x[Seg1[poselem[j], 0]]]
+            x2[j] = x[Seg2[poselem[j], 0]] + weight2 * [x[Seg2[poselem[j], 1]] - x[Seg2[poselem[j], 0]]]
+            y1[j] = y[Seg1[poselem[j], 0]] + weight1 * [y[Seg1[poselem[j], 1]] - y[Seg1[poselem[j], 0]]]
+            y2[j] = y[Seg2[poselem[j], 0]] + weight2 * [y[Seg2[poselem[j], 1]] - y[Seg2[poselem[j], 0]]]
+            z1[j] = z[Seg1[poselem[j], 0]] + weight1 * [z[Seg1[poselem[j], 1]] - z[Seg1[poselem[j], 0]]]
+            z2[j] = z[Seg2[poselem[j], 0]] + weight2 * [z[Seg2[poselem[j], 1]] - z[Seg2[poselem[j], 0]]]
+
+            edge_l[j, 0] = Seg1_num[poselem[j]]
+            edge_l[j, 1] = Seg2_num[poselem[j]]
+        elif poselem13[poselem[j]] == True:
+            x1[j] = x[Seg1[poselem[j], 0]] + weight1 * [x[Seg1[poselem[j], 1]] - x[Seg1[poselem[j], 0]]]
+            x2[j] = x[Seg3[poselem[j], 0]] + weight3 * [x[Seg3[poselem[j], 1]] - x[Seg3[poselem[j], 0]]]
+            y1[j] = y[Seg1[poselem[j], 0]] + weight1 * [y[Seg1[poselem[j], 1]] - y[Seg1[poselem[j], 0]]]
+            y2[j] = y[Seg3[poselem[j], 0]] + weight3 * [y[Seg3[poselem[j], 1]] - y[Seg3[poselem[j], 0]]]
+            z1[j] = z[Seg1[poselem[j], 0]] + weight1 * [z[Seg1[poselem[j], 1]] - z[Seg1[poselem[j], 0]]]
+            z2[j] = z[Seg3[poselem[j], 0]] + weight3 * [z[Seg3[poselem[j], 1]] - z[Seg3[poselem[j], 0]]]
+
+            edge_l[j, 0] = Seg1_num[poselem[j]]
+            edge_l[j, 1] = Seg3_num[poselem[j]]
+        elif poselem23[poselem[j]] == True:
+            x1[j] = x[Seg2[poselem[j], 0]] + weight2 * [x[Seg2[poselem[j], 1]] - x[Seg2[poselem[j], 0]]]
+            x2[j] = x[Seg3[poselem[j], 0]] + weight3 * [x[Seg3[poselem[j], 1]] - x[Seg3[poselem[j], 0]]]
+            y1[j] = y[Seg2[poselem[j], 0]] + weight2 * [y[Seg2[poselem[j], 1]] - y[Seg2[poselem[j], 0]]]
+            y2[j] = y[Seg3[poselem[j], 0]] + weight3 * [y[Seg3[poselem[j], 1]] - y[Seg3[poselem[j], 0]]]
+            z1[j] = z[Seg2[poselem[j], 0]] + weight2 * [z[Seg2[poselem[j], 1]] - z[Seg2[poselem[j], 0]]]
+            z2[j] = z[Seg3[poselem[j], 0]] + weight3 * [z[Seg3[poselem[j], 1]] - z[Seg3[poselem[j], 0]]]
+
+            edge_l[j, 0] = Seg2_num[poselem[j]]
+            edge_l[j, 1] = Seg3_num[poselem[j]]
+        # else:
+        # Should never get here
+
+    # Now that we have the segments, we must try to connect them...
+
+    # Loop over the subcontours
+    contours = []
+
+    while len(edge_l) > 0:
+        # Take the right edge of the second segment and connect it to the next segments if any
+        e1 = edge_l[0, 0]
+        e2 = edge_l[0, 1]
+        xc = np.vstack((x1[0], x2[0]))
+        yc = np.vstack((y1[0], y2[0]))
+        zc = np.vstack((z1[0], z2[0]))
+        # Erase the lines corresponding to this edge
+        edge_l = np.delete(edge_l, 0, axis=0)
+        x1 = np.delete(x1, 0, axis=0)
+        x2 = np.delete(x2, 0, axis=0)
+        y1 = np.delete(y1, 0, axis=0)
+        y2 = np.delete(y2, 0, axis=0)
+        z1 = np.delete(z1, 0, axis=0)
+        z2 = np.delete(z2,0,axis=0)
+        pos1 = np.where(edge_l == e1)
         
-    #initialization of some variables
-    numberofelements=np.size(index,0)
-    elementslist=np.c_[0:numberofelements]
-    c=[]
-    h=[]
-    
-    #get unique edges in mesh
-    #1: list of edges
-    edges=np.vstack((np.vstack((index[:,(0,1)],index[:,(1,2)])),index[:,(2,0)]))
-
-    #2: find unique edges
-    [edges,J]=np.unique(np.sort(edges,1),axis=0,return_inverse=True)
-    #3: unique edge numbers
-    vec=J
-    #4: unique edges numbers in each triangle (2 triangles sharing the same edge will have
-    #   the same edge number)
-    edges_tria=np.hstack((np.hstack((vec[elementslist],vec[elementslist+numberofelements])),vec[elementslist+2*numberofelements]))
-    
-    #segments [nodes1 nodes2]
-    Seg1=index[:,(0,1)]
-    Seg2=index[:,(1,2)]
-    Seg3=index[:,(2,0)]
-    
-    #segment numbers [1;4;6;...]
-    Seg1_num=edges_tria[:,0]
-    Seg2_num=edges_tria[:,1]
-    Seg3_num=edges_tria[:,2]
-    
-    #value of data on each tips of the segments
-    Data1=mask[Seg1]
-    Data2=mask[Seg2]
-    Data3=mask[Seg3]
-    
-    #get the ranges for each segment
-    Range1=np.sort(Data1,1)
-    Range2=np.sort(Data2,1)
-    Range3=np.sort(Data3,1)
-    
-    #find the segments that contain this value
-    pos1=(Range1[:,0]<level) & (Range1[:,1]>=level)
-    pos2=(Range2[:,0]<level) & (Range2[:,1]>=level)
-    pos3=(Range3[:,0]<level) & (Range3[:,1]>=level)
-    
-    #get elements
-    poselem12=(pos1) & (pos2)
-    poselem13=(pos1) & (pos3)
-    poselem23=(pos2) & (pos3)
-    poselem=np.where((poselem12) | (poselem13) | (poselem23))
-    poselem=poselem[0]
-    numelems=len(poselem)
-    
-    #if no element has been flagged, skip to the next level
-    if numelems==0:
-        raise Exception('contourlevelzero warning message: no elements found with corresponding level value in mask')
-        contours=[]
-        return contours
-    
-    #go through the elements and build the coordinates for each segment (1 by element)
-    x1=np.zeros((numelems,1))
-    x2=np.zeros((numelems,1))
-    y1=np.zeros((numelems,1))
-    y2=np.zeros((numelems,1))
-    z1=np.zeros((numelems,1))
-    z2=np.zeros((numelems,1))
-    
-    edge_l=np.zeros((numelems,2))
-    
-    for j in range(0,numelems):
-        
-        with np.errstate(divide='ignore', invalid='ignore'):
-            weight1=np.divide(level-Data1[poselem[j],0],Data1[poselem[j],1]-Data1[poselem[j],0])
-            weight2=np.divide(level-Data2[poselem[j],0],Data2[poselem[j],1]-Data2[poselem[j],0])
-            weight3=np.divide(level-Data3[poselem[j],0],Data3[poselem[j],1]-Data3[poselem[j],0])
-        
-        if poselem12[poselem[j]]==True:
-            
-            x1[j]=x[Seg1[poselem[j],0]]+weight1*[x[Seg1[poselem[j],1]]-x[Seg1[poselem[j],0]]]
-            x2[j]=x[Seg2[poselem[j],0]]+weight2*[x[Seg2[poselem[j],1]]-x[Seg2[poselem[j],0]]]
-            y1[j]=y[Seg1[poselem[j],0]]+weight1*[y[Seg1[poselem[j],1]]-y[Seg1[poselem[j],0]]]
-            y2[j]=y[Seg2[poselem[j],0]]+weight2*[y[Seg2[poselem[j],1]]-y[Seg2[poselem[j],0]]]
-            z1[j]=z[Seg1[poselem[j],0]]+weight1*[z[Seg1[poselem[j],1]]-z[Seg1[poselem[j],0]]]
-            z2[j]=z[Seg2[poselem[j],0]]+weight2*[z[Seg2[poselem[j],1]]-z[Seg2[poselem[j],0]]]
-            
-            edge_l[j,0]=Seg1_num[poselem[j]]
-            edge_l[j,1]=Seg2_num[poselem[j]]
-        elif poselem13[poselem[j]]==True:
-            
-            x1[j]=x[Seg1[poselem[j],0]]+weight1*[x[Seg1[poselem[j],1]]-x[Seg1[poselem[j],0]]]
-            x2[j]=x[Seg3[poselem[j],0]]+weight3*[x[Seg3[poselem[j],1]]-x[Seg3[poselem[j],0]]]
-            y1[j]=y[Seg1[poselem[j],0]]+weight1*[y[Seg1[poselem[j],1]]-y[Seg1[poselem[j],0]]]
-            y2[j]=y[Seg3[poselem[j],0]]+weight3*[y[Seg3[poselem[j],1]]-y[Seg3[poselem[j],0]]]
-            z1[j]=z[Seg1[poselem[j],0]]+weight1*[z[Seg1[poselem[j],1]]-z[Seg1[poselem[j],0]]]
-            z2[j]=z[Seg3[poselem[j],0]]+weight3*[z[Seg3[poselem[j],1]]-z[Seg3[poselem[j],0]]]
-            
-            edge_l[j,0]=Seg1_num[poselem[j]]
-            edge_l[j,1]=Seg3_num[poselem[j]]
-        elif poselem23[poselem[j]]==True:
-            
-            x1[j]=x[Seg2[poselem[j],0]]+weight2*[x[Seg2[poselem[j],1]]-x[Seg2[poselem[j],0]]]
-            x2[j]=x[Seg3[poselem[j],0]]+weight3*[x[Seg3[poselem[j],1]]-x[Seg3[poselem[j],0]]]
-            y1[j]=y[Seg2[poselem[j],0]]+weight2*[y[Seg2[poselem[j],1]]-y[Seg2[poselem[j],0]]]
-            y2[j]=y[Seg3[poselem[j],0]]+weight3*[y[Seg3[poselem[j],1]]-y[Seg3[poselem[j],0]]]
-            z1[j]=z[Seg2[poselem[j],0]]+weight2*[z[Seg2[poselem[j],1]]-z[Seg2[poselem[j],0]]]
-            z2[j]=z[Seg3[poselem[j],0]]+weight3*[z[Seg3[poselem[j],1]]-z[Seg3[poselem[j],0]]]
-
-            edge_l[j,0]=Seg2_num[poselem[j]]
-            edge_l[j,1]=Seg3_num[poselem[j]]
-
-        #else:
-	    #it shoud not go here
-            
-    #now that we have the segments, we must try to connect them...
-    
-    #loop over the subcontours
-    contours=[]
-    
-    while len(edge_l)>0:
-        
-        #take the right edge of the second segment and connect it to the next segments if any
-        e1=edge_l[0,0]
-        e2=edge_l[0,1]
-        xc=np.vstack((x1[0],x2[0]))
-        yc=np.vstack((y1[0],y2[0]))
-        zc=np.vstack((z1[0],z2[0]))
-        #erase the lines corresponding to this edge
-        edge_l=np.delete(edge_l,0,axis=0)
-        x1=np.delete(x1,0,axis=0)
-        x2=np.delete(x2,0,axis=0)
-        y1=np.delete(y1,0,axis=0)
-        y2=np.delete(y2,0,axis=0)
-        z1=np.delete(z1,0,axis=0)
-        z2=np.delete(z2,0,axis=0)
-        pos1=np.where(edge_l==e1)
-        
-        while len(pos1[0])>0:
-            
-            if np.all(pos1[1]==0):
-                xc=np.vstack((x2[pos1[0]],xc))
-                yc=np.vstack((y2[pos1[0]],yc))
-                zc=np.vstack((z2[pos1[0]],zc))
-                #next edge:
-                e1=edge_l[pos1[0],1]
+        while len(pos1[0]) > 0:
+            if np.all(pos1[1] == 0):
+                xc = np.vstack((x2[pos1[0]], xc))
+                yc = np.vstack((y2[pos1[0]], yc))
+                zc = np.vstack((z2[pos1[0]], zc))
+                # Next edge:
+                e1 = edge_l[pos1[0], 1]
             else:
-                xc=np.vstack((x1[pos1[0]],xc))
-                yc=np.vstack((y1[pos1[0]],yc))
-                zc=np.vstack((z1[pos1[0]],zc))
-                #next edge:
-                e1=edge_l[pos1[0],0]
-                
-            #erase the lines of this
-            edge_l=np.delete(edge_l,pos1[0],axis=0)
-            x1=np.delete(x1,pos1[0],axis=0)
-            x2=np.delete(x2,pos1[0],axis=0)
-            y1=np.delete(y1,pos1[0],axis=0)
-            y2=np.delete(y2,pos1[0],axis=0)
-            z1=np.delete(z1,pos1[0],axis=0)
-            z2=np.delete(z2,pos1[0],axis=0)
-            #next connection
-            pos1=np.where(edge_l==e1)
-            
-        #same thing the other way (to the right)
-        pos2=np.where(edge_l==e2)
-
-        while len(pos2[0])>0:
-            
-            if np.all(pos2[1]==0):
-                xc=np.vstack((xc,x2[pos2[0]]))
-                yc=np.vstack((yc,y2[pos2[0]]))
-                zc=np.vstack((zc,z2[pos2[0]]))
-                #next edge:
-                e2=edge_l[pos2[0],1]
+                xc = np.vstack((x1[pos1[0]], xc))
+                yc = np.vstack((y1[pos1[0]], yc))
+                zc = np.vstack((z1[pos1[0]], zc))
+                # Next edge:
+                e1 = edge_l[pos1[0], 0]
+
+            # Erase the lines of this
+            edge_l = np.delete(edge_l, pos1[0], axis=0)
+            x1 = np.delete(x1, pos1[0], axis=0)
+            x2 = np.delete(x2, pos1[0], axis=0)
+            y1 = np.delete(y1, pos1[0], axis=0)
+            y2 = np.delete(y2, pos1[0], axis=0)
+            z1 = np.delete(z1, pos1[0], axis=0)
+            z2 = np.delete(z2, pos1[0], axis=0)
+            # Next connection
+            pos1 = np.where(edge_l == e1)
+
+        # Same thing the other way (to the right)
+        pos2 = np.where(edge_l == e2)
+
+        while len(pos2[0]) > 0:
+            if np.all(pos2[1] == 0):
+                xc = np.vstack((xc, x2[pos2[0]]))
+                yc = np.vstack((yc, y2[pos2[0]]))
+                zc = np.vstack((zc, z2[pos2[0]]))
+                # Next edge:
+                e2 = edge_l[pos2[0], 1]
             else:
-                xc=np.vstack((xc,x1[pos2[0]]))
-                yc=np.vstack((yc,y1[pos2[0]]))
-                zc=np.vstack((zc,z1[pos2[0]]))
-                #next edge:
-                e2=edge_l[pos2[0],0]
-                
-            #erase the lines of this
-            edge_l=np.delete(edge_l,pos2[0],axis=0)
-            x1=np.delete(x1,pos2[0],axis=0)
-            x2=np.delete(x2,pos2[0],axis=0)
-            y1=np.delete(y1,pos2[0],axis=0)
-            y2=np.delete(y2,pos2[0],axis=0)
-            z1=np.delete(z1,pos2[0],axis=0)
-            z2=np.delete(z2,pos2[0],axis=0)
-            #next connection
-            pos2=np.where(edge_l==e2)
-            
-        #save xc,yc contour: 
+                xc = np.vstack((xc, x1[pos2[0]]))
+                yc = np.vstack((yc, y1[pos2[0]]))
+                zc = np.vstack((zc, z1[pos2[0]]))
+                # Next edge:
+                e2 = edge_l[pos2[0], 0]
+
+            # Erase the lines of this
+            edge_l = np.delete(edge_l, pos2[0], axis=0)
+            x1 = np.delete(x1, pos2[0], axis=0)
+            x2 = np.delete(x2, pos2[0], axis=0)
+            y1 = np.delete(y1, pos2[0], axis=0)
+            y2 = np.delete(y2, pos2[0], axis=0)
+            z1 = np.delete(z1, pos2[0], axis=0)
+            z2 = np.delete(z2, pos2[0], axis=0)
+            # Next connection
+            pos2 = np.where(edge_l == e2)
+
+        # Save xc, yc contour:
         newcontour = OrderedDict()
         newcontour['nods'] = np.size(xc)
-        newcontour['density'] = 1 
+        newcontour['density'] = 1
         newcontour['closed'] = 0
         newcontour['x'] = np.ma.filled(xc.astype(float), np.nan)
@@ -234,4 +227,4 @@
         newcontour['name'] = ''
         contours.append(newcontour)
-        
+
     return contours
Index: /issm/trunk/src/m/exp/exptool.m
===================================================================
--- /issm/trunk/src/m/exp/exptool.m	(revision 28012)
+++ /issm/trunk/src/m/exp/exptool.m	(revision 28013)
@@ -16,5 +16,5 @@
 %      - markersize (default=7)
 %      - markeredgecolor (default='r')
-%      - nofigurecopy (default=0) do not copy current figure, this is needed on some plateform to avoid an offset in the figure
+%      - nofigurecopy (default=0) do not copy current figure, this is needed on some platform to avoid an offset in the figure
 %
 %   Usage:
@@ -139,5 +139,5 @@
 disableDefaultInteractivity(gca); %disables the built-in interactions for the specified axes
 
-%Build backup structre for do and redo
+%Build backup structure for do and redo
 backup=cell(1,3);
 backup{1,1}=A;
Index: /issm/trunk/src/m/exp/isoline.m
===================================================================
--- /issm/trunk/src/m/exp/isoline.m	(revision 28012)
+++ /issm/trunk/src/m/exp/isoline.m	(revision 28013)
@@ -30,4 +30,10 @@
 	index=md.mesh.elements;
 end
+if exist(options,'amr')
+	amr = getfieldvalue(options,'amr');
+	x=amr.MeshX;
+	y=amr.MeshY;
+	index=amr.MeshElements;
+end
 
 %Deal with z coordinate
@@ -35,5 +41,5 @@
 	z=md.mesh.z;
 else
-	z=zeros(md.mesh.numberofvertices,1);
+	z=zeros(numel(x),1);
 end
 
@@ -44,5 +50,5 @@
 	end
 else
-	if length(field)~=md.mesh.numberofvertices
+	if length(field)~=numel(x)
 		error('field provided should be of size md.mesh.numberofvertices'); 
 	end
@@ -258,4 +264,7 @@
 elseif strcmp(outputformat,'struct')
 	%nothing to do, this is the default
+elseif strcmp(outputformat,'longest')
+	[~, mId] = max([contours.nods]);
+	contours = contours(mId);
 else
 	disp('output format not supported, returning struct');
Index: /issm/trunk/src/m/extrusion/project2d.py
===================================================================
--- /issm/trunk/src/m/extrusion/project2d.py	(revision 28012)
+++ /issm/trunk/src/m/extrusion/project2d.py	(revision 28013)
@@ -40,5 +40,8 @@
         projection_value = value[(layer - 1) * md3d.mesh.numberofvertices2d:layer * md3d.mesh.numberofvertices2d]
     elif value.shape[0] == md3d.mesh.numberofvertices + 1:
-        projection_value = np.vstack((value[(layer - 1) * md3d.mesh.numberofvertices2d:layer * md3d.mesh.numberofvertices2d], value[-1]))
+        if np.ndim(value) == 1:
+            projection_value = np.hstack((value[(layer - 1) * md3d.mesh.numberofvertices2d:layer * md3d.mesh.numberofvertices2d], value[-1]))
+        else:
+            projection_value = np.vstack((value[(layer - 1) * md3d.mesh.numberofvertices2d:layer * md3d.mesh.numberofvertices2d], value[-1]))
     else:
         projection_value = value[(layer - 1) * md3d.mesh.numberofelements2d:layer * md3d.mesh.numberofelements2d]
Index: /issm/trunk/src/m/geometry/AboveGround.py
===================================================================
--- /issm/trunk/src/m/geometry/AboveGround.py	(revision 28012)
+++ /issm/trunk/src/m/geometry/AboveGround.py	(revision 28013)
@@ -1,8 +1,8 @@
 import numpy as np
 
-def AboveGround(lat, long, r, height): #{{{
+def AboveGround(lat, long, r, height):  # {{{
     r = r + height
     x = r * np.cos(np.deg2rad(lat)) * np.cos(np.deg2rad(long))
     y = r * np.cos(np.deg2rad(lat)) * np.sin(np.deg2rad(long))
     z = r * np.sin(np.deg2rad(lat))
-#}}}
+# }}}
Index: /issm/trunk/src/m/geometry/VolumeAboveFloatation.py
===================================================================
--- /issm/trunk/src/m/geometry/VolumeAboveFloatation.py	(revision 28013)
+++ /issm/trunk/src/m/geometry/VolumeAboveFloatation.py	(revision 28013)
@@ -0,0 +1,84 @@
+#!/usr/bin/env python
+from model import model
+import numpy as np
+from GetAreas import GetAreas
+from mesh3dprisms import mesh3dprisms
+from mesh2d import mesh2d
+
+def VolumeAboveFloatation(md, step=None, flags=None):
+   '''
+VOLUMEABOVEFLOATATION - returns volume above floatation
+
+   Usage:
+      V = VolumeAboveFloatation(md)          % uses model fiels alone
+      V = VolumeAboveFloatation(md,10)       % Will look at step 10 of transient solution
+      V = VolumeAboveFloatation(md,10,flags) % Will look at step 10 of transient solution, only flaged elements
+   '''
+   isverb = 0 # verbosity.
+
+   #Special case if 3d
+   if isinstance(md.mesh, mesh3dprisms):
+      index = md.mesh.elements2d-1;
+      x = md.mesh.x2d;
+      y = md.mesh.y2d;
+   elif isinstance(md.mesh, mesh2d):
+      index = md.mesh.elements-1;
+      x = md.mesh.x;
+      y = md.mesh.y;
+   else:
+      raise Exception('not supported yet for {}.'%(type(md.mesh)));
+
+   #1. get some parameters
+   rho_ice   = md.materials.rho_ice
+   rho_water = md.materials.rho_water
+
+   #2. compute averages
+   if (not step) and (not flags):
+      base           = np.mean(md.geometry.base[index],axis=1);
+      surface        = np.mean(md.geometry.surface[index],axis=1);
+      bathymetry     = np.mean(md.geometry.bed[index],axis=1);
+      ice_levelset   = md.mask.ice_levelset;
+      ocean_levelset = md.mask.ocean_levelset;
+   else:
+      if 'MaskIceLevelset' in md.results.TransientSolution[step].keys():
+      #if isprop(md.results.TransientSolution(step),'MaskIceLevelset')
+         ice_levelset   = md.results.TransientSolution[step].MaskIceLevelset;
+      else:
+         ice_levelset   = md.mask.ice_levelset;
+      ocean_levelset = md.results.TransientSolution[step].MaskOceanLevelset;
+      base           = np.mean(md.results.TransientSolution[step].Base[index],axis=1);
+      surface        = np.mean(md.results.TransientSolution[step].Surface[index],axis=1);
+      if 'Bed' in md.results.TransientSolution[step].keys(): #,'Bed')
+         bathymetry  = np.mean(md.results.TransientSolution[step].Bed[index],axis=1);
+      else:
+         bathymetry  = np.mean(md.geometry.bed[index],axis=1);
+
+   #3. get areas of all triangles
+   areas = GetAreas(index+1,x,y);
+
+   #4. Compute volume above floatation
+   if isverb:
+      print(np.shape(areas))
+      print(np.shape(surface))
+      print(np.shape(base))
+      print(np.shape(bathymetry))
+
+   V = areas*(surface-base+np.minimum(rho_water/rho_ice*bathymetry,0.))
+   if isverb:
+      print(np.shape(V))
+
+   #5. take out the ones that are outside of levelset or floating
+   pos = np.where((np.min(ice_levelset[index],axis=1)>0) | (np.min(ocean_levelset[index],axis=1)<0))
+   V[pos] = 0;
+
+   #In case we are only looking at one portion of the domain...
+   if flags:
+      V[~flags] = 0;
+
+   #sum individual contributions
+   V = np.sum(V);
+
+   if isverb:
+      print('   potential volume is: %e m^3'%(V))
+
+   return V
Index: /issm/trunk/src/m/geometry/inpolygon.py
===================================================================
--- /issm/trunk/src/m/geometry/inpolygon.py	(revision 28012)
+++ /issm/trunk/src/m/geometry/inpolygon.py	(revision 28013)
@@ -2,5 +2,5 @@
 import numpy as np
 
-def inpolygon(xq, yq, xv, yv): #{{{
+def inpolygon(xq, yq, xv, yv):  # {{{
     """
     INPOLYGON - Returns points located inside polygonal region.
@@ -25,3 +25,3 @@
 
     return in_polygon
-#}}}
+# }}}
Index: /issm/trunk/src/m/geometry/planetradius.py
===================================================================
--- /issm/trunk/src/m/geometry/planetradius.py	(revision 28012)
+++ /issm/trunk/src/m/geometry/planetradius.py	(revision 28013)
@@ -1,3 +1,3 @@
-def planetradius(planet): # {{{
+def planetradius(planet):  # {{{
     '''
     PLANETRADIUS - return planet radius according to planetary body name
Index: /issm/trunk/src/m/geometry/polyarea.py
===================================================================
--- /issm/trunk/src/m/geometry/polyarea.py	(revision 28012)
+++ /issm/trunk/src/m/geometry/polyarea.py	(revision 28013)
@@ -4,5 +4,5 @@
 
 
-def polyarea(x, y): #{{{
+def polyarea(x, y):  # {{{
     """POLYAREA - returns the area of the 2-D polygon defined by the vertices in 
     lists x and y
@@ -26,3 +26,3 @@
 
     return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
-#}}}
+# }}}
Index: /issm/trunk/src/m/geometry/slope.py
===================================================================
--- /issm/trunk/src/m/geometry/slope.py	(revision 28012)
+++ /issm/trunk/src/m/geometry/slope.py	(revision 28013)
@@ -34,6 +34,6 @@
 
     summation = np.array([[1], [1], [1]])
-    sx = np.dot(surf[index - 1, 0] * alpha, summation).reshape(-1, )
-    sy = np.dot(surf[index - 1, 0] * beta, summation).reshape(-1, )
+    sx = np.dot(surf[index - 1] * alpha, summation).reshape(-1, )
+    sy = np.dot(surf[index - 1] * beta, summation).reshape(-1, )
 
     s = np.sqrt(sx**2 + sy**2)
Index: /issm/trunk/src/m/interp/SectionValues.py
===================================================================
--- /issm/trunk/src/m/interp/SectionValues.py	(revision 28012)
+++ /issm/trunk/src/m/interp/SectionValues.py	(revision 28013)
@@ -56,5 +56,5 @@
 
         length_segment = np.sqrt((x_end - x_start)**2 + (y_end - y_start)**2)
-        portion = np.ceil(length_segment / res_h)
+        portion = int(np.ceil(length_segment / res_h))
 
         x_segment = np.zeros(portion)
Index: /issm/trunk/src/m/interp/interp.py
===================================================================
--- /issm/trunk/src/m/interp/interp.py	(revision 28012)
+++ /issm/trunk/src/m/interp/interp.py	(revision 28013)
@@ -54,5 +54,5 @@
     xflag = np.logical_and(x > xlim[0], x < xlim[1])
     yflag = np.logical_and(y > ylim[0], y < ylim[1])
-    bothind = np.nonzero(np.logical_and(xflag, yflag))
+    bothind = np.squeeze(np.where(np.logical_and(xflag, yflag))).astype(int)
     subdata = data[bothind]
     subx = x[bothind]
@@ -86,5 +86,5 @@
 
     return interpdata
-    #}}}
+    # }}}
 
 
@@ -182,5 +182,5 @@
 
     return interpdata
-    #}}}
+    # }}}
 
 
@@ -247,3 +247,3 @@
 
     return interpdata
-    #}}}
+    # }}}
Index: /issm/trunk/src/m/io/loadvars.py
===================================================================
--- /issm/trunk/src/m/io/loadvars.py	(revision 28012)
+++ /issm/trunk/src/m/io/loadvars.py	(revision 28013)
@@ -144,5 +144,5 @@
                                 nvdict['md'].__dict__[classtree[mod][0]].__dict__[classtree[mod][1]].steps.append(getattr(classtype[mod][1], 'solutionstep')())
                             Tree = nvdict['md'].__dict__[classtree[mod][0]].__dict__[classtree[mod][1]][:]
-                    #}}}
+                    # }}}
                 #elif classtype[mod][0] == 'massfluxatgate.massfluxatgate':  #this is for output definitions {{{
                 elif mod.startswith('outputdefinition'):  #this is for output definitions {{{
@@ -152,5 +152,5 @@
                     nvdict['md'].__dict__[classtree[mod][0]].__dict__[defname].append(getattr(classtype[mod][1], outdeftype)())
                     Tree = nvdict['md'].__dict__[classtree[mod][0]].__dict__[defname][defindex - 1]
-                #}}}
+                # }}}
                 elif classtype[mod][0] == 'collections.OrderedDict':  #Treating multiple toolkits {{{
                     nvdict['md'].__dict__[classtree[mod][0]].__dict__[classtree[mod][1]] = getattr(classtype[mod][1], 'OrderedDict')
Index: /issm/trunk/src/m/mech/basalstress.m
===================================================================
--- /issm/trunk/src/m/mech/basalstress.m	(revision 28012)
+++ /issm/trunk/src/m/mech/basalstress.m	(revision 28013)
@@ -13,5 +13,15 @@
 r=averaging(md,md.friction.q./md.friction.p,0);
 
-%compute horizontal velocity
+%Compute effective pressure
+switch(md.friction.coupling)
+	case 0
+		N = max(md.constants.g*(md.materials.rho_ice*md.geometry.thickness+md.materials.rho_water*md.geometry.base),0);
+	case 3
+		N = max(md.friction.effective_pressure, 0);
+	otherwise
+		error('not supported yet');
+end
+
+%compute sliding velocity
 ub=sqrt(md.initialization.vx.^2+md.initialization.vy.^2)/md.constants.yts;
 ubx=md.initialization.vx/md.constants.yts;
@@ -19,10 +29,11 @@
 
 %compute basal drag (S.I.)
-alpha2=(max(md.constants.g*(md.materials.rho_ice*md.geometry.thickness+md.materials.rho_water*md.geometry.base),0).^r).*(md.friction.coefficient.^2).*(ub.^(s-1));
+alpha2 = (N.^r).*(md.friction.coefficient.^2).*(ub.^(s-1));
 b  =  alpha2.*ub;
 bx = -alpha2.*ubx;
 by = -alpha2.*uby;
 
-clear alpha2
-
+%return magnitude of only one output is requested
+if nargout==1
+	bx = b;
 end
Index: /issm/trunk/src/m/mesh/ExportGmsh.m
===================================================================
--- /issm/trunk/src/m/mesh/ExportGmsh.m	(revision 28012)
+++ /issm/trunk/src/m/mesh/ExportGmsh.m	(revision 28013)
@@ -38,6 +38,8 @@
 	elseif(md.mesh.y(md.mesh.segments(np,1))==min(md.mesh.y(:))&&md.mesh.y(md.mesh.segments(np,2))==min(md.mesh.y(:))),
 		bc_id=4;
+	else
+		bc_id=5;
   end
-		fprintf(fid,'%g 1 2 %g 1 %g %g \n',np,bc_id,md.mesh.segments(np,1),md.mesh.segments(np,2));
+  fprintf(fid,'%g 1 2 %g 1 %g %g \n',np,bc_id,md.mesh.segments(np,1),md.mesh.segments(np,2));
 end
 %and for the body
Index: /issm/trunk/src/m/mesh/bamg.py
===================================================================
--- /issm/trunk/src/m/mesh/bamg.py	(revision 28012)
+++ /issm/trunk/src/m/mesh/bamg.py	(revision 28013)
@@ -534,5 +534,5 @@
         #do nothing...
         pass
-    #}}}
+    # }}}
     # Bamg mesh parameters {{{
     if not options.exist('domain') and md.mesh.numberofvertices and md.mesh.elementtype() == 'Tria':
@@ -549,5 +549,5 @@
         if isinstance(md.rifts.riftstruct, dict):
             raise TypeError("bamg error message: rifts not supported yet. Do meshprocessrift AFTER bamg")
-    #}}}
+    # }}}
     # Bamg options {{{
     bamg_options['Crack'] = options.getfieldvalue('Crack', 0)
@@ -576,5 +576,5 @@
     bamg_options['splitcorners'] = options.getfieldvalue('splitcorners', 1)
     bamg_options['verbose'] = options.getfieldvalue('verbose', 1)
-    #}}}
+    # }}}
 
     # Call Bamg
@@ -781,3 +781,3 @@
     """
     return geom
-    #}}}
+    # }}}
Index: /issm/trunk/src/m/mesh/findsegments.py
===================================================================
--- /issm/trunk/src/m/mesh/findsegments.py	(revision 28012)
+++ /issm/trunk/src/m/mesh/findsegments.py	(revision 28013)
@@ -6,5 +6,5 @@
 from pairoptions import pairoptions
 
-def findsegments(md, *args): #{{{
+def findsegments(md, *args):  # {{{
     """FINDSEGMENTS - build segments model field
 
@@ -96,3 +96,3 @@
 
     return segments
-#}}}
+# }}}
Index: /issm/trunk/src/m/mesh/meshintersect3d.py
===================================================================
--- /issm/trunk/src/m/mesh/meshintersect3d.py	(revision 28012)
+++ /issm/trunk/src/m/mesh/meshintersect3d.py	(revision 28013)
@@ -6,5 +6,5 @@
 
 
-def meshintersect3d(x, y, z, xs, ys, zs, *args): #{{{
+def meshintersect3d(x, y, z, xs, ys, zs, *args):  # {{{
     """MESHINTERSECT - returns indices (into x, y, and z) of common values 
     between (x, y, z) and (xs, ys, zs) (i.e. x(index) = xs; y(index) = ys).
@@ -68,3 +68,3 @@
 
     return indices
-#}}}
+# }}}
Index: /issm/trunk/src/m/mesh/planet/gmsh/gmshplanet.m
===================================================================
--- /issm/trunk/src/m/mesh/planet/gmsh/gmshplanet.m	(revision 28012)
+++ /issm/trunk/src/m/mesh/planet/gmsh/gmshplanet.m	(revision 28013)
@@ -1,4 +1,4 @@
 function mesh=gmshplanet(varargin)
-%GMSHPLANET - mesh generation for a sphere. Very specific code for gmsh from $ISSM_DIR/src/demos/simple_geo/sphere.geo
+%GMSHPLANET - mesh generation for a sphere. Very specific code for Gmsh from $ISSM_DIR/src/demos/simple_geo/sphere.geo
 %
 %   Available options (for more details see ISSM website http://issm.jpl.nasa.gov/):
@@ -15,40 +15,39 @@
 %      md.mesh=gmshplanet('radius',6000,'resolution',100);
 
-	%Find path to gmsh
-	paths = {
-		[getenv('ISSM_EXT_DIR') '/shared/gmsh/install/bin/gmsh'],...
-		[getenv('ISSM_EXT_DIR') '/static/gmsh/install/bin/gmsh'],...
-		[getenv('ISSM_EXT_DIR') '/gmsh/install/bin/gmsh'],...
-		[issmdir() 'externalpackages/gmsh/install/bin/gmsh'],...
-		[issmdir() 'bin/gmsh'],...
-		['/usr/bin/gmsh']...
-	};
-	gmshpath = '';
-	for i=paths
-		if exist(i{1},'file'),
-			gmshpath = i{1};
-			break;
-		end
-	end
-	if isempty(gmshpath),
-		error('Gmsh not found, make sure it is properly installed');
-	end
-
-	% Get Gmsh version
-	[s,r]=system(['gmsh -info 2>&1 | command grep ''Version'' | sed -e ''s/Version[[:blank:]]*:[[:blank:]]//'' | cut -d ''.'' -f1']);
+	%Get Gmsh version
+	[s,r]=system(['gmsh -info | command grep ''Version'' | sed -e ''s/Version[[:blank:]]*:[[:blank:]]//'' | cut -d ''.'' -f1']);
+	if contains(r, 'dyld'),
+		error(['gmshplanet: ' r]);
+	end
 	if s~=0,
-		error(r);
-	elseif isempty(r),
-		% If this function is called from one of our distributable packages, we 
-		% need to do a bit more to find the Gmsh executable
-		[filepath,name,ext]=fileparts(which('gmsh.'));
-		setenv('PATH',[filepath ':' getenv('PATH')]);
-		[s,r]=system(['gmsh -info 2>&1 | command grep ''Version'' | sed -e ''s/Version[[:blank:]]*:[[:blank:]]//'' | cut -d ''.'' -f1']);
-		if s~=0,
-			error(r);
-		elseif isempty(r),
-			error('gmshplanet: Gmsh executable not found!');
-		end
-	end
+		%gmsh executable may not be on path; attempt to find it
+
+		paths={
+			[getenv('ISSM_EXT_DIR') '/shared/gmsh/install/bin'],...
+			[getenv('ISSM_EXT_DIR') '/static/gmsh/install/bin'],...
+			[getenv('ISSM_EXT_DIR') '/gmsh/install/bin'],...
+			[issmdir() '/externalpackages/gmsh/install/bin'],...
+			[issmdir() '/bin'],...
+			['/usr/bin']...
+		};
+		gmshpath='';
+		for i=paths
+			if exist([i{1} '/gmsh'],'file'),
+				gmshpath = i{1};
+				break;
+			end
+		end
+		if isempty(gmshpath),
+			error('gmshplanet: gmsh executable not found!');
+		end
+		setenv('PATH', [gmshpath ':' getenv('PATH')]);
+
+		%Get Gmsh version
+		[s,r]=system(['gmsh -info | command grep ''Version'' | sed -e ''s/Version[[:blank:]]*:[[:blank:]]//'' | cut -d ''.'' -f1']);
+		if contains(r, 'dyld'),
+			error(['gmshplanet: ' r]);
+		end
+	end
+
 	gmshmajorversion=str2num(r);
 	if ~ismember([3,4],gmshmajorversion),
Index: /issm/trunk/src/m/mesh/planet/gmsh/gmshplanet.py
===================================================================
--- /issm/trunk/src/m/mesh/planet/gmsh/gmshplanet.py	(revision 28012)
+++ /issm/trunk/src/m/mesh/planet/gmsh/gmshplanet.py	(revision 28013)
@@ -1,4 +1,6 @@
+import os
 import subprocess
 import numpy as np
+from issmdir import issmdir
 from MatlabFuncs import *
 from mesh3dsurface import *
@@ -7,5 +9,5 @@
 
 def gmshplanet(*args):
-    """GMSHPLANET - mesh generation for a sphere. Very specific code for gmsh from $ISSM_DIR/src/demos/simple_geo/sphere.geo
+    """gmshplanet - mesh generation for a sphere. Very specific code for Gmsh from $ISSM_DIR/src/demos/simple_geo/sphere.geo
 
     Available options (for more details see ISSM website http://issm.jpl.nasa.gov/):
@@ -28,8 +30,37 @@
     try:
         strErrs = errs.decode()
-    except AttributeError:  #this is not a byte variable, let's assume string
+    except AttributeError:  # this is not a byte variable, let's assume string
         strErrs = errs
     if strErrs != '':
-        raise Exception('gmshplanet: call to gmsh failed: {}'.format(errs))
+        # gmsh executable may not be on path; attempt to find it
+        paths = [
+            os.environ.get('ISSM_EXT_DIR') + '/shared/gmsh/install/bin',
+            os.environ.get('ISSM_EXT_DIR') + '/static/gmsh/install/bin',
+            os.environ.get('ISSM_EXT_DIR') + '/gmsh/install/bin',
+            issmdir() + '/externalpackages/gmsh/install/bin',
+            issmdir() + '/bin',
+            '/usr/bin'
+        ]
+        gmshpath = ''
+        for path in paths:
+            if exists(path + '/gmsh'):
+                gmshpath = path
+                break
+        if gmshpath == '':
+            error('gmshplanet: gmsh executable not found!')
+
+        os.environ['PATH'] = gmshpath ':' os.environ.get['PATH']
+
+        # Get Gmsh version
+        subproc_args = 'gmsh -info 2>&1 | command grep \'Version\' | sed -e \'s/Version[[:blank:]]*:[[:blank:]]//\' | cut -d \'.\' -f1'
+        subproc = subprocess.Popen(subproc_args, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
+        outs, errs = subproc.communicate()
+        try:
+            strErrs = errs.decode()
+        except AttributeError:  # this is not a byte variable, let's assume string
+            strErrs = errs
+        if strErrs != '':
+            raise Exception('gmshplanet: call to gmsh failed: {}'.format(errs))
+
     gmshmajorversion = int(outs)
     if gmshmajorversion not in [3, 4]:
@@ -115,5 +146,5 @@
     fid.write('Physical Volume(2) = 30;\n')
     fid.close()
-    #}}}
+    # }}}
 
     if options.exist('refine'):
@@ -133,5 +164,5 @@
         fid.write('};\n')
         fid.close()
-        #}}}
+        # }}}
 
     # Call gmsh
@@ -192,8 +223,8 @@
         raise RuntimeError(['Expecting $EndElements (', A, ')'])
     fid.close()
-    #}}}
+    # }}}
 
     # A little technicality here. The mesh is not exactly on the sphere. We
-    # create lat,long coordiantes, and reproject onto an exact sphere.
+    # create lat,long coordinates, and reproject onto an exact sphere.
     mesh.r = np.sqrt(mesh.x ** 2 + mesh.y ** 2 + mesh.z ** 2)
 
Index: /issm/trunk/src/m/mesh/rifts/meshprocessoutsiderifts.py
===================================================================
--- /issm/trunk/src/m/mesh/rifts/meshprocessoutsiderifts.py	(revision 28012)
+++ /issm/trunk/src/m/mesh/rifts/meshprocessoutsiderifts.py	(revision 28013)
@@ -101,3 +101,3 @@
 
     return flag
-    #}}}
+    # }}}
Index: /issm/trunk/src/m/mesh/roundmesh.m
===================================================================
--- /issm/trunk/src/m/mesh/roundmesh.m	(revision 28012)
+++ /issm/trunk/src/m/mesh/roundmesh.m	(revision 28013)
@@ -2,5 +2,5 @@
 %ROUNDMESH - create an unstructured round mesh 
 %
-%   This script will generate a structured round mesh
+%   This script will generate an unstructured round mesh
 %   - radius     : specifies the radius of the circle in meters
 %   - resolution : specifies the resolution in meters
@@ -11,8 +11,8 @@
 
 %First we have to create the domain outline 
-if nargin<4
+if nargin>=4
+	expname = varargin{1};
+else
 	expname = [tempname() '.exp'];
-else
-	expname = varargin{1};
 end
 
@@ -20,5 +20,5 @@
 pointsonedge=floor((2.*pi*radius) / resolution)+1; %+1 to close the outline
 
-%Calculate the cartesians coordinates of the points
+%Calculate the Cartesian coordinates of the points
 theta=linspace(0,2*pi,pointsonedge)';
 x_list=roundsigfig(radius*cos(theta),12);
@@ -27,10 +27,10 @@
 expwrite(A,expname);
 
-%Call Bamg
+%Call mesher
 md=triangle(md,expname,resolution);
 %md=bamg(md,'domain','RoundDomainOutline.exp','hmin',resolution);
 
 %move the closest node to the center
-[mini pos]=min(md.mesh.x.^2+md.mesh.y.^2);
+[minimum pos]=min(md.mesh.x.^2+md.mesh.y.^2);
 md.mesh.x(pos)=0.;
 md.mesh.y(pos)=0.;
Index: /issm/trunk/src/m/mesh/roundmesh.py
===================================================================
--- /issm/trunk/src/m/mesh/roundmesh.py	(revision 28012)
+++ /issm/trunk/src/m/mesh/roundmesh.py	(revision 28013)
@@ -3,23 +3,29 @@
 from collections import OrderedDict
 from expwrite import expwrite
+from MatlabFuncs import tempname
 from triangle import triangle
 
 
-def roundmesh(md, radius, resolution):
-    """
-    ROUNDMESH - create an unstructured round mesh
+def roundmesh(md, radius, resolution, *args):
+    """roundmesh - create an unstructured round mesh
 
-       This script will generate a structured round mesh
- - radius     : specifies the radius of the circle in meters
- - resolution : specifies the resolution in meters
+    This script will generate an unstructured round mesh
+    - radius     : specifies the radius of the circle in meters
+    - resolution : specifies the resolution in meters
 
-       Usage:
-          md = roundmesh(md, radius, resolution)
+    Usage:
+        md = roundmesh(md, radius, resolution)
+        md = roundmesh(md, radius, resolution, 'domain.exp')
     """
     # First we have to create the domain outline
+    if len(args):
+        expname = args[0]
+    else:
+        expname = tempname() + '.exp'
+
     # Get number of points on the circle
-    pointsonedge = int(np.floor((2. * np.pi * radius) / resolution) + 1)  # + 1 to close the outline
+    pointsonedge = int(np.floor((2. * np.pi * radius) / resolution) + 1)  # +1 to close the outline
 
-    # Calculate the cartesians coordinates of the points
+    # Calculate the Cartesian coordinates of the points
     theta = np.linspace(0., 2. * np.pi, pointsonedge)
     x_list = roundsigfig(radius * np.cos(theta), 12)
@@ -29,17 +35,18 @@
     A['y'] = y_list
     A['density'] = 1.
-    expwrite(A, 'RoundDomainOutline.exp')
+    expwrite(A, expname)
 
-    # Call Bamg
-    md = triangle(md, 'RoundDomainOutline.exp', resolution)
+    # Call mesher
+    md = triangle(md, expname, resolution)
     # md = bamg(md, 'domain', 'RoundDomainOutline.exp', 'hmin', resolution)
 
-    # move the closest node to the center
-    pos = np.argmin(md.mesh.x**2 + md.mesh.y**2)
+    # Move the closest node to the center
+    pos = np.argmin(np.add(np.power(md.mesh.x, 2), np.power(md.mesh.y, 2)))
     md.mesh.x[pos] = 0.
     md.mesh.y[pos] = 0.
 
-    # delete domain
-    os.remove('RoundDomainOutline.exp')
+    # Delete domain
+    if not len(args):
+        os.remove(expname)
 
     return md
@@ -47,5 +54,4 @@
 
 def roundsigfig(x, n):
-
     nonzeros = np.where(x != 0)
     digits = np.ceil(np.log10(np.abs(x[nonzeros])))
Index: /issm/trunk/src/m/mesh/triangle.m
===================================================================
--- /issm/trunk/src/m/mesh/triangle.m	(revision 28012)
+++ /issm/trunk/src/m/mesh/triangle.m	(revision 28013)
@@ -48,17 +48,18 @@
 removeorphans=1;
 if removeorphans,
-	orphan=find(~ismember([1:length(x)],sort(unique(elements(:)))));
-	for i=1:length(orphan),
+	uniqueelements=sort(unique(elements(:)));
+	orphans=find(~ismember([1:length(x)],uniqueelements));
+	for i=1:length(orphans),
 		disp('WARNING: removing orphans');
 		%get rid of the orphan node i
 		%update x and y
-		x=[x(1:orphan(i)-(i-1)-1); x(orphan(i)-(i-1)+1:end)];
-		y=[y(1:orphan(i)-(i-1)-1); y(orphan(i)-(i-1)+1:end)];
+		x=[x(1:orphans(i)-(i-1)-1); x(orphans(i)-(i-1)+1:end)];
+		y=[y(1:orphans(i)-(i-1)-1); y(orphans(i)-(i-1)+1:end)];
 		%update elements
-		pos=find(elements>orphan(i)-(i-1));
+		pos=find(elements>orphans(i)-(i-1));
 		elements(pos)=elements(pos)-1;
 		%update segments
-		pos1=find(segments(:,1)>orphan(i)-(i-1));
-		pos2=find(segments(:,2)>orphan(i)-(i-1));
+		pos1=find(segments(:,1)>orphans(i)-(i-1));
+		pos2=find(segments(:,2)>orphans(i)-(i-1));
 		segments(pos1,1)=segments(pos1,1)-1;
 		segments(pos2,2)=segments(pos2,2)-1;
@@ -77,5 +78,6 @@
 md.mesh.numberofelements=size(md.mesh.elements,1);
 md.mesh.numberofvertices=length(md.mesh.x);
-md.mesh.vertexonboundary=zeros(md.mesh.numberofvertices,1); md.mesh.vertexonboundary(md.mesh.segments(:,1:2))=1;
+md.mesh.vertexonboundary=zeros(md.mesh.numberofvertices,1);
+md.mesh.vertexonboundary(md.mesh.segments(:,1:2))=1;
 
 %Now, build the connectivity tables for this mesh.
Index: /issm/trunk/src/m/mesh/triangle.py
===================================================================
--- /issm/trunk/src/m/mesh/triangle.py	(revision 28012)
+++ /issm/trunk/src/m/mesh/triangle.py	(revision 28013)
@@ -10,10 +10,10 @@
 
 def triangle(md, domainname, *args):
-    """TRIANGLE - create model mesh using the triangle package
+    """triangle - create model mesh using the triangle package
 
-    This routine creates a model mesh using Triangle and a domain outline, to within a certain resolution
-    where md is a @model object, domainname is the name of an Argus domain outline file,
-    and resolution is a characteristic length for the mesh (same unit as the domain outline
-    unit). Riftname is an optional argument (Argus domain outline) describing rifts.
+    This routine creates a model mesh using Triangle and a domain outline, to 
+    within a certain resolution where md is a @model object, domainname is the 
+    name of an Argus domain outline file, and resolution is a characteristic 
+    length for the mesh (same unit as the domain outline unit). Riftname is an optional argument (Argus domain outline) describing rifts.
 
     Usage:
@@ -27,6 +27,6 @@
     """
 
-    #Figure out a characteristic area. Resolution is a node oriented concept (ex a 1000m  resolution node would
-    #be made of 1000 * 1000 area squares).
+    # Figure out a characteristic area. Resolution is a node oriented concept 
+    # (ex a 1000m resolution node would be made of 1000 * 1000 area squares).
 
     if len(args) == 1:
@@ -37,9 +37,9 @@
         resolution = args[1]
 
-    #Check that mesh was not already run, and warn user:
+    # Check that mesh was not already run, and warn user
     if md.mesh.numberofelements:
         choice = input('This model already has a mesh. Are you sure you want to go ahead? (y / n)')
         if choice not in ['y', 'n']:
-            print("bad answer try you should use 'y' or 'n' ... exiting")
+            print('bad answer try you should use \'y\' or \'n\' ... exiting')
             return None
         if choice == 'n':
@@ -49,16 +49,42 @@
     area = resolution ** 2
 
-    #Check that file exist (this is a very very common mistake)
+    # Check that file exists (this is a very common mistake)
     if not os.path.exists(domainname):
-        raise IOError("file '%s' not found" % domainname)
+        raise IOError('file {} not found'.format(domainname))
 
-    #Mesh using Triangle
+    # Mesh using Triangle
+    elements, x, y, segments, segmentmarkers = Triangle_python(domainname, riftname, area)
+
+    # Check that all the created nodes belong to at least one element
+    removeorphans = 1
+    if removeorphans:
+        uniqueelements = np.sort(np.unique(elements))
+        orphans = np.nonzero((~np.isin(range(1, len(x)), uniqueelements)).astype(int))[0]
+        for i in range(0, len(orphans)):
+            print('WARNING: removing orphans')
+            # Get rid of the orphan node i
+            # Update x and y
+            x = np.concatenate((x[0:(orphans[i] - i)], x[(orphans[i] - i + 1):]))
+            y = np.concatenate((y[0:(orphans[i] - i)], y[(orphans[i] - i + 1):]))
+            # Update elements
+            pos = np.nonzero((elements > (orphans[i] - i)).flatten(order='F'))[0]
+            elementstmp = elements.flatten(order='F')
+            elementstmp[pos] -= 1
+            elements = elementstmp.reshape(np.shape(elements), order='F')
+            # Update segments
+            pos1 = np.nonzero(segments[:,0] > (orphans[i] - i))[0]
+            pos2 = np.nonzero(segments[:,1] > (orphans[i] - i))[0]
+            segments[pos1, 0] -= 1
+            segments[pos2, 1] -= 1
+
+    # Plug into md
     md.mesh = mesh2d()
-    md.mesh.elements, md.mesh.x, md.mesh.y, md.mesh.segments, md.mesh.segmentmarkers = Triangle_python(domainname, riftname, area)
-    md.mesh.elements = md.mesh.elements.astype(int)
-    md.mesh.segments = md.mesh.segments.astype(int)
-    md.mesh.segmentmarkers = md.mesh.segmentmarkers.astype(int)
+    md.mesh.x = x
+    md.mesh.y = y
+    md.mesh.elements = elements.astype(int)
+    md.mesh.segments = segments.astype(int)
+    md.mesh.segmentmarkers = segmentmarkers.astype(int)
 
-    #Fill in rest of fields:
+    # Fill in rest of fields
     md.mesh.numberofelements = np.size(md.mesh.elements, axis=0)
     md.mesh.numberofvertices = np.size(md.mesh.x)
@@ -66,5 +92,5 @@
     md.mesh.vertexonboundary[md.mesh.segments[:, 0:2] - 1] = 1
 
-    #Now, build the connectivity tables for this mesh.
+    # Now, build the connectivity tables for this mesh
     md.mesh.vertexconnectivity = NodeConnectivity(md.mesh.elements, md.mesh.numberofvertices)
     md.mesh.elementconnectivity = ElementConnectivity(md.mesh.elements, md.mesh.vertexconnectivity)
Index: /issm/trunk/src/m/miscellaneous/MatlabFuncs.py
===================================================================
--- /issm/trunk/src/m/miscellaneous/MatlabFuncs.py	(revision 28012)
+++ /issm/trunk/src/m/miscellaneous/MatlabFuncs.py	(revision 28013)
@@ -169,5 +169,5 @@
 
     return np.intersect1d(A, B)
-#}}}
+# }}}
 
 def isa(A, dataType):  # {{{
@@ -372,2 +372,10 @@
         return False
 # }}}
+
+def tempname():  # {{{
+    import random
+    import string
+
+    alphanumlist = string.ascii_lowercase + string.digits
+    return '/tmp/tp' + ''.join(random.choices(alphanumlist, k=8)) + '_' + ''.join(random.choices(alphanumlist, k=4)) + '_' + ''.join(random.choices(alphanumlist, k=4)) + '_' + ''.join(random.choices(alphanumlist, k=4)) + '_' + ''.join(random.choices(alphanumlist, k=12))
+# }}}
Index: /issm/trunk/src/m/miscellaneous/PythonFuncs.py
===================================================================
--- /issm/trunk/src/m/miscellaneous/PythonFuncs.py	(revision 28012)
+++ /issm/trunk/src/m/miscellaneous/PythonFuncs.py	(revision 28013)
@@ -2,5 +2,5 @@
 
 
-def logical_and_n(*arg): #{{{
+def logical_and_n(*arg):  # {{{
     if len(arg):
         result = arg[0]
@@ -10,7 +10,7 @@
     else:
         return None
-#}}}
+# }}}
 
-def logical_or_n(*arg): #{{{
+def logical_or_n(*arg):  # {{{
     if len(arg):
         result = arg[0]
@@ -20,3 +20,3 @@
     else:
         return None
-#}}}
+# }}}
Index: /issm/trunk/src/m/miscellaneous/fielddisplay.m
===================================================================
--- /issm/trunk/src/m/miscellaneous/fielddisplay.m	(revision 28012)
+++ /issm/trunk/src/m/miscellaneous/fielddisplay.m	(revision 28013)
@@ -18,9 +18,5 @@
 	if ischar(field),
 
-		if length(field)>30;
-			displayunit(offset,name,'not displayed',comment),
-		else
-			displayunit(offset,name,['''' field ''''],comment),
-		end
+		displayunit(offset,name,['''' field ''''],comment)
 
 	%numeric
@@ -29,5 +25,5 @@
 		%double
 		if numel(field)==1,
-			displayunit(offset,name,num2str(field),comment),
+			displayunit(offset,name,num2str(field),comment)
 		%matrix
 		else
@@ -38,5 +34,5 @@
 			end
 			string = [string(1:end-1) ')'];
-			displayunit(offset,name,string,comment),
+			displayunit(offset,name,string,comment)
 		end
 
@@ -50,23 +46,23 @@
 		if max(fieldsize)==1,
 			if (field)
-				displayunit(offset,name,'true',comment),
+				displayunit(offset,name,'true',comment)
 			else
-				displayunit(offset,name,'false',comment),
+				displayunit(offset,name,'false',comment)
 			end
 		%matrix
 		else
-			displayunit(offset,name,['(' num2str(fieldsize(1)) 'x' num2str(fieldsize(2)) ')'],comment),
+			displayunit(offset,name,['(' num2str(fieldsize(1)) 'x' num2str(fieldsize(2)) ')'],comment)
 		end
 
 	%structure
 	elseif isstruct(field),
-		struct_display(offset,name,field,comment),
+		struct_display(offset,name,field,comment)
 
 	%cell
 	elseif iscell(field),
-		cell_display(offset,name,field,comment),
+		cell_display(offset,name,field,comment)
 
 	else
-		displayunit(offset,name,'not displayed',comment),
+		displayunit(offset,name,'not displayed',comment)
 
 	end
@@ -76,5 +72,5 @@
 
 	if ~isempty(fieldnames(field))
-		displayunit(offset,name,'(structure)',comment),
+		displayunit(offset,name,'(structure)',comment)
 		offset=[offset '   '];
 
@@ -91,5 +87,5 @@
 
 	else
-		displayunit(offset,name,'N/A',comment),
+		displayunit(offset,name,'N/A',comment)
 
 	end
@@ -122,5 +118,5 @@
 	displayunit(offset,name,string,comment);
 end% }}}
-function displayunit(offset,name,characterization,comment),% {{{
+function displayunit(offset,name,characterization,comment)% {{{
 
 	%take care of name
Index: /issm/trunk/src/m/miscellaneous/fielddisplay.py
===================================================================
--- /issm/trunk/src/m/miscellaneous/fielddisplay.py	(revision 28012)
+++ /issm/trunk/src/m/miscellaneous/fielddisplay.py	(revision 28013)
@@ -19,5 +19,5 @@
 
 
-def parsedisplay(offset, name, field, comment): #{{{
+def parsedisplay(offset, name, field, comment):  # {{{
     #string
     if isinstance(field, str):
@@ -58,8 +58,8 @@
 
     return string
-    #}}}
+    # }}}
 
 
-def dict_display(offset, name, field, comment): #{{{
+def dict_display(offset, name, field, comment):  # {{{
     if field:
         string = displayunit(offset, name, '{dictionary}', comment) + '\n'
@@ -76,8 +76,8 @@
 
     return string
-    #}}}
+    # }}}
 
 
-def list_display(offset, name, field, comment): #{{{
+def list_display(offset, name, field, comment):  # {{{
     #initialization
     if isinstance(field, list):
@@ -107,5 +107,5 @@
     #call displayunit
     return displayunit(offset, name, string, comment)
-    #}}}
+    # }}}
 
 
@@ -136,3 +136,3 @@
 
     return string
-    #}}}
+    # }}}
Index: /issm/trunk/src/m/miscellaneous/intersect.py
===================================================================
--- /issm/trunk/src/m/miscellaneous/intersect.py	(revision 28012)
+++ /issm/trunk/src/m/miscellaneous/intersect.py	(revision 28013)
@@ -2,5 +2,5 @@
 
 
-def intersect(a, b): #{{{
+def intersect(a, b):  # {{{
     """INTERSECT - Python implementation of MATLAB's 'intersect' function
 
@@ -23,3 +23,3 @@
 
     return c, ia[np.isin(a_unique, c)], ib[np.isin(b_unique, c)]
-#}}}
+# }}}
Index: /issm/trunk/src/m/miscellaneous/transientrestart.m
===================================================================
--- /issm/trunk/src/m/miscellaneous/transientrestart.m	(revision 28012)
+++ /issm/trunk/src/m/miscellaneous/transientrestart.m	(revision 28013)
@@ -41,4 +41,5 @@
 if isfield(results,'Watercolumn'), md.initialization.watercolumn=results.Watercolumn; end
 if isfield(results,'Enthalpy'),    md.initialization.enthalpy=results.Enthalpy; end
+if isfield(results,'DebrisThickness'),md.initialization.debris=results.DebrisThickness; end
 
 %Deal with new geometry
@@ -48,4 +49,6 @@
 	if isa(md.mesh,'mesh3dprisms')
 		md.mesh.z=base+thickness./md.geometry.thickness.*(md.mesh.z-md.geometry.base);
+	elseif isa(md.mesh,'mesh2dvertical')
+		md.mesh.y=base+thickness./md.geometry.thickness.*(md.mesh.y-md.geometry.base);
 	end
 	md.geometry.base=base;
Index: /issm/trunk/src/m/modeldata/InterpFromGrid.cpp
===================================================================
--- /issm/trunk/src/m/modeldata/InterpFromGrid.cpp	(revision 28013)
+++ /issm/trunk/src/m/modeldata/InterpFromGrid.cpp	(revision 28013)
@@ -0,0 +1,567 @@
+/*Written by Mathieu Morlighem April 19th 2019*/
+
+/*includes*/
+#include <mex.h>
+#include <pthread.h>
+#include <math.h>   //for isnan
+#include <cstring>  // for strcmp
+#define f(m,n)\
+  data[n*dataM+m] //Warning: matrix is transposed!
+
+/*Inputs{{{*/
+#define DATAX   (mxArray*)prhs[0]
+#define DATAY   (mxArray*)prhs[1]
+#define DATA    (mxArray*)prhs[2]
+#define INTERPX (mxArray*)prhs[3]
+#define INTERPY (mxArray*)prhs[4]
+#define METHOD  (mxArray*)prhs[5]
+/*}}}*/
+/*Outputs{{{*/
+#define INTERP (mxArray**)&plhs[0]
+/*}}}*/
+/*threading structs{{{*/
+typedef struct{
+	void* usr;
+	int   my_thread;
+	int   num_threads;
+} pthread_handle;
+
+typedef struct{
+	int     dataM;
+	int     dataN;
+	double* datax;
+	double* datay;
+	double* data;
+	int     interpN;
+	double* interpx;
+	double* interpy;
+	double* interp;
+	int     method;
+} AppStruct; /*}}}*/
+/*Prototypes{{{*/
+void  FetchMatrixPointer(double** pmatrix,int *pM,int *pN,const mxArray* dataref);
+void  FetchVectorPointer(double** pvector,int *pN,const mxArray* dataref);
+void  FetchString(char** pstring,const mxArray* dataref);
+void  WriteMatrix(mxArray** pdataref,double* matrix,int M,int N);
+void  WriteVector(mxArray** pdataref,double* vector,int N);
+void* InterpFromGridt(void* vpthread_handle);
+void  LaunchThread(void* function(void*), void* usr,int num_threads);
+bool  binary_search_increasing(int* pindex,double target,double* list,int n);
+bool  binary_search_decreasing(int* pindex,double target,double* list,int n);
+void  dataderivatives(double* A,double* x,double* y,double* data,int M,int N, int m0, int m1,int m2,int m3, int n0, int n1,int n2,int n3);
+/*}}}*/
+
+void mexFunction(int nlhs, mxArray* plhs[], int nrhs, const mxArray* prhs[]){/*{{{*/
+
+	double *datax   = NULL;
+	double *datay   = NULL;
+	double *data    = NULL;
+	int     dataM,dataN;
+	double *interpx = NULL;
+	double *interpy = NULL;
+	double *interp  = NULL;
+	int     interpM,interpN;
+
+	int     num_threads = 20;
+	int     test1,test2,test3,test4;
+	int     method = 1; // 0 = nearest, 1 = bilinear, 2 = bicubic
+
+	/*Check arguments to avoid crash*/
+	if(nlhs>1 || (nrhs<5 || nrhs>6)) mexErrMsgTxt("Wrong usage");
+
+	/*Get variables from matlab to C*/
+	FetchVectorPointer(&datax,&dataN,DATAX);
+	FetchVectorPointer(&datay,&dataM,DATAY);
+	FetchMatrixPointer(&data ,&test1,&test2,DATA); 
+	FetchMatrixPointer(&interpx,&interpM,&interpN,INTERPX);
+	FetchMatrixPointer(&interpy,&test3,&test4,INTERPY);
+	if(!dataM*dataN)     mexErrMsgTxt("data is empty");
+	if(!interpM*interpN) mexErrMsgTxt("no interpolation requested");
+	if(test1!=dataM)     mexErrMsgTxt("x should have as many elements as there are columns in the data");
+	if(test2!=dataN)     mexErrMsgTxt("y should have as many elements as there are lines in the data");
+	if(test3!=interpM)   mexErrMsgTxt("interpolation locations (x,y) do not have the same size");
+	if(test4!=interpN)   mexErrMsgTxt("interpolation locations (x,y) do not have the same size");
+	if(nrhs==6){
+		char* method_string = NULL;
+		FetchString(&method_string,METHOD);
+		if(strcmp(method_string,"nearest")==0)      method = 0;
+		else if(strcmp(method_string,"linear")==0)  method = 1;
+		else if(strcmp(method_string,"cubic")==0)   method = 2;
+		else{
+			mexErrMsgTxt("Method not supported yet");
+		}
+
+		mxFree(method_string);
+	}
+
+	/*Check inputs*/
+	if(true){
+		for(int i=0;i<interpM*interpN;i++){
+			if(isnan(interpx[i])) mexErrMsgTxt("NaN found in interpx");
+			if(isnan(interpy[i])) mexErrMsgTxt("NaN found in interpy");
+		}
+	}
+	if(method==3){
+		if(datax[1]-datax[0]<0) mexErrMsgTxt("x needs to be increasing for cubic interpolation");
+		if(datay[1]-datay[0]<0) mexErrMsgTxt("y needs to be increasing for cubic interpolation");
+	}
+
+	/*Allocate output*/
+	interp=(double*)mxMalloc(interpM*interpN*sizeof(double));
+
+	/*Multithreaded core*/
+	AppStruct usr;
+	usr.dataM   = dataM;
+	usr.dataN   = dataN;
+	usr.datax   = datax;
+	usr.datay   = datay;
+	usr.data    = data;
+	usr.interpN = interpM*interpN;
+	usr.interpx = interpx;
+	usr.interpy = interpy;
+	usr.interp  = interp;
+	usr.method  = method;
+	LaunchThread(InterpFromGridt,(void*)&usr,num_threads);
+
+	/*Write output vector*/
+	WriteMatrix(INTERP,interp,interpM,interpN);
+
+	/*Clean-up and return*/
+	/*Do not erase pointers!*/
+	return;
+}/*}}}*/
+
+/*InterpFromGridt{{{*/
+void* InterpFromGridt(void* vpthread_handle){
+
+	/*recover this thread info*/
+	pthread_handle *handle = (pthread_handle*)vpthread_handle;
+	int my_thread   = handle->my_thread;
+	int num_threads = handle->num_threads;
+
+	/*Recover struct*/
+	AppStruct *usr = (AppStruct*)handle->usr;
+	int     dataM   = usr->dataM;
+	int     dataN   = usr->dataN;
+	double *datax   = usr->datax;
+	double *datay   = usr->datay;
+	double *data    = usr->data;
+	int     interpN = usr->interpN;
+	double *interpx = usr->interpx;
+	double *interpy = usr->interpy;
+	double *interp  = usr->interp;
+	int     method = usr->method;
+
+	/*Intermediary*/
+	double xprime,yprime;
+	double x,y,x0,x1,x2,x3,y0,y1,y2,y3;
+	double Q11,Q12;
+	double Q21,Q22;
+	double A[16];
+	int    m,n,m0,m1,m2,m3,n0,n1,n2,n3;
+	int    oldm=-1,oldn=-1;
+
+	/*Is our matrix inverted?*/
+	bool invertx = (datax[1]-datax[0])<0 ? true:false;
+	bool inverty = (datay[1]-datay[0])<0 ? true:false;
+
+	for(int idx=my_thread;idx<interpN;idx+=num_threads){
+
+		x=interpx[idx];
+		y=interpy[idx];
+
+		/*Find indices m and n into y and x, for which  y(m)<=y_grids<=y(m+1) and x(n)<=x_grid<=x(n+1)*/
+		if(invertx) binary_search_decreasing(&n,x,datax,dataN);
+		else        binary_search_increasing(&n,x,datax,dataN);
+		if(inverty) binary_search_decreasing(&m,y,datay,dataM);
+		else        binary_search_increasing(&m,y,datay,dataM);
+
+		if(n>=0 && n<dataN && m>=0 && m<dataM){
+
+			/*    Q12             Q22
+			 * y2 x---------+-----x
+			 *    |         |     |
+			 *    |         |P    |
+			 *    |---------+-----|
+			 *    |         |     |
+			 *    |         |     |
+			 * y1 x---------+-----x Q21
+			 *    x1                 x2       
+			 *
+			 */
+			if(invertx){
+				n1=n+1; n2=n;
+			}
+			else{
+				n1=n; n2=n+1;
+			}
+			if(inverty){
+				m1=m+1; m2=m;
+			}
+			else{
+				m1=m; m2=m+1;
+			}
+
+			x1 = datax[n1]; x2 = datax[n2];
+			y1 = datay[m1]; y2 = datay[m2];
+
+			if(method==0){
+				/*Nearest neighbor interpolation*/
+				if(x > (x1+x2)/2.){
+					if(y > (y1+y2)/2.)
+						interp[idx] = f(m2,n2);
+					else
+						interp[idx] = f(m1,n2);
+					}
+				else{
+					if(y > (y1+y2)/2.)
+						interp[idx] = f(m2,n1);
+					else
+						interp[idx] = f(m1,n1);
+				}
+				continue;
+			}
+			else if(method==1){
+				/*Bilinear interpolation*/
+				if(Q11==-9999 || Q12==-9999 || Q21==-9999 || Q22==-9999){
+					interp[idx] = -9999;
+					continue;
+				}
+
+				interp[idx] =
+				  +f(m1,n1)*(x2-x)*(y2-y)/((x2-x1)*(y2-y1))
+				  +f(m1,n2)*(x-x1)*(y2-y)/((x2-x1)*(y2-y1))
+				  +f(m2,n1)*(x2-x)*(y-y1)/((x2-x1)*(y2-y1))
+				  +f(m2,n2)*(x-x1)*(y-y1)/((x2-x1)*(y2-y1));
+			}
+			else{
+				/*Bicubic interpolation*/
+				if(invertx){n0=n+2; n3=n-1;}
+				else{ n0=n-1; n3=n+2; }
+				if(inverty){ m0=m+2; m3=m-1; }
+				else{ m0=m-1; m3=m+2; }
+
+				if(n0<0 || n3>=dataN || m0<0 || m3>=dataM){
+					interp[idx] = -9999.;
+					continue;
+				}
+
+				/*Local coordinates (between 0 and 1)*/
+				xprime = (x - datax[n1])/(datax[n2]-datax[n1]);
+				yprime = (y - datay[m1])/(datay[m2]-datay[m1]);
+
+				/*Get derivatives at current pixel*/
+				if(oldm!=m || oldn!=n){
+					dataderivatives(&A[0],datax,datay,data,dataM,dataN,m0,m1,m2,m3,n0,n1,n2,n3);
+					oldm = m;
+					oldn = n;
+				}
+
+				double a00 = A[0];
+				double a10 = A[4];
+				double a20 = -3*A[0]+3*A[1]-2*A[4]-A[5];
+				double a30 = 2*A[0]-2*A[1]+A[4]+A[5];
+				double a01 = A[8];
+				double a11 = A[12];
+				double a21 = -3*A[8]+3*A[9]-2*A[12]-A[13];
+				double a31 = 2*A[8]-2*A[9]+A[12]+A[13];
+				double a02 = -3*A[0]+3*A[2]-2*A[8]-A[10];
+				double a12 = -3*A[4]+3*A[6]-2*A[12]-A[14];
+				double a22 = 9*A[0]-9*A[1]-9*A[2]+9*A[3]+6*A[4]+3*A[5]-6*A[6]-3*A[7]+6*A[8]-6*A[9]+3*A[10]-3*A[11]+4*A[12]+2*A[13]+2*A[14]+A[15];
+				double a32 =-6*A[0]+6*A[1]+6*A[2]-6*A[3]-3*A[4]-3*A[5]+3*A[6]+3*A[7]-4*A[8]+4*A[9]-2*A[10]+2*A[11]-2*A[12]-2*A[13]-A[14]-A[15];
+				double a03 = 2*A[0]-2*A[2]+A[8]+A[10];
+				double a13 = 2*A[4]-2*A[6]+A[12]+A[14];
+				double a23 =-6*A[0]+6*A[1]+6*A[2]-6*A[3]-4*A[4]-2*A[5]+4*A[6]+2*A[7]-3*A[8]+3*A[9]-3*A[10]+3*A[11]-2*A[12]-A[13]-2*A[14]-A[15] ;
+				double a33 = 4*A[0]-4*A[1]-4*A[2]+4*A[3]+2*A[4]+2*A[5]-2*A[6]-2*A[7]+2*A[8]-2*A[9]+2*A[10]-2*A[11]+A[12]+A[13]+A[14]+A[15];
+
+				x1= xprime;
+				x2= x1*x1;
+				x3= x2*x1;
+				y1= yprime;
+				y2= y1*y1;
+				y3= y2*y1;
+				interp[idx] = (a00+a01*y1+a02*y2+a03*y3)+(a10+a11*y1+a12*y2+a13*y3)*x1+(a20+a21*y1+a22*y2+a23*y3)*x2+(a30+a31*y1+a32*y2+a33*y3)*x3;
+			}
+		}
+		else{
+			interp[idx] = -9999.;
+		}
+	}
+	//if(my_thread==0) printf("\r   interpolation progress = %5.1f%%\n",100.);
+
+	return NULL;
+}/*}}}*/
+/*binary_search_increasing {{{*/
+bool binary_search_increasing(int* pindex,double target,double* list,int n){
+
+	/*output*/
+	int  index;       //index, if found
+	bool found=false; //found=0 if target is not found, 1 otherwise.
+
+	/*intermediary*/
+	int n0 = 0;
+	int n1 = int(n/2);
+	//int n1 = int((target-list[0])/(list[1]-list[0]));
+	int n2 = n-1;
+
+	if(target<list[n0]){
+		found  = true;
+		index  = -1;
+	}
+	else if(target>list[n2]){
+		found  = true;
+		index  = n;
+	}
+	else{
+		while(!found){
+			/*did we find the target?*/
+			if(list[n1]<=target && list[n1+1]>=target){
+				found = true;
+				index = n1;
+				break;
+			}
+			if(target < list[n1]){
+				n2 = n1;
+				n1 = n0 + int((n2-n0)/2);
+			}
+			else{
+				n0 = n1;
+				n1 = n0 + int((n2-n0)/2);
+			}
+		}
+	}
+
+	/*Assign output pointers:*/
+	*pindex=index;
+	
+	/*Return result: */
+	return found;
+}/*}}}*/
+/*binary_search_decreasing{{{*/
+bool binary_search_decreasing(int* pindex,double target,double* list,int n){
+
+	/*output*/
+	int  index;       //index, if found
+	bool found=false; //found=0 if target is not found, 1 otherwise.
+
+	/*intermediary*/
+	int n0 = 0;
+	int n1 = int(n/2);
+	//int n1 = int((target-list[0])/(list[0]-list[1]));
+	int n2 = n-1;
+
+	if (target>list[n0]){
+		found  = true;
+		index  = -1;
+	}
+	else if(target<list[n2]){
+		found  = true;
+		index  = n;
+	}
+	else{
+		while(!found){
+			/*did we find the target?*/
+			if(list[n1]>=target && list[n1+1]<=target){
+				found = true;
+				index = n1;
+				break;
+			}
+			if(target > list[n1]){
+				n2 = n1;
+				n1 = n0 + int((n2-n0)/2);
+			}
+			else{
+				n0 = n1;
+				n1 = n0 + int((n2-n0)/2);
+			}
+		}
+	}
+
+	/*Assign output pointers:*/
+	*pindex=index;
+
+	/*Return result: */
+	return found;
+}/*}}}*/
+/*dataderivatives{{{*/
+void  dataderivatives(double* A,double* x,double* y,double* data,int dataM,int dataN,
+			int m0, int m1,int m2,int m3, int n0, int n1,int n2,int n3){
+
+   /* i+1 +  +-------+ f(1,1)
+    *     |  |       |
+    *     |  |f(0,0) |
+    *   i +  +-------+ f(1,0)
+    *     +--+-------+-----> x
+    *        j       j+1
+	 */
+
+
+   /*Function at corners*/
+   A[0] = f(m1,n1); // f(0,0)
+   A[1] = f(m1,n2); // f(1,0)
+   A[2] = f(m2,n1); // f(0,1)
+   A[3] = f(m2,n2); // f(1,1)
+
+   /*x component of the gradient*/
+   A[4] = .5*(f(m1,n2) - f(m1,n0));///(x[n2]-x[n0]); // dfdx(0,0)
+   A[5] = .5*(f(m1,n3) - f(m1,n1));///(x[n3]-x[n1]); // dfdx(1,0)
+   A[6] = .5*(f(m2,n2) - f(m2,n0));///(x[n2]-x[n0]); // dfdx(0,1)
+   A[7] = .5*(f(m2,n3) - f(m2,n1));///(x[n3]-x[n1]); // dfdx(1,1)
+
+   /*y component of the gradient*/
+   A[ 8] = .5*(f(m2,n1) - f(m0,n1));///(y[m2]-y[m0]); // dfdy(0,0)
+   A[ 9] = .5*(f(m2,n2) - f(m0,n2));///(y[m2]-y[m0]); // dfdy(1,0)
+   A[10] = .5*(f(m3,n1) - f(m1,n1));///(y[m3]-y[m1]); // dfdy(0,1)
+   A[11] = .5*(f(m3,n2) - f(m1,n2));///(y[m3]-y[m1]); // dfdy(1,1)
+
+   /*cross-component of the gradient*/
+   A[12] = .25*( (f(m2,n2) - f(m2,n0)) - (f(m0,n2) - f(m0,n0)) );///( (x[n2]-x[n0])*(y[m2]-y[m0]) ); // d2f/dxdy (0,0)
+   A[13] = .25*( (f(m2,n3) - f(m2,n1)) - (f(m0,n3) - f(m0,n1)) );///( (x[n3]-x[n1])*(y[m2]-y[m0]) ); // d2f/dxdy (1,0)
+   A[14] = .25*( (f(m3,n2) - f(m3,n0)) - (f(m1,n2) - f(m1,n0)) );///( (x[n2]-x[n0])*(y[m3]-y[m1]) ); // d2f/dxdy (0,1)
+   A[15] = .25*( (f(m3,n3) - f(m3,n1)) - (f(m1,n3) - f(m1,n1)) );///( (x[n3]-x[n1])*(y[m3]-y[m1]) ); // d2f/dxdy (1,1)
+}/*}}}*/
+/*LaunchThread{{{*/
+void LaunchThread(void* function(void*), void* usr,int num_threads){
+
+	int i;
+	int            *status  = NULL;
+	pthread_t      *threads = NULL;
+	pthread_handle *handles = NULL;
+
+	/*dynamically allocate: */
+	threads=(pthread_t*)mxMalloc(num_threads*sizeof(pthread_t));
+	handles=(pthread_handle*)mxMalloc(num_threads*sizeof(pthread_handle));
+
+	for(i=0;i<num_threads;i++){
+		handles[i].usr=usr;
+		handles[i].my_thread  =i;
+		handles[i].num_threads=num_threads;
+	}
+
+	if(num_threads==1){
+		function(handles);
+	}
+	else{
+		for(i=0;i<num_threads;i++){
+			if(pthread_create(threads+i,NULL,function,(void*)(handles+i))){
+				mexErrMsgTxt("pthread_create error");
+			}
+		}
+		for(i=0;i<num_threads;i++){
+			if(pthread_join(threads[i],(void**)&status)){
+				mexErrMsgTxt("pthread_join error");
+			}
+		}
+	}
+
+	/*Free resources:*/
+	mxFree(threads);
+	mxFree(handles);
+}/*}}}*/
+/*FetchMatrixPointer {{{*/
+void FetchMatrixPointer(double** pmatrix,int *pM,int *pN,const mxArray* dataref){
+
+	double *matrix=NULL;
+	double *values=NULL;
+	int     N,M;
+
+	if(mxIsEmpty(dataref) ){
+		M=N=0;
+		matrix=NULL;
+	}
+	else if (mxIsDouble(dataref) ){
+		M=mxGetM(dataref);
+		N=mxGetN(dataref);
+		matrix=(double*)mxGetPr(dataref);
+	}
+	else{
+		mexErrMsgTxt("matrix type not supported");
+	}
+
+	*pmatrix=matrix;
+	if (pN)*pN=N;
+	if (pM)*pM=M;
+}/*}}}*/
+/*FetchVectorPointer {{{*/
+void FetchVectorPointer(double** pvector,int *pN,const mxArray* dataref){
+
+	double *vector=NULL;
+	double *values=NULL;
+	int     N;
+
+	if(mxIsEmpty(dataref) ){
+		N=0;
+		vector=NULL;
+	}
+	else if (mxIsDouble(dataref) ){
+		if(mxGetM(dataref)!=1 && mxGetN(dataref)!=1){
+			mexErrMsgTxt("input is a matrix and not a vector");
+		}
+		N=mxGetN(dataref)*mxGetM(dataref);
+		vector=(double*)mxGetPr(dataref);
+	}
+	else{
+		mexErrMsgTxt("vector type not supported");
+	}
+
+	*pvector=vector;
+	if (pN)*pN=N;
+}/*}}}*/
+/*FetchString{{{*/
+void FetchString(char** pstring,const mxArray* dataref){
+
+	char* outstring=NULL;
+
+	/*Ok, the string should be coming directly from the matlab workspace: */
+	if (!mxIsClass(dataref,"char")){
+		mexErrMsgTxt("input data_type is not a string!");
+	}
+	else{
+		/*Recover the string:*/
+		int stringlen;
+
+		stringlen = mxGetM(dataref)*mxGetN(dataref)+1;
+		outstring = (char*)mxMalloc(stringlen*sizeof(char));
+		mxGetString(dataref,outstring,stringlen);
+	}
+
+	/*Assign output pointers:*/
+	*pstring=outstring;
+	return;
+}/*}}}*/
+/*WriteMatrix {{{*/
+void WriteMatrix(mxArray** pdataref,double* matrix,int M,int N){
+
+	mxArray* dataref=NULL;
+
+	if(matrix){
+		/*data is a double* pointer. set pointer and invert sizes*/
+		dataref = mxCreateDoubleMatrix(0,0,mxREAL);
+		mxSetM(dataref,(mwSize)M); 
+		mxSetN(dataref,(mwSize)N);
+		mxSetPr(dataref,(double*)matrix);
+	}
+	else{
+		dataref = mxCreateDoubleScalar(0.0);
+	}
+	*pdataref=dataref;
+}
+/*}}}*/
+/*WriteVector {{{*/
+void WriteVector(mxArray** pdataref,double* vector,int N){
+
+	mxArray* dataref=NULL;
+
+	if(vector){
+		/*data is a double* pointer. Copy into a vector: */
+		dataref = mxCreateDoubleMatrix(0,0,mxREAL);
+		mxSetM(dataref,(mwSize)N);
+		mxSetN(dataref,(mwSize)1);
+		mxSetPr(dataref,(double*)vector);
+	}
+	else{
+		dataref = mxCreateDoubleScalar(0.0);
+	}
+	*pdataref=dataref;
+}
+/*}}}*/
Index: /issm/trunk/src/m/modeldata/Makefile
===================================================================
--- /issm/trunk/src/m/modeldata/Makefile	(revision 28013)
+++ /issm/trunk/src/m/modeldata/Makefile	(revision 28013)
@@ -0,0 +1,2 @@
+all:
+	/Applications/MATLAB_R2019a.app/bin/mex InterpFromGrid.cpp
Index: /issm/trunk/src/m/modeldata/interpAdusumilliIceShelfMelt.m
===================================================================
--- /issm/trunk/src/m/modeldata/interpAdusumilliIceShelfMelt.m	(revision 28013)
+++ /issm/trunk/src/m/modeldata/interpAdusumilliIceShelfMelt.m	(revision 28013)
@@ -0,0 +1,38 @@
+function output = interpAdusumilliIceShelfMelt(X,Y)
+%INTERPADUSUMILLIICESHELFMELT - imports basal melt rates from (Adusumilli et al., 2020).
+%   About the data: "Average basal melt rates for Antarctic ice shelves for the 2010–2018 period at 
+%   high spatial resolution, estimated using CryoSat-2 data. This data file was last updated on 2020-06-11."
+%
+%   Citation: Adusumilli, Susheel; Fricker, Helen A.; Medley, Brooke C.; Padman, Laurie; Siegfried, Matthew R. (2020). 
+%   Data from: Interannual variations in meltwater input to the Southern Ocean from Antarctic ice shelves. 
+%   UC San Diego Library Digital Collections. https://doi.org/10.6075/J04Q7SHT
+%
+%   Usage:
+%      output = interpAdusumilliIceShelfMelt(X,Y)
+
+% define path and filename for this machine
+switch (oshostname()),
+	case {'totten'}
+		filename ='/totten_1/ModelData/Antarctica/Adusumilli2020IceShelfMelt/ANT_iceshelf_melt_rates_CS2_2010-2018_v0.h5';
+	otherwise
+		error('hostname not supported yet');
+end
+
+disp(['   -- Adusumilli Ice Shelf Melt: loading melt data']);
+% read in coordinates:
+%	coordinates are in Polar Stereographic projection 'PS-71'
+xdata = double(h5read(filename,'/x'));
+ydata = double(h5read(filename,'/y'));
+
+% read in data:
+% 'Basal melt rate (2010–2018), in meters of ice equivalent per year, positive is melting'
+% 'For ice shelf areas where CryoSat-2 data were not available, w_b_interp provides the 
+%  mean melt rate measured at the same ice draft as the grid cell elsewhere on the ice shelf. 
+%  Ice draft was estimated using BedMachine data.'
+data = double(h5read(filename,'/w_b'));
+data_interp = double(h5read(filename,'/w_b_interp'));
+data = data';
+disp(['   -- Adusumilli Ice Shelf Melt: interpolating melt data']);
+data(isnan(data)) = data_interp(isnan(data));
+output = InterpFromGrid(xdata,ydata,data,X(:),Y(:));
+output = reshape(output,size(X,1),size(X,2));
Index: /issm/trunk/src/m/modeldata/interpArcticdem.m
===================================================================
--- /issm/trunk/src/m/modeldata/interpArcticdem.m	(revision 28013)
+++ /issm/trunk/src/m/modeldata/interpArcticdem.m	(revision 28013)
@@ -0,0 +1,68 @@
+function sout = interpArcticdem(X,Y),
+
+switch oshostname(),
+	case {'ronne'}
+		path='/home/ModelData/Greenland/ArcticDemMosaic/arcticdem_mosaic_100m_v3.0.tif';
+	case {'totten'}
+		path='/totten_1/ModelData/Greenland/ArcticDemMosaic/arcticdem_mosaic_100m_v3.0.tif';
+	otherwise
+		error('machine not supported yet');
+end
+
+usemap = 0;
+if license('test','map_toolbox')==0,
+	disp('WARNING: map toolbox not installed, trying house code');
+	usemap = 0;
+elseif license('checkout','map_toolbox')==0
+	disp('WARNING: map toolbox not available (checkout failed), trying house code');
+	usemap = 0;
+end
+
+if usemap,
+	[data,R] = geotiffread(path);
+	data=double(flipud(data));
+	xdata=R.XLimWorld(1):R.DeltaX:R.XLimWorld(2); xdata=xdata(:);
+	xdata =(xdata(1:end-1)+xdata(2:end))/2;
+	ydata=R.YLimWorld(2):R.DeltaY:R.YLimWorld(1); ydata=flipud(ydata(:));
+	ydata =(ydata(1:end-1)+ydata(2:end))/2;
+else
+
+	%Get image info
+	Tinfo = imfinfo(path);
+	N     = Tinfo.Width;
+	M     = Tinfo.Height;
+	dx    = Tinfo.ModelPixelScaleTag(1);
+	dy    = Tinfo.ModelPixelScaleTag(2);
+	minx  = Tinfo.ModelTiepointTag(4);
+	maxy  = Tinfo.ModelTiepointTag(5);
+
+	%Generate vectors
+	xdata = minx + dx/2 + ((0:N-1).*dx);
+	ydata = maxy - dy/2 - ((M  -1:-1:0).*dy);
+	ydata = fliplr(ydata);
+
+	%Get pixels we are interested in
+	offset=2;
+	xmin=min(X(:)); xmax=max(X(:));
+	posx=find(xdata<=xmax);
+	id1x=max(1,find(xdata>=xmin,1)-offset);
+	id2x=min(numel(xdata),posx(end)+offset);
+
+	if 0,
+		ymin=min(Y(:)); ymax=max(Y(:));
+		posy=find(ydata<=ymax);
+		id1y=max(1,find(ydata>=ymin,1)-offset);
+		id2y=min(numel(ydata),posy(end)+offset);
+	else
+		ymin=min(Y(:)); ymax=max(Y(:));
+		posy=find(ydata>=ymin);
+		id1y=max(1,find(ydata<=ymax,1)-offset);
+		id2y=min(numel(ydata),posy(end)+offset);
+	end
+
+	data  = double(imread(path,'PixelRegion',{[id1y,id2y],[id1x,id2x]}));
+	xdata=xdata(id1x:id2x);
+	ydata=ydata(id1y:id2y);
+end
+
+sout = InterpFromGrid(xdata,ydata,data,X,Y);
Index: /issm/trunk/src/m/modeldata/interpBamber2001.m
===================================================================
--- /issm/trunk/src/m/modeldata/interpBamber2001.m	(revision 28013)
+++ /issm/trunk/src/m/modeldata/interpBamber2001.m	(revision 28013)
@@ -0,0 +1,36 @@
+function [bedout thicknessout] = interpBamber2001(X,Y),
+
+switch oshostname(),
+	case {'murdo','thwaites','astrid'}
+		bamber2001bedpath ='/u/astrid-r1b/ModelData/BamberDEMGreenland5km/bedrock.mat';
+		bamber2001thxpath ='/u/astrid-r1b/ModelData/BamberDEMGreenland5km/thickness.mat';
+	case {'ronne'}
+		bamber2001bedpath ='/home/ModelData/Greenland/Bamber2001/bedrock.mat';
+		bamber2001thxpath ='/home/ModelData/Greenland/Bamber2001/thickness.mat';
+	case {'totten'}
+		bamber2001bedpath ='/totten_1/ModelData/Greenland/Bamber2001/bedrock.mat';
+		bamber2001thxpath ='/totten_1/ModelData/Greenland/Bamber2001/thickness.mat';
+	otherwise
+		error('machine not supported yet');
+end
+
+verbose = 0;
+
+%Convert to Bamber's projections
+if verbose, disp('   -- Bamber2001: converting coordinates'); end
+[LAT,  LON  ] = xy2ll(double(X(:)),double(Y(:)),+1,45,70);
+[x3971,y3971] = ll2xy(LAT,LON  ,+1,39,71);
+
+if verbose, disp('   -- Bamber2001: loading bed'); end
+load(bamber2001bedpath);
+if verbose, disp('   -- Bamber2001: interpolating bed'); end
+bedout = InterpFromGrid((x_m(1:end-1)+x_m(2:end))/2,(y_m(1:end-1)+y_m(2:end))/2,bedrock,x3971,y3971);
+bedout = reshape(bedout,size(X,1),size(X,2));
+
+if nargout>1
+	if verbose, disp('   -- Bamber2001: loading thickness'); end
+	load(bamber2001thxpath);
+	if verbose, disp('   -- Bamber2001: interpolating thickness'); end
+	thicknessout = InterpFromGrid((x_m(1:end-1)+x_m(2:end))/2,(y_m(1:end-1)+y_m(2:end))/2,thickness,x3971,y3971);
+	thicknessout = reshape(thicknessout,size(X,1),size(X,2));
+end
Index: /issm/trunk/src/m/modeldata/interpBamber2009.m
===================================================================
--- /issm/trunk/src/m/modeldata/interpBamber2009.m	(revision 28013)
+++ /issm/trunk/src/m/modeldata/interpBamber2009.m	(revision 28013)
@@ -0,0 +1,21 @@
+function demout = interpBamber2009(X, Y)
+%INTERPBAMBER2009 - interpolate surface dem of Bamber 2009
+%
+%   Surface dem Nominal year 2004 (WGS84, no firn correction)
+%
+%   Usage:
+%      demout = interpBamber2009(X, Y)
+
+switch oshostname(),
+	case {'totten'}
+		bamber2009path ='/totten_1/ModelData/Antarctica/Bamber2009DEM/krigged_dem_nsidc.mat';
+	otherwise
+		error('machine not supported yet');
+end
+
+%Convert to Bamber's projections
+%disp('   -- Bamber2009: loading dem'); 
+load(bamber2009path);
+
+disp('   -- Bamber2009: interpolating dem (WGS84)');
+demout = InterpFromGrid(x, y, surfacedem, X, Y);
Index: /issm/trunk/src/m/modeldata/interpBamber2013.m
===================================================================
--- /issm/trunk/src/m/modeldata/interpBamber2013.m	(revision 28013)
+++ /issm/trunk/src/m/modeldata/interpBamber2013.m	(revision 28013)
@@ -0,0 +1,52 @@
+function output = interpBamber2013(X,Y,string),
+%INTERPBAMBER2013 - interpolate Bamber 2013 data
+%
+%   Available data:
+%      BedrockElevation
+%      SurfaceElevation
+%      IceThickness
+%      SurfaceRMSE
+%      BedrockError
+%      LandMask (Land mask, 0=ocean, 1=land, 2=ice sheet, 3=non-Greenlandic land, 4=ice shelf)
+%      NumberAirbornePoints
+%      Geoid
+%      BedrockChangeMask
+%      IceShelfSourceMask
+%      BedrockElevation_unprocessed
+%      IceThickness_unprocessed
+%      BathymetryDataMask
+
+switch oshostname(),
+	case {'murdo','thwaites','astrid'}
+		bamber2013nc='/u/astrid-r1b/morlighe/issmjpl/proj-morlighem/DatasetGreenland/Data/Bamber2013/Greenland_bedrock_topography_V3.nc';
+	case {'ronne'}
+		bamber2013nc='/home/ModelData/Greenland/Bamber2013/Greenland_bedrock_topography_V3.nc';
+	case {'totten'}
+		bamber2013nc='/totten_1/ModelData/Greenland/Bamber2013/Greenland_bedrock_topography_V3.nc';
+	otherwise
+		error('machine not supported yet');
+end
+verbose = 0;
+
+if nargin==2,
+	string = 'BedrockElevation';
+end
+
+%Convert to Bamber's projections
+if verbose, disp('   -- Bamber2013: converting coordinates'); end
+[LAT,  LON  ] = xy2ll(double(X(:)),double(Y(:)),+1,45,70);
+[x3971,y3971] = ll2xy(LAT,LON  ,+1,39,71);
+
+if verbose, disp('   -- Bamber2013: loading coordinates'); end
+xdata = double(ncread(bamber2013nc,'projection_x_coordinate'));%*1000;
+ydata = double(ncread(bamber2013nc,'projection_y_coordinate'));%*1000;
+
+if verbose, disp(['   -- Bamber2013: loading ' string]); end
+data  = double(ncread(bamber2013nc,string))';
+if verbose, disp(['   -- Bamber2013: interpolating ' string]); end
+if strcmpi(string,'LandMask');
+	output = InterpFromGrid(xdata,ydata,data,x3971,y3971,'nearest');
+else
+	output = InterpFromGrid(xdata,ydata,data,x3971,y3971);
+end
+output = reshape(output,size(X,1),size(X,2));
Index: /issm/trunk/src/m/modeldata/interpBedmachineAntarctica.m
===================================================================
--- /issm/trunk/src/m/modeldata/interpBedmachineAntarctica.m	(revision 28013)
+++ /issm/trunk/src/m/modeldata/interpBedmachineAntarctica.m	(revision 28013)
@@ -0,0 +1,165 @@
+function output = interpBedmachineAntarctica(X,Y,string,method,ncdate)
+%INTERPBEDMACHINEANTARCTICA - interpolate BedMachine data onto X and Y
+%
+%   Examples:
+%      bed       = interpBedmachineAntarctica(X,Y,'bed');
+%      surface   = interpBedmachineAntarctica(X,Y,'surface');
+%      thickness = interpBedmachineAntarctica(X,Y,'thickness');
+%      mask      = interpBedmachineAntarctica(X,Y,'mask');
+%      mask      = interpBedmachineAntarctica(X,Y,'mask','nearest','../Data/BedMachineAntarctica_2020-07-15_v02.nc');
+%
+%   - mask:   0 ocean, 1 land (ice free), 2 grounded ice, 3 floating ice
+%   - source: 1 IBCSO/RTopo-2, 2 MC, 3 interpolation, 4 hydrostatic eq, 
+%             5 Streamline diffusion, 6 Gravity inversion
+%   - optional 4th input argument: interpolation method.
+%             Supported interpolation methos: 'linear','cubic','nearest'
+%   - optional 5th input argument: path to dataset.
+%
+% Version 11/30/2018 Mathieu Morlighem mmorligh@uci.edu
+
+if nargin<3, string = 'bed'; end
+if nargin<4
+	if strcmp(string,'mask') | strcmp(string,'source')
+		method='nearest'; % default method
+	else
+		method='cubic'; % default method
+	end
+end
+if nargin<5
+	ncdate='2020-07-15'; %BedMachine v2
+	ncdate='v3.5';       %Official v3 release
+end
+basename = 'BedMachineAntarctica';
+
+if nargin==5
+	ncfile = ncdate;
+else
+	%List of common paths to try
+	paths = {...
+		['/u/astrid-r1b/ModelData/BedMachine/' basename '-' ncdate '.nc'],...
+		['/home/ModelData/Antarctica/BedMachine/' basename '-' ncdate '.nc'],...
+		['/totten_1/ModelData/Antarctica/BedMachine/' basename '-' ncdate '.nc'],...
+		['/Users/larour/ModelData/BedMachine/' basename '-' ncdate '.nc'],...
+		['./' basename '-' ncdate '.nc'],...
+		};
+
+	found = 0;
+	for i=1:numel(paths)
+		if exist(paths{i},'file')
+			ncfile = paths{i};
+			found = 1;
+			break;
+		end
+	end
+
+	if ~found
+		error(['Could not find ' basename '-' ncdate '.nc, you can add the path to the list or provide its path as a 5th argument']);
+	end
+end
+
+disp(['   -- BedMachine Antarctica version: ' ncdate]);
+xdata = double(ncread(ncfile,'x'));
+ydata = double(ncread(ncfile,'y'));
+
+offset=2;
+
+xmin=min(X(:)); xmax=max(X(:));
+posx=find(xdata<=xmax);
+if isempty(posx), posx=numel(xdata); end
+id1x=max(1,find(xdata>=xmin,1)-offset);
+id2x=min(numel(xdata),posx(end)+offset);
+
+ymin=min(Y(:)); ymax=max(Y(:));
+posy=find(ydata>=ymin);
+if isempty(posy), posy=numel(ydata); end
+id1y=max(1,find(ydata<=ymax,1)-offset);
+id2y=min(numel(ydata),posy(end)+offset);
+
+if strcmp(string,'icemask'),
+	disp(['   -- BedMachine Antarctica: loading ' string]);
+	%data  = double(ncread(ncfile,'mask'))';
+	data  = double(ncread(ncfile,'mask',[id1x id1y],[id2x-id1x+1 id2y-id1y+1],[1 1]))';
+	xdata=xdata(id1x:id2x);
+	ydata=ydata(id1y:id2y);
+	%ice ocean interface is between 0 and 3, so we might get some 1 by interpolating
+	data(find(data==3))=0;
+else
+	disp(['   -- BedMachine Antarctica: loading ' string]);
+	%data  = double(ncread(ncfile,string))';
+	data  = double(ncread(ncfile,string,[id1x id1y],[id2x-id1x+1 id2y-id1y+1],[1 1]))';
+	xdata=xdata(id1x:id2x);
+	ydata=ydata(id1y:id2y);
+end
+
+disp(['   -- BedMachine Antarctica: interpolating ' string]);
+disp(['       -- Interpolation method: ' method]);
+if strcmp(string,'mask') | strcmp(string,'source'),
+	%Need nearest neighbor to avoid interpolation between 0 and 2
+	output = InterpFromGrid(xdata,ydata,data,double(X),double(Y),'nearest');
+	%tic
+	%output = FastInterp(xdata,ydata,data,X,Y,'nearest');
+	%toc
+else
+	%disp('InterpFromGrid');
+	%tic
+	%output = InterpFromGrid(xdata,ydata,data,double(X),double(Y),'cubic'); 
+	output = InterpFromGrid(xdata,ydata,data,double(X),double(Y),method); % now the interpolation method can be defined by the user
+	%toc
+	%disp('FastInterp');
+	%tic
+	%output = FastInterp(xdata,ydata,data,X,Y,'bilinear');
+	%toc
+end
+
+end
+function zi = FastInterp(x,y,data,xi,yi,method)
+
+	%get data size
+	[M N] = size(data);
+
+	% Get X and Y library array spacing
+	ndx = 1/(x(2)-x(1));    ndy = 1/(y(2)-y(1));
+	% Begin mapping xi and yi vectors onto index space by subtracting library
+	% array minima and scaling to index spacing
+
+	xi = (xi - x(1))*ndx;       yi = (yi - y(1))*ndy;
+
+	% Fill Zi with NaNs
+	zi = NaN(size(xi));
+
+	if strcmpi(method,'nearest'),
+		% Find the nearest point in index space
+		rxi = round(xi)+1;  ryi = round(yi)+1;
+		% Find points that are in X,Y range
+		flag = rxi>0 & rxi<=N & ~isnan(rxi) & ryi>0 & ryi<=M & ~isnan(ryi);
+		% Map subscripts to indices
+		ind = ryi + M*(rxi-1);
+		zi(flag) = data(ind(flag));
+
+	else %Bilinear
+
+		% Transform to unit square
+		fxi = floor(xi)+1;  fyi = floor(yi)+1; % x_i and y_i
+		dfxi = xi-fxi+1;    dfyi = yi-fyi+1;   % Location in unit square
+
+		% flagIn determines whether the requested location is inside of the data arrays
+		flagIn = fxi>0 & fxi<N & ~isnan(fxi) & fyi>0 & fyi<M & ~isnan(fyi);
+
+		%Toss all out-of-bounds variables now to save time
+		fxi  = fxi(flagIn);  fyi  = fyi(flagIn);
+		dfxi = dfxi(flagIn); dfyi = dfyi(flagIn);
+
+		%Find bounding vertices
+		ind1 = fyi + M*(fxi-1);     % indices of (  x_i  ,  y_i  )
+		ind2 = fyi + M*fxi;         % indices of ( x_i+1 ,  y_i  )
+		ind3 = fyi + 1 + M*fxi;     % indices of ( x_i+1 , y_i+1 )
+		ind4 = fyi + 1 + M*(fxi-1); % indices of (  x_i  , y_i+1 )
+
+		% Bilinear interpolation
+		zi(flagIn) = ...
+			data(ind1).*(1-dfxi).*(1-dfyi) + ...
+			data(ind2).*dfxi.*(1-dfyi) + ...
+			data(ind4).*(1-dfxi).*dfyi + ...
+			data(ind3).*dfxi.*dfyi;
+	end
+end
Index: /issm/trunk/src/m/modeldata/interpBedmachineGreenland.m
===================================================================
--- /issm/trunk/src/m/modeldata/interpBedmachineGreenland.m	(revision 28013)
+++ /issm/trunk/src/m/modeldata/interpBedmachineGreenland.m	(revision 28013)
@@ -0,0 +1,151 @@
+function output = interpBedmachineGreenland(X,Y,string,method,ncdate)
+%INTERPBEDMACHINEGREENLAND - interpolate BedMachine data onto X and Y
+%
+%   Examples:
+%      bed       = interpBedmachineGreenland(X,Y,'bed');
+%      surface   = interpBedmachineGreenland(X,Y,'surface');
+%      thickness = interpBedmachineGreenland(X,Y,'thickness');
+%      mask      = interpBedmachineGreenland(X,Y,'mask');
+%      mask      = interpBedmachineGreenland(X,Y,'mask','nearest','../Data/BedMachineGreenland_2020-07-15_v03.nc');
+%
+%   - mask:   0 ocean, 1 land (ice free), 2 grounded ice, 3 floating ice
+%   - source: 1 IBCSO/RTopo-2, 2 MC, 3 interpolation, 4 hydrostatic eq, 
+%             5 Streamline diffusion, 6 Gravity inversion
+%   - optional 4th input argument: interpolation method.
+%             Supported interpolation methos: 'linear','cubic','nearest'
+%   - optional 5th input argument: path to dataset.
+%
+% Version 11/30/2018 Mathieu Morlighem mmorligh@uci.edu
+
+if nargin<3, string = 'bed'; end
+if nargin<4
+	if strcmp(string,'mask') | strcmp(string,'source')
+		method='nearest'; % default method
+	else
+		method='cubic'; % default method
+	end
+end
+if nargin<5
+	%ncdate='2015-04-27'; %BedMachine v2
+	ncdate='2017-09-25'; %BedMachine v3
+	ncdate='2020-04-14';
+	ncdate='2021-08-27';
+	ncdate='2022-03-17';
+	ncdate='2022-05-18';
+	ncdate='2022-07-28';
+	ncdate='v6.0';
+	ncdate='v6.1';
+end
+basename = 'BedMachineGreenland';
+
+if nargin==5
+	ncfile = ncdate;
+else
+	%List of common paths to try
+	paths = {...
+		['/u/astrid-r1b/ModelData/ModelData/MCdataset-' ncdate '.nc'],...
+		['/home/ModelData/Greenland/BedMachine/' basename '-' ncdate '.nc'],...
+		['/totten_1/ModelData/Greenland/BedMachine/' basename '-' ncdate '.nc'],...
+		['/Users/larour/ModelData/BedMachine/' basename '-' ncdate '.nc'],...
+		['./' basename '-' ncdate '.nc'],...
+		};
+
+	found = 0;
+	for i=1:numel(paths)
+		if exist(paths{i},'file')
+			ncfile = paths{i};
+			found = 1;
+			break;
+		end
+	end
+
+	if ~found
+		error(['Could not find ' basename '-' ncdate '.nc, you can add the path to the list or provide its path as a 5th argument']);
+	end
+end
+
+disp(['   -- BedMachine Greenland version: ' ncdate]);
+xdata = double(ncread(ncfile,'x'));
+ydata = double(ncread(ncfile,'y'));
+
+offset=2;
+
+xmin=min(X(:)); xmax=max(X(:));
+posx=find(xdata<=xmax);
+if isempty(posx), posx=numel(xdata); end
+id1x=max(1,find(xdata>=xmin,1)-offset);
+id2x=min(numel(xdata),posx(end)+offset);
+
+ymin=min(Y(:)); ymax=max(Y(:));
+posy=find(ydata>=ymin);
+if isempty(posy), posy=numel(ydata); end
+id1y=max(1,find(ydata<=ymax,1)-offset);
+id2y=min(numel(ydata),posy(end)+offset);
+
+disp(['   -- BedMachine Greenland: loading ' string]);
+data  = double(ncread(ncfile,string,[id1x id1y],[id2x-id1x+1 id2y-id1y+1],[1 1]))';
+xdata=xdata(id1x:id2x);
+ydata=ydata(id1y:id2y);
+data(find(data==-9999))=NaN;
+
+disp(['   -- BedMachine Greenland: interpolating ' string]);
+disp(['       -- Interpolation method: ' method]);
+if strcmp(string,'mask') | strcmp(string,'source'),
+	%Need nearest neighbor to avoid interpolation between 0 and 2
+	output = InterpFromGrid(xdata,ydata,data,double(X),double(Y),'nearest');
+else
+	output = InterpFromGrid(xdata,ydata,data,double(X),double(Y));
+end
+
+end
+function zi = FastInterp(x,y,data,xi,yi,method)
+
+	%get data size
+	[M N] = size(data);
+
+	% Get X and Y library array spacing
+	ndx = 1/(x(2)-x(1));    ndy = 1/(y(2)-y(1));
+	% Begin mapping xi and yi vectors onto index space by subtracting library
+	% array minima and scaling to index spacing
+
+	xi = (xi - x(1))*ndx;       yi = (yi - y(1))*ndy;
+
+	% Fill Zi with NaNs
+	zi = NaN(size(xi));
+
+	if strcmpi(method,'nearest'),
+		% Find the nearest point in index space
+		rxi = round(xi)+1;  ryi = round(yi)+1;
+		% Find points that are in X,Y range
+		flag = rxi>0 & rxi<=N & ~isnan(rxi) & ryi>0 & ryi<=M & ~isnan(ryi);
+		% Map subscripts to indices
+		ind = ryi + M*(rxi-1);
+		zi(flag) = data(ind(flag));
+
+	else %Bilinear
+
+		% Transform to unit square
+		fxi = floor(xi)+1;  fyi = floor(yi)+1; % x_i and y_i
+		dfxi = xi-fxi+1;    dfyi = yi-fyi+1;   % Location in unit square
+
+		% flagIn determines whether the requested location is inside of the data arrays
+		flagIn = fxi>0 & fxi<N & ~isnan(fxi) & fyi>0 & fyi<M & ~isnan(fyi);
+
+		%Toss all out-of-bounds variables now to save time
+		fxi  = fxi(flagIn);  fyi  = fyi(flagIn);
+		dfxi = dfxi(flagIn); dfyi = dfyi(flagIn);
+
+		%Find bounding vertices
+		ind1 = fyi + M*(fxi-1);     % indices of (  x_i  ,  y_i  )
+		ind2 = fyi + M*fxi;         % indices of ( x_i+1 ,  y_i  )
+		ind3 = fyi + 1 + M*fxi;     % indices of ( x_i+1 , y_i+1 )
+		ind4 = fyi + 1 + M*(fxi-1); % indices of (  x_i  , y_i+1 )
+
+		% Bilinear interpolation
+		zi(flagIn) = ...
+			data(ind1).*(1-dfxi).*(1-dfyi) + ...
+			data(ind2).*dfxi.*(1-dfyi) + ...
+			data(ind4).*(1-dfxi).*dfyi + ...
+			data(ind3).*dfxi.*dfyi;
+	end
+end
Index: /issm/trunk/src/m/modeldata/interpBedmap.m
===================================================================
--- /issm/trunk/src/m/modeldata/interpBedmap.m	(revision 28013)
+++ /issm/trunk/src/m/modeldata/interpBedmap.m	(revision 28013)
@@ -0,0 +1,27 @@
+function [dataout] = interpBedmap(X,Y,string),
+%INTERPBEDMAP - interpolate bedmap data
+%
+%   Available data:
+%      1. bed                          is bed height
+%      2. thickness                    is ice thickness
+%
+%   Usage:
+%      [dataout] = interpBedmap(X,Y,string)
+
+path=[jplsvn() '/proj-morlighem/DatasetAntarctica/Data/BedMap/gridded/'];
+
+if strcmp(string,'bed'),
+	path = [path '/bed.mat'];
+	load(path);
+	x_m =(x_m(2:end)+x_m(1:end-1))/2.;
+	y_m =(y_m(2:end)+y_m(1:end-1))/2.;
+	dataout = InterpFromGrid(x_m,y_m,bed,double(X),double(Y));
+elseif strcmp(string,'thickness')
+	path = [path '/thickness.mat'];
+	load(path);
+	x_m =(x_m(2:end)+x_m(1:end-1))/2.;
+	y_m =(y_m(2:end)+y_m(1:end-1))/2.;
+	dataout = InterpFromGrid(x_m,y_m,thickness,double(X),double(Y));
+else
+	error('not supported');
+end
Index: /issm/trunk/src/m/modeldata/interpBedmap2.m
===================================================================
--- /issm/trunk/src/m/modeldata/interpBedmap2.m	(revision 28013)
+++ /issm/trunk/src/m/modeldata/interpBedmap2.m	(revision 28013)
@@ -0,0 +1,110 @@
+function [output] = interpBedmap2(X,Y,string),
+%INTERPBEDMAP2 - interpolate bedmap2 data
+%
+%   Available data:
+%      1. bed                          is bed height
+%      2. surface                      is surface height
+%      3. thickness                    is ice thickness
+%      4. icemask_grounded_and_shelves is a mask file showing the grounding line and the extent of the floating ice shelves
+%      5. rockmask                     is a mask file showing rock outcrops
+%      6. lakemask_vostok              is a mask file showing the extent of the lake cavity of Lake Vostok
+%      7. grounded_bed_uncertainty     is the bed uncertainty grid shown in figure 12 of the manuscript
+%      8. thickness_uncertainty_5km    is the thickness uncertainty grid shown in figure 11 of the manuscript
+%      9. coverage                     is a binary grid showing the distribution of ice thickness data used in the grid of ice thickness
+%     10. gl04c_geoid_to_wgs84         is the height conversion values (as floating point) used to convert from WGS84 datum heights to
+%                                      g104c geoidal heights (to convert back to WGS84, add this grid)
+%
+%   Usage:
+%      [dataout] = interpBedmap2(X,Y,string)
+
+switch (oshostname()),
+	case {'ronne'}
+		nc = '/home/ModelData/Antarctica/BedMap2/bedmap2_bin/Bedmap2.nc';
+	case {'totten'}
+		nc = '/totten_1/ModelData/Antarctica/BedMap2/bedmap2_bin/Bedmap2.nc';
+	otherwise
+		error('hostname not supported yet');
+end
+if exist(nc,'file')
+	if strcmp(string,'thickness_uncertainty_5km')
+		xdata = double(ncread(nc,'x_5km'));
+		ydata = double(ncread(nc,'y_5km'));
+	else
+		xdata = double(ncread(nc,'x'));
+		ydata = double(ncread(nc,'y'));
+	end
+
+	offset=2;
+
+	xmin=min(X(:)); xmax=max(X(:));
+	posx=find(xdata<=xmax);
+	id1x=max(1,find(xdata>=xmin,1)-offset);
+	id2x=min(numel(xdata),posx(end)+offset);
+
+	ymin=min(Y(:)); ymax=max(Y(:));
+	posy=find(ydata>=ymin);
+	id1y=max(1,find(ydata<=ymax,1)-offset);
+	id2y=min(numel(ydata),posy(end)+offset);
+
+	data  = double(ncread(nc,string,[id1x id1y],[id2x-id1x+1 id2y-id1y+1],[1 1]))';
+	xdata=xdata(id1x:id2x);
+	ydata=ydata(id1y:id2y);
+
+	if ~strcmp(string,'coverage'),
+		data(find(data==-9999))=NaN;
+	end
+
+	if strcmpi(string,'icemask_grounded_and_shelves') | strcmpi(string,'rockmask'),
+		output = InterpFromGrid(xdata,ydata,data,double(X),double(Y),'nearest');
+	else
+		output = InterpFromGrid(xdata,ydata,data,double(X),double(Y)); % linear interpolation is default
+	end
+	return;
+
+%For Eric's computer (using Binary files)
+elseif exist('/Users/larour/ModelData/BedMap2/bedmap2_bin/','dir')
+	% ================================  OLD ===============================================
+	path='/Users/larour/ModelData/BedMap2/bedmap2_bin/'
+	if strcmp(string,'gl04c_geoid_to_wgs84'),
+		filepath = [path '/gl04c_geiod_to_wgs84.flt'];
+	else
+		filepath = [path '/bedmap2_' string '.flt'];
+	end
+	fid=fopen(filepath,'r','l');
+	data=fread(fid,[6667,6667],'float32');
+	fclose(fid);
+
+	% define grid
+	if strcmp(string,'thickness_uncertainty_5km'),
+		ncols    =1361;
+		nrows    =1361;
+		xll      =-3401000;
+		yll      =-3402000;
+		gridsize =5000;
+	else
+		ncols    =6667;
+		nrows    =6667;
+		xll      =-3333000;
+		yll      =-3333000;
+		gridsize =1000;
+	end
+	x_m=xll+(0:1:ncols-1)'*gridsize;
+	y_m=yll+(0:1:nrows-1)'*gridsize;
+
+	%Change default to NaN
+	if ~strcmp(string,'coverage'),
+		data(find(data==-9999))=NaN;
+	end
+
+	%rotate 90 degrees clockwise
+	data = rot90(data);
+
+	%Interpolate
+	if strcmpi(string,'icemask_grounded_and_shelves') | strcmpi(string,'rockmask'),
+		dataout = InterpFromGrid(x_m,y_m,data,double(X),double(Y),'nearest');
+	else
+		dataout = InterpFromGrid(x_m,y_m,data,double(X),double(Y));
+	end
+else
+	error('not supported');
+end
Index: /issm/trunk/src/m/modeldata/interpChuter2015.m
===================================================================
--- /issm/trunk/src/m/modeldata/interpChuter2015.m	(revision 28013)
+++ /issm/trunk/src/m/modeldata/interpChuter2015.m	(revision 28013)
@@ -0,0 +1,13 @@
+function output = interpChuter2015(X,Y),
+
+ncfile='/home/ModelData/Antarctica/ChuterBamberIceShelfH/ChuterBamber_2015_CS2_ice_equivalent_ice_shelf_thickness_Rignot_gl.nc';
+verbose = 0;
+
+if verbose, disp('   -- Chuter2015: loading coordinates'); end
+xdata = double(ncread(ncfile,'x_dimensions'))';
+ydata = double(ncread(ncfile,'y_dimensions'))';
+
+if verbose, disp(['   -- Chuter2015: loading thickenss']); end
+data  = double(ncread(ncfile,'ice_shelf_thickness'))';
+if verbose, disp(['   -- Chuter2015: interpolating ' string]); end
+output = InterpFromGrid(xdata(1,:),ydata(:,1),data,X,Y);
Index: /issm/trunk/src/m/modeldata/interpDTU19MDT.m
===================================================================
--- /issm/trunk/src/m/modeldata/interpDTU19MDT.m	(revision 28013)
+++ /issm/trunk/src/m/modeldata/interpDTU19MDT.m	(revision 28013)
@@ -0,0 +1,35 @@
+function mdt = interpDTU19MDT(X,Y,varargin);
+
+switch oshostname(),
+	case {'ronne'}
+		rootname='/ronne_2/home/ModelData/Global/DTU19MDT/dtu19mdt.mat';
+	case {'totten'}
+		rootname='/totten_1/ModelData/Global/DTU19MDT/dtu19mdt.mat';
+	otherwise
+		error('machine not supported yet');
+end
+verbose = 1;
+
+if nargin==3,
+	hemisphere = varargin{1};
+else
+	hemisphere = +1;
+end
+
+if hemisphere==+1,
+	if verbose, disp('   -- DTU19MDT: convert to lat/lon using Greenland projection'); end
+	[LAT,  LON  ] = xy2ll(double(X(:)),double(Y(:)),+1,45,70);
+else
+	if verbose, disp('   -- DTU19MDT: convert to lat/lon using Antarctica projection'); end
+	[LAT,  LON  ] = xy2ll(double(X(:)),double(Y(:)),-1,0,71);
+end
+pos=find(LON<0);
+LON(pos) =360+LON(pos);
+LAT=reshape(LAT,size(X));
+LON=reshape(LON,size(X));
+
+if verbose, disp('   -- DTU19MDT: loading DTU19MDT'); end
+A=load(rootname);
+
+if verbose, disp('   -- DTU19MDT: interpolating'); end
+mdt = InterpFromGrid(A.lon_ext,A.lat_ext,A.mdt_ext,LON,LAT);
Index: /issm/trunk/src/m/modeldata/interpDhdt.m
===================================================================
--- /issm/trunk/src/m/modeldata/interpDhdt.m	(revision 28013)
+++ /issm/trunk/src/m/modeldata/interpDhdt.m	(revision 28013)
@@ -0,0 +1,55 @@
+function out = interpDhdt(X,Y),
+
+switch oshostname(),
+	case {'ronne'}
+		dhdtpath='/home/ModelData/Greenland/DHDT/dhdt0306.tif';
+	case {'totten'}
+		dhdtpath='/totten_1/ModelData/Greenland/DHDT/dhdt0306.tif';
+	otherwise
+		error('machine not supported yet');
+end
+
+%convert coordinates:
+[lat lon] = xy2ll(X,Y,+1);
+[X Y] = ll2utm(lat,lon,24);
+
+%Get image info
+Tinfo = imfinfo(dhdtpath);
+N     = Tinfo.Width;
+M     = Tinfo.Height;
+dx    = Tinfo.ModelPixelScaleTag(1);
+dy    = Tinfo.ModelPixelScaleTag(2);
+minx  = Tinfo.ModelTiepointTag(4);
+maxy  = Tinfo.ModelTiepointTag(5);
+
+%Generate vectors
+xdata = minx + dx/2 + ((0:N-1).*dx);
+ydata = maxy - dy/2 - ((M  -1:-1:0).*dy);
+ydata = fliplr(ydata);
+
+%Get pixels we are interested in
+offset=2;
+xmin=min(X(:)); xmax=max(X(:));
+posx=find(xdata<=xmax);
+id1x=max(1,find(xdata>=xmin,1)-offset);
+id2x=min(numel(xdata),posx(end)+offset);
+
+if 0,
+	ymin=min(Y(:)); ymax=max(Y(:));
+	posy=find(ydata<=ymax);
+	id1y=max(1,find(ydata>=ymin,1)-offset);
+	id2y=min(numel(ydata),posy(end)+offset);
+else
+	ymin=min(Y(:)); ymax=max(Y(:));
+	posy=find(ydata>=ymin);
+	id1y=max(1,find(ydata<=ymax,1)-offset);
+	id2y=min(numel(ydata),posy(end)+offset);
+end
+
+data  = double(imread(dhdtpath,'PixelRegion',{[id1y,id2y],[id1x,id2x]}));
+xdata=xdata(id1x:id2x);
+ydata=ydata(id1y:id2y);
+data(find(data>+10^3)) = 0;
+data(find(data<-10^3)) = 0;
+
+out = InterpFromGrid(xdata,ydata,data,X,Y);
Index: /issm/trunk/src/m/modeldata/interpFromGeotiff.m
===================================================================
--- /issm/trunk/src/m/modeldata/interpFromGeotiff.m	(revision 28013)
+++ /issm/trunk/src/m/modeldata/interpFromGeotiff.m	(revision 28013)
@@ -0,0 +1,84 @@
+function dataout = interpFromGeotiff(geotiffname,X,Y,nanValue,fillholes)
+%INTERPFROMGEOTIFF - interpolate field in geotiff onto list of points
+%
+%   Usage:
+%      dataout = interpFromGeotiff(geotiffname,X,Y,nanValue,fillholes)
+%      dataout = interpFromGeotiff(geotiffname,X,Y);
+
+
+if nargin < 4
+	nanValue = 10^30;
+	fillholes = false;
+end
+if nargin < 5
+	fillholes = false;
+end
+
+usemap = 0;
+if license('test','map_toolbox')==0,
+	disp('WARNING: map toolbox not installed, trying house code');
+	usemap = 0;
+elseif license('checkout','map_toolbox')==0
+	disp('WARNING: map toolbox not available (checkout failed), trying house code');
+	usemap = 0;
+end
+
+if usemap,
+	[data,R] = geotiffread(geotiffname);
+	data=double(flipud(data));
+	xdata=R.XLimWorld(1):R.DeltaX:R.XLimWorld(2); xdata=xdata(:);
+	xdata =(xdata(1:end-1)+xdata(2:end))/2;
+	ydata=R.YLimWorld(2):R.DeltaY:R.YLimWorld(1); ydata=flipud(ydata(:));
+	ydata =(ydata(1:end-1)+ydata(2:end))/2;
+else
+
+	%Get image info
+	Tinfo = imfinfo(geotiffname);
+	N     = Tinfo(1).Width;
+	M     = Tinfo(1).Height;
+	dx    = Tinfo(1).ModelPixelScaleTag(1);
+	dy    = Tinfo(1).ModelPixelScaleTag(2);
+	minx  = Tinfo(1).ModelTiepointTag(4);
+	maxy  = Tinfo(1).ModelTiepointTag(5);
+
+	%Generate vectors
+	xdata = minx + dx/2 + ((0:N-1).*dx);
+	ydata = maxy - dy/2 - ((M  -1:-1:0).*dy);
+
+	%Read image
+	if 1
+		assert(dx>0); assert(dy>0);
+		ydata = fliplr(ydata);
+
+		%Get pixels we are interested in
+		offset=2;
+		xmin=min(X(:)); xmax=max(X(:));
+		posx=find(xdata<=xmax);
+		id1x=max(1,find(xdata>=xmin,1)-offset);
+		id2x=min(numel(xdata),posx(end)+offset);
+
+		ymin=min(Y(:)); ymax=max(Y(:));
+		posy=find(ydata>=ymin);
+		id1y=max(1,find(ydata<=ymax,1)-offset);
+		id2y=min(numel(ydata),posy(end)+offset);
+
+		data  = double(imread(geotiffname,'PixelRegion',{[id1y,id2y],[id1x,id2x]}));
+		xdata=xdata(id1x:id2x);
+		ydata=ydata(id1y:id2y);
+	else
+		data=double(flipud(imread(geotiffname)));
+	end
+	if nanValue > 0
+		data(find(abs(data)>=nanValue))=NaN;
+	else 
+		data(find(data<=nanValue))=NaN;
+	end
+	if fillholes
+		disp('Filling holes');
+		data = inpaint_nans(data);
+		disp('done');
+	end
+end
+
+dataout = InterpFromGrid(xdata,ydata,data,X,Y);
+dataout(dataout==-9999)=NaN;
Index: /issm/trunk/src/m/modeldata/interpGeoid.m
===================================================================
--- /issm/trunk/src/m/modeldata/interpGeoid.m	(revision 28013)
+++ /issm/trunk/src/m/modeldata/interpGeoid.m	(revision 28013)
@@ -0,0 +1,36 @@
+function [geoid] = interpGeoid(X,Y,varargin),
+
+switch oshostname(),
+	case {'murdo','thwaites','astrid'}
+		rootname=[jplsvn() '/proj-morlighem/DatasetGreenland/Data/Geoid/eigen-6c4-1970.mat'];
+	case {'ronne'}
+		rootname='/home/ModelData/Global/Geoid/eigen-6c4-1970.mat';
+	case {'totten'}
+		rootname='/totten_1/ModelData/Global/Geoid/eigen-6c4-1970.mat';
+	otherwise
+		error('machine not supported yet');
+end
+verbose = 1;
+
+if nargin==3,
+	hemisphere = varargin{1};
+else
+	hemisphere = +1;
+end
+
+if hemisphere==+1,
+	if verbose, disp('   -- Geoid: convert to lat/lon using Greenland projection'); end
+	[LAT,  LON  ] = xy2ll(double(X(:)),double(Y(:)),+1,45,70);
+else
+	if verbose, disp('   -- Geoid: convert to lat/lon using Antarctica projection'); end
+	[LAT,  LON  ] = xy2ll(double(X(:)),double(Y(:)),-1,0,71);
+end
+pos=find(LON<0);
+LON(pos) =360+LON(pos);
+
+if verbose, disp('   -- Geoid: loading eigen-6c4 '); end
+A=load(rootname);
+
+if verbose, disp('   -- Geoid: interpolating'); end
+geoid = InterpFromGrid(A.lon,A.lat,A.geoid,LON,LAT);
+geoid = reshape(geoid,size(X));
Index: /issm/trunk/src/m/modeldata/interpGimpdem.m
===================================================================
--- /issm/trunk/src/m/modeldata/interpGimpdem.m	(revision 28013)
+++ /issm/trunk/src/m/modeldata/interpGimpdem.m	(revision 28013)
@@ -0,0 +1,71 @@
+function sout = interpGimpdem(X,Y),
+
+switch oshostname(),
+	case {'murdo','thwaites','astrid'}
+		howatpath='/u/astrid-r1b/morlighe/issmjpl/proj-morlighem/DatasetGreenland/Data/gimpdem/gimpdem_90m.tif';
+	case {'ronne'}
+		howatpath='/home/ModelData/Greenland/gimpdem/gimpdem_90m.tif';
+	case {'totten'}
+		%howatpath='/totten_1/ModelData/Greenland/gimpdem/gimpdem_90m_v01.1.tif';
+		howatpath='/totten_1/ModelData/Greenland/gimpdem/gimpdem_90m.tif';
+	otherwise
+		error('machine not supported yet');
+end
+
+usemap = 0;
+if license('test','map_toolbox')==0,
+	disp('WARNING: map toolbox not installed, trying house code');
+	usemap = 0;
+elseif license('checkout','map_toolbox')==0
+	disp('WARNING: map toolbox not available (checkout failed), trying house code');
+	usemap = 0;
+end
+
+if usemap,
+	[data,R] = geotiffread(howatpath);
+	data=double(flipud(data));
+	xdata=R.XLimWorld(1):R.DeltaX:R.XLimWorld(2); xdata=xdata(:);
+	xdata =(xdata(1:end-1)+xdata(2:end))/2;
+	ydata=R.YLimWorld(2):R.DeltaY:R.YLimWorld(1); ydata=flipud(ydata(:));
+	ydata =(ydata(1:end-1)+ydata(2:end))/2;
+else
+
+	%Get image info
+	Tinfo = imfinfo(howatpath);
+	N     = Tinfo.Width;
+	M     = Tinfo.Height;
+	dx    = Tinfo.ModelPixelScaleTag(1);
+	dy    = Tinfo.ModelPixelScaleTag(2);
+	minx  = Tinfo.ModelTiepointTag(4);
+	maxy  = Tinfo.ModelTiepointTag(5);
+
+	%Generate vectors
+	xdata = minx + dx/2 + ((0:N-1).*dx);
+	ydata = maxy - dy/2 - ((M  -1:-1:0).*dy);
+	ydata = fliplr(ydata);
+
+	%Get pixels we are interested in
+	offset=2;
+	xmin=min(X(:)); xmax=max(X(:));
+	posx=find(xdata<=xmax);
+	id1x=max(1,find(xdata>=xmin,1)-offset);
+	id2x=min(numel(xdata),posx(end)+offset);
+
+	if 0,
+		ymin=min(Y(:)); ymax=max(Y(:));
+		posy=find(ydata<=ymax);
+		id1y=max(1,find(ydata>=ymin,1)-offset);
+		id2y=min(numel(ydata),posy(end)+offset);
+	else
+		ymin=min(Y(:)); ymax=max(Y(:));
+		posy=find(ydata>=ymin);
+		id1y=max(1,find(ydata<=ymax,1)-offset);
+		id2y=min(numel(ydata),posy(end)+offset);
+	end
+
+	data  = double(imread(howatpath,'PixelRegion',{[id1y,id2y],[id1x,id2x]}));
+	xdata=xdata(id1x:id2x);
+	ydata=ydata(id1y:id2y);
+end
+
+sout = InterpFromGrid(xdata,ydata,data,X,Y);
Index: /issm/trunk/src/m/modeldata/interpGimpicemask.m
===================================================================
--- /issm/trunk/src/m/modeldata/interpGimpicemask.m	(revision 28013)
+++ /issm/trunk/src/m/modeldata/interpGimpicemask.m	(revision 28013)
@@ -0,0 +1,71 @@
+function sout = interpGimpicemask(X,Y),
+
+switch oshostname(),
+	case {'ronne'}
+		howatpath='/home/ModelData/Greenland/gimpmask/GimpIceMask_90m.tif';
+	case {'totten'}
+		howatpath='/totten_1/ModelData/Greenland/gimpmask/GimpIceMask_90m.tif';
+	otherwise
+		error('machine not supported yet');
+end
+
+usemap = 0;
+if license('test','map_toolbox')==0,
+	disp('WARNING: map toolbox not installed, trying house code');
+	usemap = 0;
+elseif license('checkout','map_toolbox')==0
+	disp('WARNING: map toolbox not available (checkout failed), trying house code');
+	usemap = 0;
+end
+
+if usemap,
+	[data,R] = geotiffread(howatpath);
+	data=double(flipud(data));
+	xdata=R.XLimWorld(1):R.DeltaX:R.XLimWorld(2); xdata=xdata(:);
+	xdata =(xdata(1:end-1)+xdata(2:end))/2;
+	ydata=R.YLimWorld(2):R.DeltaY:R.YLimWorld(1); ydata=flipud(ydata(:));
+	ydata =(ydata(1:end-1)+ydata(2:end))/2;
+else
+
+	%Get image info
+	Tinfo = imfinfo(howatpath);
+	N     = Tinfo.Width;
+	M     = Tinfo.Height;
+	dx    = Tinfo.ModelPixelScaleTag(1);
+	dy    = Tinfo.ModelPixelScaleTag(2);
+	minx  = Tinfo.ModelTiepointTag(4);
+	maxy  = Tinfo.ModelTiepointTag(5);
+
+	%Generate vectors
+	xdata = minx + dx/2 + ((0:N-1).*dx);
+	ydata = maxy - dy/2 - ((M  -1:-1:0).*dy);
+	ydata = fliplr(ydata);
+
+	%Get pixels we are interested in
+	offset=2;
+	xmin=min(X(:)); xmax=max(X(:));
+	posx=find(xdata<=xmax);
+	id1x=max(1,find(xdata>=xmin,1)-offset);
+	id2x=min(numel(xdata),posx(end)+offset);
+
+	if 0,
+		ymin=min(Y(:)); ymax=max(Y(:));
+		posy=find(ydata<=ymax);
+		id1y=max(1,find(ydata>=ymin,1)-offset);
+		id2y=min(numel(ydata),posy(end)+offset);
+	else
+		ymin=min(Y(:)); ymax=max(Y(:));
+		posy=find(ydata>=ymin);
+		id1y=max(1,find(ydata<=ymax,1)-offset);
+		id2y=min(numel(ydata),posy(end)+offset);
+	end
+
+	data  = double(imread(howatpath,'PixelRegion',{[id1y,id2y],[id1x,id2x]}));
+	xdata=xdata(id1x:id2x);
+	ydata=ydata(id1y:id2y);
+end
+
+sout = InterpFromGrid(xdata,ydata,data,X,Y,'nearest');
+
+%Post process output (undefined = not ice)
+sout(find(sout==-9999))=0;
Index: /issm/trunk/src/m/modeldata/interpGimpoceanmask.m
===================================================================
--- /issm/trunk/src/m/modeldata/interpGimpoceanmask.m	(revision 28013)
+++ /issm/trunk/src/m/modeldata/interpGimpoceanmask.m	(revision 28013)
@@ -0,0 +1,71 @@
+function sout = interpGimpoceanmask(X,Y),
+
+switch oshostname(),
+	case {'ronne'}
+		howatpath='/home/ModelData/Greenland/gimpmask/GimpOceanMask_90m.tif';
+	case {'totten'}
+		howatpath='/totten_1/ModelData/Greenland/gimpmask/GimpOceanMask_90m.tif';
+	otherwise
+		error('machine not supported yet');
+end
+
+usemap = 0;
+if license('test','map_toolbox')==0,
+	disp('WARNING: map toolbox not installed, trying house code');
+	usemap = 0;
+elseif license('checkout','map_toolbox')==0
+	disp('WARNING: map toolbox not available (checkout failed), trying house code');
+	usemap = 0;
+end
+
+if usemap,
+	[data,R] = geotiffread(howatpath);
+	data=double(flipud(data));
+	xdata=R.XLimWorld(1):R.DeltaX:R.XLimWorld(2); xdata=xdata(:);
+	xdata =(xdata(1:end-1)+xdata(2:end))/2;
+	ydata=R.YLimWorld(2):R.DeltaY:R.YLimWorld(1); ydata=flipud(ydata(:));
+	ydata =(ydata(1:end-1)+ydata(2:end))/2;
+else
+
+	%Get image info
+	Tinfo = imfinfo(howatpath);
+	N     = Tinfo.Width;
+	M     = Tinfo.Height;
+	dx    = Tinfo.ModelPixelScaleTag(1);
+	dy    = Tinfo.ModelPixelScaleTag(2);
+	minx  = Tinfo.ModelTiepointTag(4);
+	maxy  = Tinfo.ModelTiepointTag(5);
+
+	%Generate vectors
+	xdata = minx + dx/2 + ((0:N-1).*dx);
+	ydata = maxy - dy/2 - ((M  -1:-1:0).*dy);
+	ydata = fliplr(ydata);
+
+	%Get pixels we are interested in
+	offset=2;
+	xmin=min(X(:)); xmax=max(X(:));
+	posx=find(xdata<=xmax);
+	id1x=max(1,find(xdata>=xmin,1)-offset);
+	id2x=min(numel(xdata),posx(end)+offset);
+
+	if 0,
+		ymin=min(Y(:)); ymax=max(Y(:));
+		posy=find(ydata<=ymax);
+		id1y=max(1,find(ydata>=ymin,1)-offset);
+		id2y=min(numel(ydata),posy(end)+offset);
+	else
+		ymin=min(Y(:)); ymax=max(Y(:));
+		posy=find(ydata>=ymin);
+		id1y=max(1,find(ydata<=ymax,1)-offset);
+		id2y=min(numel(ydata),posy(end)+offset);
+	end
+
+	data  = double(imread(howatpath,'PixelRegion',{[id1y,id2y],[id1x,id2x]}));
+	xdata=xdata(id1x:id2x);
+	ydata=ydata(id1y:id2y);
+end
+
+sout = InterpFromGrid(xdata,ydata,data,X,Y,'nearest');
+
+%Post process output (undefined = ocean)
+sout(find(sout==-9999))=1;
Index: /issm/trunk/src/m/modeldata/interpGrIMP.m
===================================================================
--- /issm/trunk/src/m/modeldata/interpGrIMP.m	(revision 28013)
+++ /issm/trunk/src/m/modeldata/interpGrIMP.m	(revision 28013)
@@ -0,0 +1,66 @@
+function sout = interpGimpdem(X,Y),
+
+switch oshostname(),
+	case {'totten'}
+		howatpath='/totten_1/ModelData/Greenland/GrIMP/GrIMP_100m.tif';
+	otherwise
+		error('machine not supported yet');
+end
+
+usemap = 0;
+if license('test','map_toolbox')==0,
+	disp('WARNING: map toolbox not installed, trying house code');
+	usemap = 0;
+elseif license('checkout','map_toolbox')==0
+	disp('WARNING: map toolbox not available (checkout failed), trying house code');
+	usemap = 0;
+end
+
+if usemap,
+	[data,R] = geotiffread(howatpath);
+	data=double(flipud(data));
+	xdata=R.XLimWorld(1):R.DeltaX:R.XLimWorld(2); xdata=xdata(:);
+	xdata =(xdata(1:end-1)+xdata(2:end))/2;
+	ydata=R.YLimWorld(2):R.DeltaY:R.YLimWorld(1); ydata=flipud(ydata(:));
+	ydata =(ydata(1:end-1)+ydata(2:end))/2;
+else
+
+	%Get image info
+	Tinfo = imfinfo(howatpath);
+	N     = Tinfo.Width;
+	M     = Tinfo.Height;
+	dx    = Tinfo.ModelPixelScaleTag(1);
+	dy    = Tinfo.ModelPixelScaleTag(2);
+	minx  = Tinfo.ModelTiepointTag(4);
+	maxy  = Tinfo.ModelTiepointTag(5);
+
+	%Generate vectors
+	xdata = minx + dx/2 + ((0:N-1).*dx);
+	ydata = maxy - dy/2 - ((M  -1:-1:0).*dy);
+	ydata = fliplr(ydata);
+
+	%Get pixels we are interested in
+	offset=2;
+	xmin=min(X(:)); xmax=max(X(:));
+	posx=find(xdata<=xmax);
+	id1x=max(1,find(xdata>=xmin,1)-offset);
+	id2x=min(numel(xdata),posx(end)+offset);
+
+	if 0,
+		ymin=min(Y(:)); ymax=max(Y(:));
+		posy=find(ydata<=ymax);
+		id1y=max(1,find(ydata>=ymin,1)-offset);
+		id2y=min(numel(ydata),posy(end)+offset);
+	else
+		ymin=min(Y(:)); ymax=max(Y(:));
+		posy=find(ydata>=ymin);
+		id1y=max(1,find(ydata<=ymax,1)-offset);
+		id2y=min(numel(ydata),posy(end)+offset);
+	end
+
+	data  = double(imread(howatpath,'PixelRegion',{[id1y,id2y],[id1x,id2x]}));
+	xdata=xdata(id1x:id2x);
+	ydata=ydata(id1y:id2y);
+end
+
+sout = InterpFromGrid(xdata,ydata,data,X,Y);
Index: /issm/trunk/src/m/modeldata/interpGridsCReSIS.m
===================================================================
--- /issm/trunk/src/m/modeldata/interpGridsCReSIS.m	(revision 28013)
+++ /issm/trunk/src/m/modeldata/interpGridsCReSIS.m	(revision 28013)
@@ -0,0 +1,30 @@
+function output = interpGridsCReSIS(X,Y,filename),
+
+%Convert to lat/lon
+disp('   -- Griggs2013: converting coordinates');
+[LAT,  LON  ] = xy2ll(double(X(:)),double(Y(:)),+1,45,70);
+
+disp(['   -- GridsCReSIS: loading data']);
+if ~exist(filename)
+	error([filename ' does not exist']);
+end
+fid   = fopen(filename);
+for i=1:6,
+	thisline = fgetl(fid);
+	dummy    = regexp(thisline,'(\S+)','match');
+	if strcmp(dummy{1},'ncols'),       ncols=str2num(dummy{2}); end
+	if strcmp(dummy{1},'nrows'),       nrows=str2num(dummy{2}); end
+	if strcmp(dummy{1},'xllcorner'),    xllcorner=str2num(dummy{2}); end
+	if strcmp(dummy{1},'yllcorner'),    yllcorner=str2num(dummy{2}); end
+	if strcmp(dummy{1},'cellsize'),     cellsize=str2num(dummy{2}); end
+	if strcmp(dummy{1},'NODATA_value'), nodata=str2num(dummy{2}); end
+end
+data  = fscanf(fid,'%g %g %g %g %g',[ncols nrows])';
+fclose(fid);
+
+xdata=linspace(xllcorner+cellsize/2,xllcorner+cellsize/2+(ncols-1)*cellsize,ncols);
+ydata=linspace(yllcorner+cellsize/2,yllcorner+cellsize/2+(nrows-1)*cellsize,nrows);
+
+disp(['   -- GridsCReSIS: interpolating ']);
+output = InterpFromGrid(xdata,ydata,data,LAT,LON);
+output = reshape(output,size(X,1),size(X,2));
Index: /issm/trunk/src/m/modeldata/interpGriggs2013.m
===================================================================
--- /issm/trunk/src/m/modeldata/interpGriggs2013.m	(revision 28013)
+++ /issm/trunk/src/m/modeldata/interpGriggs2013.m	(revision 28013)
@@ -0,0 +1,29 @@
+function output = interpGriggs2013(X,Y,string),
+
+disp('============================================');
+disp(' ');
+disp('WARNING: interpBamber2013 should now be used');
+disp(' ');
+disp('============================================');
+error('interpBamber2013 should now be used');
+griggs2013nc='/u/astrid-r1b/morlighe/issmjpl/proj-morlighem/DatasetGreenland/Data/Griggs2012/Greenland_bedrock_topography_and_geometry_062012_JGriggs.nc';
+verbose = 0;
+
+if nargout==2,
+	string = 'BedrockElevation';
+end
+
+%Convert to Bamber's projections
+if verbose, disp('   -- Griggs2013: converting coordinates'); end
+[LAT,  LON  ] = xy2ll(double(X(:)),double(Y(:)),+1,45,70);
+[x3971,y3971] = ll2xy(LAT,LON  ,+1,39,71);
+
+if verbose, disp('   -- Griggs2013: loading coordinates'); end
+xdata = double(ncread(griggs2013nc,'projection_x_coordinate'))*1000;
+ydata = double(ncread(griggs2013nc,'projection_y_coordinate'))*1000;
+
+if verbose, disp(['   -- Griggs2013: loading ' string]); end
+data  = double(ncread(griggs2013nc,string))';
+if verbose, disp(['   -- Griggs2013: interpolating ' string]); end
+output = InterpFromGrid(xdata,ydata,data,x3971,y3971);
+output = reshape(output,size(X,1),size(X,2));
Index: /issm/trunk/src/m/modeldata/interpIBCSO.m
===================================================================
--- /issm/trunk/src/m/modeldata/interpIBCSO.m	(revision 28013)
+++ /issm/trunk/src/m/modeldata/interpIBCSO.m	(revision 28013)
@@ -0,0 +1,41 @@
+function [bedout sid] = interpIBCSO(X,Y),
+
+%read data
+switch (oshostname()),
+	case {'ronne'}
+		ncpath='/home/ModelData/Antarctica/IBCSO/ibcso_v1_bed.grd';
+		sidpath='/home/ModelData/Antarctica/IBCSO/ibcso_v1_sid.grd';
+	case {'totten'}
+		ncpath='/totten_1/ModelData/Antarctica/IBCSO/ibcso_v1_bed.grd';
+		sidpath='/totten_1/ModelData/Antarctica/IBCSO/ibcso_v1_sid.grd';
+	otherwise
+		error('hostname not supported yet');
+end
+
+disp('   -- IBCSO: loading bathymetry');
+x_range = double(ncread(ncpath,'x_range'));
+y_range = double(ncread(ncpath,'y_range'));
+spacing = double(ncread(ncpath,'spacing'));
+xdata = (x_range(1)-spacing(1)/2) : spacing(1) : (x_range(2)-spacing(1)/2); 
+ydata = (y_range(1)-spacing(2)/2) : spacing(2) : (y_range(2)-spacing(2)/2); 
+data  = double(ncread(ncpath,'z'));
+data(find(data==-9999 | isinf(data))) = NaN;
+data  = reshape(data,[numel(xdata) numel(ydata)])';
+disp('   -- IBCSO: interpolating bed');
+bedout = InterpFromGrid(xdata,fliplr(ydata),data,double(X),double(Y));
+
+if nargout==2,
+	disp('   -- IBCSO: bathymetry sid');
+	xdata = ncread(sidpath,'x');
+	ydata = ncread(sidpath,'y');
+	data  = ncread(sidpath,'z')';
+	disp('   -- IBCSO: transforming coordinates');
+	[LAT,LON] = xy2ll(double(X(:)),double(Y(:)),-1,0,71);
+	[x065,y065] = ll2xy(LAT,LON,-1,0,65);
+	x065 = reshape(x065,size(X));
+	y065 = reshape(y065,size(Y));
+	disp('   -- IBCSO: interpolating sids');
+	sid = InterpFromGrid(xdata,ydata,data,x065,y065,'nearest');
+	sid(find(sid<200000)) = 0;
+	sid(find(sid>399999)) = 0;
+end
Index: /issm/trunk/src/m/modeldata/interpIBCSO2.m
===================================================================
--- /issm/trunk/src/m/modeldata/interpIBCSO2.m	(revision 28013)
+++ /issm/trunk/src/m/modeldata/interpIBCSO2.m	(revision 28013)
@@ -0,0 +1,27 @@
+function [bedout sid] = interpIBCSO2(X,Y),
+
+%read data
+switch (oshostname()),
+	case {'totten'}
+		ncpath='/totten_1/ModelData/Antarctica/IBCSO2/IBCSO_v2_bed.nc';
+		sidpath='/totten_1/ModelData/Antarctica/IBCSO2/IBCSO_v2_TID.nc';
+	otherwise
+		error('hostname not supported yet');
+end
+
+disp('   -- IBCSOv2: Changing Coordinate system from 3031 to 9354');
+[X Y]=CoordTransform(double(X),double(Y),'EPSG:3031','EPSG:9354');
+
+disp('   -- IBCSOv2: loading bathymetry');
+xdata = double(ncread(ncpath,'x'));
+ydata = double(ncread(ncpath,'y'));
+data  = double(ncread(ncpath,'z'))';
+disp('   -- IBCSOv2: interpolating bed');
+bedout = InterpFromGrid(xdata,ydata,data,double(X),double(Y));
+
+if nargout==2,
+	disp('   -- IBCSOv2: bathymetry sid');
+	data  = ncread(sidpath,'tid')';
+	disp('   -- IBCSOv2: interpolating sids');
+	sid = InterpFromGrid(xdata,ydata,data,double(X),double(Y),'nearest');
+end
Index: /issm/trunk/src/m/modeldata/interpJakobsson2012.m
===================================================================
--- /issm/trunk/src/m/modeldata/interpJakobsson2012.m	(revision 28013)
+++ /issm/trunk/src/m/modeldata/interpJakobsson2012.m	(revision 28013)
@@ -0,0 +1,35 @@
+function [bedout sourceout] = interpJakobsson2012(X,Y,string),
+
+switch oshostname(),
+	case {'murdo','thwaites','astrid'}
+		ncpath ='/u/astrid-r1b/morlighe/issmjpl/proj-morlighem/DatasetGreenland/Data/IBCAO/IBCAO_V3_500m_RR.grd';
+	case {'ronne'}
+		ncpath ='/home/ModelData/Greenland/IBCAO/IBCAO_V3_500m_RR.grd';
+	otherwise
+		error('machine not supported yet');
+end
+
+%Convert to IBCAO projections
+disp('   -- Jakobsson2012: converting coordinates');
+[LAT,  LON  ] = xy2ll(double(X(:)),double(Y(:)),+1,45,70);
+[x0075,y0075] = ll2xy(LAT,LON,+1,0,75);
+
+disp('   -- Jakobsson2012: loading bathymetry');
+xdata = double(ncread(ncpath,'x'));
+ydata = double(ncread(ncpath,'y'));
+data  = double(ncread(ncpath,'z'))';
+
+disp('   -- Jakobsson2012: interpolating bed');
+bedout = InterpFromGrid(xdata,ydata,data,x0075,y0075);
+bedout = reshape(bedout,size(X,1),size(X,2));
+
+if nargout==2,
+	ncpath ='/home/ModelData/Greenland/IBCAO/IBCAO_V3_SID_500m.grd';
+	disp('   -- Jakobsson2012: loading source');
+	xdata = double(ncread(ncpath,'x'));
+	ydata = double(ncread(ncpath,'y'));
+	data  = double(ncread(ncpath,'z'))';
+	disp('   -- Jakobsson2012: interpolating source');
+	sourceout = InterpFromGrid(xdata,ydata,data,x0075,y0075,'nearest');
+	sourceout = reshape(sourceout,size(X,1),size(X,2));
+end
Index: /issm/trunk/src/m/modeldata/interpJakobsson2020.m
===================================================================
--- /issm/trunk/src/m/modeldata/interpJakobsson2020.m	(revision 28013)
+++ /issm/trunk/src/m/modeldata/interpJakobsson2020.m	(revision 28013)
@@ -0,0 +1,49 @@
+function [bedout sourceout] = interpJakobsson2020(X,Y,string),
+
+switch oshostname(),
+	case {'ronne'}
+		ncpath ='/home/ModelData/Greenland/IBCAO/IBCAO_v4_200m.nc';
+	case {'totten'}
+		ncpath ='/totten_1/ModelData/Greenland/IBCAO/IBCAO_v4_200m.nc';
+	otherwise
+		error('machine not supported yet');
+end
+
+%Convert to IBCAO projections
+disp('   -- Jakobsson2020: converting coordinates');
+[LAT,  LON  ] = xy2ll(double(X(:)),double(Y(:)),+1,45,70);
+[x0075,y0075] = ll2xy(LAT,LON,+1,0,75);
+
+disp('   -- Jakobsson2020: loading coordinates');
+xdata = double(ncread(ncpath,'x'));
+ydata = double(ncread(ncpath,'y'));
+
+offset=2;
+
+xmin=min(x0075(:)); xmax=max(x0075(:));
+posx=find(xdata<=xmax);
+id1x=max(1,find(xdata>=xmin,1)-offset);
+id2x=min(numel(xdata),posx(end)+offset);
+
+ymin=min(y0075(:)); ymax=max(y0075(:));
+posy=find(ydata>=ymin);
+id1y=max(1,find(ydata<=ymax,1)-offset);
+id2y=min(numel(ydata),posy(end)+offset);
+
+disp(['   -- Jakobsson2020: loading bathymetry']);
+data = double(ncread(ncpath,'z',[id1x id1y],[id2x-id1x+1 id2y-id1y+1],[1 1]))';
+xdata=xdata(id1x:id2x);
+ydata=ydata(id1y:id2y);
+
+disp('   -- Jakobsson2020: interpolating bed');
+bedout = InterpFromGrid(xdata,ydata,data,x0075,y0075);
+bedout = reshape(bedout,size(X,1),size(X,2));
+
+if nargout==2,
+	ncpath ='/totten_1/ModelData/Greenland/IBCAO/IBCAO_v4_200m_TID.nc';
+	disp('   -- Jakobsson2020: loading source');
+	data = double(ncread(ncpath,'z',[id1x id1y],[id2x-id1x+1 id2y-id1y+1],[1 1]))';
+	disp('   -- Jakobsson2020: interpolating source');
+	sourceout = InterpFromGrid(xdata,ydata,data,x0075,y0075,'nearest');
+	sourceout = reshape(sourceout,size(X,1),size(X,2));
+end
Index: /issm/trunk/src/m/modeldata/interpJoughin.m
===================================================================
--- /issm/trunk/src/m/modeldata/interpJoughin.m	(revision 28013)
+++ /issm/trunk/src/m/modeldata/interpJoughin.m	(revision 28013)
@@ -0,0 +1,97 @@
+function [vxout vyout] = interpJoughin(X,Y,Date),
+	%Available dates:
+	% 2000 2005 2006 2007 2008
+
+switch oshostname(),
+	case {'murdo','thwaites','astrid'}
+		if nargin==3,
+			rootname = ['/u/astrid-r1b/morlighe/issmjpl/proj-morlighem/DatasetGreenland/Data/Vel/Joughin/' num2str(Date) '/'];
+		else
+			error('not supported');
+		end
+	case {'ronne'}
+		error('not supported');
+	otherwise
+		error('machine not supported yet');
+end
+verbose = 1;
+
+if ~exist(rootname,'dir'),
+	error(['file ' rootname ' not found']);
+end
+
+rootname = [rootname 'greenland_vel_mosaic500_' num2str(Date) '_' num2str(Date+1)];
+
+if verbose, disp('   -- Joughin: loading vx'); end
+[data,R] = geotiffread([rootname '_vx.tif']);
+pos=find(data<-10^9); data(pos)=NaN;
+data=double(flipud(data));
+xdata=R.XLimWorld(1):R.DeltaX:R.XLimWorld(2); xdata=xdata(:);
+xdata =(xdata(1:end-1)+xdata(2:end))/2;
+ydata=R.YLimWorld(2):R.DeltaY:R.YLimWorld(1); ydata=flipud(ydata(:));
+ydata =(ydata(1:end-1)+ydata(2:end))/2;
+if verbose, disp('   -- Joughin: interpolating vx'); end
+vxout = InterpFromGrid(xdata,ydata,data,X,Y);
+vxout = reshape(vxout,size(X,1),size(X,2));
+
+if verbose, disp('   -- Joughin: loading vy'); end
+[data,R] = geotiffread([rootname '_vy.tif']);
+pos=find(data<-10^9); data(pos)=NaN;
+data=double(flipud(data));
+xdata=R.XLimWorld(1):R.DeltaX:R.XLimWorld(2); xdata=xdata(:);
+xdata =(xdata(1:end-1)+xdata(2:end))/2;
+ydata=R.YLimWorld(2):R.DeltaY:R.YLimWorld(1); ydata=flipud(ydata(:));
+ydata =(ydata(1:end-1)+ydata(2:end))/2;
+if verbose, disp('   -- Joughin: interpolating vy'); end
+vyout = InterpFromGrid(xdata,ydata,data,X,Y);
+vyout = reshape(vyout,size(X,1),size(X,2));
+return
+
+% Get geodat info
+if verbose, disp('   -- Joughin: loading geodat info'); end
+xd=readgeodat(strcat(rootname,'.vx.geodat'));
+xmin=xd(3,1)*1000.+xd(2,1)/2;
+xmax=xd(3,1)*1000.+(xd(2,1)-1)*xd(1,1)+xd(2,1)/2;
+ymin=xd(3,2)*1000.+xd(2,2)/2;
+ymax=xd(3,2)*1000.+(xd(2,2)-1)*xd(1,2)+xd(2,2)/2;
+%xmin=xd(3,1)*1000.;
+%xmax=xd(3,1)*1000.+(xd(2,1)-1)*xd(1,1);
+%ymin=xd(3,2)*1000.;
+%ymax=xd(3,2)*1000.+(xd(2,2)-1)*xd(1,2);
+xdata=linspace(xmin,xmax,xd(1,1));
+ydata=linspace(ymin,ymax,xd(1,2));
+
+% Vx component
+if verbose, disp('   -- Joughin: loading vx'); end
+fid = fopen(strcat(rootname,'.vx'),'r','ieee-be');
+[data,count]=fread(fid,[xd(1,1) xd(1,2)],'float32');
+fclose(fid);
+
+if verbose, disp('   -- Joughin: interpolating vx'); end
+vxout = InterpFromGrid(xdata,ydata,data',X,Y);
+vxout = reshape(vxout,size(X,1),size(X,2));
+
+% Vy component
+fid = fopen(strcat(rootname,'.vy'),'r','ieee-be');
+[data,count]=fread(fid,[xd(1,1) xd(1,2)],'float32');
+fclose(fid);
+vyout = InterpFromGrid(xdata,ydata,data',X,Y);
+vyout = reshape(vyout,size(X,1),size(X,2));
+
+end
+
+function xgeo=readgeodat(filein)
+% Read a geodat file
+fid = fopen(filein,'r');
+xgeo=zeros(3,2);
+i=1;
+while ~feof(fid),
+	line=fgets(fid);
+	[A,count]=sscanf(line,'%f %f',[1 2]);
+	if(count == 2) 
+		xgeo(i,:)=A;
+		i=i+1;
+	end
+end
+fclose(fid);
+end
Index: /issm/trunk/src/m/modeldata/interpJoughinCompositeGreenland.m
===================================================================
--- /issm/trunk/src/m/modeldata/interpJoughinCompositeGreenland.m	(revision 28013)
+++ /issm/trunk/src/m/modeldata/interpJoughinCompositeGreenland.m	(revision 28013)
@@ -0,0 +1,32 @@
+function [vxout vyout] = interpJoughinCompositeGreenland(X,Y),
+
+%data=load(['/u/astrid-r1b/morlighe/issmjpl/proj-morlighem/DatasetGreenland/Data/VelJoughin/IanGreenVel.mat']);
+filename = '/totten_1/ModelData/Greenland/VelJoughin/IanGreenVel.mat';
+
+%Figure out what subset of the matrix should be read
+load(filename,'x_m','y_m');
+velfile = matfile(filename);
+
+offset=2;
+
+xmin=min(X(:)); xmax=max(X(:));
+posx=find(x_m<=xmax);
+id1x=max(1,find(x_m>=xmin,1)-offset);
+id2x=min(numel(x_m),posx(end)+offset);
+
+ymin=min(Y(:)); ymax=max(Y(:));
+posy=find(y_m>=ymin);
+id1y=max(1,find(y_m<=ymax,1)-offset);
+id2y=min(numel(y_m),posy(end)+offset);
+
+vx = velfile.vx(id1y:id2y,id1x:id2x);
+vy = velfile.vy(id1y:id2y,id1x:id2x);
+x = x_m(id1x:id2x);
+y = y_m(id1y:id2y);
+
+vxout = InterpFromGrid(x,y,double(vx),X,Y);
+vyout = InterpFromGrid(x,y,double(vy),X,Y);
+
+if nargout==1,
+	vxout = sqrt(vxout.^2+vyout.^2);
+end
Index: /issm/trunk/src/m/modeldata/interpJoughinMosaic.m
===================================================================
--- /issm/trunk/src/m/modeldata/interpJoughinMosaic.m	(revision 28013)
+++ /issm/trunk/src/m/modeldata/interpJoughinMosaic.m	(revision 28013)
@@ -0,0 +1,36 @@
+function [vxout vyout] = interpJoughinMosaic(X,Y),
+
+switch oshostname(),
+	case {'ronne'}
+		filename = '/home/ModelData/Greenland/VelJoughin/IanGreenVel.mat';
+	case {'totten'}
+		filename = '/totten_1/ModelData/Greenland/VelJoughin/IanGreenVel.mat';
+	otherwise
+		error('machine not supported yet');
+end
+verbose = 1;
+
+%Figure out what subset of the matrix should be read
+load(filename,'x_m','y_m');
+velfile = matfile(filename);
+
+offset=2;
+
+xmin=min(X(:)); xmax=max(X(:));
+posx=find(x_m<=xmax);
+id1x=max(1,find(x_m>=xmin,1)-offset);
+id2x=min(numel(x_m),posx(end)+offset);
+
+ymin=min(Y(:)); ymax=max(Y(:));
+posy=find(y_m>=ymin);
+id1y=max(1,find(y_m<=ymax,1)-offset);
+id2y=min(numel(y_m),posy(end)+offset);
+
+vx = velfile.vx(id1y:id2y,id1x:id2x);
+vy = velfile.vy(id1y:id2y,id1x:id2x);
+x_m = x_m(id1x:id2x);
+y_m = y_m(id1y:id2y);
+
+%load(filename);
+vxout = InterpFromGrid(x_m,y_m,vx,X,Y);
+vyout = InterpFromGrid(x_m,y_m,vy,X,Y);
Index: /issm/trunk/src/m/modeldata/interpMartos2017.m
===================================================================
--- /issm/trunk/src/m/modeldata/interpMartos2017.m	(revision 28013)
+++ /issm/trunk/src/m/modeldata/interpMartos2017.m	(revision 28013)
@@ -0,0 +1,18 @@
+function out = interpMartos2017(X,Y)
+%INTERPMARTOS2017 - interpolate geothermal heat flux
+%
+%   Usage:
+%      out = interpMartos2017(X,Y)
+
+switch oshostname(),
+	case {'ronne'}
+		gtfpath='/home/ModelData/Antarctica/GeothermalMartos/Antarctic_GHF.xyz';
+	otherwise
+		error('machine not supported yet');
+end
+
+%Load data
+data = load(gtfpath);
+
+%Interpolate using nearest neighbor (dataset stops at ocean boundary!)
+out = Kriging(data(:,1),data(:,2),data(:,3),X,Y,'output','nearestneighbor')/1e3; %from mW/m2 to W/m2
Index: /issm/trunk/src/m/modeldata/interpMouginotAnt2016.m
===================================================================
--- /issm/trunk/src/m/modeldata/interpMouginotAnt2016.m	(revision 28013)
+++ /issm/trunk/src/m/modeldata/interpMouginotAnt2016.m	(revision 28013)
@@ -0,0 +1,46 @@
+function [vxout vyout]= interpMouginotAnt2016(X,Y),
+
+%read data
+switch (oshostname()),
+	case {'ronne'}
+		filename = '/home/ModelData/Antarctica/MouginotVel/vel_ant_5Apr2016.mat';
+	case {'thwaites','murdo','astrid'}
+		filename = '/u/astrid-r1b/ModelData/RignotAntarcticaVelMosaic450m/vel_ant_5Apr2016.mat';
+	otherwise
+		error('hostname not supported yet');
+end
+
+%Figure out what subset of the matrix should be read
+load(filename,'x','y');
+velfile = matfile(filename);
+
+offset=2;
+
+xmin=min(X(:)); xmax=max(X(:));
+posx=find(x<=xmax);
+id1x=max(1,find(x>=xmin,1)-offset);
+id2x=min(numel(x),posx(end)+offset);
+
+if y(2)-y(1)<0
+	ymin=min(Y(:)); ymax=max(Y(:));
+	posy=find(y>=ymin);
+	id1y=max(1,find(y<=ymax,1)-offset);
+	id2y=min(numel(y),posy(end)+offset);
+else
+	ymin=min(X(:)); ymax=max(X(:));
+	posy=find(y<=ymax);
+	id1y=max(1,find(y>=ymin,1)-offset);
+	id2y=min(numel(y),posy(end)+offset);
+end
+
+vx = velfile.vx(id1y:id2y,id1x:id2x);
+vy = velfile.vy(id1y:id2y,id1x:id2x);
+x = x(id1x:id2x);
+y = y(id1y:id2y);
+
+vxout = InterpFromGrid(x,y,double(vx),X,Y);
+vyout = InterpFromGrid(x,y,double(vy),X,Y);
+
+if nargout==1,
+	vxout = sqrt(vxout.^2+vyout.^2);
+end
Index: /issm/trunk/src/m/modeldata/interpMouginotAnt2017.m
===================================================================
--- /issm/trunk/src/m/modeldata/interpMouginotAnt2017.m	(revision 28013)
+++ /issm/trunk/src/m/modeldata/interpMouginotAnt2017.m	(revision 28013)
@@ -0,0 +1,41 @@
+function [vxout vyout]= interpMouginotAnt2017(X,Y),
+
+%read data
+switch (oshostname()),
+	case {'ronne'}
+		nc = '/home/ModelData/Antarctica/MouginotVel/vel_nsidc.CF16_2.nc';
+	case {'totten'}
+		nc = '/totten_1/ModelData/Antarctica/MouginotVel/vel_nsidc.CF16_2.nc';
+	otherwise
+		error('hostname not supported yet');
+end
+
+xdata = double(ncread(nc,'x'));
+ydata = double(ncread(nc,'y'));
+
+offset=2;
+
+xmin=min(X(:)); xmax=max(X(:));
+posx=find(xdata<=xmax);
+id1x=max(1,find(xdata>=xmin,1)-offset);
+id2x=min(numel(xdata),posx(end)+offset);
+
+ymin=min(Y(:)); ymax=max(Y(:));
+posy=find(ydata>=ymin);
+id1y=max(1,find(ydata<=ymax,1)-offset);
+id2y=min(numel(ydata),posy(end)+offset);
+
+disp(['   -- Mouginot 2017: loading velocities']);
+vxdata = double(ncread(nc,'VX',[id1x id1y],[id2x-id1x+1 id2y-id1y+1],[1 1]))';
+vydata = double(ncread(nc,'VY',[id1x id1y],[id2x-id1x+1 id2y-id1y+1],[1 1]))';
+xdata=xdata(id1x:id2x);
+ydata=ydata(id1y:id2y);
+
+disp(['   -- Mouginot 2017: interpolating ']);
+vxout = InterpFromGrid(xdata,ydata,vxdata,double(X),double(Y));
+vyout = InterpFromGrid(xdata,ydata,vydata,double(X),double(Y));
+
+%return vel if only one output is requested
+if nargout==1,
+	vxout = sqrt(vxout.^2+vyout.^2);
+end
Index: /issm/trunk/src/m/modeldata/interpMouginotAnt2019.m
===================================================================
--- /issm/trunk/src/m/modeldata/interpMouginotAnt2019.m	(revision 28013)
+++ /issm/trunk/src/m/modeldata/interpMouginotAnt2019.m	(revision 28013)
@@ -0,0 +1,41 @@
+function [vxout vyout]= interpMouginotAnt2019(X,Y),
+
+%read data
+switch (oshostname()),
+	case {'ronne'}
+		nc = '/home/ModelData/Antarctica/MouginotVel/v_mix.v13Mar2019.nc';
+	case {'totten'}
+		nc = '/totten_1/ModelData/Antarctica/MouginotVel/v_mix.v8Jul2019.nc';
+	otherwise
+		error('hostname not supported yet');
+end
+
+xdata = double(ncread(nc,'x'));
+ydata = double(ncread(nc,'y'));
+
+offset=2;
+
+xmin=min(X(:)); xmax=max(X(:));
+posx=find(xdata<=xmax);
+id1x=max(1,find(xdata>=xmin,1)-offset);
+id2x=min(numel(xdata),posx(end)+offset);
+
+ymin=min(Y(:)); ymax=max(Y(:));
+posy=find(ydata>=ymin);
+id1y=max(1,find(ydata<=ymax,1)-offset);
+id2y=min(numel(ydata),posy(end)+offset);
+
+disp(['   -- Mouginot 2019: loading velocities']);
+vxdata = double(ncread(nc,'VX',[id1x id1y],[id2x-id1x+1 id2y-id1y+1],[1 1]))';
+vydata = double(ncread(nc,'VY',[id1x id1y],[id2x-id1x+1 id2y-id1y+1],[1 1]))';
+xdata=xdata(id1x:id2x);
+ydata=ydata(id1y:id2y);
+
+disp(['   -- Mouginot 2019: interpolating ']);
+vxout = InterpFromGrid(xdata,ydata,vxdata,double(X),double(Y));
+vyout = InterpFromGrid(xdata,ydata,vydata,double(X),double(Y));
+
+%return vel if only one output is requested
+if nargout==1,
+	vxout = sqrt(vxout.^2+vyout.^2);
+end
Index: /issm/trunk/src/m/modeldata/interpMouginotAntTimeSeries1973to2018.m
===================================================================
--- /issm/trunk/src/m/modeldata/interpMouginotAntTimeSeries1973to2018.m	(revision 28013)
+++ /issm/trunk/src/m/modeldata/interpMouginotAntTimeSeries1973to2018.m	(revision 28013)
@@ -0,0 +1,197 @@
+function [vxout vyout errxout erryout stdxout stdyout]= interpMouginotAntTimeSeries1973to2018(X,Y,T)
+%INTERPMOUGINOTANTTIMESERIES1973TO2018 - interpolate observed (time series) velocities 
+%
+%   Inputs
+%      X,Y: spatial (scatter) coordinates
+%      T: time (indexed by YEAR1 (YEAR2 is optional); see below) 
+%
+%   Outputs
+%      vxout,vyout: interpolated velocities at X,Y, for each time requested in T
+%
+%   Available time series:
+%
+%          YEAR1  YEAR2
+%    1     1973   1975
+%    2     1973   1984
+%    3     1973   1988
+%    4     1984   1988
+%    5     1986   1988
+%    6     1988   1990
+%    7     1991   1992
+%    8     1995   1996
+%    9     2000   2001
+%   10     2002   2003
+%   11     2003   2004
+%   12     2005   2006
+%   13     2006   2007
+%   14     2007   2008
+%   15     2008   2009
+%   16     2009   2010
+%   17     2010   2011
+%   18     2011   2012
+%   19     2012   2013
+%   20     2013   2014
+%   21     2014   2015
+%   22     2015   2016
+%   23     2016   2017
+%   24     2017   2018
+%
+%   Usage:
+%      T refers to YEAR1, but the user can also use YEAR2 (e.g., the "1973" case in YEAR1).
+%  
+%      Then, these codes generate the same results:
+%
+%      [vxout vyout]= interpMouginotAntTimeSeries1973to2018(md.mesh.x,md.mesh.y,[1986; 1991; 1995; 2000]);
+%      [vxout vyout]= interpMouginotAntTimeSeries1973to2018(md.mesh.x,md.mesh.y,[1986 1988; 1991 1992; 1995 1996; 2000 2001]);
+%
+%      Another examples:
+%      [vxout vyout]= interpMouginotAntTimeSeries1973to2018(md.mesh.x,md.mesh.y,[1973 1975; 1973 1988; 1991 1992; 2011 2012]);
+%      [vel]= interpMouginotAntTimeSeries1973to2018(md.mesh.x,md.mesh.y,[1986; 1991; 1995; 2000]);
+%      [vxout vyout errxout erryout stdxout stdyout]= interpMouginotAntTimeSeries1973to2018(md.mesh.x,md.mesh.y,[1986; 1991; 1995; 2000]);
+
+%read data
+switch (oshostname()),
+	case {'ronne'}
+		nc = '/home/ModelData/Antarctica/MouginotVel/ASE_TimeSeries_1973-2018.nc';
+	case {'totten'}
+		nc = '/totten_1/ModelData/Antarctica/MouginotVel/ASE_TimeSeries_1973-2018.nc';
+	otherwise
+		error('hostname not supported yet');
+end
+
+xdata = double(ncread(nc,'x'));
+ydata = double(ncread(nc,'y'));
+year1 = ncread(nc,'YEAR1');
+year2 = ncread(nc,'YEAR2');
+
+% get the positions related to T
+if nargin==3
+	% initial checks %{{{
+	if ~isvector(T)
+		error('Size of input T not supported!');
+	end
+	T=T(:);
+	if size(T,2)==1 & any(T(:,1)==1973),
+		disp(' ');
+		disp('   Found year=1973 in T (array). Please, specify the data series using a second index.');
+		disp('   Data available for 1973:');
+		disp('      1973   1975');
+		disp('      1973   1984');
+		disp('      1973   1988');
+		disp(' ');
+		disp('   Usage:');
+		disp('      [vxout vyout]= interpMouginotAntTimeSeries1973to2018(md.mesh.x,md.mesh.y,[1973 1975; 1973 1988; 1991 1992; 2011 2012])');
+		disp(' ');
+		error('   Change input T before continuing.');
+	end %}}}
+	pos = [];
+	for i=1:size(T,1),
+		flag = (T(i,1)==year1);
+		if size(T,2)==2, % ok, check both indexes (year1 and year2)
+			flag = (T(i,1)==year1).*(T(i,2)==year2);
+		end
+		pos = [pos; find(flag)];
+	end
+	% check again {{{
+	if length(pos)~=size(T,1) | length(unique(pos))~=length(pos),
+		disp(' ');
+		disp('   Time resquested does not exist in data set or is repeated!');
+		disp('   Data resquested:');
+		for i=1:length(T(:,1)),
+			str = ['      ' int2str(T(i,1)) '   '];
+			if size(T,2)==2, % ok, check both indexes (year1 and year2)
+				str = [str int2str(T(i,2))];
+			end
+			disp(str);
+		end
+		disp(' ');
+		disp('   Data available (24 series):');
+		for i=1:length(year1),
+			str = ['      ' int2str(year1(i)) '   ' int2str(year2(i))];
+			disp(str);
+		end
+		disp(' ');
+		disp('   Usage:');
+		disp('      [vxout vyout]= interpMouginotAntTimeSeries1973to2018(md.mesh.x,md.mesh.y,[1986; 1991; 1995; 2000])');
+		disp('      [vxout vyout]= interpMouginotAntTimeSeries1973to2018(md.mesh.x,md.mesh.y,[1986 1988; 1991 1992; 1995 1996; 2000 2001])');
+		disp('      [vxout vyout]= interpMouginotAntTimeSeries1973to2018(md.mesh.x,md.mesh.y,[1973 1975; 1973 1988; 1991 1992; 2011 2012])');
+		disp(' ');
+		error('   Change input T before continuing.');
+	end%}}}
+elseif nargin<3,
+	pos = 1:24; % all available data		
+else
+	error('nargin not supported yet!');
+end
+if nargout~=1 & nargout~=2 & nargout~=6
+	error('nargout not supported!');
+end
+
+
+% get the spatial positions
+offset=2;
+
+xmin=min(X(:)); xmax=max(X(:));
+posx=find(xdata<=xmax);
+id1x=max(1,find(xdata>=xmin,1)-offset);
+id2x=min(numel(xdata),posx(end)+offset);
+
+ymin=min(Y(:)); ymax=max(Y(:));
+posy=find(ydata>=ymin);
+id1y=max(1,find(ydata<=ymax,1)-offset);
+id2y=min(numel(ydata),posy(end)+offset);
+
+disp(['   -- Mouginot Time Series 1973 to 2018: loading velocities']);
+vxdata = [];
+vydata = [];
+if nargout==6 % it includes ERRX, ERRY, STDX and STDY
+	errxdata = [];
+	errydata = [];
+	stdxdata = [];
+	stdydata = [];
+end
+for i=1:length(pos), 
+	disp(['      step = ' int2str(i) '/' int2str(length(pos)) ', position = ' int2str(pos(i)) ', year = '  int2str(year1(pos(i))) ' - ' int2str(year2(pos(i)))]);
+	vx = double(ncread(nc,'VX',[id1x id1y pos(i)],[id2x-id1x+1 id2y-id1y+1 1],[1 1 1]));
+	vy = double(ncread(nc,'VY',[id1x id1y pos(i)],[id2x-id1x+1 id2y-id1y+1 1],[1 1 1]));
+	vxdata(:,:,i) = permute(vx,[2 1 3]);
+	vydata(:,:,i) = permute(vy,[2 1 3]);
+	if nargout==6 % it includes ERRX, ERRY, STDX and STDY
+		errx = double(ncread(nc,'ERRX',[id1x id1y pos(i)],[id2x-id1x+1 id2y-id1y+1 1],[1 1 1]));
+		erry = double(ncread(nc,'ERRY',[id1x id1y pos(i)],[id2x-id1x+1 id2y-id1y+1 1],[1 1 1]));	
+		stdx = double(ncread(nc,'STDX',[id1x id1y pos(i)],[id2x-id1x+1 id2y-id1y+1 1],[1 1 1]));
+		stdy = double(ncread(nc,'STDY',[id1x id1y pos(i)],[id2x-id1x+1 id2y-id1y+1 1],[1 1 1]));	
+		errxdata(:,:,i) = permute(errx,[2 1 3]);
+		errydata(:,:,i) = permute(erry,[2 1 3]);
+		stdxdata(:,:,i) = permute(stdx,[2 1 3]);
+		stdydata(:,:,i) = permute(stdy,[2 1 3]);
+	end
+end
+xdata=xdata(id1x:id2x);
+ydata=ydata(id1y:id2y);
+
+disp(['   -- Mouginot Time Series 1973 to 2018: interpolating']);
+vxout = [];
+vyout = [];
+if nargout==6 % it includes ERRX, ERRY, STDX and STDY
+	errxout = [];
+	erryout = [];
+	stdxout = [];
+	stdyout = [];
+end
+for i=1:length(pos),
+	disp(['      step = ' int2str(i) '/' int2str(length(pos)) ', position = ' int2str(pos(i)) ', year = '  int2str(year1(pos(i))) ' - ' int2str(year2(pos(i)))]);
+	vxout = [vxout InterpFromGrid(xdata,ydata,vxdata(:,:,i),double(X),double(Y))];
+	vyout = [vyout InterpFromGrid(xdata,ydata,vydata(:,:,i),double(X),double(Y))];
+	if nargout==6 % it includes ERRX, ERRY, STDX and STDY
+		errxout = [errxout InterpFromGrid(xdata,ydata,errxdata(:,:,i),double(X),double(Y))];
+		erryout = [erryout InterpFromGrid(xdata,ydata,errydata(:,:,i),double(X),double(Y))];
+		stdxout = [stdxout InterpFromGrid(xdata,ydata,stdxdata(:,:,i),double(X),double(Y))];
+		stdyout = [stdyout InterpFromGrid(xdata,ydata,stdydata(:,:,i),double(X),double(Y))];
+	end
+end
+
+%return vel if only one output is requested
+if nargout==1,
+	vxout = sqrt(vxout.^2+vyout.^2);
+end
Index: /issm/trunk/src/m/modeldata/interpPaolo2015.m
===================================================================
--- /issm/trunk/src/m/modeldata/interpPaolo2015.m	(revision 28013)
+++ /issm/trunk/src/m/modeldata/interpPaolo2015.m	(revision 28013)
@@ -0,0 +1,193 @@
+function [dh_raw_out dh_fil_out T_out] = interpPaolo2015(X,Y,T,method)
+%INTERPPAOLO2015 - interpolate observed (time series) height change [m]
+%
+%   Time series are average height changes [m] with respect to 1994 every three months (72 time steps)
+%
+%   Inputs
+%      X,Y: spatial (scatter) coordinates
+%      T: time (see below the available years) 
+%      ATTENTION: it is assumed that X and Y come in Polar Stereographic Projection (Std Latitude: 71S Meridian: 0E)
+%
+%   Outputs
+%      dh_raw_out: interpolated raw height change at X,Y, for each time requested in T
+%      dh_fil_out: interpolated filtered height change at X,Y, for each time requested in T
+%      T_out: time related to dh_raw_out and dh_fil_out (see below)
+%
+%   Available time series:
+%
+% 		 1		 1994.038
+% 		 2		 1994.285
+% 		 3		 1994.534
+% 		 4		 1994.786
+% 		 5		 1995.038
+% 		 6		 1995.285
+% 		 7		 1995.534
+% 		 8		 1995.786
+% 		 9		 1996.038
+% 		10		 1996.287
+% 		11		 1996.536
+% 		12		 1996.787
+% 		13		 1997.038
+% 		14		 1997.285
+% 		15		 1997.534
+% 		16		 1997.786
+% 		17		 1998.038
+% 		18		 1998.285
+% 		19		 1998.534
+% 		20		 1998.786
+% 		21		 1999.038
+% 		22		 1999.285
+% 		23		 1999.534
+% 		24		 1999.786
+% 		25		 2000.038
+% 		26		 2000.287
+% 		27		 2000.536
+% 		28		 2000.787
+% 		29		 2001.038
+% 		30		 2001.285
+% 		31		 2001.534
+% 		32		 2001.786
+% 		33		 2002.038
+% 		34		 2002.285
+% 		35		 2002.534
+% 		36		 2002.786
+% 		37		 2003.038
+% 		38		 2003.285
+% 		39		 2003.534
+% 		40		 2003.786
+% 		41		 2004.038
+% 		42		 2004.287
+% 		43		 2004.536
+% 		44		 2004.787
+% 		45		 2005.038
+% 		46		 2005.285
+% 		47		 2005.534
+% 		48		 2005.786
+% 		49		 2006.038
+% 		50		 2006.285
+% 		51		 2006.534
+% 		52		 2006.786
+% 		53		 2007.038
+% 		54		 2007.285
+% 		55		 2007.534
+% 		56		 2007.786
+% 		57		 2008.038
+% 		58		 2008.287
+% 		59		 2008.536
+% 		60		 2008.787
+% 		61		 2009.038
+% 		62		 2009.285
+% 		63		 2009.534
+% 		64		 2009.786
+% 		65		 2010.038
+% 		66		 2010.285
+% 		67		 2010.534
+% 		68		 2010.786
+% 		69		 2011.038
+% 		70		 2011.285
+% 		71		 2011.534
+% 		72		 2011.786
+%
+%
+%   Usage:
+%      % Get data at specific time:
+%      % In this example, T_out = [2006.038; 2007.038; 2008.038].
+%      [dh_raw_out dh_fil_out T_out] = interpPaolo2015(md.mesh.x, md.mesh.y, [2006.038; 2007.038; 2008.038]);
+%
+% 		 % Get all data in the provided years:
+%      % In this example, T_out = [2006.038; 2006.285; 2006.534; 2006.786; 2007.038; 2007.285; 2007.534; 2007.786]. 
+%      [dh_raw dh_fil T_out] = interpPaolo2015(md.mesh.x, md.mesh.y, [2006; 2007]);
+%
+% 		 % Get all data set:
+%      % In this example, T_out = [1994.038; ... ; 2011.786]. (all available time)
+%      [dh_raw dh_fil T_out] = interpPaolo2015(md.mesh.x, md.mesh.y);
+%
+%
+%   Info from ice_shelf_dh_v1.h5:	
+%      The dataset is a rectangular grid (480 points in x, 80 points in y) with x- and y-axes being longitude and latitude, respectively.
+%      Longitude/latitude coordinates refer to the center of the grid cells.
+%      The grid has a resolution of lon x lat: 0.75 x 0.25 deg (~27 km at latitude -71).
+%
+%
+%   Data are (grids in HDF5, ice_shelf_dh_v1.h5):
+%      time         : time coordinate [year; 72 values at 3-month time step]
+%      lon          : x-coordinate [degrees east; range 0/360]
+%      lat          : y-coordinate [degrees north; range -82/-62]
+%      height_raw   : Raw time series of height change [m]     
+%      height_filt  : Filtered time series of height change [m]
+%      height_err   : 2-standard-error time series [m]
+%
+
+if nargin>4 | nargin<2,
+	error('nargin not supported yet!');
+end
+
+% read data
+switch (oshostname()),
+	case {'ronne'}
+		h5 = '/home/ModelData/Antarctica/Paolo2015/ice_shelf_dh_v1.h5';
+	otherwise
+		error('hostname not supported yet');
+end
+
+disp(['   -- Paolo''s Time Series 1994 to 2012: loading data set']);
+t_data = h5read(h5,'/time');
+lat_data = h5read(h5,'/lat');
+lon_data = h5read(h5,'/lon');
+dh_raw_data = h5read(h5,'/height_raw');
+dh_fil_data = h5read(h5,'/height_filt');
+
+% set interpolation method
+if nargin<4,
+	method = 'linear'; % default method
+end
+
+% get the positions related to T
+if nargin<3,
+	pos = 1:length(t_data); % all available data		
+else
+	% initial check %{{{
+	if size(T,2)>1 | size(T,1)<1 | size(T,2)<1,
+		error('Size of input T not supported!');
+	end 
+	if size(X,1)>1 & size(X,2)>1
+		error('Size of input X not supported! X and Y should be vectors');
+	end
+	%}}}
+	% Loop over T
+	pos = [];
+	epsilon = 5e-4;
+	for i=1:length(T),
+		% find specific time
+		flag = (T(i)-epsilon<t_data & T(i)+epsilon>t_data);
+		if ~any(flag), 
+			% ok, find the time related to the requested year
+			flag = (T(i)==floor(t_data));
+		end
+		if ~any(flag)
+			error(['requested time (' num2str(T(i)) ') not found in data set'])
+		end
+		pos = [pos; find(flag)];
+	end
+	% Check if there is repeated positions
+	posunique = unique(pos);
+	if length(posunique)~=length(pos),
+		disp('   WARNING: found repeated positions in requested time');
+	end
+end
+
+% convert x/y to lat/lon:
+[LAT, LON] = xy2ll(X,Y,-1); % attention: it is assumed that X and Y comes in Polar Stereographic Projection (Std Latitude: 71S Meridian: 0E)
+posLON = find(LON<0);
+LON(posLON) =360+LON(posLON);
+
+disp(['   -- Paolo''s Time Series 1994 to 2012: interpolating in Lat/Long grid']);
+dh_raw_out = [];
+dh_fil_out = [];
+for i=1:length(pos),
+	disp(['      step = ' int2str(i) '/' int2str(length(pos)) ', position = ' int2str(pos(i)) ', year = '  num2str(t_data(pos(i)))]);
+	dh_raw_out = [dh_raw_out InterpFromGrid(lat_data(1,:),lon_data(:,1),dh_raw_data(:,:,pos(i)),LAT,LON,method)];
+	dh_fil_out = [dh_fil_out InterpFromGrid(lat_data(1,:),lon_data(:,1),dh_fil_data(:,:,pos(i)),LAT,LON,method)];
+end
+
+T_out = t_data(pos);
Index: /issm/trunk/src/m/modeldata/interpRACMO1km.m
===================================================================
--- /issm/trunk/src/m/modeldata/interpRACMO1km.m	(revision 28013)
+++ /issm/trunk/src/m/modeldata/interpRACMO1km.m	(revision 28013)
@@ -0,0 +1,37 @@
+function [output] = interpRACMO1km(X,Y),
+
+switch oshostname(),
+	case {'ronne'}
+		rootname='/home/ModelData/Greenland/RACMO2_1km/SMB_MEAN1960-1989_150m.nc';
+	case {'totten'}
+		rootname='/totten_1/ModelData/Greenland/RACMO2_1km/SMB_MEAN1960-1989_150m.nc';
+	otherwise
+		error('machine not supported yet');
+end
+verbose = 1;
+
+xdata = double(ncread(rootname,'xaxis'));
+ydata = double(ncread(rootname,'yaxis'));
+
+offset=2;
+
+xmin=min(X(:)); xmax=max(X(:));
+posx=find(xdata<=xmax);
+id1x=max(1,find(xdata>=xmin,1)-offset);
+id2x=min(numel(xdata),posx(end)+offset);
+
+ymin=min(Y(:)); ymax=max(Y(:));
+posy=find(ydata<=ymax);
+id1y=max(1,find(ydata>=ymin,1)-offset);
+id2y=min(numel(ydata),posy(end)+offset);
+
+if verbose, disp('   -- RACMO 1-km: reading smb'); end
+data  = double(ncread(rootname,'SMB',[id1x id1y],[id2x-id1x+1 id2y-id1y+1],[1 1]))';
+xdata=xdata(id1x:id2x);
+ydata=ydata(id1y:id2y);
+data(find(data==-9999))=NaN;
+
+if verbose, disp('   -- RACMO 1-km: interpolating (assuming rho_ice = 917 kg/m^3)'); end
+%converting from mm / yr water eq to m/yr ice eq
+data = data/1000 * 1000/917;
+output = InterpFromGrid(xdata,ydata,data,double(X),double(Y));
Index: /issm/trunk/src/m/modeldata/interpRACMOant.m
===================================================================
--- /issm/trunk/src/m/modeldata/interpRACMOant.m	(revision 28013)
+++ /issm/trunk/src/m/modeldata/interpRACMOant.m	(revision 28013)
@@ -0,0 +1,18 @@
+function smb = interpRACMOant(x,y);
+
+switch oshostname(),
+	case {'ronne'}
+		smbfile = '/home/ModelData/Antarctica/RACMO2SMB/SMB_RACMO2.3_1979_2011.nc';
+	case {'totten'}
+		smbfile = '/totten_1/ModelData/Antarctica/RACMO2SMB/SMB_RACMO2.3_1979_2011.nc';
+	otherwise
+		error('machine not supported yet');
+end
+	LAT=ncread(smbfile,'lat2d')';
+	LON=ncread(smbfile,'lon2d')';
+	SMB=ncread(smbfile,'SMB')';
+	[X Y]=ll2xy(LAT,LON,-1,0,71);
+
+	disp('   -- RACMO2.3 1979 - 2011: interpolating (assuming rho_ice = 917 kg/m^3)');
+	rho_ice = 917;
+	smb = griddata(X,Y,SMB,x,y) / 917;
Index: /issm/trunk/src/m/modeldata/interpREMA.m
===================================================================
--- /issm/trunk/src/m/modeldata/interpREMA.m	(revision 28013)
+++ /issm/trunk/src/m/modeldata/interpREMA.m	(revision 28013)
@@ -0,0 +1,64 @@
+function sout = interpREMA(X,Y),
+
+switch oshostname(),
+	case {'ronne'}
+		remapath='/home/ModelData/Antarctica/REMA/REMA_200m_dem_filled.tif';
+	case {'totten'}
+		remapath='/totten_1/ModelData/Antarctica/REMA/REMA_200m_dem_filled.tif';
+	otherwise
+		error('machine not supported yet');
+end
+
+usemap = 0;
+if license('test','map_toolbox')==0,
+	disp('WARNING: map toolbox not installed, trying house code');
+	usemap = 0;
+elseif license('checkout','map_toolbox')==0
+	disp('WARNING: map toolbox not available (checkout failed), trying house code');
+	usemap = 0;
+end
+
+if usemap,
+	[data,R] = geotiffread(remapath);
+	data=double(flipud(data));
+	xdata=R.XLimWorld(1):R.DeltaX:R.XLimWorld(2); xdata=xdata(:);
+	xdata =(xdata(1:end-1)+xdata(2:end))/2;
+	ydata=R.YLimWorld(2):R.DeltaY:R.YLimWorld(1); ydata=flipud(ydata(:));
+	ydata =(ydata(1:end-1)+ydata(2:end))/2;
+else
+
+	%Get image info
+	Tinfo = imfinfo(remapath);
+	N     = Tinfo.Width;
+	M     = Tinfo.Height;
+	dx    = Tinfo.ModelPixelScaleTag(1);
+	dy    = Tinfo.ModelPixelScaleTag(2);
+	minx  = Tinfo.ModelTiepointTag(4);
+	maxy  = Tinfo.ModelTiepointTag(5);
+
+	%Generate vectors
+	xdata = minx + dx/2 + ((0:N-1).*dx);
+	ydata = maxy - dy/2 - ((M  -1:-1:0).*dy);
+	ydata = fliplr(ydata);
+
+	%Get pixels we are interested in
+	offset=2;
+	xmin=min(X(:)); xmax=max(X(:));
+	posx=find(xdata<=xmax);
+	id1x=max(1,find(xdata>=xmin,1)-offset);
+	id2x=min(numel(xdata),posx(end)+offset);
+
+	ymin=min(Y(:)); ymax=max(Y(:));
+	posy=find(ydata>=ymin);
+	id1y=max(1,find(ydata<=ymax,1)-offset);
+	id2y=min(numel(ydata),posy(end)+offset);
+
+	data  = double(imread(remapath,'PixelRegion',{[id1y,id2y],[id1x,id2x]}));
+	xdata=xdata(id1x:id2x);
+	ydata=ydata(id1y:id2y);
+end
+
+%convert no coverage data
+data(find(data==-9999))=NaN;
+
+sout = InterpFromGrid(xdata,ydata,data,X,Y);
Index: /issm/trunk/src/m/modeldata/interpRTopo2.m
===================================================================
--- /issm/trunk/src/m/modeldata/interpRTopo2.m	(revision 28013)
+++ /issm/trunk/src/m/modeldata/interpRTopo2.m	(revision 28013)
@@ -0,0 +1,61 @@
+function [output] = interpRTopo2(X,Y,varargin),
+%INTERPRTOPO2 - interp from RTOPO-2 onto X and Y
+%
+%   Usage:
+%      bed = interpRTopo2(X,Y,varargin),
+%
+%   varargin = 1 (Greenland), default
+%             -1 (Antarctica)
+
+switch oshostname(),
+	case {'ronne'}
+		rootname='/home/ModelData/Global/RTopo-2/RTopo-2.0.1_30sec_bedrock_topography.nc';
+	case {'totten'}
+		rootname='/totten_1/ModelData/Global/RTopo-2/RTopo-2.0.1_30sec_bedrock_topography.nc';
+	otherwise
+		error('machine not supported yet');
+end
+verbose = 1;
+
+if nargin==3,
+	hemisphere = varargin{1};
+else
+	hemisphere = +1;
+end
+if abs(hemisphere)~=1,
+	error('hemisphere should be +/-1');
+end
+
+if hemisphere==+1,
+	if verbose, disp('   -- RTopo-2: convert to lat/lon using Greenland projection'); end
+	[LAT,  LON  ] = xy2ll(double(X(:)),double(Y(:)),+1,45,70);
+else
+	if verbose, disp('   -- RTopo-2: convert to lat/lon using Antarctica projection'); end
+	[LAT,  LON  ] = xy2ll(double(X(:)),double(Y(:)),-1,0,71);
+end
+
+Y=reshape(LAT,size(X)); X=reshape(LON,size(X));
+
+xdata = double(ncread(rootname,'lon'));
+ydata = double(ncread(rootname,'lat'));
+
+offset=2;
+
+xmin=min(X(:)); xmax=max(X(:));
+posx=find(xdata<=xmax);
+id1x=max(1,find(xdata>=xmin,1)-offset);
+id2x=min(numel(xdata),posx(end)+offset);
+
+ymin=min(Y(:)); ymax=max(Y(:));
+posy=find(ydata<=ymax);
+id1y=max(1,find(ydata>=ymin,1)-offset);
+id2y=min(numel(ydata),posy(end)+offset);
+
+if verbose, disp('   -- RTopo-2: reading bed topography'); end
+data  = double(ncread(rootname,'bedrock_topography',[id1x id1y],[id2x-id1x+1 id2y-id1y+1],[1 1]))';
+xdata=xdata(id1x:id2x);
+ydata=ydata(id1y:id2y);
+data(find(data==-9999))=NaN;
+
+if verbose, disp('   -- RTopo-2: interpolating'); end
+output = InterpFromGrid(xdata,ydata,data,double(X),double(Y));
Index: /issm/trunk/src/m/modeldata/interpRignot2012.m
===================================================================
--- /issm/trunk/src/m/modeldata/interpRignot2012.m	(revision 28013)
+++ /issm/trunk/src/m/modeldata/interpRignot2012.m	(revision 28013)
@@ -0,0 +1,36 @@
+function [vxout vyout]= interpRignot2012(X,Y),
+
+filename = '/totten_1/ModelData/Greenland/VelMouginot/RignotGreenland2012Vel.mat';
+
+
+%Figure out what subset of the matrix should be read
+load(filename,'x','y');
+velfile = matfile(filename);
+
+offset=2;
+
+xmin=min(X(:)); xmax=max(X(:));
+posx=find(x<=xmax);
+id1x=max(1,find(x>=xmin,1)-offset);
+id2x=min(numel(x),posx(end)+offset);
+
+ymin=min(Y(:)); ymax=max(Y(:));
+%posy=find(y>=ymin);
+%id1y=max(1,find(y<=ymax,1)-offset);
+%id2y=min(numel(y),posy(end)+offset);
+posy=find(y<=ymax);
+id1y=max(1,find(y>=ymin,1)-offset);
+id2y=min(numel(y),posy(end)+offset);
+
+vx = velfile.vx(id1y:id2y,id1x:id2x);
+vy = velfile.vy(id1y:id2y,id1x:id2x);
+x = x(id1x:id2x);
+y = y(id1y:id2y);
+
+%load(filename);
+vxout = InterpFromGrid(x,y,double(vx),X,Y);
+vyout = InterpFromGrid(x,y,double(vy),X,Y);
+
+if nargout==1,
+	vxout = sqrt(vxout.^2+vyout.^2);
+end
Index: /issm/trunk/src/m/modeldata/interpRignotIceShelfMelt.m
===================================================================
--- /issm/trunk/src/m/modeldata/interpRignotIceShelfMelt.m	(revision 28013)
+++ /issm/trunk/src/m/modeldata/interpRignotIceShelfMelt.m	(revision 28013)
@@ -0,0 +1,31 @@
+function output = interpRignotIceShelfMelt(X,Y,string)
+%INTERPRIGNOTICESHELFMELT - interp melt rates from Rignot et al. 2013
+%
+%   Usage:
+%      output = interpRignotIceShelfMelt(X,Y)
+
+switch (oshostname())
+	case {'ronne'}
+		rignotmelt='/home/ModelData/Antarctica/RignotMeltingrate/Ant_MeltingRate.nc';
+	case {'totten'}
+		rignotmelt='/totten_1/ModelData/Antarctica/RignotMeltingrate/Ant_MeltingRate.nc';
+	case {'thwaites','murdo','astrid'}
+		rignotmelt=['/home/seroussi/Data/Ant_MeltingRate.nc'];
+	otherwise
+		error('hostname not supported yet');
+end
+
+if nargin==2,
+	string = 'melt_actual';
+end
+
+disp(['   -- Rignot Ice Shelf Melt: loading ' string]);
+xdata = double(ncread(rignotmelt,'xaxis'));
+ydata = double(ncread(rignotmelt,'yaxis'));
+
+disp(['   -- Rignot Ice Shelf Melt: loading' string]);
+data  = double(ncread(rignotmelt,string))';
+
+disp(['   -- Rignot Ice Shelf Melt: interpolating ' string]);
+output = InterpFromGrid(xdata,ydata,data,X(:),Y(:));
+output = reshape(output,size(X,1),size(X,2));
Index: /issm/trunk/src/m/modeldata/interpSeaRISE.m
===================================================================
--- /issm/trunk/src/m/modeldata/interpSeaRISE.m	(revision 28013)
+++ /issm/trunk/src/m/modeldata/interpSeaRISE.m	(revision 28013)
@@ -0,0 +1,90 @@
+function [dataout] = interpSeaRISE(X,Y,string,varargin),
+%INTERPSEARISE - interpolate SeaRISE data
+%
+%   Available data:
+%      1.  sealeveltimes
+%      2.  dhdt
+%      3.  surfvelmag
+%      4.  balvelmag
+%      5.  oisotopestimes
+%      6.  bheatflx
+%      7.  presprcp
+%      8.  sealevel_time_series
+%      9.  usrf
+%      10. mapping
+%      11. surfvely
+%      12. surfvelx
+%      13. topg
+%      14. landcover
+%      15. temp_time_series
+%      16. thk
+%      17. time
+%      18. oisotopes_time_series
+%      19. runoff
+%      20. smb
+%      21. airtemp2m
+%      22. surftemp
+%
+%   Hemisphere: +1 Greenland, -1 Antarctica
+%
+%   Usage:
+%      [dataout] = /totten_1/dmangini/trunk-jpl/src/m/modeldata/interpSeaRISE.m(X,Y,string,hemisphere)
+%
+%   Examples:
+%      md.basalforcings.geothermalflux  = interpSeaRISE(md.mesh.x,md.mesh.y,'bheatflx_shapiro',-1); 
+
+verbose=0;
+
+if nargin==3,
+	hemisphere = +1;
+else
+	hemisphere = varargin{1};
+end
+
+%read data
+switch (oshostname()),
+	case {'ronne'}
+		if hemisphere==1,
+			searisenc='/home/ModelData/SeaRISE/Greenland_5km_dev1.2.nc';
+		elseif hemisphere==-1,
+			searisenc='/home/ModelData/SeaRISE/Antarctica_5km_dev1.0.nc';
+		end
+	case {'thwaites','murdo','astrid'}
+		if hemisphere==1,
+			searisenc='/u/astrid-r1b/ModelData/SeaRISE/Greenland5km_v1.2/Greenland_5km_dev1.2.nc';
+		elseif hemisphere==-1,
+			searisenc='/u/astrid-r1b/ModelData/SeaRISE/Antarctica5km_shelves_v1.0/Antarctica_5km_dev1.0.nc';
+		end
+	case {'totten'}
+		if hemisphere==1,
+			searisenc='/totten_1/ModelData/SeaRISE/Greenland_5km_dev1.2.nc';
+		elseif hemisphere==-1,
+			searisenc='/totten_1/ModelData/SeaRISE/Antarctica_5km_dev1.0.nc';
+		end
+	otherwise
+		error('hostname not supported yet');
+end
+
+%convert coordinates to SeaRISE projection
+if verbose, disp('   -- SeaRISE: converting coordinates'); end
+if hemisphere==1,
+	[LAT,  LON  ] = xy2ll(double(X(:)),double(Y(:)),+1,45,70);
+	[xproj,yproj] = ll2xy(LAT,LON  ,+1,39,71);
+elseif hemisphere==-1,
+	xproj=X; yproj=Y;
+end
+
+if verbose, disp('   -- SeaRISE: loading coordinates'); end
+xdata = double(ncread(searisenc,'x1'));%*1000;
+ydata = double(ncread(searisenc,'y1'));%*1000;
+
+if verbose, disp(['   -- SeaRISE: loading ' string]); end
+data  = double(ncread(searisenc,string))';
+
+if verbose, disp(['   -- SeaRISE: interpolating ' string]); end
+if strcmpi(string,'LandMask');
+	dataout = InterpFromGrid(xdata,ydata,data,xproj,yproj,'nearest');
+else
+	dataout = InterpFromGrid(xdata,ydata,data,xproj,yproj);
+end
+dataout = reshape(dataout,size(X,1),size(X,2));
Index: /issm/trunk/src/m/modeldata/interpShepherd2019.m
===================================================================
--- /issm/trunk/src/m/modeldata/interpShepherd2019.m	(revision 28013)
+++ /issm/trunk/src/m/modeldata/interpShepherd2019.m	(revision 28013)
@@ -0,0 +1,55 @@
+function dhdt=interpShepherd2019(X,Y,string,varargin)
+%INTERPSHEPHERD2019 - interpolate Shepherd2019 data
+%
+%   Available data:
+%      1.  dhdt_1992_1996
+%      2.  dhdt_1997_2001
+%      3.  dhdt_2002_2006
+%      4.  dhdt_2007_2011
+%      5.  dhdt_2012_2016
+%      6.  dhdt_1992_2017
+%      7.  uncert_1992_1996
+%      8.  uncert_1997_2001
+%      9.  uncert_2002_2006
+%      10.  uncert_2007_2011
+%      11.  uncert_2012_2016
+%      12.  uncert_1992_2017
+%
+%   Usage:
+%      [dataout] = interpShepherd2019(X,Y,'dhdt_1992_2017')
+
+options={'dhdt_1992_1996','dhdt_1997_2001','dhdt_2002_2006','dhdt_2007_2011','dhdt_2012_2016','dhdt_1992_2017',...
+			'uncert_1992_1996','uncert_1997_2001','uncert_2002_2006','uncert_2007_2011','uncert_2012_2016','uncert_1992_2017'};
+tf=strcmp(string,options);
+
+if ~any(tf)
+	disp('String not available!');
+   disp('The options are:');
+   disp(options);
+   error('String not available. See message above.');
+end
+
+switch oshostname(),
+   case {'ronne'}
+		nc='/home/ModelData/Antarctica/DHDTShepherd/antarctic_dhdt_5km_grid_1992_2017.nc';
+	case {'totten'}
+		nc='/totten_1/ModelData/Antarctica/CPOM_dhdt/antarctic_dhdt_5km_grid_1992_2017.nc';
+	case {'recruta'}
+		nc='/home/santos/ModelData/CPOM_dhdt_shepherd_2019/antarctic_dhdt_5km_grid_1992_2017.nc';
+	otherwise
+      error('machine not supported yet');
+end
+
+if nargin==3,
+   method='linear';% default
+else
+   method=varargin{1};
+end
+
+xdata=double(ncread(nc,'x'));
+ydata=double(ncread(nc,'y'));
+data=double(ncread(nc,string))';
+
+dhdt=InterpFromGrid(xdata,ydata,data,X,Y,method);
+
+end
Index: /issm/trunk/src/m/modeldata/interpSmith2020.m
===================================================================
--- /issm/trunk/src/m/modeldata/interpSmith2020.m	(revision 28013)
+++ /issm/trunk/src/m/modeldata/interpSmith2020.m	(revision 28013)
@@ -0,0 +1,51 @@
+function dhdt=interpSmith2020(X,Y,string,varargin)
+%INTERPSMITH2020 - interpolate Smith2020 data
+%
+%	Available data:
+%		Filtered mass-change maps, for display only, units of m(ice-equivalent)/yr:
+%		ais_floating_filt
+%		ais_grounded_filt
+%		gris_filt
+%
+%		Raw mass-change maps, suitable for generation of basin-by-basin mass-change estimates, units of m(ice-equivalent)/yr:
+%		ais_floating
+%		ais_grounded
+%		gris
+%
+%   Usage:
+%      [dataout] = interpSmith2020(X,Y,'ais_floating_filt')
+
+options={'ais_floating_filt','ais_grounded_filt','gris_filt','ais_floating','ais_grounded','gris'};
+tf=strcmp(string,options);
+
+if ~any(tf)
+	disp('String not available!');
+	disp('The options are:');
+	disp(options);
+	error('String not available. See message above.');
+end
+	
+switch oshostname(),
+	case {'ronne'}
+		if strcmp(string,'gris_filt') | strcmp(string,'gris')
+			path='/home/ModelData/Greenland/DHDTSmith/';
+		else
+			path='/home/ModelData/Antarctica/DHDTSmith/';
+		end
+	case {'totten'}
+		if strcmp(string,'gris_filt') | strcmp(string,'gris')
+			path='/totten_1/ModelData/Greenland/DHDTSmith/';
+		else
+			path='/totten_1/ModelData/Antarctica/DHDTSmith/';
+		end
+	case {'recruta'}
+		path='/home/santos/ModelData/ICESat1_ICESat2_mass_change/';
+	otherwise
+		error('machine not supported yet');
+end
+
+file=strcat(path,string,'.tif');
+
+dhdt=interpFromGeotiff(file,X,Y);
+
+end
Index: /issm/trunk/src/m/modules/CoordTransform.m
===================================================================
--- /issm/trunk/src/m/modules/CoordTransform.m	(revision 28012)
+++ /issm/trunk/src/m/modules/CoordTransform.m	(revision 28013)
@@ -16,4 +16,5 @@
 %      Greenland  = 'EPSG:3413' (polar stereographic 70N 45W)
 %      Antarctica = 'EPSG:3031' (polar stereographic 71S 0E)
+%      IBCAO      = 'EPSG:3996' (polar stereographic 75N 0E)
 %      
 %   ll2xy previous default equivalent (uses with Hugues Ellispoid S)
Index: /issm/trunk/src/m/modules/ExpToLevelSet.py
===================================================================
--- /issm/trunk/src/m/modules/ExpToLevelSet.py	(revision 28012)
+++ /issm/trunk/src/m/modules/ExpToLevelSet.py	(revision 28013)
@@ -4,5 +4,5 @@
 
 
-def ExpToLevelSet(x, y, contourname): #{{{
+def ExpToLevelSet(x, y, contourname):  # {{{
     """EXPTOLEVELSET - Determine levelset distance between a contour and a 
     cloud of points
@@ -35,3 +35,3 @@
 
     return distance
-#}}}
+# }}}
Index: /issm/trunk/src/m/modules/InterpFromMesh2d.py
===================================================================
--- /issm/trunk/src/m/modules/InterpFromMesh2d.py	(revision 28012)
+++ /issm/trunk/src/m/modules/InterpFromMesh2d.py	(revision 28013)
@@ -2,5 +2,5 @@
 
 
-def InterpFromMesh2d(*args): #{{{
+def InterpFromMesh2d(*args):  # {{{
     """INTERPFROMMESH2D
 
@@ -39,3 +39,3 @@
 
     return data_prime[0] # NOTE: Value returned from wrapper function is a tuple, the first element of which being the result we actually want
-#}}}
+# }}}
Index: /issm/trunk/src/m/modules/InterpFromMeshToMesh2d.py
===================================================================
--- /issm/trunk/src/m/modules/InterpFromMeshToMesh2d.py	(revision 28012)
+++ /issm/trunk/src/m/modules/InterpFromMeshToMesh2d.py	(revision 28013)
@@ -18,4 +18,6 @@
             default:    default value if point is outsite of triangulation (instead of linear interpolation)
 
+    All fields should be arrays and not lists as this would crash the code.
+
     Example:
         load('temperature.mat')
@@ -32,4 +34,3 @@
     # Call Python module
     data_interp = InterpFromMeshToMesh2d_python(*args)
-
     return data_interp[0] # NOTE: Value returned from wrapper function is a tuple, the first element of which being the result we actually want
Index: /issm/trunk/src/m/netcdf/README.txt
===================================================================
--- /issm/trunk/src/m/netcdf/README.txt	(revision 28013)
+++ /issm/trunk/src/m/netcdf/README.txt	(revision 28013)
@@ -0,0 +1,87 @@
+The write_netCDF and read_netCDF modules provide a convenient way to save and restore the state of a model class instance 
+in binary format via NetCDF4. This allows users to store the class state on disk and retrieve it later, facilitating seamless 
+transitions between Python and MATLAB environments.
+
+To save a model, call either write_netCDF.py or write_netCDF.m depending on whether your class is in matlab or python. 
+To read a saved model, call either read_netCDF.py or read_netCDF.m depending on what language you prefer to use the model in.
+If you would like to log the names and locations of variables being stored, add the argument verbose = True (verbose = true for matlab).
+
+Usage Instructions:
+
+    Python:
+        - Saving a model: 
+            from write_netCDF import write_netCDF
+
+            md = bamg(model(), foo.csv, .01)
+
+            write_netCDF(md, 'adress_to_save/../filename.nc')            
+
+        - Reading a model:
+            from read_netCDF import read_netCDF
+
+            md = read_netCDF('adress_to_file/../filename.nc')
+
+        Verbose examples:
+            write_netCDF(md, adress_to_save/../filename.nc, verbose = True)
+            md = read_netCDF(adress_to_file/../filename.nc, verbose = True)
+
+    MATLAB:
+        - Saving a model:
+
+            write_netCDF(md, adress_to_save/../filename.nc);
+
+        - Reading a model:
+
+            md = read_netCDF(adress_to_file/../filename.nc);
+
+        Verbose examples:
+            write_netCDF(md, adress_to_save/../filename.nc, verbose = true);
+	    
+          or:
+
+	    write_netCDF(md, adress_to_save/../filename.nc, verbose);
+            md = read_netCDF(adress_to_file/../filename.nc, verbose = true);
+
+Dependencies:
+    Python: 
+        - NumPy 
+        - NetCDF4 / NetCDF4.Dataset
+        - The model() class
+        - results.solution / results.solutionstep / results.resultsdakota
+        - inversion.inversion / inversion.m1qn3inversion / inversion.taoinversion
+
+    MATLAB: 
+        - The model() class
+        - inversion.inversion / inversion.m1qn3inversion / inversion.taoinversion
+
+
+Additional Information:
+
+There are currently datatypes that both write_netCDF and read_netCDF modules may not be able to handle. These datatypes might 
+include lists with multiple datatypes (ie, ['number', 1, 'letter', a, 'color', 'blue']), lists of dicts ect. 
+
+To add functionality for these additional cases, one must simply create a function to handle the case and call it using a 
+conditional case within the create_var() function. To read the data from the NetCDF4 file, add the case to the 
+copy_variable_data_to_new_model() function in read_netCDF so that the data can be added to a new model() instance.
+
+Known issues:
+
+Unlike Python, MATLAB doesn't utilize subclasses in its model class. This leads to a loss of certain subclass instances. 
+For instance, the results.solutionstep() class poses a known issue. In MATLAB, there's no direct equivalent. The fields in 
+'md.results' in MATLAB might correspond to instances of resultsdakota(), solution(), or solutionstep() in Python, but 
+because those classes don't exist in MATLAB, there is no way for python to know which instance it needs. 
+
+The current workaround, while not theoretically sound, involves searching for the class name string in MATLAB's 'results' 
+field names. For instance, 'md.results.TransientSolution' is recorded as a solution() class instance. However, problems arise 
+in cases like 'md.results.StressbalanceSolution', where the code notes a solution() instance, while in Python, it should be a 
+solutionstep() instance.
+
+So far, there have been no recorded problems swapping a solutionstep() instance for a solution() instance.
+
+Potential solutions are:
+
+    - Restructure both Python and MATLAB solve frameworks. In Python, when creating an md.results.<solutionstep()> instance, 
+    embed 'solutionstep' in the class instance name.
+        >> This solution is very involved, and would include the tedious modification of >5 files in total
+    - Create a hash table linking solutions with their corresponding 'md.results.<class>' for reference when saving models to 
+    the netCDF file. 
Index: /issm/trunk/src/m/netcdf/read_netCDF.m
===================================================================
--- /issm/trunk/src/m/netcdf/read_netCDF.m	(revision 28013)
+++ /issm/trunk/src/m/netcdf/read_netCDF.m	(revision 28013)
@@ -0,0 +1,526 @@
+%{
+Given a NetCDF4 file, this set of functions will perform the following:
+    1. Enter each group of the file.
+    2. For each variable in each group, update an empty model with the variable's data
+    3. Enter nested groups and repeat
+
+
+If the model you saved has subclass instances that are not in the standard model() class
+you can:
+    1. Copy lines 30-35, set the "results" string to the name of the subclass instance,
+    2. Copy and modify the make_results_subclasses() function to create the new subclass 
+        instances you need. 
+From there, the rest of this script will automatically create the new subclass 
+instance in the model you're writing to and store the data from the netcdf file there.
+%}
+
+
+function model_copy = read_netCDF(filename, varargin)
+    if nargin > 1
+        verbose = true;
+    else
+        verbose = false;
+    end
+    
+    if verbose
+        fprintf('NetCDF42C v1.1.14\n');
+    end
+    % make a model framework to fill that is in the scope of this file
+    model_copy = model();
+
+    % Check if path exists
+    if exist(filename, 'file')
+        if verbose
+            fprintf('Opening %s for reading\n', filename);
+        end
+
+        % Open the given netCDF4 file
+        NCData = netcdf.open(filename, 'NOWRITE');
+        % Remove masks from netCDF data for easy conversion: NOT WORKING
+        %netcdf.setMask(NCData, 'NC_NOFILL');
+
+        % see if results is in there, if it is we have to instantiate some classes
+        try
+            results_group_id = netcdf.inqNcid(NCData, "results");
+            model_copy = make_results_subclasses(model_copy, NCData, verbose);
+        catch
+        end % 'results' group doesn't exist 
+
+        % see if inversion is in there, if it is we may have to instantiate some classes
+        try
+            inversion_group_id = netcdf.inqNcid(NCData, "inversion");
+            model_copy = check_inversion_class(model_copy, NCData, verbose);
+        catch
+        end % 'inversion' group doesn't exist 
+        
+        % loop over first layer of groups in netcdf file
+        for group = netcdf.inqGrps(NCData)
+            group_id = netcdf.inqNcid(NCData, netcdf.inqGrpName(group));
+            %disp(netcdf.inqGrpNameFull(group_id))
+            % hand off first level to recursive search
+            model_copy = walk_nested_groups(group_id, model_copy, NCData, verbose);
+        end
+        
+        % Close the netCDF file
+        netcdf.close(NCData);
+        if verbose
+            disp('Model Successfully Copied')
+        end
+    else
+        fprintf('File %s does not exist.\n', filename);
+    end
+end
+
+
+function model_copy = make_results_subclasses(model_copy, NCData, verbose)
+    resultsGroup = netcdf.inqNcid(NCData, "results");
+    variables = netcdf.inqVarIDs(resultsGroup);
+    for name = variables
+        class_instance = netcdf.inqVar(resultsGroup, name);
+        class_instance_names_raw = netcdf.getVar(resultsGroup, name, 'char').';
+        class_instance_names = cellstr(class_instance_names_raw);
+        for index = 1:numel(class_instance_names)
+            class_instance_name = class_instance_names{index};
+            model_copy.results = setfield(model_copy.results, class_instance_name, struct());
+        end
+        %model_copy.results = setfield(model_copy.results, class_instance, class_instance_name);
+    end
+    model_copy = model_copy;
+    if verbose
+        disp('Successfully recreated results structs:')
+        for fieldname = string(fieldnames(model_copy.results))
+            disp(fieldname)
+        end
+    end
+end
+
+
+function model_copy = check_inversion_class(model_copy, NCData, verbose)
+    % get the name of the inversion class: either inversion or m1qn3inversion or taoinversion
+    inversionGroup = netcdf.inqNcid(NCData, "inversion");
+    varid = netcdf.inqVarID(inversionGroup, 'inversion_class_name');
+    inversion_class = convertCharsToStrings(netcdf.getVar(inversionGroup, varid,'char'));
+    if strcmp(inversion_class, 'm1qn3inversion')
+        model_copy.inversion = m1qn3inversion();
+        if verbose
+            disp('Successfully created inversion class instance: m1qn3inversion')
+        end
+    elseif strcmp(inversion_class, 'taoinversion')
+        model_copy.inversion = taoinversion();
+        if verbose
+            disp('Successfully created inversion class instance: taoinversion')
+        end
+    else
+        if verbose
+            disp('No inversion class was found')
+        end
+    end
+    model_copy = model_copy;
+end
+
+
+function model_copy = walk_nested_groups(group_location_in_file, model_copy, NCData, verbose)  
+    % we search the current group level for variables by getting this struct
+    variables = netcdf.inqVarIDs(group_location_in_file); 
+
+    % from the variables struct get the info related to the variables
+    for variable = variables
+        [varname, xtype, dimids, numatts] = netcdf.inqVar(group_location_in_file, variable);
+        
+        % keep an eye out for nested structs:
+        if strcmp(varname, 'this_is_a_nested')
+            is_object = true;
+            model_copy = copy_nested_struct(group_location_in_file, model_copy, NCData, verbose);
+        elseif strcmp(varname, 'name_of_cell_array')
+            is_object = true;
+            model_copy = copy_cell_array_of_objects(variables, group_location_in_file, model_copy, NCData, verbose);
+        elseif strcmp(varname, 'solution')
+            % band-aid pass..
+        else
+            if logical(exist('is_object', 'var'))
+                % already handled
+            else
+                model_copy = copy_variable_data_to_new_model(group_location_in_file, varname, xtype, model_copy, NCData, verbose);
+            end
+        end
+    end
+
+    % try to find groups in current level, if it doesn't work it's because there is nothing there
+    %try
+    % if it's a nested struct the function copy_nested_struct has already been called
+    if logical(exist('is_object', 'var'))
+        % do nothing
+    else
+        % search for nested groups in the current level to feed back to this function
+        groups = netcdf.inqGrps(group_location_in_file);
+        if not(isempty(groups))
+            for group = groups
+                group_id = netcdf.inqNcid(group_location_in_file, netcdf.inqGrpName(group));
+                %disp(netcdf.inqGrpNameFull(group_id))
+                model_copy = walk_nested_groups(group, model_copy, NCData, verbose);
+            end
+        end
+    end
+    %catch % no nested groups here
+    %end
+end
+
+
+% to read cell arrays with objects: 
+function model_copy = copy_cell_array_of_objects(variables, group_location_in_file, model_copy, NCData, verbose);
+    %{
+        The structure in netcdf for groups with the name_of_cell_array variable is like:
+
+        group: 2x6_cell_array_of_objects {
+            name_of_cell_array = <name_of_cell_array>
+
+            group: Row_1_of_2 {
+                group: Col_1_of_6 {
+                    ... other groups can be here that refer to objects
+                } // group Col_6_of_6
+            } // group Row_1_of_2
+
+            group: Row_2_of_2 {
+                group: Col_1_of_6 {
+                    ... other groups can be here that refer to objects
+                } // group Col_6_of_6
+            } // group Row_2_of_2
+        } // group 2x6_cell_array_of_objects
+
+        We have to navigate this structure to extract all the data and recreate the 
+        original structure when the model was saved
+    %}
+
+    % get the name_of_cell_array, rows and cols vars
+    name_of_cell_array_varID = netcdf.inqVarID(group_location_in_file, 'name_of_cell_array');
+    rows_varID = netcdf.inqVarID(group_location_in_file, 'rows');
+    cols_varID = netcdf.inqVarID(group_location_in_file, 'cols');
+
+    name_of_cell_array = netcdf.getVar(group_location_in_file, name_of_cell_array_varID).'; % transpose
+    rows = netcdf.getVar(group_location_in_file, rows_varID);
+    cols = netcdf.getVar(group_location_in_file, cols_varID);
+
+    % now we work backwards: make the cell array, fill it in, and assign it to the model
+
+    % make the cell array
+    cell_array_placeholder = cell(rows, cols);
+
+    % get subgroups which are elements of the cell array
+    subgroups = netcdf.inqGrps(group_location_in_file); % numerical cell array with ID's of subgroups
+
+    % enter each subgroup, get the data, assign it to the corresponding index of cell array
+    if rows > 1
+        % we go over rows
+        % set index for cell array rows
+        row_idx = 1;
+        for row = subgroups
+            % now columns
+            columns = netcdf.inqGrps(group_location_in_file);
+            
+            % set index for cell array cols
+            col_idx = 1;
+            for column = columns
+                % now variables
+                current_column_varids = netcdf.inqVarIDs(column);
+
+                % if 'class_is_a' or 'this_is_a_nested' variables is present at this level we have to handle them accordingly
+                try
+                    class_is_aID = netcdf.inqVarID(column, 'class_is_a');
+                    col_data = deserialize_class(column, NCData, verbose);
+                    is_object = true;
+                catch
+                end
+                
+                try
+                    this_is_a_nestedID = netcdf.inqVarID(column, 'this_is_a_nested');
+                    % functionality not supported
+                    disp('Error: Cell Arrays of structs not yet supported!')
+                    % copy_nested_struct(column, model_copy, NCData, verbose)
+                    is_object = true;
+                catch
+                end
+
+                if logical(exist('is_object', 'var'))
+                    % already taken care of
+                else
+                    % store the variables as normal -- to be added later
+                    disp('Error: Cell Arrays of mixed objects not yet supported!')
+                    for var = current_column_varids
+                        % not supported
+                    end
+                end
+
+                cell_array_placeholder{row_idx, col_idx} = col_data;
+                col_idx = col_idx + 1;
+            end
+            row_idx = row_idx + 1;
+        end 
+    else
+        % set index for cell array
+        col_idx = 1;
+        for column = subgroups
+            % now variables
+            current_column_varids = netcdf.inqVarIDs(column);
+
+            % if 'class_is_a' or 'this_is_a_nested' variables is present at this level we have to handle them accordingly
+            try
+                classID = netcdf.inqVarID(column, 'class_is_a');
+                col_data = deserialize_class(classID, column, NCData, verbose);
+                is_object = true;
+            catch ME
+                rethrow(ME)
+            end
+            
+            try
+                this_is_a_nestedID = netcdf.inqVarID(column, 'this_is_a_nested');
+                % functionality not supported
+                disp('Error: Cell Arrays of structs not yet supported!')
+                % col_data = copy_nested_struct(column, model_copy, NCData, verbose);
+                is_object = true;
+            catch
+            end
+            if logical(exist('is_object', 'var'))
+                % already taken care of
+            else
+                % store the variables as normal -- to be added later
+                disp('Error: Cell Arrays of mixed objects not yet supported!')
+                for var = current_column_varids
+                    % col_data = not supported
+                end
+            end
+
+            cell_array_placeholder{col_idx} = col_data;
+            col_idx = col_idx + 1;
+
+        end 
+    end
+   
+
+    % Like in copy_nested_struct, we can only handle things 1 layer deep.
+    % assign cell array to model
+    address_to_attr_list = split(netcdf.inqGrpNameFull(group_location_in_file), '/');
+    address_to_attr = address_to_attr_list{2};
+    if isprop(model_copy.(address_to_attr), name_of_cell_array);
+        model_copy.(address_to_attr).(name_of_cell_array) = cell_array_placeholder;
+    else
+        model_copy = addprop(model_copy.(address_to_attr), name_of_cell_array, cell_array_placeholder);
+    end
+
+    if verbose
+        fprintf("Successfully loaded cell array %s to %s\n", name_of_cell_array,address_to_attr_list{2})
+    end
+end
+
+
+
+
+function output = deserialize_class(classID, group, NCData, verbose)
+    %{
+        This function will recreate a class
+    %}
+
+    % get the name of the class
+    name = netcdf.getVar(group, classID).';
+
+    % instantiate it
+    class_instance = eval([name, '()']);
+
+    % get and assign properties
+    subgroups = netcdf.inqGrps(group); % numerical cell array with ID's of subgroups
+
+    if numel(subgroups) == 1
+        % get properties
+        varIDs = netcdf.inqVarIDs(subgroups);
+        for varID = varIDs
+            % var metadata
+            [varname, xtype, dimids, numatts] = netcdf.inqVar(subgroups, varID);
+            % data
+            data = netcdf.getVar(subgroups, varID);
+
+            % netcdf uses Row Major Order but MATLAB uses Column Major Order so we need to transpose all arrays w/ more than 1 dim
+            if all(size(data)~=1) || xtype == 2
+                data = data.';
+            end
+
+            % some classes have permissions... so we skip those
+            try
+                % if property already exists, assign new value
+                if isprop(class_instance, varname)
+                    class_instance.(varname) = data;
+                else
+                    addprop(class_instance, varname, data);
+                end
+            catch
+            end
+        end
+    else
+        % not supported
+    end
+    output = class_instance;
+end
+
+
+function model_copy = copy_nested_struct(group_location_in_file, model_copy, NCData, verbose)
+    %{
+        A common multidimensional struct array is the 1xn md.results.TransientSolution struct. 
+        The process to recreate is as follows:
+            1. Get the name of the struct from group name
+            2. Get the fieldnames from the subgroups 
+            3. Recreate the struct with fieldnames 
+            4. Populate the fields with their respective values
+    %}
+
+    % step 1
+    name_of_struct = netcdf.inqGrpName(group_location_in_file);
+
+    % step 2
+    subgroups = netcdf.inqGrps(group_location_in_file); % numerical cell array with ID's of subgroups
+    % get single subgroup's data
+    single_subgroup_ID = subgroups(1);
+    subgroup_varids = netcdf.inqVarIDs(single_subgroup_ID);
+    fieldnames = {};
+    for variable = subgroup_varids
+        [varname, xtype, dimids, numatts] = netcdf.inqVar(single_subgroup_ID, variable);
+        fieldnames{end+1} = varname;
+    end
+
+    % step 3
+    address_in_model_raw = split(netcdf.inqGrpNameFull(group_location_in_file), '/');
+    address_in_model = address_in_model_raw{2};
+    
+    % we cannot assign a variable to represent this object as MATLAB treats all variables as copies
+    % and not pointers to the same memory address
+    % this means that if address_in_model has more than 1 layer, we need to modify the code. For now, 
+    % we just hope this will do. An example of a no-solution would be model().abc.def.ghi.field whereas we're only assuming model().abc.field now
+    
+    model_copy.(address_in_model).(name_of_struct) = struct();
+    % for every fieldname in the subgroup, create an empty field
+    for fieldname = string(fieldnames)
+        model_copy.(address_in_model).(name_of_struct).(fieldname) = {};
+    end
+
+    % use repmat to make the struct array multidimensional along the fields axis
+    number_of_dimensions = numel(subgroups);
+    model_copy.(address_in_model).(name_of_struct) = repmat(model_copy.(address_in_model).(name_of_struct), 1, number_of_dimensions);
+    
+    % step 4
+    % for every layer of the multidimensional struct array, populate the fields
+    for current_layer = 1:number_of_dimensions
+        % choose subgroup
+        current_layer_subgroup_ID = subgroups(current_layer);
+        % get all vars
+        current_layer_subgroup_varids = netcdf.inqVarIDs(current_layer_subgroup_ID);
+        % get individual vars and set fields at layer current_layer
+        for varid = current_layer_subgroup_varids
+            [varname, xtype, dimids, numatts] = netcdf.inqVar(current_layer_subgroup_ID, varid);
+            data = netcdf.getVar(current_layer_subgroup_ID, varid);
+
+            % netcdf uses Row Major Order but MATLAB uses Column Major Order so we need to transpose all arrays w/ more than 1 dim
+            if all(size(data)~=1) || xtype == 2
+                data = data.';
+            end
+            
+            % set the field
+            model_copy.(address_in_model).(name_of_struct)(current_layer).(varname) = data;
+            %address_to_struct_in_model = setfield(address_to_struct_in_model(current_layer), varname, data)
+        end
+        model_copy.(address_in_model).(name_of_struct)(current_layer);
+        if verbose
+            fprintf("Successfully loaded layer %s to multidimension struct array\n", num2str(current_layer))
+        end
+    end
+    model_copy = model_copy;
+    if verbose
+        fprintf('Successfully recreated multidimensional structure array %s in md.%s\n', name_of_struct, address_in_model)
+    end
+end
+
+
+
+
+%{
+Since there are two types of objects that MATLAB uses (classes and structs), we have to check 
+which object we're working with before we can set any fields/attributes of it. After this is completed,
+we can write the data to that location in the model.
+%}
+
+function model_copy = copy_variable_data_to_new_model(group_location_in_file, varname, xtype, model_copy, NCData, verbose)
+    %disp(varname)
+    % this is an inversion band-aid
+    if strcmp(varname, 'inversion_class_name') || strcmp(varname, 'name_of_struct') || strcmp(varname, 'solution')
+        % we don't need this
+    else
+        % putting try/catch here so that any errors generated while copying data are logged and not lost by the try/catch in walk_nested_groups function
+        try
+            %disp(netcdf.inqGrpNameFull(group_location_in_file))
+            %disp(class(netcdf.inqGrpNameFull(group_location_in_file)))
+            address_to_attr = strrep(netcdf.inqGrpNameFull(group_location_in_file), '/', '.');
+            varid = netcdf.inqVarID(group_location_in_file, varname);
+            data = netcdf.getVar(group_location_in_file, varid);
+            
+    
+            % if we have an empty string
+            if xtype == 2 && isempty(all(data))
+                data = cell(char());
+            % if we have an empty cell-char array
+            elseif numel(data) == 1 && xtype == 3 && data == -32767
+                data = cell(char());
+            elseif isempty(all(data))
+                data = []
+            end
+            % band-aid for some cell-char-arrays:
+            if xtype == 2 && strcmp(data, 'default')
+                data = {'default'};
+            end
+            
+            % netcdf uses Row Major Order but MATLAB uses Column Major Order so we need to transpose all arrays w/ more than 1 dim
+            if all(size(data)~=1) || xtype == 2
+                data = data.';
+            end
+    
+            % if we have a list of strings
+            if xtype == 2
+                try
+                    if strcmp(netcdf.getAtt(group_location_in_file, varid, "type_is"), 'cell_array_of_strings')
+                        data = cellstr(data);
+                    end
+                catch
+                    % no attr found so we pass
+                end
+            end
+            
+            % the issm c compiler does not work with int64 datatypes, so we need to convert those to int16
+            % reference this (very hard to find) link for netcdf4 datatypes: https://docs.unidata.ucar.edu/netcdf-c/current/netcdf_8h_source.html
+            %xtype
+            if xtype == 10
+                arg_to_eval = ['model_copy', address_to_attr, '.', varname, ' = ' , 'double(data);'];
+                eval(arg_to_eval);
+                %disp('Loaded int64 as int16')
+            else
+                arg_to_eval = ['model_copy', address_to_attr, '.', varname, ' = data;'];
+                eval(arg_to_eval);
+            end
+            
+            if verbose
+                full_addy = netcdf.inqGrpNameFull(group_location_in_file);
+                %disp(xtype)
+                %class(data)
+                fprintf('Successfully loaded %s to %s\n', varname, full_addy);
+            end
+
+        catch ME %ME is an MException struct
+            % Some error occurred if you get here.
+            fprintf(1,'There was an error with %s! \n', varname)
+            errorMessage = sprintf('Error in function %s() at line %d.\n\nError Message:\n%s', ME.stack.name, ME.stack.line, ME.message);
+            fprintf(1, '%s\n', errorMessage);
+            uiwait(warndlg(errorMessage));
+            %line = ME.stack.line
+            %fprintf(1,'There was an error with %s! \n', varname)
+            %fprintf('The message was:\n%s\n',ME.message);
+            %fprintf(1,'The identifier was:\n%s\n',ME.identifier);
+            
+            % more error handling...
+        end
+    end
+    model_copy = model_copy;
+end
Index: /issm/trunk/src/m/netcdf/read_netCDF.py
===================================================================
--- /issm/trunk/src/m/netcdf/read_netCDF.py	(revision 28013)
+++ /issm/trunk/src/m/netcdf/read_netCDF.py	(revision 28013)
@@ -0,0 +1,501 @@
+# imports
+from netCDF4 import Dataset
+import numpy as np
+import numpy.ma as ma
+from os import path, remove
+from model import *
+import re
+from results import *
+from m1qn3inversion import m1qn3inversion
+from taoinversion import taoinversion
+from collections import OrderedDict
+import sys
+from massfluxatgate import massfluxatgate
+
+
+
+'''
+Given a NetCDF4 file, this set of functions will perform the following:
+    1. Enter each group of the file.
+    2. For each variable in each group, update an empty model with the variable's data
+    3. Enter nested groups and repeat
+'''
+
+
+# make a model framework to fill that is in the scope of this file
+model_copy = model()
+
+def read_netCDF(filename, verbose = False):
+    if verbose:
+        print('NetCDF42C v1.2.0')
+
+    '''
+    filename = path and name to save file under
+    verbose = T/F = show or muted log statements. Naturally muted
+    '''
+
+    # this is a precaution so that data is not lost
+    try:
+        # check if path exists
+        if path.exists(filename):
+            if verbose:
+                print('Opening {} for reading'.format(filename))
+            else: pass
+    
+            # open the given netCDF4 file
+            NCData = Dataset(filename, 'r')
+            # remove masks from numpy arrays for easy conversion
+            NCData.set_auto_mask(False)
+        else:
+            return 'The file you entered does not exist or cannot be found in the current directory'
+        
+        # in order to handle some subclasses in the results class, we have to utilize this band-aid
+        # there will likely be more band-aids added unless a class name library is created with all class names that might be added to a md
+        try:
+            # if results has meaningful data, save the name of the subclass and class instance
+            NCData.groups['results']
+            make_results_subclasses(NCData, verbose)
+        except:
+            pass
+    
+        # similarly, we need to check and see if we have an m1qn3inversion class instance
+        try:
+            NCData.groups['inversion']
+            check_inversion_class(NCData, verbose)
+        except:
+            pass
+        
+        # walk through each group looking for subgroups and variables
+        for group in NCData.groups.keys():
+            if 'debris' in group:
+                pass
+            else:
+                # have to send a custom name to this function: filename.groups['group']
+                name = "NCData.groups['" + str(group) + "']"
+                walk_nested_groups(name, NCData, verbose)
+        
+        if verbose:
+            print("Model Successfully Loaded.")
+            
+        NCData.close()
+        
+        return model_copy
+
+    # just in case something unexpected happens
+    except Exception as e:
+        if 'NCData' in locals():
+            NCData.close()
+        raise e
+
+def make_results_subclasses(NCData, verbose = False):
+    '''
+        There are 3 possible subclasses: solution, solutionstep, resultsdakota.
+        In the NetCDF file these are saved as a list of strings. Ie, say there are 2
+        instances of solution under results, StressbalanceSolution and TransientSolution. 
+        In the NetCDF file we would see solution = "StressbalanceSolution", "TransientSolution"
+        To deconstruct this, we need to iteratively assign md.results.StressbalanceSolution = solution()
+        and md.results.TransientSolution = solution() and whatever else.
+    '''
+    # start with the subclasses
+    for subclass in NCData.groups['results'].variables.keys():
+        class_instance = subclass + '()'
+
+        # now handle the instances
+        for instance in NCData.groups['results'].variables[subclass][:]:
+            # this is an ndarray of numpy bytes_ that we have to convert to strings
+            class_instance_name = instance.tobytes().decode('utf-8').strip()
+            # from here we can make new subclasses named as they were in the model saved
+            setattr(model_copy.results, class_instance_name, eval(class_instance))
+            if verbose:
+                print(f'Successfully created results subclass instance {class_instance} named {class_instance_name}.')
+
+
+def check_inversion_class(NCData, verbose = False):
+    # get the name of the inversion class: either inversion or m1qn3inversion or taoinversion
+    inversion_class_is = NCData.groups['inversion'].variables['inversion_class_name'][:][...].tobytes().decode()
+    if inversion_class_is == 'm1qn3inversion':
+        # if it is m1qn3inversion we need to instantiate that class since it's not native to model()
+        model_copy.inversion = m1qn3inversion(model_copy.inversion)
+        if verbose:
+            print('Conversion successful')
+    elif inversion_class_is == 'taoinversion':
+        # if it is taoinversion we need to instantiate that class since it's not native to model()
+        model_copy.inversion = taoinverion()
+        if verbose:
+            print('Conversion successful')
+    else: pass
+
+
+def walk_nested_groups(group_location_in_file, NCData, verbose = False):
+    # first, we enter the group by: filename.groups['group_name']
+    # second we search the current level for variables: filename.groups['group_name'].variables.keys()
+    # at this step we check for multidimensional structure arrays/ arrays of objects and filter them out
+    # third we get nested group keys by: filename.groups['group_name'].groups.keys()
+    # if a nested groups exist, repeat all
+
+    for variable in eval(group_location_in_file + '.variables.keys()'):
+        if 'is_object' not in locals():
+            if variable == 'this_is_a_nested' and 'results' in group_location_in_file and 'qmu' not in group_location_in_file:
+                # have to do some string deconstruction to get the name of the class instance/last group from 'NetCDF.groups['group1'].groups['group1.1']'
+                pattern = r"\['(.*?)'\]"
+                matches = re.findall(pattern, group_location_in_file)
+                name_of_struct = matches[-1] #eval(group_location_in_file + ".variables['solution']") 
+                deserialize_nested_results_struct(group_location_in_file, name_of_struct, NCData)
+                is_object = True
+    
+            elif variable == 'name_of_cell_array':
+                # reconstruct an array of elements
+                deserialize_array_of_objects(group_location_in_file, model_copy, NCData, verbose)
+                is_object = True
+    
+            elif variable == 'this_is_a_nested' and 'qmu' in group_location_in_file:
+                if verbose:
+                    print('encountered qmu structure that is not yet supported.')
+                else: pass
+                    
+                is_object = True
+        
+            else:
+                location_of_variable_in_file = group_location_in_file + ".variables['" + str(variable) + "']"
+                # group_location_in_file is like filename.groups['group1'].groups['group1.1'].groups['group1.1.1']
+                # Define the regex pattern to match the groups within brackets
+                pattern = r"\['(.*?)'\]"
+                # Use regex to find all matches and return something like 'group1.group1.1.group1.1.1 ...' where the last value is the name of the variable
+                matches = re.findall(pattern, location_of_variable_in_file)
+                variable_name = matches[-1]
+                location_of_variable_in_model = '.'.join(matches[:-1])
+                deserialize_data(location_of_variable_in_file, location_of_variable_in_model, variable_name, NCData, verbose=verbose)
+
+    # if one of the variables above was an object, further subclasses will be taken care of when reconstructing it
+    if 'is_object' in locals():
+        pass
+    else:
+        for nested_group in eval(group_location_in_file + '.groups.keys()'):
+            new_nested_group = group_location_in_file + ".groups['" + str(nested_group) + "']"
+            walk_nested_groups(new_nested_group, NCData, verbose=verbose)
+
+
+
+'''
+    MATLAB has Multidimensional Structure Arrays in 2 known classes: results and qmu.
+    The python classes results.py and qmu.py emulate this MATLAB object in their own
+    unique ways. The functions in this script will assign data to either of these 
+    classes such that the final structure is compatible with its parent class.
+'''
+
+def deserialize_nested_results_struct(group_location_in_file, name_of_struct, NCData, verbose = False):
+    '''
+    A common multidimensional array is the 1xn md.results.TransientSolution object.
+
+    The way that this object emulates the MATLAB mutli-dim. struct. array is with 
+    the solution().steps attr. which is a list of solutionstep() instances
+        The process to recreate is as follows:
+            1. Get instance of solution() with solution variable (the instance is made in make_results_subclasses)
+            2. For each subgroup, create a solutionstep() class instance
+             2a. Populate the instance with the key:value pairs
+             2b. Append the instance to the solution().steps list
+    '''
+    # step 1
+    class_instance_name = name_of_struct
+    
+    # for some reason steps is not already a list
+    setattr(model_copy.results.__dict__[class_instance_name], 'steps', list())
+
+    steps = model_copy.results.__dict__[class_instance_name].steps
+    
+    # step 2
+    layer = 1
+    for subgroup in eval(group_location_in_file + ".groups.keys()"):
+        solutionstep_instance = solutionstep()
+        # step 2a
+        subgroup_location_in_file = group_location_in_file + ".groups['" + subgroup + "']"
+        for key in eval(subgroup_location_in_file + ".variables.keys()"):
+            value = eval(subgroup_location_in_file + ".variables['" + str(key) + "'][:]")
+            setattr(solutionstep_instance, key, value)
+        # step 2b
+        steps.append(solutionstep_instance)
+        if verbose:
+            print('Succesfully loaded layer ' + str(layer) + ' to results.' + str(class_instance_name) + ' struct.')
+        else: pass
+        layer += 1
+
+    if verbose:
+        print('Successfully recreated results structure ' + str(class_instance_name))
+
+
+
+def deserialize_array_of_objects(group_location_in_file, model_copy, NCData, verbose):
+    '''
+        The structure in netcdf for groups with the name_of_cell_array variable is like:
+
+        group: 2x6_cell_array_of_objects {
+            name_of_cell_array = <name_of_cell_array>
+
+            group: Row_1_of_2 {
+                group: Col_1_of_6 {
+                    ... other groups can be here that refer to objects
+                } // group Col_6_of_6
+            } // group Row_1_of_2
+
+            group: Row_2_of_2 {
+                group: Col_1_of_6 {
+                    ... other groups can be here that refer to objects
+                } // group Col_6_of_6
+            } // group Row_2_of_2
+        } // group 2x6_cell_array_of_objects
+
+        We have to navigate this structure to extract all the data and recreate the 
+        original structure when the model was saved
+    '''
+
+    if verbose: 
+        print(f"Loading array of objects.")
+
+    # get the name_of_cell_array, rows and cols vars
+    name_of_cell_array_varID = eval(group_location_in_file + ".variables['name_of_cell_array']")
+    rows_varID = eval(group_location_in_file + ".variables['rows']")
+    cols_varID = eval(group_location_in_file + ".variables['cols']")
+
+    name_of_cell_array = name_of_cell_array_varID[:][...].tobytes().decode()
+    rows = rows_varID[:]
+    cols = cols_varID[:]
+
+    # now we work backwards: make the array, fill it in, and assign it to the model
+
+    # make the array
+    array = list()
+
+    subgroups = eval(group_location_in_file + ".groups") #.keys()")
+
+    # enter each subgroup, get the data, assign it to the corresponding index of cell array
+    if rows > 1:
+        # we go over rows
+        # set index for rows
+        row_idx = 0
+        for row in list(subgroups):
+            # make list for each row
+            current_row = list()
+            columns = subgroups[str(row)].groups.keys()
+
+            # set index for columns
+            col_idx = 0
+
+            # iterate over columns
+            for col in list(columns):
+                # now get the variables 
+                current_col_vars = columns.groups[str(col)].variables
+
+                # check for special datastructures                
+                if "class_is_a" in current_col_vars:
+                    class_name = subgroups[str(col)].variables['class_is_a'][:][...].tobytes().decode()
+                    col_data = deserialize_class_instance(class_name, columns.groups[str(col)], NCData, verbose)
+                    is_object = True
+                elif "this_is_a_nested" in current_col_vars:
+                    # functionality not yet supported
+                    print('Error: Cell Arrays of structs not yet supported!')
+                    is_object = True
+                else:
+                    if 'is_object_' in locals():
+                        pass
+                        # already taken care of
+                    else:
+                        # store the variables as normal -- to be added later
+                        print('Error: Arrays of mixed objects not yet supported!')
+                        for var in current_col_vars:
+                            # this is where that functionality would be handled
+                            pass
+                col_idx += 1
+                # add the entry to our row list
+                current_row.append(col_data)
+
+            # add the list of columns to the array
+            array.append(current_row)
+            row_idx += 1
+
+    else:
+        # set index for columns
+        col_idx = 0
+
+        # iterate over columns
+        for col in list(subgroups):
+            # now get the variables 
+            current_col_vars = subgroups[str(col)].variables
+            
+            # check for special datastructures
+            if "class_is_a" in current_col_vars:
+                class_name = subgroups[str(col)].variables['class_is_a'][:][...].tobytes().decode()
+                col_data = deserialize_class_instance(class_name, subgroups[str(col)], NCData, verbose)
+                is_object = True
+            elif "this_is_a_nested" in current_col_vars:
+                # functionality not yet supported
+                print('Error: Cell Arrays of structs not yet supported!')
+                is_object = True
+            else:
+                if 'is_object_' in locals():
+                    pass
+                    # already taken care of
+                else:
+                    # store the variables as normal -- to be added later
+                    print('Error: Arrays of mixed objects not yet supported!')
+                    for var in current_col_vars:
+                        # this is where that functionality would be handled
+                        pass
+            col_idx += 1
+            # add the list of columns to the array
+            array.append(col_data)
+
+    # finally, add the attribute to the model
+    pattern = r"\['(.*?)'\]"
+    matches = re.findall(pattern, group_location_in_file)
+    variable_name = matches[0]
+    setattr(model_copy.__dict__[variable_name], name_of_cell_array, array)
+
+    if verbose:
+        print(f"Successfully loaded array of objects: {name_of_cell_array} to {variable_name}")
+
+
+
+def deserialize_class_instance(class_name, group, NCData, verbose=False):
+
+    if verbose:
+        print(f"Loading class: {class_name}")
+
+    # this function requires the class module to be imported into the namespace of this file.
+    # we make a custom error in case the class module is not in the list of imported classes.
+    # most ISSM classes are imported by from <name> import <name>
+    class ModuleError(Exception):
+        pass
+    
+    if class_name not in sys.modules:
+        raise ModuleError(str('Model requires the following class to be imported from a module: ' + class_name + ". Please add the import to read_netCDF.py in order to continue."))
+
+    # Instantiate the class
+    class_instance = eval(class_name + "()")
+
+    # Get and assign properties
+    subgroups = list(group.groups.keys())
+
+    if len(subgroups) == 1:
+        # Get properties
+        subgroup = group[subgroups[0]]
+        varIDs = subgroup.variables.keys()
+        for varname in varIDs:
+            # Variable metadata
+            var = subgroup[varname]
+
+            # Data
+            if 'char' in var.dimensions[0]:
+                data = var[:][...].tobytes().decode()
+            else:
+                data = var[:]
+
+            # Some classes may have permissions, so we skip those
+            try:
+                setattr(class_instance, varname, data)
+            except:
+                pass
+    else:
+        # Not supported
+        pass
+
+    if verbose: 
+        print(f"Successfully loaded class instance {class_name} to model")
+    return class_instance
+
+
+
+def deserialize_data(location_of_variable_in_file, location_of_variable_in_model, variable_name, NCData, verbose = False):
+    # as simple as navigating to the location_of_variable_in_model and setting it equal to the location_of_variable_in_file
+    # NetCDF4 has a property called "_FillValue" that sometimes saves empty lists, so we have to catch those
+    FillValue = -9223372036854775806
+    try:
+        # results band-aid...
+        if str(location_of_variable_in_model + '.' + variable_name) in ['results.solutionstep', 'results.solution', 'results.resultsdakota']:
+            pass
+        # qmu band-aid
+        elif 'qmu.statistics.method' in str(location_of_variable_in_model + '.' + variable_name):
+            pass
+        # handle any strings:
+        elif 'char' in eval(location_of_variable_in_file + '.dimensions[0]'):
+            setattr(eval('model_copy.' + location_of_variable_in_model), variable_name, eval(location_of_variable_in_file + '[:][...].tobytes().decode()'))
+        # handle ndarrays + lists
+        elif len(eval(location_of_variable_in_file + '[:]'))>1:
+            # check for bool
+            try: # there is only one datatype assigned the attribute 'units' and that is bool, so anything else will go right to except
+                if eval(location_of_variable_in_file + '.units') == 'bool':
+                    setattr(eval('model_copy.' + location_of_variable_in_model), variable_name, np.array(eval(location_of_variable_in_file + '[:]'), dtype = bool))
+                else:
+                    setattr(eval('model_copy.' + location_of_variable_in_model), variable_name, eval(location_of_variable_in_file + '[:]'))
+            except:
+                setattr(eval('model_copy.' + location_of_variable_in_model), variable_name, eval(location_of_variable_in_file + '[:]'))
+        # catch everything else
+        else:
+            # check for FillValue. use try/except because try block will only work on datatypes like int64, float, single element lists/arrays ect and not nd-arrays/n-lists etc
+            try:
+                # this try block will only work on single ints/floats/doubles and will skip to the except block for all other cases
+                var_to_save = eval(location_of_variable_in_file + '[:][0]')  # note the [0] on the end
+                if FillValue == var_to_save:
+                    setattr(eval('model_copy.' + location_of_variable_in_model), variable_name, [])
+                else:
+                    if var_to_save.is_integer():
+                        setattr(eval('model_copy.' + location_of_variable_in_model), variable_name, int(var_to_save))
+                    else:
+                        # we have to convert numpy datatypes to native python types with .item()
+                        setattr(eval('model_copy.' + location_of_variable_in_model), variable_name, var_to_save.item())
+            except:
+                setattr(eval('model_copy.' + location_of_variable_in_model), variable_name, eval(location_of_variable_in_file + '[:]'))
+    except AttributeError:
+        deserialize_dict(location_of_variable_in_file, location_of_variable_in_model, NCData, verbose=verbose)
+
+    if verbose:
+        print('Successfully loaded ' + location_of_variable_in_model + '.' + variable_name + ' into model.')
+
+
+
+def deserialize_dict(location_of_variable_in_file, location_of_variable_in_model, NCData, verbose = False):
+    FillValue = -9223372036854775806
+
+    # the key will be the last item in the location
+    key = ''.join(location_of_variable_in_model.split('.')[-1])
+
+    # update the location to point to the dict instead of the dict key
+    location_of_variable_in_model = '.'.join(location_of_variable_in_model.split('.')[:-1])
+
+    # verify we're working with a dict:
+    if isinstance(eval('model_copy.' + location_of_variable_in_model), OrderedDict):
+        dict_object = eval('model_copy.' + location_of_variable_in_model)
+        
+        # handle any strings:
+        if 'char' in eval(location_of_variable_in_file + '.dimensions[0]'):
+            data = eval(location_of_variable_in_file + '[:][...].tobytes().decode()')
+            dict_object.update({key: data})
+            
+        # handle ndarrays + lists
+        elif len(eval(location_of_variable_in_file + '[:]'))>1:
+            # check for bool
+            try: # there is only one datatype assigned the attribute 'units' and that is bool, so anything else will go right to except
+                if eval(location_of_variable_in_file + '.units') == 'bool':
+                    data = np.array(eval(location_of_variable_in_file + '[:]'), dtype = bool)
+                    dict_object.update({key: data})
+                else:
+                    data = eval(location_of_variable_in_file + '[:]')
+                    dict_object.update({key: data})
+            except:
+                data = eval(location_of_variable_in_file + '[:]')
+                dict_object.update({key: data})
+        # catch everything else
+        else:
+            # check for FillValue. use try/except because try block will only work on datatypes like int64, float, single element lists/arrays ect and not nd-arrays/n-lists etc
+            try:
+                # this try block will only work on single ints/floats/doubles and will skip to the except block for all other cases
+                if FillValue == eval(location_of_variable_in_file + '[:][0]'):
+                    dict_object.update({key: []})
+                else:
+                    # we have to convert numpy datatypes to native python types with .item()
+                    var_to_save = eval(location_of_variable_in_file + '[:][0]')  # note the [0] on the end
+                    dict_object.update({key:  var_to_save.item()})
+            except:
+                data = eval(location_of_variable_in_file + '[:]')
+                dict_object.update({key: data})
+    else:
+        print(f"Unrecognized object was saved to NetCDF file and cannot be reconstructed: {location_of_variable_in_model}")
Index: /issm/trunk/src/m/netcdf/write_netCDF.m
===================================================================
--- /issm/trunk/src/m/netcdf/write_netCDF.m	(revision 28013)
+++ /issm/trunk/src/m/netcdf/write_netCDF.m	(revision 28013)
@@ -0,0 +1,768 @@
+%{
+Given a model, this set of functions will perform the following:
+    1. Enter each nested class of the model.
+    2. View each attribute of each nested class.
+    3. Compare state of attribute in the model to an empty model class.
+    4. If states are identical, pass.
+    5. Otherwise, create nested groups named after class structure.
+    6. Create variable named after class attribute and assign value to it.
+%}
+
+
+function write_netCDF(model_var, filename, varargin)
+    if nargin > 2
+        verbose = true;
+    else
+        verbose = false;
+    end
+    if verbose
+        disp('MATLAB C2NetCDF4 v1.1.14');
+    end
+    
+    % model_var = class object to be saved
+    % filename = path and name to save file under
+    
+    % Create a NetCDF file to write to
+    NetCDF = make_NetCDF(filename, verbose);
+    
+    % Create an instance of an empty model class to compare model_var against
+    empty_model = model();
+
+    % Walk through the model_var class and compare subclass states to empty_model
+    walk_through_model(model_var, empty_model, NetCDF, verbose);
+
+    % in order to handle some subclasses in the results class, we have to utilize this band-aid
+    % there will likely be more band-aids added unless a class name library is created with all class names that might be added to a model
+    try
+        % if results had meaningful data, save the name of the subclass and class instance
+        netcdf.inqNcid(NetCDF,'results');
+        results_subclasses_bandaid(model_var, NetCDF, verbose);
+        % otherwise, ignore
+    catch
+    end
+    
+    netcdf.close(NetCDF);
+    if verbose
+        disp('Model successfully saved as NetCDF4');
+    end
+end
+
+
+
+function NetCDF = make_NetCDF(filename, verbose)
+    % matlab can't handle input in the jupyter interface, so we just yell at the user to rename
+    % their file if needed
+    % If file already exists delete / rename it
+    if exist(filename, 'file') == 2
+        fprintf('File %s already exists\n', filename);
+        disp('Please rename your file.')
+        return
+    
+        % If so, inquire for a new name or to delete the existing file
+        %newname = input('Give a new name or input "delete" to replace: ', 's');
+
+        %if strcmpi(newname, 'delete')
+            %delete filename;
+        %else
+            %fprintf('New file name is %s\n', newname);
+            %filename = newname;
+        %end
+    else
+        % Otherwise create the file and define it globally so other functions can call it
+        
+        NetCDF = netcdf.create(filename, 'NETCDF4');
+        netcdf.putAtt(NetCDF, netcdf.getConstant('NC_GLOBAL'), 'history', ['Created ', datestr(now)]);
+        netcdf.defDim(NetCDF, 'Unlim', netcdf.getConstant('NC_UNLIMITED')); % unlimited dimension
+        netcdf.defDim(NetCDF, 'float', 1);     % single integer dimension
+        netcdf.defDim(NetCDF, 'int', 1);       % single float dimension
+
+        if verbose
+            fprintf('Successfully created %s\n', filename);
+        end
+
+        return 
+    end
+end
+
+
+%{
+    Since python uses subclass instances and MATLAB uses fields, we need to guess which subclass instance python will need
+    given the name of the sub-field in MATLAB. We make this guess based on the name of the MATLAB subfield that will contain
+    the name of the python subclass instance. For example, md.results.StressbalanceSolution is an subfield in MATLAB,
+    but a class instance of solution(). Notice that StressbalanceSolution contains the name "Solution" in it. This is what
+    we will save to the netCDF file for python to pick up.
+%}
+
+function results_subclasses_bandaid(model_var, NetCDF, verbose)
+    
+    % The results class may have nested fields within it, so we need to record the name of 
+    % the nested field as it appears in the model that we're trying to save
+    quality_control = {};
+    
+    % Access the results subclass of model_var
+    results_var = model_var.results;
+
+    % get the results group id so we can write to it
+    groupid = netcdf.inqNcid(NetCDF,'results');
+    
+    % Loop through each class instance in results
+    class_instance_names = fieldnames(results_var);
+
+    % we save lists of instances to the netcdf
+    solutions = {};
+    solutionsteps = {};
+    resultsdakotas = {};
+    
+    for i = 1:numel(class_instance_names)
+        class_instance_name = class_instance_names{i};
+        % there are often mutliple instances of the same class/struct so we have to number them
+        % Check to see if there is a solutionstep class instance
+        if contains(class_instance_name, 'solutionstep',IgnoreCase=true)
+            quality_control{end+1} = 1;
+            solutionsteps{end+1} = class_instance_name;
+            if verbose
+                disp('Successfully stored class python subclass instance: solutionstep')
+            end
+        end
+        
+        % Check to see if there is a solution class instance
+        if contains(class_instance_name, 'solution',IgnoreCase=true)
+            quality_control{end+1} = 1;
+            solutions{end+1} = class_instance_name;
+            if verbose
+                disp('Successfully stored class python subclass instance: solution')
+            end
+        end
+        
+        % Check to see if there is a resultsdakota class instance
+        if contains(class_instance_name, 'resultsdakota',IgnoreCase=true)
+            quality_control{end+1} = 1;
+            resultsdakotas{end+1} = class_instance_name;
+            if verbose
+                disp('Successfully stored class python subclass instance: resultsdakota')
+            end
+        end
+    end
+    if ~isempty(solutionsteps)
+        write_cell_with_strings('solutionstep', solutionsteps, groupid, NetCDF, verbose)
+    end
+    if ~isempty(solutions)
+        write_cell_with_strings('solution', solutions, groupid, NetCDF, verbose)
+    end
+    if ~isempty(resultsdakotas)
+        write_cell_with_strings('resultsdakota', resultsdakotas, groupid, NetCDF, verbose)
+    end
+    
+    
+
+    % Check if all class instances were processed correctly
+    if numel(quality_control) ~= numel(class_instance_names)
+        disp('Error: The class instance within your model.results class is not currently supported by this application');
+    else
+        if verbose
+            disp('The results class was successfully stored on disk');
+        end
+    end
+end
+
+
+
+function walk_through_model(model_var, empty_model, NetCDF, verbose)
+    % Iterate over first layer of model_var attributes and assume this first layer is only classes fundamental to the model() class
+    % note that groups are the same as class instances/subfields in this context
+    groups = fieldnames(model_var);
+    for group = 1:numel(groups)
+        % now this new variable takes the form model.mesh , model.damage etc.
+        model_subclass = model_var.(groups{group});
+        empty_model_subclass = empty_model.(groups{group});
+        % Now we can recursively walk through the remaining subclasses
+        list_of_layers = {groups{group}};
+        walk_through_subclasses(model_subclass, empty_model_subclass, list_of_layers, empty_model, NetCDF, verbose);
+    end
+end
+        
+
+function walk_through_subclasses(model_subclass, empty_model_subclass, given_list_of_layers, empty_model, NetCDF, verbose)
+    % Recursivley iterate over each subclass' attributes and look for more subclasses and variables with relevant data
+    % model_subclass is an object (ie, md.mesh.elements)
+    % list_of_layers is a cell array of subclasses/attributes/fields so that we can copy the structure into netcdf (ie, {'mesh', 'elements'})
+    % need to check if inversion or m1qn3inversion or taoinversion class
+    if numel(given_list_of_layers) == 1
+        if strcmp(given_list_of_layers{1}, 'inversion')
+            create_group(model_subclass, given_list_of_layers, NetCDF, verbose);
+            check_inversion_class(model_subclass, NetCDF, verbose);
+        end
+    end
+    
+    % Use try/except since model_subclass is either a subclass/struct w/ props/fields or it's not, no unknown exceptions
+    try 
+        % look for children - this is where the catch would be called
+        children = fieldnames(model_subclass);
+
+        % if there are children, loop through them and see if we need to save any data
+        for child = 1:numel(children)
+            % record our current location
+            list_of_layers = given_list_of_layers;
+            current_child = children{child};
+            list_of_layers{end+1} = current_child;
+        
+            % this is the value of the current location in the model (ie, md.mesh.elements)
+            location_of_child = model_subclass.(current_child);
+            
+            % if the empty model does not have this attribute, it's because it's new so we save it to netcdf
+            % there are 2 cases: the location is a struct, the location is a class
+            if isstruct(model_subclass)
+                % if the current field is a nested struct assume it has valuable data that needs to be saved
+                if isstruct(location_of_child) && any(size(location_of_child) > 1)
+                    create_group(location_of_child, list_of_layers, NetCDF, verbose);
+                
+                % this would mean that the layer above the layer we're interested in is a struct, so
+                % we can navigate our empty model as such
+                elseif isfield(empty_model_subclass, current_child)
+                    % the layer we're interested in does exist, we just need to compare states
+                    location_of_child_in_empty_model = empty_model_subclass.(current_child);
+
+                    % if the current attribute is a numerical array assume it has valuable data that needs to be saved
+                    if isnumeric(location_of_child) && logical(numel(location_of_child) > 1)
+                        create_group(location_of_child, list_of_layers, NetCDF, verbose);
+                    % if the attributes are identical we don't need to save anything
+                    elseif (all(isnan(location_of_child)) && all(isnan(location_of_child_in_empty_model))) || isempty(setxor(location_of_child, location_of_child_in_empty_model))
+                        walk_through_subclasses(location_of_child, location_of_child_in_empty_model, list_of_layers, empty_model, NetCDF, verbose);
+                    % if the attributes are not the same we need to save ours
+                    else
+                        % THE ORDER OF THESE LINES IS CRITICAL
+                        walk_through_subclasses(location_of_child, location_of_child_in_empty_model, list_of_layers, empty_model, NetCDF, verbose);
+                        create_group(location_of_child, list_of_layers, NetCDF, verbose);
+                    end
+                % this would mean that the layer we're interested in is not fundamental to the model architecture
+                % and thus needs to be saved to the netcdf
+                else
+                    walk_through_subclasses(location_of_child, empty_model_subclass, list_of_layers, empty_model, NetCDF, verbose);
+                    create_group(location_of_child, list_of_layers, NetCDF, verbose);
+                end
+            % this would mean it's not a struct, and must be a class/subclass
+            % we now check the state of the class property
+            else 
+                try
+                    if isprop(empty_model_subclass, current_child)
+                        % the layer we're interested in does exist, we just need to compare states
+                        location_of_child_in_empty_model = empty_model_subclass.(current_child);
+                        % if the current attribute is a numerical array assume it has valuable data that needs to be saved
+                        if isnumeric(location_of_child) && logical(numel(location_of_child) > 1)
+                            create_group(location_of_child, list_of_layers, NetCDF, verbose);
+                        
+                        elseif iscell(location_of_child)
+                            % if the attributes are identical we don't need to save anything
+                            if isempty(setxor(location_of_child, location_of_child_in_empty_model))
+                                % pass
+                            else
+                            % otherwise we need to save
+                                walk_through_subclasses(location_of_child, empty_model_subclass, list_of_layers, empty_model, NetCDF, verbose);
+                                create_group(location_of_child, list_of_layers, NetCDF, verbose);
+                            end
+                        elseif (all(isnan(location_of_child)) && all(isnan(location_of_child_in_empty_model)))
+                            walk_through_subclasses(location_of_child, location_of_child_in_empty_model, list_of_layers, empty_model, NetCDF, verbose);
+                        % if the attributes are not the same we need to save ours
+                        else
+                            % THE ORDER OF THESE LINES IS CRITICAL
+                            walk_through_subclasses(location_of_child, location_of_child_in_empty_model, list_of_layers, empty_model, NetCDF, verbose);
+                            create_group(location_of_child, list_of_layers, NetCDF, verbose);
+                        end
+                    else
+                        walk_through_subclasses(location_of_child, empty_model_subclass, list_of_layers, empty_model, NetCDF, verbose);
+                        create_group(location_of_child, list_of_layers, NetCDF, verbose);
+                    end
+                catch
+                    walk_through_subclasses(location_of_child, empty_model_subclass, list_of_layers, empty_model, NetCDF, verbose);
+                    create_group(location_of_child, list_of_layers, NetCDF, verbose);
+                end
+            end
+        end
+    catch ME
+        % If the caught error is a fieldname error, it's just saying that a variable has no fields and thus can be ignored
+        if strcmp(ME.identifier, 'MATLAB:fieldnames:InvalidInput')
+            % do nothing
+        % this is if we come accross instances/subfields in our model that are not fundamental to the model class (ie, taoinversion)
+        elseif strcmp(ME.identifier, 'MATLAB:UndefinedFunction')
+            walk_through_subclasses(location_of_child, empty_model_subclass, given_list_of_layers, empty_model, NetCDF, verbose);
+            create_group(location_of_child, list_of_layers, NetCDF, verbose);
+        % If it's a different error, rethrow it to MATLAB's default error handling
+        else
+            disp(ME.identifier)
+            disp(given_list_of_layers)
+            rethrow(ME);
+        end
+    end
+end 
+        
+
+function create_group(location_of_child, list_of_layers, NetCDF, verbose)
+    %disp(list_of_layers)
+    % location_of_child is an object
+    % list_of_layers is a list like {'inversion', 'StressbalanceSolution','cost_functions_coefficients'}
+    % first we make the group at the highest level (ie, inversion)
+    group_name = list_of_layers{1};
+    variable_name = list_of_layers{end};
+    
+    % if the group is already made, get it's ID instead of creating it again
+    try % group hasn't been made
+        group = netcdf.defGrp(NetCDF, group_name);
+    catch % group was already made
+        group = netcdf.inqNcid(NetCDF, group_name);    
+    end
+
+    % if the data is nested, create nested groups to match class structure
+    if numel(list_of_layers) > 2
+        % the string() method is really important here since matlab apparently can't handle the infinite complexity of a string without the string method.
+        for name = string(list_of_layers(2:end-1))
+            % the group levels may have already been made
+            try % group hasn't been made
+                group = netcdf.defGrp(group, name);
+            catch % group was already made
+                group = netcdf.inqNcid(group, name);
+            end
+        end
+    end
+    % sometimes objects are passed through twice so we account for that with this try/catch
+    try
+        % we may be dealing with an object
+        % first we screen for structs
+        if isstruct(location_of_child) % && any(size(location_of_child) > 1) -- this is being tested
+            % we have a struct
+            copy_nested_struct(variable_name, location_of_child, group, NetCDF, verbose);
+        
+        % now for cell arrays of datastructures:
+        elseif logical(~isstruct(location_of_child) && iscell(location_of_child) && isobject(location_of_child{1}))
+            copy_cell_array_of_objects(variable_name, location_of_child, group, NetCDF, verbose);
+        else
+            if ~isobject(location_of_child) && ~isstruct(location_of_child)
+                % we're dealing with raw data
+                create_var(variable_name, location_of_child, group, NetCDF, verbose);
+            end
+        end
+    catch
+        % do nothing
+    end
+end
+
+
+
+function copy_cell_array_of_objects(variable_name, address_of_child, group, NetCDF, verbose)
+    % make subgroup to represent the array
+    [rows, cols] = size(address_of_child);
+    name_of_subgroup = [num2str(rows), 'x', num2str(cols), '_cell_array_of_objects'];
+    subgroup = netcdf.defGrp(group, name_of_subgroup);
+
+    % save the name of the cell array
+    write_string_to_netcdf('name_of_cell_array', variable_name, subgroup, NetCDF, verbose);
+
+    % save the dimensions of the cell array
+    create_var('rows', rows, subgroup, NetCDF, verbose);
+    create_var('cols', cols, subgroup, NetCDF, verbose);
+
+    % if this is a multidimensional cell array, iterate over rows here and cols in copy_objects
+    if rows>1
+        for row = 1:rows
+            % make a subgroup for each row
+            name_of_subgroup = ['Row_', num2str(row), '_of_', num2str(rows)];
+            subgroup = netcdf.defGrp(group, name_of_subgroup);
+            copy_objects(address_of_child, subgroup, NetCDF, cols, verbose);
+        end
+    else
+        copy_objects(address_of_child, subgroup, NetCDF, cols, verbose);
+    end
+end
+
+
+
+function copy_objects(address_of_child, group, NetCDF, cols, verbose)
+    for col = 1:cols
+        % make subgroup to contain each col of array
+        name_of_subgroup = ['Col_', num2str(col), '_of_', num2str(cols)];
+        subgroup = netcdf.defGrp(group, name_of_subgroup);
+
+        % get the kind of object we're working with:
+        if isstruct(address_of_child{col})
+            % handle structs
+            name_raw = fields(address_of_child{col});
+            variable_name = name_raw{1};
+            copy_nested_struct(variable_name, address_of_child, subgroup, NetCDF, verbose);
+            
+        elseif numel(properties(address_of_child{col})) > 0
+            % handle class instances
+            copy_class_instance(address_of_child{col}, subgroup, NetCDF, verbose);
+        else
+            disp('ERROR: Cell arrays of mixed types are not yet supported in read_netCDF!\n Deserialization will not be able to complete!')
+            % handle regular datastructures that are already supported
+            name_raw = fields(address_of_child);
+            variable_name = name_raw{col};
+            create_var(variable_name, address_of_child, subgroup, NetCDF, verbose);
+        end
+    end
+end
+
+
+function copy_class_instance(address_of_child, subgroup, NetCDF, verbose)
+    % get parent class name
+    name = class(address_of_child);
+
+    % save the name of the class
+    write_string_to_netcdf('class_is_a', name, subgroup, NetCDF, verbose);
+    
+    % make subgroup to contain properties
+    name_of_subgroup = ['Properties_of_', name];
+    subgroup = netcdf.defGrp(subgroup, name_of_subgroup);
+
+    % get properties
+    props = properties(address_of_child);
+
+    for property = 1:length(props)
+        variable_name = props{property};
+        create_var(variable_name, address_of_child.(variable_name), subgroup, NetCDF, verbose);
+    end
+
+end
+
+
+function copy_nested_struct(parent_struct_name, address_of_struct, group, NetCDF, verbose)
+    %{
+        This function takes a struct of structs and saves them to netcdf. 
+
+        It also works with single structs.
+
+        To do this, we get the number of dimensions (substructs) of the parent struct.
+        Next, we iterate through each substruct and record the data. 
+        For each substruct, we create a subgroup of the main struct.
+        For each variable, we create dimensions that are assigned to each subgroup uniquely.
+    %}
+
+    % make a new subgroup to contain all the others:
+    group = netcdf.defGrp(group, parent_struct_name);
+    
+    % make sure other systems can flag the nested struct type
+    dimID = netcdf.defDim(group, 'struct', 6);
+    string_var = netcdf.defVar(group, 'this_is_a_nested', "NC_CHAR", dimID);
+    uint_method=uint8('struct').';
+    method_ID = char(uint_method);
+    netcdf.putVar(group, string_var, method_ID);
+
+    % other systems know the name of the parent struct because it's covered by the results/qmu functions above
+    
+    % 'a' will always be 1 and is not useful to us
+    [a, no_of_dims] = size(address_of_struct);
+
+    for substruct = 1:no_of_dims
+        % we start by making subgroups with nice names like "TransientSolution_substruct_44"
+        name_of_subgroup = ['1x', num2str(substruct)];
+        subgroup = netcdf.defGrp(group, name_of_subgroup);
+
+        % do some housekeeping to keep track of the current layer
+        current_substruct = address_of_struct(substruct);
+        substruct_fields = fieldnames(current_substruct)'; % transpose because matlab only interates over n x 1 arrays
+        
+        % now we need to iterate over each variable of the nested struct and save it to this new subgroup
+        for variable_name = string(substruct_fields)
+            address_of_child = current_substruct.(variable_name);
+            create_var(variable_name, address_of_child, subgroup, NetCDF, verbose);
+        end
+    end
+    if verbose
+        fprintf(["Succesfully transferred nested MATLAB struct ",  parent_struct_name, " to the NetCDF\n"])
+    end
+end
+
+
+
+% ironically inversion does not have the same problem as results as inversion subfields
+% are actually subclasses and not fields
+function check_inversion_class(model_var, NetCDF, verbose)
+    
+    % Define a persistent variable to ensure this function is only run once
+    persistent executed;
+    % Check if the function has already been executed
+    if isempty(executed)
+        if verbose
+            disp('Deconstructing Inversion class instance')
+        end
+        % Need to make sure that we have the right inversion class: inversion, m1qn3inversion, taoinversion
+        groupid = netcdf.inqNcid(NetCDF,'inversion');
+
+        if isa(model_var, 'm1qn3inversion')
+            write_string_to_netcdf('inversion_class_name', 'm1qn3inversion', groupid, NetCDF, verbose);
+            if verbose
+                disp('Successfully saved inversion class instance m1qn3inversion')
+            end
+        elseif isa(model_var, 'taoinversion')
+            write_string_to_netcdf('inversion_class_name', 'taoinversion', groupid, NetCDF, verbose);
+            if verbose 
+                disp('Successfully saved inversion class instance taoinversion')
+            end
+        else
+            write_string_to_netcdf('inversion_class_name', 'inversion', groupid, NetCDF,  verbose);
+            if verbose
+                disp('Successfully saved inversion class instance inversion')
+            end
+        end
+        % Set the persistent variable to indicate that the function has been executed
+        executed = true;
+    end
+end
+
+
+function create_var(variable_name, address_of_child, group, NetCDF, verbose)
+    % There are lots of different variable types that we need to handle from the model class
+    
+    % get the dimensions we'll need
+    intdim = netcdf.inqDimID(NetCDF,'int');
+    floatdim = netcdf.inqDimID(NetCDF,'float');
+    unlimdim = netcdf.inqDimID(NetCDF,'Unlim');
+    
+    % This first conditional statement will catch numeric arrays (matrices) of any dimension and save them
+    if any(size(address_of_child)>1) && ~iscellstr(address_of_child) && ~ischar(address_of_child)
+        write_numeric_array_to_netcdf(variable_name, address_of_child, group, NetCDF, verbose);
+
+    % check if it's a string
+    elseif ischar(address_of_child)
+        write_string_to_netcdf(variable_name, address_of_child, group, NetCDF, verbose);
+
+    % or an empty variable
+    elseif isempty(address_of_child)
+        variable = netcdf.defVar(group, variable_name, "NC_DOUBLE", intdim);
+
+    % or a list of strings
+    elseif iscellstr(address_of_child) || iscell(address_of_child) && ischar(address_of_child{1})
+        write_cell_with_strings(variable_name, address_of_child, group, NetCDF, verbose)
+        
+    % or an empty list
+    elseif iscell(address_of_child) && isempty(address_of_child) || isa(address_of_child, 'double') && isempty(address_of_child)
+        variable = netcdf.defVar(group, variable_name, "NC_INT", intdim);
+        netcdf.putVar(group,variable, -32767);
+
+    % or a bool
+    elseif islogical(address_of_child)
+        % netcdf4 can't handle bool types like true/false so we convert all to int 1/0 and add an attribute named units with value 'bool'
+        variable = netcdf.defVar(group, variable_name, 'NC_SHORT', intdim);
+        netcdf.putVar(group,variable,int8(address_of_child));
+        % make sure other systems can flag the bool type
+        netcdf.putAtt(group,variable,'units','bool');
+
+    % or a regular list
+    elseif iscell(address_of_child)
+        disp('made list w/ unlim dim')
+        variable = netcdf.defVar(group, variable_name, "NC_DOUBLE", unlimdim);
+        netcdf.putVar(group,variable,address_of_child);
+        
+    % or a float
+    elseif isfloat(address_of_child) && numel(address_of_child) == 1
+        variable = netcdf.defVar(group, variable_name, "NC_DOUBLE", floatdim);
+        netcdf.putVar(group,variable,address_of_child);
+        
+    % or a int
+    elseif mod(address_of_child,1) == 0 || isinteger(address_of_child) && numel(address_of_child) == 1
+        variable = netcdf.defVar(group, variable_name, "NC_SHORT", intdim);
+        netcdf.putVar(group,variable,address_of_child);
+
+    % anything else... (will likely need to add more cases; ie dict)
+    else
+        try
+            variable = netcdf.defVar(group, variable_name, "NC_DOUBLE", unlimdim);
+            netcdf.putVar(group,variable,address_of_child);
+        catch ME
+            disp(ME.message);
+            disp(['Datatype given: ', class(address_of_child)]);
+        end
+    end
+    if verbose
+        fprintf('Successfully transferred data from %s to the NetCDF\n', variable_name);
+    end
+end
+
+
+function write_cell_with_strings(variable_name, address_of_child, group, NetCDF, verbose)
+    %{
+    Write cell array (ie {'one' 'two' 'three'}) to netcdf
+    %}
+    
+    if isempty(address_of_child)
+        % if the char array is empty, save an empty char
+        name_of_dimension = ['char', num2str(0)];
+        try
+            dimID = netcdf.defDim(group, name_of_dimension, 0);
+        catch
+            dimID = netcdf.inqDimID(group, name_of_dimension);
+        end
+        % Now we can make a variable in this dimension:
+        string_var = netcdf.defVar(group, variable_name, "NC_CHAR", [dimID]);
+        % we leave empty now
+    else
+        % covert data to char array
+        method_ID = char(address_of_child);
+    
+        % make dimensions
+        [rows, cols] = size(method_ID);
+        
+        IDDim1 = netcdf.defDim(group,'cols',cols);
+        IDDim2 = netcdf.defDim(group,'rows',rows);
+    
+        % create the variable slot
+        IDVarId = netcdf.defVar(group,variable_name,'NC_CHAR', [IDDim1 IDDim2]);
+    
+        % save the variable
+        netcdf.putVar(group, IDVarId, method_ID'); %transpose
+    
+        % tell other platforms that this is a cell of strings
+        netcdf.putAtt(group, IDVarId, 'type_is','cell_array_of_strings');
+    end
+end
+
+
+function write_string_to_netcdf(variable_name, address_of_child, group, NetCDF, verbose)
+    % netcdf and strings don't get along.. we have to do it 'custom':
+
+    the_string_to_save = address_of_child;
+
+    if isempty(the_string_to_save)
+        % if the char array is empty, save an empty char
+        name_of_dimension = ['char', num2str(0)];
+        try
+            dimID = netcdf.defDim(group, name_of_dimension, 0);
+        catch
+            dimID = netcdf.inqDimID(group, name_of_dimension);
+        end
+        % Now we can make a variable in this dimension:
+        string_var = netcdf.defVar(group, variable_name, "NC_CHAR", [dimID]);
+        % we leave empty now
+    else
+        % convert string to 
+        uint_method=uint8(the_string_to_save).';
+        method_ID = char(uint_method);
+        length_of_the_string = numel(method_ID);
+        
+        % Convert the string to character data using string array
+        %str_out = char(the_string_to_save)
+    
+        % Determine the length of the string
+        %length_of_the_string = numel(str_out)
+    
+        % Check if the dimension already exists, and if not, create it
+        name_of_dimension = ['char', num2str(length_of_the_string)];
+        try
+            dimID = netcdf.defDim(group, name_of_dimension, length_of_the_string);
+        catch
+            dimID = netcdf.inqDimID(group, name_of_dimension);
+        end
+        % Now we can make a variable in this dimension:
+        string_var = netcdf.defVar(group, variable_name, "NC_CHAR", [dimID]);
+        % Finally, we can write the variable (always transpose for matlab):
+        netcdf.putVar(group, string_var, method_ID);
+    end
+
+    if verbose
+        disp(['Successfully transferred data from ', variable_name, ' to the NetCDF']);
+    end
+end
+
+
+function write_numeric_array_to_netcdf(variable_name, address_of_child, group, NetCDF, verbose)
+
+    % get the dimensions we'll need
+    intdim = netcdf.inqDimID(NetCDF,'int');
+    floatdim = netcdf.inqDimID(NetCDF,'float');
+    unlimdim = netcdf.inqDimID(NetCDF,'Unlim');
+    
+    typeis = class(address_of_child);
+    
+    if isa(typeis, 'logical')
+            % because matlab transposes all data into and out of netcdf and because we want cross-platform-compat
+            % we need to transpose data before it goes into netcdf
+            data = address_of_child.';
+
+            % make the dimensions
+            dimensions = [];
+            for dimension = size(data)
+                dim_name = ['dim',int2str(dimension)];
+                % if the dimension already exists we can't have a duplicate
+                try
+                    dimID = netcdf.defDim(group, dim_name, dimension);
+                catch
+                    dimID = netcdf.inqDimID(group, dim_name);
+                end
+                % record the dimension for the variable
+                dimensions(end+1) = dimID;
+            end
+    
+            % write the variable
+            netcdf.putVar(group,variable,data);
+
+            % make sure other systems can flag the bool type
+            netcdf.putAtt(group,variable,'units','bool');
+            
+    % handle all other datatypes here
+    else
+        % sometimes an array has just 1 element in it, we account for those cases here:
+        if numel(address_of_child) == 1
+            if isinteger(address_of_child)
+                variable = netcdf.defVar(group, variable_name, "NC_SHORT", intdim);
+                netcdf.putVar(group,variable,address_of_child);
+            elseif isa(address_of_child, 'double') || isa(address_of_child, 'float')
+                variable = netcdf.defVar(group, variable_name, "NC_DOUBLE", floatdim);
+                netcdf.putVar(group,variable,address_of_child);
+            else 
+                disp('Encountered single datatype that was not float64 or int64, saving under unlimited dimension, may cause errors.')
+                variable = netcdf.defVar(group, variable_name, "NC_DOUBLE", unlimdim);
+                netcdf.putVar(group,variable,address_of_child);
+            end
+        % this is in case of lists so that python doesn't get a (nx1) numpy array and instead gets an n-element list
+        elseif any(size(address_of_child)==1)
+            % because matlab transposes all data into and out of netcdf and because we want cross-platform-compat
+            % we need to transpose data before it goes into netcdf
+            data = address_of_child.';
+
+            % make the dimensions
+            dimensions = [];
+            for dimension = size(data)
+                if dimension ~= 1
+                    dim_name = ['dim',int2str(dimension)];
+                    % if the dimension already exists we can't have a duplicate
+                    try
+                        dimID = netcdf.defDim(group, dim_name, dimension);
+                    catch
+                        dimID = netcdf.inqDimID(group, dim_name);
+                    end
+                    % record the dimension for the variable
+                    dimensions(end+1) = dimID;
+                end
+            end
+            % create the variable
+            variable = netcdf.defVar(group, variable_name, "NC_DOUBLE",dimensions);
+    
+            % write the variable
+            netcdf.putVar(group,variable,data);
+
+        % This catches all remaining arrays:
+        else
+            % because matlab transposes all data into and out of netcdf and because we want cross-platform-compat
+            % we need to transpose data before it goes into netcdf
+            data = address_of_child.';
+
+            % make the dimensions
+            dimensions = [];
+            for dimension = size(data)
+                dim_name = ['dim',int2str(dimension)];
+                % if the dimension already exists we can't have a duplicate
+                try
+                    dimID = netcdf.defDim(group, dim_name, dimension);
+                catch
+                    dimID = netcdf.inqDimID(group, dim_name);
+                end
+                % record the dimension for the variable
+                dimensions(end+1) = dimID;
+            end
+            % create the variable
+            variable = netcdf.defVar(group, variable_name, "NC_DOUBLE",dimensions);
+    
+            % write the variable
+            netcdf.putVar(group,variable,data);
+        end
+    end
+end
Index: /issm/trunk/src/m/netcdf/write_netCDF.py
===================================================================
--- /issm/trunk/src/m/netcdf/write_netCDF.py	(revision 28013)
+++ /issm/trunk/src/m/netcdf/write_netCDF.py	(revision 28013)
@@ -0,0 +1,595 @@
+# imports
+import netCDF4
+from netCDF4 import Dataset
+import numpy as np
+import numpy.ma as ma
+import time
+import os
+from model import *
+from results import *
+from m1qn3inversion import m1qn3inversion
+from taoinversion import taoinversion
+#import OrderedStruct
+
+
+'''
+Given a md, this set of functions will perform the following:
+    1. View each attribute of each nested class.
+    2. Compare state of attribute in the model to an empty model.
+    3. If states are identical, pass. (except for np arrays which will always be saved)
+    4. Otherwise, create nested groups named after class structure.
+    5. Create variable named after class attribute and assign value to it.
+'''
+
+
+def write_netCDF(md, filename: str, verbose = False):
+    if verbose:
+        print('Python C2NetCDF4 v1.2.0')
+    else: pass
+    '''
+    md = model class instance to be saved
+    filename = path and name to save file under
+    verbose = T/F = show or muted log statements. Naturally muted
+    '''
+    # this is a precaution so that data is not lost
+    try:
+        # Create a NCData file to write to
+        NCData = create_NetCDF(filename, verbose)
+        
+        # Create an instance of an empty md class to compare md_var against
+        empty_model = model()
+    
+        # Walk through the md class and compare subclass states to empty_model
+        walk_through_model(md, empty_model, NCData, verbose)
+    
+        # in order to handle some subclasses in the results class, we have to utilize this band-aid
+        # there will likely be more band-aids added unless a class name library is created with all class names that might be added to a md
+        try:
+            # if results has meaningful data, save the name of the subclass and class instance
+            NCData.groups['results']
+            results_subclasses_bandaid(md, NCData, verbose)
+            # otherwise, ignore
+        except KeyError:
+            pass
+            
+        NCData.close()
+        if verbose:
+            print('Model successfully saved as NetCDF4')
+        else: pass
+
+    # just in case something unexpected happens
+    except Exception as e:
+        if 'NCData' in locals():
+            NCData.close()
+        raise e
+    
+
+def results_subclasses_bandaid(md, NCData, verbose = False):
+    # since the results class may have nested classes within it, we need to record the name of the 
+    # nested class instance variable as it appears in the md that we're trying to save
+    quality_control = []
+
+    # we save lists of instances to the NCData
+    solutions = []
+    solutionsteps = []
+    resultsdakotas = []
+    
+    for class_instance_name in md.results.__dict__.keys():
+        if verbose:
+            print(class_instance_name)
+        # for each class instance in results, see which class its from and record that info in the NCData to recreate structure later
+        # check to see if there is a solutionstep class instance
+        if isinstance(md.results.__dict__[class_instance_name],solutionstep):
+            quality_control.append(1)
+            solutionsteps.append(class_instance_name)
+
+        # check to see if there is a solution class instance
+        if isinstance(md.results.__dict__[class_instance_name],solution):
+            quality_control.append(1)
+            solutions.append(class_instance_name)
+
+        # check to see if there is a resultsdakota class instance
+        if isinstance(md.results.__dict__[class_instance_name],resultsdakota):
+            quality_control.append(1)
+            resultsdakotas.append(class_instance_name)
+
+    if solutionsteps != []:
+        serialize_string(variable_name=str('solutionstep'), address_of_child=solutionsteps, group=NCData.groups['results'], list=True, NCData=NCData, verbose=verbose)
+
+    if solutions != []:
+        serialize_string(variable_name=str('solution'), address_of_child=solutions, group=NCData.groups['results'], list=True, NCData=NCData, verbose=verbose)
+
+    if resultsdakotas != []:
+        serialize_string(variable_name=str('resultsdakota'), address_of_child=resultsdakotas, group=NCData.groups['results'], list=True, NCData=NCData, verbose=verbose)
+
+    
+    if len(quality_control) != len(md.results.__dict__.keys()):
+        print('Error: The class instance within your md.results class is not currently supported by this application')
+        print(type(md.results.__dict__[class_instance_name]))
+    else:
+        if verbose:
+            print('The results class was successfully stored on disk')
+        else: pass
+
+
+def create_NetCDF(filename: str, verbose = False):
+    # If file already exists delete / rename it
+    if os.path.exists(filename):
+        print('File {} allready exist'.format(filename))
+    
+        # If so, inqure for a new name or to do delete the existing file
+        newname = input('Give a new name or "delete" to replace: ')
+
+        if newname == 'delete':
+            os.remove(filename)
+        else:
+            print(('New file name is {}'.format(newname)))
+            filename = newname
+    else:
+        # Otherwise create the file and define it globally so other functions can call it
+        NCData = Dataset(filename, 'w', format='NETCDF4')
+        NCData.history = 'Created ' + time.ctime(time.time())
+        NCData.createDimension('Unlim', None)  # unlimited dimension
+        NCData.createDimension('float', 1)     # single integer dimension
+        NCData.createDimension('int', 1)       # single float dimension
+    
+    if verbose:
+        print('Successfully created ' + filename)
+
+    return NCData
+
+
+def walk_through_model(md, empty_model, NCData, verbose= False):
+    # Iterate over first layer of md attributes and assume this first layer is only classes
+    for group in md.__dict__.keys():
+        address = md.__dict__[group]
+        empty_address = empty_model.__dict__[group]
+        # we need to record the layers of the md so we can save them to the NCData file
+        layers = [group]
+
+        # Recursively walk through subclasses
+        walk_through_subclasses(address, empty_address, layers, NCData, empty_model, verbose)       
+
+
+def walk_through_subclasses(address, empty_address, layers: list, NCData, empty_model, verbose = False):
+    # See if we have an object with keys or a not
+    try:
+        address.__dict__.keys()
+        is_object = True
+    except: is_object = False # this is not an object with keys
+
+    if is_object:
+        # enter the subclass, see if it has nested classes and/or attributes
+        # then compare attributes between mds and write to NCData if they differ
+        # if subclass found, walk through it and repeat
+        for child in address.__dict__.keys():
+            # record the current location
+            current_layer = layers.copy()
+            current_layer.append(child)
+            
+            # navigate to child in each md
+            address_of_child = address.__dict__[child]
+            
+            # if the current object is a results.<solution> object and has nonzero steps attr it needs special treatment
+            if isinstance(address_of_child, solution) and len(address_of_child.steps) != 0:
+                create_group(address_of_child, current_layer, is_struct = True, is_special_list = False,  NCData=NCData, verbose = verbose)
+
+            # if the current object is a list of objects (currently only filters for lists/arrays of classes)
+            elif isinstance(address_of_child, list) and len(address_of_child) > 0 and hasattr(address_of_child[0], '__dict__'):
+                create_group(address_of_child, current_layer, is_struct = False, is_special_list = True, NCData=NCData, verbose = verbose)
+
+            # if the variable is an array, assume it has relevant data (this is because the next line cannot evaluate "==" with an array)
+            elif isinstance(address_of_child, np.ndarray):
+                create_group(address_of_child, current_layer, is_struct = False, is_special_list = False,  NCData=NCData, verbose = verbose)
+            
+            # see if the child exists in the empty md. If not, record it in the NCData
+            else:
+                try: 
+                    address_of_child_in_empty_class = empty_address.__dict__[child]
+                    # if that line worked, we can see how the mds' attributes at this layer compare:
+    
+                    # if the attributes are identical we don't need to save anything
+                    if address_of_child == address_of_child_in_empty_class:
+                        walk_through_subclasses(address_of_child, address_of_child_in_empty_class, current_layer, NCData, empty_model, verbose)
+    
+                    # If it has been modified, record it in the NCData file
+                    else:
+                        create_group(address_of_child, current_layer, is_struct = False, is_special_list = False,  NCData=NCData, verbose = verbose)
+                        walk_through_subclasses(address_of_child, address_of_child_in_empty_class, current_layer, NCData, empty_model, verbose)
+    
+                except KeyError: # record in NCData and continue to walk thru md
+                    walk_through_subclasses(address_of_child, empty_address, current_layer, NCData, empty_model, verbose)
+                    create_group(address_of_child, current_layer, is_struct = False, is_special_list = False,  NCData=NCData, verbose = verbose)
+    else: pass
+
+
+def create_group(address_of_child, layers, is_struct = False, is_special_list = False,  NCData=None, verbose = False):
+
+    # Handle the first layer of the group(s)
+    group_name = layers[0]
+    
+    # try to make a group unless the group is already made
+    try:
+        group = NCData.createGroup(str(group_name))
+    except:
+        group = NCData.groups[str(group_name)]
+
+    # need to check if inversion or m1qn3inversion class
+    if group_name == 'inversion':
+        check_inversion_class(address_of_child, NCData, verbose)
+    else: pass
+
+    # if the data is nested in md, create nested groups to match class structure
+    if len(layers) > 2:
+        for name in layers[1:-1]:
+            try:
+                group = group.createGroup(str(name))
+            except:
+                group = NCData.groups[str(name)]
+    else: pass
+
+    # Lastly, handle the variable(s)
+    if is_struct:
+        parent_struct_name = layers[-1]
+        serialize_nested_results_struct(parent_struct_name, address_of_child, group, NCData, verbose)
+
+    elif is_special_list:
+        list_name = layers[-1]
+        serialize_array_of_objects(list_name, address_of_child, group, NCData, verbose)
+    
+    else:
+        variable_name = layers[-1]
+        serialize_var(variable_name, address_of_child, group, NCData, verbose)
+            
+
+def singleton(func):
+    """
+    A decorator to ensure a function is only executed once.
+    """
+    def wrapper(*args, **kwargs):
+        if not wrapper.has_run:
+            wrapper.result = func(*args, **kwargs)
+            wrapper.has_run = True
+        return wrapper.result
+    wrapper.has_run = False
+    wrapper.result = None
+    return wrapper
+    
+
+@singleton
+def check_inversion_class(address_of_child, NCData, verbose = False):
+    # need to make sure that we have the right inversion class: inversion, m1qn3inversion, taoinversion
+    if isinstance(address_of_child, m1qn3inversion):
+        serialize_string(variable_name=str('inversion_class_name'), address_of_child=str('m1qn3inversion'), group=NCData.groups['inversion'], NCData=NCData, verbose = verbose)
+        if verbose:
+            print('Successfully saved inversion class instance ' + 'm1qn3inversion')
+    elif isinstance(address_of_child, taoinversion):
+        serialize_string(variable_name=str('inversion_class_name'), address_of_child=str('taoinversion'), group=NCData.groups['inversion'], NCData=NCData, verbose = verbose)
+        if verbose:
+            print('Successfully saved inversion class instance ' + 'taoinversion')
+    else:
+        serialize_string(variable_name=str('inversion_class_name'), address_of_child=str('inversion'), group=NCData.groups['inversion'], NCData=NCData, verbose = verbose)
+        if verbose:
+            print('Successfully saved inversion class instance ' + 'inversion')
+
+
+def serialize_nested_results_struct(parent_struct_name, address_of_struct, group, NCData, verbose = False):
+    '''
+        This function takes a results.solution class instance and saves the solutionstep instances from <solution>.steps to the NCData. 
+
+        To do this, we get the number of dimensions (substructs) of the parent struct (list).
+        Next, we iterate through each substruct and record the data. 
+        For each substruct, we create a subgroup of the main struct.
+        For each variable, we create dimensions that are assigned to each subgroup uniquely.
+    '''
+    if verbose:
+        print("Beginning transfer of nested MATLAB struct to the NCData")
+    
+    # make a new subgroup to contain all the others:
+    group = group.createGroup(str(parent_struct_name))
+
+    # make sure other systems can flag the nested struct type
+    serialize_string('this_is_a_nested', 'struct', group, list=False, NCData=NCData, verbose = verbose)
+
+    # other systems know the name of the parent struct because it's covered by the results/qmu functions above
+    no_of_dims = len(address_of_struct)
+    for substruct in range(0, no_of_dims):
+        # we start by making subgroups with nice names like "1x4"
+        name_of_subgroup = '1x' + str(substruct)
+        subgroup = group.createGroup(str(name_of_subgroup))
+
+        # do some housekeeping to keep track of the current layer
+        current_substruct = address_of_struct[substruct]
+        substruct_fields = current_substruct.__dict__.keys()
+
+        # now we need to iterate over each variable of the nested struct and save it to this new subgroup
+        for variable in substruct_fields:
+            address_of_child = current_substruct.__dict__[variable]
+            serialize_var(variable, address_of_child, subgroup, NCData, verbose = verbose)
+    
+    if verbose:
+        print(f'Successfully transferred struct {parent_struct_name} to the NCData\n')
+
+
+
+
+def serialize_array_of_objects(list_name, address_of_child, group, NCData, verbose):
+    if verbose: 
+        print(f"Serializing array of objects.")
+    
+    # Get the dimensions of the cell array
+    if len(np.shape(address_of_child)) > 1: 
+        rows, cols = np.shape(address_of_child)
+    else: rows, cols = 1, np.shape(address_of_child)[0]
+
+    # Make subgroup to represent the array
+    name_of_subgroup = f"{str(rows)}x{str(cols)}_cell_array_of_objects"
+    subgroup = group.createGroup(name_of_subgroup)
+
+    # Save the name of the cell array
+    serialize_string('name_of_cell_array', list_name, subgroup, NCData, verbose)
+
+    # Save the dimensions of the cell array
+    rowsID = subgroup.createVariable('rows', int, ('int',))
+    colsID = subgroup.createVariable('cols', int, ('int',))
+    rowsID[:] = rows
+    colsID[:] = cols
+
+
+    # If this is a multidimensional cell array, iterate over rows here and cols in serialize_objects
+    if rows > 1:
+        for row in range(rows):
+            # Make a subgroup for each row
+            name_of_subgroup = f"Row_{row+1}_of_{rows}"
+            subgroup = group.createGroup(name_of_subgroup)
+            serialize_objects(address_of_child, subgroup, NCData, cols, verbose)
+    else:
+        serialize_objects(address_of_child, subgroup, NCData, cols, verbose)
+        
+    if verbose:
+        print(f"Successfully serialized array of objects: {list_name}")
+
+
+def serialize_objects(address_of_child, group, NCData, cols, verbose):
+    for col in range(cols):
+        # Make subgroup to contain each col of array
+        name_of_subgroup = f'Col_{col+1}_of_{cols}'
+        subgroup = group.createGroup(name_of_subgroup)
+
+        # index the current item
+        variable = address_of_child[col]
+
+        # Get the kind of object we're working with:
+        # see if it's a solution instance
+        if isinstance(variable, solution) and len(variable.steps) != 0:
+            pass
+            # this needs more work...
+        
+        # see if it's a general class -- assume ISSM classes all have __dict__
+        elif hasattr(variable, '__dict__'):
+            # Handle class instances
+            serialize_class_instance(variable, subgroup, NCData, verbose)
+        else:
+            print('ERROR: Cell arrays of mixed types are not yet supported in read_NCData!')
+            print('Deserialization will not be able to complete!')
+            # Handle regular data structures that are already supported
+            serialize_var(variable_name, variable, subgroup, NCData, verbose)
+
+
+def serialize_class_instance(instance, group, NCData, verbose):
+    # get parent class name:
+    name = instance.__class__.__name__
+
+    # save the name of the class
+    serialize_string(variable_name='class_is_a', address_of_child=name, group=group, NCData=NCData, verbose = verbose)
+
+    # make subgroup to contain attributes
+    name_of_subgroup = 'Properties_of_' + name
+    subgroup = group.createGroup(name_of_subgroup)
+
+    # get attributes
+    keys = instance.__dict__.keys()
+
+    for name in keys:
+        serialize_var(name, instance.__dict__[name], subgroup, NCData, verbose)
+    
+
+
+        
+def serialize_var(variable_name, address_of_child, group, NCData, verbose = False):
+    # There are lots of different variable types that we need to handle from the md class
+    
+    # This first conditional statement will catch numpy arrays of any dimension and save them
+    if isinstance(address_of_child, np.ndarray):
+        serialize_numpy_array(variable_name, address_of_child, group, NCData, verbose=verbose)
+    
+    # check if it's an int
+    elif isinstance(address_of_child, int) or isinstance(address_of_child, np.integer):
+        variable = group.createVariable(variable_name, int, ('int',))
+        variable[:] = address_of_child
+    
+    # or a float
+    elif isinstance(address_of_child, float) or isinstance(address_of_child, np.floating):
+        variable = group.createVariable(variable_name, float, ('float',))
+        variable[:] = address_of_child
+
+    # or a string
+    elif isinstance(address_of_child, str):
+        serialize_string(variable_name, address_of_child, group, NCData, verbose=verbose)
+
+    #or a bool
+    elif isinstance(address_of_child, bool) or isinstance(address_of_child, np.bool_):
+        # NetCDF can't handle bool types like True/False so we convert all to int 1/0 and add an attribute named units with value 'bool'
+        variable = group.createVariable(variable_name, int, ('int',))
+        variable[:] = int(address_of_child)
+        variable.units = "bool"
+        
+    # or an empty list
+    elif isinstance(address_of_child, list) and len(address_of_child)==0:
+        variable = group.createVariable(variable_name, int, ('int',))
+
+    # or a list of strings -- this needs work as it can only handle a list of 1 string
+    elif isinstance(address_of_child,list) and isinstance(address_of_child[0],str):
+        for string in address_of_child:
+            serialize_string(variable_name, string, group, list=True, NCData=NCData, verbose=verbose)
+
+    # or a regular list
+    elif isinstance(address_of_child, list):
+        variable = group.createVariable(variable_name, type(address_of_child[0]), ('Unlim',))
+        variable[:] = address_of_child
+
+    # anything else... (will likely need to add more cases; ie helpers.OrderedStruct)
+    else:
+        try:
+            variable = group.createVariable(variable_name, type(address_of_child), ('Unlim',))
+            variable[:] = address_of_child
+            print(f'Unrecognized variable was saved {variable_name}')
+        except TypeError: pass # this would mean that we have an object, so we just let this continue to feed thru the recursive function above
+        except Exception as e:
+            print(f'There was error with {variable_name} in {group}')
+            print("The error message is:")
+            print(e)
+            print('Datatype given: ' + str(type(address_of_child)))
+    
+    if verbose:
+        print(f'Successfully transferred data from {variable_name} to the NCData')
+    
+
+def serialize_string(variable_name, address_of_child, group, list=False, NCData=None, verbose = False):
+    # NCData and strings dont get along.. we have to do it 'custom':
+    # if we hand it an address we need to do it this way:
+    if list:
+        """    
+        Convert a list of strings to a numpy.char_array with utf-8 encoded elements
+        and size rows x cols with each row the same # of cols and save to NCData
+        as char array.
+        """
+        try:
+            strings = address_of_child
+            # get dims of array to save
+            rows = len(strings)
+            cols = len(max(strings, key = len))
+    
+            # Define dimensions for the strings
+            rows_name = 'rows' + str(rows)
+            cols_name = 'cols' + str(cols)
+            try:
+                group.createDimension(rows_name, rows)
+            except: pass
+
+            try:
+                group.createDimension(cols_name, cols)
+            except: pass
+                
+            # Create a variable to store the strings
+            string_var = group.createVariable(str(variable_name), 'S1', (rows_name, cols_name))
+    
+            # break the list into a list of lists of words with the same length as the longest word:
+            # make words same sizes by adding spaces 
+            modded_strings = [word + ' ' * (len(max(strings, key=len)) - len(word)) for word in strings]
+            # encoded words into list of encoded lists
+            new_list = [[s.encode('utf-8') for s in word] for word in modded_strings]
+    
+            # make numpy char array with dims rows x cols
+            arr = np.chararray((rows, cols))
+    
+            # fill array with list of encoded lists
+            for i in range(len(new_list)):
+                arr[i] = new_list[i]
+    
+            # save array to NCData file
+            string_var[:] = arr
+
+            if verbose:
+                print(f'Saved {len(modded_strings)} strings to {variable_name}')
+    
+        except Exception as e:
+            print(f'Error: {e}')
+        
+    else:
+        the_string_to_save = address_of_child
+        length_of_the_string = len(the_string_to_save)
+        numpy_datatype = 'S' + str(length_of_the_string)
+        str_out = netCDF4.stringtochar(np.array([the_string_to_save], dtype=numpy_datatype))        
+    
+        # we'll need to make a new dimension for the string if it doesn't already exist
+        name_of_dimension = 'char' + str(length_of_the_string)
+        try: 
+            group.createDimension(name_of_dimension, length_of_the_string)
+        except: pass
+        # this is another band-aid to the results sub classes...
+        try:
+            # now we can make a variable in this dimension:
+            string = group.createVariable(variable_name, 'S1', (name_of_dimension))
+            #finally we can write the variable:
+            string[:] = str_out
+        #except RuntimeError: pass
+        except Exception as e:
+            print(f'There was an error saving a string from {variable_name}')
+            print(e)
+
+
+def serialize_numpy_array(variable_name, address_of_child, group, NCData, verbose = False):
+    # to make a nested array in NCData, we have to get the dimensions of the array,
+    # create corresponding dimensions in the NCData file, then we can make a variable
+    # in the NCData with dimensions identical to those in the original array
+    
+    # start by getting the data type at the lowest level in the array:
+    typeis = address_of_child.dtype
+
+    # catch boolean arrays here
+    if typeis == bool:
+        # sometimes an array has just 1 element in it, we account for those cases here:
+        if len(address_of_child) == 1:
+            variable = group.createVariable(variable_name, int, ('int',))
+            variable[:] = int(address_of_child)
+            variable.units = "bool"
+        else:
+            # make the dimensions
+            dimensions = []
+            for dimension in np.shape(address_of_child):
+                dimensions.append(str('dim' + str(dimension)))
+                # if the dimension already exists we can't have a duplicate
+                try:
+                    group.createDimension(str('dim' + str(dimension)), dimension)
+                except: pass # this would mean that the dimension already exists
+    
+            # create the variable:
+            variable = group.createVariable(variable_name, int, tuple(dimensions))
+            # write the variable:
+            variable[:] = address_of_child.astype(int)
+            variable.units = "bool"
+
+    # handle all other datatypes here
+    else:
+        # sometimes an array has just 1 element in it, we account for those cases here:
+        if len(address_of_child) == 1:
+            if typeis is np.dtype('float64'):
+                variable = group.createVariable(variable_name, typeis, ('float',))
+                variable[:] = address_of_child[0]
+            elif typeis is np.dtype('int64'):
+                variable = group.createVariable(variable_name, typeis, ('int',))
+                variable[:] = address_of_child[0]
+            else:
+                print(f'Encountered single datatype from {variable_name} that was not float64 or int64, saving under unlimited dimension, may cause errors.')
+                variable = group.createVariable(variable_name, typeis, ('Unlim',))
+                variable[:] = address_of_child[0]
+    
+        # This catches all arrays/lists:
+        else:
+            # make the dimensions
+            dimensions = []
+            for dimension in np.shape(address_of_child):
+                dimensions.append(str('dim' + str(dimension)))
+                # if the dimension already exists we can't have a duplicate
+                try:
+                    group.createDimension(str('dim' + str(dimension)), dimension)
+                except: pass # this would mean that the dimension already exists
+    
+            # create the variable:
+            variable = group.createVariable(variable_name, typeis, tuple(dimensions))
+    
+            # write the variable:
+            variable[:] = address_of_child
+
+            
Index: /issm/trunk/src/m/parameterization/parameterize.m
===================================================================
--- /issm/trunk/src/m/parameterization/parameterize.m	(revision 28012)
+++ /issm/trunk/src/m/parameterization/parameterize.m	(revision 28013)
@@ -2,7 +2,7 @@
 %PARAMETERIZE - parameterize a model
 %
-%   from a parameter matlab file, start filling in all the @model fields that were not 
+%   from a parameter MATLAB file, start filling in all the @model fields that were not 
 %   filled in by the mesh.m and mask.m @model methods.
-%   Warning: the parameter file must be able to be run in Matlab
+%   Warning: the parameter file must be able to be run in MATLAB
 %
 %   Usage:
@@ -56,3 +56,5 @@
 	md.miscellaneous.name=root; 
 end
-md.miscellaneous.notes=['Model created by using parameter file: ' parametername ' on: ' datestr(now)];
+if isempty(md.miscellaneous.notes), 
+	md.miscellaneous.notes=['Model created by using parameter file: ' parametername ' on: ' datestr(now)];
+end
Index: /issm/trunk/src/m/parameterization/parameterize.py
===================================================================
--- /issm/trunk/src/m/parameterization/parameterize.py	(revision 28012)
+++ /issm/trunk/src/m/parameterization/parameterize.py	(revision 28013)
@@ -4,9 +4,9 @@
 
 def parameterize(md, parametername):
-    """PARAMETERIZE - parameterize a model
+    """parameterize - parameterize a model
 
     From a parameter Python file, start filling in all the model fields that 
-    were not filled in by the mesh.py and mask.py model methods. Warning: the 
-    parameter file must be able to be run in Python
+    were not filled in by the mesh.py and mask.py model methods. 
+    Warning: the parameter file must be able to be run in Python
 
     Usage:
@@ -17,16 +17,16 @@
     """
 
-    #some checks
+    # Some checks
     if not os.path.exists(parametername):
         raise IOError("parameterize error message: file '%s' not found!" % parametername)
 
-    #Try and run parameter file.
+    # Try and run parameter file
     exec(compile(open(parametername).read(), parametername, 'exec'))
 
-    #Name and notes
+    # Name and notes
     if not md.miscellaneous.name:
         md.miscellaneous.name = os.path.basename(parametername).split('.')[0]
 
-    md.miscellaneous.notes = "Model created by using parameter file: '%s' on: %s." % (parametername, datetime.datetime.strftime(datetime.datetime.now(), '%c'))
+    md.miscellaneous.notes = 'Model created by using parameter file: \'%s\' on: %s.' % (parametername, datetime.datetime.strftime(datetime.datetime.now(), '%c'))
 
     return md
Index: /issm/trunk/src/m/parameterization/setmask.py
===================================================================
--- /issm/trunk/src/m/parameterization/setmask.py	(revision 28012)
+++ /issm/trunk/src/m/parameterization/setmask.py	(revision 28013)
@@ -53,5 +53,5 @@
     vertexongroundedice[md.mesh.elements[np.nonzero(elementongroundedice), :] - 1] = True
     vertexonfloatingice[np.nonzero(np.logical_not(vertexongroundedice))] = True
-    #}}}
+    # }}}
 
     #level sets
Index: /issm/trunk/src/m/paraview/exportVTK.m
===================================================================
--- /issm/trunk/src/m/paraview/exportVTK.m	(revision 28013)
+++ /issm/trunk/src/m/paraview/exportVTK.m	(revision 28013)
@@ -0,0 +1,197 @@
+function exportVTK(filename,model,varargin)
+%EXPORTVTK -  vtk export
+%
+%   function exportVTK(filename,model)
+%   creates a directory with the vtk files for displays in paraview
+%   (only work for triangle and wedges based on their number of nodes)
+%   By default only the results are exported, you can add whichever
+%   field you need as a string:
+%   add 'geometry' to export md.geometry
+%
+%   USAGE:
+%      exportVTK(filename,model,varargin)
+%
+%   EXAMPLE:
+%      exportVTK('ResultSimulation1',md)
+
+[path,name,ext]=fileparts(filename);
+separator=filesep;
+mkdir(filename);
+
+%get the element related variables
+if dimension(model.mesh)==2,
+	points=[model.mesh.x model.mesh.y zeros(model.mesh.numberofvertices,1)];
+else
+	points=[model.mesh.x model.mesh.y model.mesh.z];
+end
+
+[num_of_points,dim]=size(points);
+[num_of_elt]=size(model.mesh.elements,1);
+[point_per_elt]=size(model.mesh.elements,2);
+
+%Select the type of element function of the number of nodes per elements
+if point_per_elt==3;
+	celltype=5; %triangles
+elseif point_per_elt==6;
+	celltype=13; %wedges
+else
+	error('Your Element definition is not taken into account \n');
+end
+
+%this is the result structure
+res_struct=model.results;
+%checking for results
+if (length(fields(res_struct))>0);
+	%Getting all the solutions of the model
+	solnames=fields(res_struct);
+	num_of_sols=length(solnames);
+	num_of_timesteps=1;
+	%building solution structure 
+	for i=1:num_of_sols
+		sol_struct{i}=res_struct.(solnames{i});
+		%looking for multiple time steps
+		if(size(sol_struct{i},2)>num_of_timesteps);
+			num_of_timesteps=size(sol_struct{i},2);
+			if isa(model.timestepping,'timesteppingadaptive')
+				disp('Warning: timesteppingadaptive not totally supported!');				
+			elseif isa(model.timestepping,'timestepping')
+				outstep=model.timestepping.time_step*model.settings.output_frequency;
+			else
+				error('timestepping class not supported!');
+			end
+		end
+	end
+else
+	num_of_timesteps=1;
+end
+for step=1:num_of_timesteps;
+	
+	timestep=step;
+
+	fid = fopen(strcat(path,filesep,name,filesep,'timestep.vtk',int2str(timestep),'.vtk'),'w+');
+	fprintf(fid,'# vtk DataFile Version 2.0 \n');
+	fprintf(fid,'Data for run %s \n',model.miscellaneous.name);
+	fprintf(fid,'ASCII \n');
+	fprintf(fid,'DATASET UNSTRUCTURED_GRID \n');
+	
+	fprintf(fid,'POINTS %d float\n',num_of_points);
+	if(dim==3);
+		s='%f %f %f \n';
+	elseif(dim==2);
+		s='%f %f \n';
+  end
+	P=[points zeros(num_of_points,3-dim)];
+	fprintf(fid,s,P');
+	
+	fprintf(fid,'CELLS %d %d\n',num_of_elt,num_of_elt*(point_per_elt+1));
+	s='%d';
+	for j=1:point_per_elt
+		s=horzcat(s,{' %d'});
+  end
+	s=cell2mat(horzcat(s,{'\n'}));
+		fprintf(fid,s,[(point_per_elt)*ones(num_of_elt,1)	model.mesh.elements-1]');
+	
+	fprintf(fid,'CELL_TYPES %d\n',num_of_elt);
+	s='%d\n';
+	fprintf(fid,s,celltype*ones(num_of_elt,1));
+	fprintf(fid,'POINT_DATA %s \n',num2str(num_of_points));
+
+	%loop over the different solution structures
+	if (exist('num_of_sols'));
+		for j=1:num_of_sols
+			%dealing with results on different timesteps
+			if(size(sol_struct{j},2)>timestep);
+				timestep = step;
+			else
+				timestep = size(sol_struct{j},2);
+	    end
+			
+			%getting the number of fields in the solution
+			fieldnames=fields(sol_struct{j}(timestep));
+			num_of_fields=length(fieldnames);
+			
+			%check which field is a real result and print
+			for k=1:num_of_fields
+				if ((numel(sol_struct{j}(timestep).(fieldnames{k})))==num_of_points);
+					%paraview does not like NaN, replacing
+					nanval=find(isnan(sol_struct{j}(timestep).(fieldnames{k})));
+					sol_struct{j}(timestep).(fieldnames{k})(nanval)=-9999;
+					%also checking for verry small value that mess up
+					smallval=(abs(sol_struct{j}(timestep).(fieldnames{k}))<1.0e-20);
+					sol_struct{j}(timestep).(fieldnames{k})(smallval)=0.0;
+					fprintf(fid,'SCALARS %s float 1 \n',fieldnames{k});
+					fprintf(fid,'LOOKUP_TABLE default\n');
+					s='%e\n';
+					fprintf(fid,s,sol_struct{j}(timestep).(fieldnames{k}));
+		    end		
+	    end 
+	  end
+  end
+	%loop on arguments, if something other than result is asked, do
+	%it now
+	for j= 1:nargin-2
+		res_struct=model.(varargin{j});
+		fieldnames=fields(res_struct);
+		num_of_fields=length(fieldnames);
+		for k=1:num_of_fields
+			if ((numel(res_struct.(fieldnames{k})))==num_of_points);
+				%paraview does not like NaN, replacing
+				nanval=find(isnan(res_struct.(fieldnames{k})));
+				res_struct.(fieldnames{k})(nanval)=-9999;
+				%also checking for verry small value that mess up
+				smallval=(abs(res_struct.(fieldnames{k}))<1.0e-20);
+				res_struct.(fieldnames{k})(smallval)=0.0;
+				fprintf(fid,'SCALARS %s float 1 \n',fieldnames{k});
+				fprintf(fid,'LOOKUP_TABLE default\n');
+				s='%e\n';
+				fprintf(fid,s,res_struct.(fieldnames{k}));
+				%check for forcings	
+			elseif (size(res_struct.(fieldnames{k}),1)==num_of_points+1);
+				%paraview does not like NaN, replacing
+				nanval=find(isnan(res_struct.(fieldnames{k})));
+				res_struct.(fieldnames{k})(nanval)=-9999;
+				%also checking for verry small value that mess up
+				smallval=(abs(res_struct.(fieldnames{k}))<1.0e-20);
+				res_struct.(fieldnames{k})(smallval)=0.0;
+				if (size(res_struct.(fieldnames{k}),2)==num_of_timesteps),
+					fprintf(fid,'SCALARS %s float 1 \n',fieldnames{k});
+					fprintf(fid,'LOOKUP_TABLE default\n');
+					s='%e\n';
+					fprintf(fid,s,res_struct.(fieldnames{k})(1:end-1,timestep));
+				else,
+					%forcing and results not on the same timestep,need some treatment
+					fprintf(fid,'SCALARS %s float 1 \n',fieldnames{k});
+					fprintf(fid,'LOOKUP_TABLE default\n');
+					index=1;
+					currenttime=((timestep-1)*outstep)+model.timestepping.start_time;
+					while (res_struct.(fieldnames{k})(end,index)<=currenttime);
+						if index==size(res_struct.(fieldnames{k}),2)
+							break
+						end	
+						index=index+1;
+		      end
+					uptime=res_struct.(fieldnames{k})(end,index);
+					uplim=res_struct.(fieldnames{k})(1:end-1,index);
+					while (res_struct.(fieldnames{k})(end,index)>=currenttime);
+						if index==1
+							break
+			      end
+						index=index-1;
+		      end
+					lowtime=res_struct.(fieldnames{k})(end,index);
+					lowlim=res_struct.(fieldnames{k})(1:end-1,index);
+					if uptime==currenttime,
+						interp=uplim;
+					elseif lowtime==currenttime,
+						interp=lowlim;
+					else
+						interp=lowlim+(uplim-lowlim)*((currenttime-lowtime)/(uptime-lowtime));
+					end
+					s='%e\n';
+					fprintf(fid,s,interp);
+				end
+		  end		
+		end 
+	end
+	fclose(fid);
+end
Index: /issm/trunk/src/m/paraview/exportVTK.py
===================================================================
--- /issm/trunk/src/m/paraview/exportVTK.py	(revision 28013)
+++ /issm/trunk/src/m/paraview/exportVTK.py	(revision 28013)
@@ -0,0 +1,537 @@
+import numpy as np
+from os import path, remove, mkdir
+from glob import glob
+
+
+def exportVTK(filename, md, *args, enveloppe=False, **kwargs):
+    '''
+    vtk export
+    function exportVTK(filename, md)
+    creates a directory with the vtk files for displays in paraview
+    (only work for triangle and wedges based on their number of nodes)
+
+    Usage:
+    exportVTK('DirName', md)
+    exportVTK('DirName', md, 'geometry', 'mesh')
+    exportVTK('DirName', md, 'geometry', 'mesh', enveloppe = True)
+
+    DirName is the name of the output directory, each timestep then has it
+    own file ('Timestep.vtkX.vtk') with X the number of the output step
+    enveloppe is an option keeping only the enveloppe of the md (it is False by default)
+
+    Options:
+        - clipping : allows to reduce your domain (cliping=[Xmin, Xmax, Ymin, Ymax])
+        - coarsetime : output one timestep every X (coarsetime=X, with X an integer)
+        - singletime : output only timestep X (singletime=X, with X an integer or -1 for last)
+
+    TODO: - make time easily accessible
+
+    Basile de Fleurian:
+    '''
+    #verbosity of the code, 0 is no messages, 5 is chatty
+    verbose = 0
+
+    print("""
+    =========================================
+    #     A                                 #
+    #    / \      exportVTK is now obsolete #
+    #   / | \     You should use export VTU #
+    #  /  |  \    faster, smaller files     #
+    # /   o   \   and more capacities       #
+    # ---------                             #
+    #========================================
+    """)
+
+
+    for key in kwargs.keys():
+        if key not in ['clipping', 'coarsetime', 'singletime']:
+            raise BadOption('Provided option "{}" is not supported possibilities are : {}'.format(key, ['cliping', 'coarsetime', 'singletime']))
+
+    if 'coarsetime' in kwargs.keys() and 'singletime' in kwargs.keys():
+        raise BadOption("You can't specify both 'coarsetime' and 'singletime'")
+
+    # File checking and creation {{{
+    Dir = path.basename(filename)
+    Path = filename[:-len(Dir)]
+    if path.exists(filename):
+        print(('File {} allready exist'.format(filename)))
+        newname = input('Give a new name or "delete" to replace: ')
+        if newname == 'delete':
+            filelist = glob(filename + '/* ')
+            for oldfile in filelist:
+                remove(oldfile)
+        else:
+            print(('New file name is {}'.format(newname)))
+            filename = newname
+            mkdir(filename)
+    else:
+        mkdir(filename)
+    # }}}
+
+    # this is the result structure {{{
+    if verbose > 3:
+        print('Getting accessorie variables')
+    res_struct = md.results
+    moving_mesh = False
+    if(type(res_struct) != list):
+        #Getting all the solutions of the md
+        solnames = dict.keys(res_struct.__dict__)
+        num_of_sols = len(solnames)
+        num_of_timesteps = 1
+        #%building solutionstructure
+        for solution in solnames:
+            #looking for multiple time steps
+            try:
+                if len(res_struct.__dict__[solution]) > num_of_timesteps:
+                    num_of_timesteps = len(res_struct.__dict__[solution])
+                    num_of_timesteps = int(num_of_timesteps)
+                    if 'Surface' in dict.keys(res_struct.__dict__[solution][0].__dict__):
+                        moving_mesh = True
+            except TypeError:
+                continue
+    else:
+        num_of_timesteps = 1
+    # }}}
+
+    # get the element related variables {{{
+    if verbose > 3:
+        print('Now treating  the mesh')
+    #first get the general things
+    dim = int(md.mesh.domaintype()[0])
+    every_nodes = md.mesh.numberofvertices
+    every_cells = md.mesh.numberofelements
+    try:
+        every_edges = md.mesh.numberofedges
+    except AttributeError:
+        #3D meshes do not have edges
+        every_edges = 0
+
+    if np.shape(md.mesh.elements)[1] == 3 or enveloppe:
+        point_per_elt = 3
+        celltype = 5  #triangles
+    elif np.shape(md.mesh.elements)[1] == 6:
+        point_per_elt = 6
+        celltype = 13  #wedges
+    else:
+        raise BadDimension('exportVTK does not support your element type')
+
+    #only keep the envelope and not the bulk of the results.
+    if enveloppe:
+        if dim == 3:
+            mesh_alti = '1'
+            is_enveloppe = np.logical_or(md.mesh.vertexonbase, md.mesh.vertexonsurface)
+            enveloppe_index = np.where(is_enveloppe)[0]
+            convert_index = np.nan * np.ones(np.shape(md.mesh.x))
+            convert_index = np.asarray([[i, np.where(enveloppe_index == i)[0][0]] for i, val in enumerate(convert_index) if any(enveloppe_index == i)])
+
+            num_of_points = np.size(enveloppe_index)
+            points = np.column_stack((md.mesh.x[enveloppe_index],
+                                      md.mesh.y[enveloppe_index],
+                                      md.mesh.z[enveloppe_index]))
+
+            num_of_elt = np.size(np.where(np.isnan(md.mesh.lowerelements))) + np.size(np.where(np.isnan(md.mesh.upperelements)))
+            connect = md.mesh.elements[np.where(is_enveloppe[md.mesh.elements - 1])].reshape(int(num_of_elt), 3) - 1
+            for elt in range(0, num_of_elt):
+                connect[elt, 0] = convert_index[np.where(convert_index == connect[elt, 0])[0], 1][0]
+                connect[elt, 1] = convert_index[np.where(convert_index == connect[elt, 1])[0], 1][0]
+                connect[elt, 2] = convert_index[np.where(convert_index == connect[elt, 2])[0], 1][0]
+
+            num_of_edges = every_edges  #looks like edges is only defined on the 2d mesh
+            if num_of_edges > 0:
+                edges = md.mesh.edges[:, 0:2].reshape(int(num_of_edges), 2) - 1
+
+        else:
+            raise BadDimension("exportVTK can't get an enveloppe for  dimension {}".format(dim))
+
+    else:
+        #we get all the mesh, mainly defining dummies
+        num_of_elt = every_cells
+        connect = md.mesh.elements - 1
+        num_of_edges = every_edges
+        if num_of_edges > 0:
+            edges = md.mesh.edges[:, 0:2].reshape(int(num_of_edges), 2) - 1
+        enveloppe_index = np.arange(0, np.size(md.mesh.x))
+        num_of_points = every_nodes
+        if dim == 2:
+            mesh_alti = input('''This is a 2D model, what should be the 3rd dimension of the mesh :
+                                        1 : md.geometry.surface
+                                        2 : md.geometry.base
+                                        3 : md.geometry.bed
+                                        4 : 0
+                                        5 : Custom\n''')
+            if mesh_alti == '1':
+                points = np.column_stack((md.mesh.x, md.mesh.y, md.geometry.surface))
+            elif mesh_alti == '2':
+                points = np.column_stack((md.mesh.x, md.mesh.y, md.geometry.base))
+            elif mesh_alti == '3':
+                points = np.column_stack((md.mesh.x, md.mesh.y, md.geometry.bed))
+            elif mesh_alti == '4':
+                points = np.column_stack((md.mesh.x, md.mesh.y, 0. * md.mesh.x))
+            elif mesh_alti == '5':
+                alti_field = input("Which field should be used as 3rd dimension: ")
+                alti_var = eval(alti_field)
+                if np.shape(np.squeeze(alti_var)) == np.shape(md.mesh.x):
+                    points = np.column_stack((md.mesh.x, md.mesh.y, np.squeeze(alti_var)))
+                else:
+                    raise BadDimension('field given for 3rd dimension should be defined on vertices {} is not.'.format(alti_field))
+            else:
+                points = np.column_stack((md.mesh.x, md.mesh.y, md.geometry.surface))
+        elif dim == 3:
+            mesh_alti = '1'
+            points = np.column_stack((md.mesh.x, md.mesh.y, md.mesh.z))
+        else:
+            raise BadDimension('exportVTK does not support dimension {}'.format(dim))
+
+    if 'clipping' in kwargs.keys():
+        if kwargs['clipping'] is not None:
+            # first get the boundaries and check them
+            [Xmin, Xmax, Ymin, Ymax] = kwargs['clipping']
+            if Xmin > Xmax:
+                raise ClipError('Xmax ({}) should be larger than Xmin ({})'.format(Xmax, Xmin))
+            if Ymin > Ymax:
+                raise ClipError('Ymax ({}) should be larger than Ymin ({})'.format(Ymax, Ymin))
+            if Xmin > np.nanmax(points[:, 0]) or Xmax < np.nanmin(points[:, 0]):
+                raise ClipError('Your X boundaries [{}, {}] are outside of the model domain [{},{}]'.format(Xmin, Xmax, np.nanmin(points[:, 0]), np.nanmax(points[:, 0])))
+            if Ymin > np.nanmax(points[:, 1]) or Ymax < np.nanmin(points[:, 1]):
+                raise ClipError('Your Y boundaries [{}, {}] are outside of the model domain [{},{}]'.format(Ymin, Ymax, np.nanmin(points[:, 1]), np.nanmax(points[:, 1])))
+
+            #boundaries should be fine lets do stuff
+            InX = np.where(np.logical_and(points[:, 0] >= Xmin, points[:, 0] <= Xmax))
+            InY = np.where(np.logical_and(points[:, 1] >= Ymin, points[:, 1] <= Ymax))
+
+            Isinside = np.zeros(np.shape(points)[0], dtype=bool)
+            clip_convert_index = np.nan * np.ones(np.shape(points)[0])
+
+            #define the vertices that are within clipping window
+            Inclipping = np.intersect1d(InX, InY)
+            Isinside[Inclipping] = True
+            points = points[Inclipping, :]
+            num_of_points = np.shape(points)[0]
+
+            #go thorough the elements and keep those for which one node is in the clipped arrea
+            clipconnect = np.asarray([], dtype=int)
+            for elt in connect:
+                if set(elt).issubset(Inclipping):
+                    clipconnect = np.append(clipconnect, elt, axis=0)
+
+            #reshape
+            num_of_elt = int(np.size(clipconnect) / 3)
+            connect = clipconnect.reshape(num_of_elt, 3)
+
+            clip_convert_index = np.asarray([[i, np.where(Inclipping == i)[0][0]] for i, val in enumerate(clip_convert_index) if any(Inclipping == i)])
+            enveloppe_index = enveloppe_index[clip_convert_index[:, 0]]
+
+            #convert indexing and exclude elements that are partly outside of the region
+            for elt in range(0, num_of_elt):
+                try:
+                    connect[elt, 0] = clip_convert_index[np.where(clip_convert_index == connect[elt, 0])[0], 1][0]
+                except IndexError:
+                    connect[elt, 0] = -1
+                try:
+                    connect[elt, 1] = clip_convert_index[np.where(clip_convert_index == connect[elt, 1])[0], 1][0]
+                except IndexError:
+                    connect[elt, 1] = -1
+                try:
+                    connect[elt, 2] = clip_convert_index[np.where(clip_convert_index == connect[elt, 2])[0], 1][0]
+                except IndexError:
+                    connect[elt, 2] = -1
+
+            connect = connect[np.where(connect != -1)[0], :]
+            num_of_elt = np.shape(connect)[0]
+
+            if num_of_edges > 0:
+                clipedges = np.asarray([], dtype=int)
+                for edge in edges:
+                    if set(edge).issubset(Inclipping):
+                        clipedges = np.append(clipedges, edge, axis=0)
+
+                num_of_edges = int(np.size(clipedges) / 2)
+                edges = clipedges.reshape(num_of_edges, 2)
+
+                for edge in range(0, num_of_edges):
+                    try:
+                        edges[edge, 0] = clip_convert_index[np.where(clip_convert_index == edges[edge, 0])[0], 1][0]
+                    except IndexError:
+                        edges[edge, 0] = -1
+                    try:
+                        edges[edge, 1] = clip_convert_index[np.where(clip_convert_index == edges[edge, 1])[0], 1][0]
+                    except IndexError:
+                        edges[edge, 1] = -1
+                edges = edges[np.where(edges != -1)[0], :]
+                num_of_edges = np.shape(edges)[0]
+
+    # }}}
+
+    # write header and mesh {{{
+    if verbose > 3:
+        print('Now starting to write stuff')
+
+    if 'coarsetime' in kwargs.keys():
+        steplist = range(0, num_of_timesteps, kwargs['coarsetime'])
+    elif 'singletime' in kwargs.keys():
+        steplist = [kwargs['singletime']]
+    else:
+        steplist = range(0, num_of_timesteps)
+
+    for step in steplist:
+        if verbose > 2:
+            print('Writing for step {}'.format(step))
+        saved_cells = {}
+        saved_edges = {}
+        timestep = step
+        with open((filename + '/Timestep.vtk' + str(timestep) + '.vtk'), 'w+') as fid:
+            fid.write('# vtk DataFile Version 3.0 \n')
+            fid.write('Data for run {} \n'.format(md.miscellaneous.name))
+            fid.write('ASCII \n')
+            fid.write('DATASET UNSTRUCTURED_GRID \n')
+            fid.write('POINTS {:d} float\n'.format(num_of_points))
+            #updating z for mesh evolution
+            if moving_mesh and mesh_alti in ['1', '2']:
+                base = np.squeeze(res_struct.__dict__['TransientSolution'][step].__dict__['Base'][enveloppe_index])
+                thick_change_ratio = (np.squeeze(res_struct.__dict__['TransientSolution'][step].__dict__['Thickness'][enveloppe_index]) / md.geometry.thickness[enveloppe_index])
+                above_bed = points[:, 2] - md.geometry.base[enveloppe_index]
+                altitude = base + thick_change_ratio * above_bed
+            else:
+                altitude = points[:, 2]
+            for index, point in enumerate(points):
+                fid.write('{:f} {:f} {:f} \n'.format(point[0], point[1], altitude[index]))
+
+            fid.write('CELLS {:d} {:d}\n'.format((num_of_elt + num_of_edges), num_of_elt  * (point_per_elt + 1) + num_of_edges * 3))
+
+            for elt in range(0, num_of_elt):
+                if celltype == 5:
+                    fid.write('3 {:d} {:d} {:d}\n'.format(connect[elt, 0],
+                                                          connect[elt, 1],
+                                                          connect[elt, 2]))
+                elif celltype == 13:
+                    fid.write('6 {:d} {:d} {:d} {:d} {:d} {:d}\n'.format(connect[elt, 0],
+                                                                         connect[elt, 1],
+                                                                         connect[elt, 2],
+                                                                         connect[elt, 3],
+                                                                         connect[elt, 4],
+                                                                         connect[elt, 5]))
+            for edge in range(0, num_of_edges):
+                fid.write('2 {:d} {:d}\n'.format(edges[edge, 0],
+                                                 edges[edge, 1]))
+
+            fid.write('CELL_TYPES {:d}\n'.format(num_of_elt + num_of_edges))
+            for elt in range(0, num_of_elt):
+                fid.write('{:d}\n'.format(celltype))
+                for edge in range(0, num_of_edges):
+                    fid.write('3\n')  #3 is for lines
+
+            fid.write('POINT_DATA {:s} \n'.format(str(num_of_points)))
+            # }}}
+            # {{{loop over the different solution structures
+            # first check if there are solutions to grab
+            if 'solnames' in locals():
+                for sol in solnames:
+                    treated_res = []
+                    #dealing with results on different timesteps
+                    try:
+                        if(len(res_struct.__dict__[sol]) > timestep):
+                            timestep = step
+                        else:
+                            timestep = np.size(res_struct.__dict__[sol])
+                    except TypeError:
+                        #result as no len() so no timesteps
+                        timestep = 1
+
+                    #getting the  fields in the solution
+                    if(type(res_struct.__dict__[sol]).__name__ == 'solution'):
+                        spe_res_struct = res_struct.__dict__[sol].__getitem__(timestep)
+                        fieldnames = list(dict.keys(spe_res_struct.__dict__))
+                    elif(type(res_struct.__dict__[sol]).__name__ == 'solutionstep'):
+                        spe_res_struct = res_struct.__dict__[sol]
+                        fieldnames = list(dict.keys(spe_res_struct.__dict__))
+                    elif(type(res_struct.__dict__[sol]).__name__ == 'results'):  #this is a result without steps
+                        spe_res_struct = res_struct.__dict__[sol]
+                        fieldnames = list(dict.keys(spe_res_struct.__dict__))
+                    else:
+                        print("WARNING, solution type '{}' is not recognise, exported results might be wrong".format(type(res_struct.__dict__[sol])))
+                        spe_res_struct = res_struct.__dict__[sol]
+                        fieldnames = list(dict.keys(spe_res_struct.__dict__))
+
+                    #Sorting scalars, vectors and tensors
+                    tensors = [field for field in fieldnames if field[-2:] in ['xx', 'yy', 'xy', 'zz', 'xz', 'yz']]
+                    non_tensor = [field for field in fieldnames if field not in tensors]
+                    vectors = [field for field in non_tensor if field[-1] in ['x', 'y', 'z'] and field[-4:] not in ['Flux']]
+                    #check which field is a real result and print
+                    for field in fieldnames:
+                        if verbose > 2:
+                            print("Treating {}".format(field))
+                        if field in treated_res:
+                            if verbose > 2:
+                                print("{} is already done".format(field))
+                            continue
+                        elif field in vectors:
+                            if verbose > 2:
+                                print("{} is a vector".format(field))
+                            try:
+                                Vxstruct = np.squeeze(spe_res_struct.__dict__[field[:-1] + 'x'])
+                                Vystruct = np.squeeze(spe_res_struct.__dict__[field[:-1] + 'y'])
+                                treated_res += [field[:-1] + 'x', field[:-1] + 'y']
+                                if dim == 3 and field[:-1] + 'z' in fieldnames:
+                                    #some fields like adjoint or always 2D
+                                    Vzstruct = np.squeeze(spe_res_struct.__dict__[field[:-1] + 'z'])
+                                    treated_res += [field[:-1] + 'z']
+
+                            except KeyError:
+                                fieldnames += field
+                                vectors.remove(field)
+
+                            fid.write('VECTORS {} float \n'.format(field[:-1]))
+                            for node in range(0, num_of_points):
+                                Vx = cleanOutliers(Vxstruct[enveloppe_index[node]])
+                                Vy = cleanOutliers(Vystruct[enveloppe_index[node]])
+                                if dim == 3 and field[:-1] + 'z' in fieldnames:
+                                    Vz = cleanOutliers(Vzstruct[enveloppe_index[node]])
+                                    fid.write('{:f} {:f} {:f}\n'.format(Vx, Vy, Vz))
+                                else:
+                                    fid.write('{:f} {:f} {:f}\n'.format(Vx, Vy, 0))
+
+                        elif field in tensors:
+                            if verbose > 2:
+                                print("{} is a tensor".format(field))
+                            try:
+                                Txxstruct = np.squeeze(spe_res_struct.__dict__[field[:-2] + 'xx'])
+                                Txystruct = np.squeeze(spe_res_struct.__dict__[field[:-2] + 'xy'])
+                                Tyystruct = np.squeeze(spe_res_struct.__dict__[field[:-2] + 'yy'])
+                                treated_res += [field[:-2] + 'xx', field[:-2] + 'xy', field[:-2] + 'yy']
+                                if dim == 3:
+                                    Tzzstruct = np.squeeze(spe_res_struct.__dict__[field[:-2] + 'zz'])
+                                    Txzstruct = np.squeeze(spe_res_struct.__dict__[field[:-2] + 'xz'])
+                                    Tyzstruct = np.squeeze(spe_res_struct.__dict__[field[:-2] + 'yz'])
+                                    treated_res += [field[:-2] + 'zz', field[:-2] + 'xz', field[:-2] + 'yz']
+
+                            except KeyError:
+                                fieldnames += field
+                                tensors.remove(field)
+
+                            fid.write('TENSORS {} float \n'.format(field[:-2]))
+                            for node in range(0, num_of_points):
+                                Txx = cleanOutliers(Txxstruct[enveloppe_index[node]])
+                                Tyy = cleanOutliers(Tyystruct[enveloppe_index[node]])
+                                Txy = cleanOutliers(Txystruct[enveloppe_index[node]])
+                                if dim == 3:
+                                    Tzz = cleanOutliers(Tzzstruct[enveloppe_index[node]])
+                                    Txz = cleanOutliers(Txzstruct[enveloppe_index[node]])
+                                    Tyz = cleanOutliers(Tyzstruct[enveloppe_index[node]])
+                                    fid.write('{:f} {:f} {:f}\n'.format(Txx, Txy, Txz))
+                                    fid.write('{:f} {:f} {:f}\n'.format(Txy, Tyy, Tyz))
+                                    fid.write('{:f} {:f} {:f}\n'.format(Txz, Tyz, Tzz))
+                                elif dim == 2:
+                                    fid.write('{:f} {:f} {:f}\n'.format(Txx, Txy, 0))
+                                    fid.write('{:f} {:f} {:f}\n'.format(Txy, Tyy, 0))
+                                    fid.write('{:f} {:f} {:f}\n'.format(0, 0, 0))
+                        else:
+                            if np.size(spe_res_struct.__dict__[field]) == 1:
+                                if field == 'time':
+                                    current_time = spe_res_struct.__dict__[field]
+                                    #skipping integers
+                                continue
+                            elif np.size(spe_res_struct.__dict__[field]) == every_nodes:
+                                fid.write('SCALARS {} float 1 \n'.format(field))
+                                fid.write('LOOKUP_TABLE default\n')
+                                for node in range(0, num_of_points):
+                                    outval = cleanOutliers(np.squeeze(spe_res_struct.__dict__[field][enveloppe_index[node]]))
+                                    fid.write('{:f}\n'.format(outval))
+                            elif np.shape(spe_res_struct.__dict__[field])[0] == np.size(spe_res_struct.__dict__[field]) == every_cells:
+                                saved_cells[field] = np.squeeze(spe_res_struct.__dict__[field])
+                            elif np.shape(spe_res_struct.__dict__[field])[0] == np.size(spe_res_struct.__dict__[field]) == every_edges:
+                                saved_edges[field] = np.squeeze(spe_res_struct.__dict__[field])
+                            else:
+                                print("format for field {}.{} is not suported, field is skipped".format(sol, field))
+            # }}}
+            # loop on arguments, if something other than result is asked, do it now {{{
+            for other in args:
+                other_struct = md.__dict__[other]
+                othernames = (dict.keys(other_struct.__dict__))
+                for field in othernames:
+                    if np.size(other_struct.__dict__[field]) == 1:
+                        #skipping integers
+                        continue
+                    elif np.size(other_struct.__dict__[field]) == every_nodes:
+                        fid.write('SCALARS {} float 1 \n'.format(field))
+                        fid.write('LOOKUP_TABLE default\n')
+                        for node in range(0, num_of_points):
+                            outval = cleanOutliers(other_struct.__dict__[field][enveloppe_index[node]])
+                            fid.write('{:f}\n'.format(outval))
+                    elif np.shape(other_struct.__dict__[field])[0] == every_nodes + 1:
+                        #we are dealing with a forcing of some kind.
+                        forcing_time = other_struct.__dict__[field][-1, :]
+                        if any(forcing_time == current_time):
+                            forcing_index = np.where(forcing_time == current_time)
+                            forcing_val = other_struct.__dict__[field][:, forcing_index]
+                        elif forcing_time[0] > current_time:
+                            forcing_val = other_struct.__dict__[field][:, 0]
+                        elif forcing_time[-1] < current_time:
+                            forcing_val = other_struct.__dict__[field][:, -1]
+                        else:
+                            forcing_index = np.where(forcing_time < current_time)[-1][-1]
+                            delta_time = forcing_time[forcing_index + 1] - forcing_time[forcing_index]  #compute forcing Dt
+                            delta_current = current_time - forcing_time[forcing_index]  # time since last forcing
+                            ratio = delta_current / delta_time  #compute weighting factor for preceding forcing vallue
+                            forcing_evol = (other_struct.__dict__[field][:, forcing_index + 1] - other_struct.__dict__[field][:, forcing_index]) * ratio
+                            forcing_val = other_struct.__dict__[field][:, forcing_index] + forcing_evol
+                        # and now write it down
+                        fid.write('SCALARS {}_{} float 1 \n'.format(other, field))
+                        fid.write('LOOKUP_TABLE default\n')
+                        for node in range(0, num_of_points):
+                            outval = cleanOutliers(forcing_val[enveloppe_index[node]])
+                            fid.write('{:f}\n'.format(outval))
+                    elif np.shape(other_struct.__dict__[field])[0] == np.size(other_struct.__dict__[field]) == every_cells:
+                        saved_cells[field] = other_struct.__dict__[field]
+                    elif np.shape(other_struct.__dict__[field])[0] == np.size(other_struct.__dict__[field]) == every_edges:
+                        saved_edges[field] = other_struct.__dict__[field]
+                    else:
+                        print("format for field {}.{} is not suported, field is skipped".format(other, field))
+                        continue
+            # }}}
+            # Now writting cell variables {{{
+            if np.size(list(saved_cells.keys())) > 0:
+                fid.write('CELL_DATA {:d} \n'.format(num_of_elt + num_of_edges))
+                for key in list(saved_cells.keys()):
+                    fid.write('SCALARS {} float 1 \n'.format(key))
+                    fid.write('LOOKUP_TABLE default\n')
+                    for cell in range(0, num_of_elt):
+                        outval = cleanOutliers(saved_cells[key][cell])
+                        fid.write('{:f}\n'.format(outval))
+                    for edge in range(0, num_of_edges):
+                        fid.write('{:f}\n'.format(-9999.999))
+            # }}}
+            # Now writting edge variables {{{
+            if np.size(list(saved_edges.keys())) > 0:
+                for key in list(saved_edges.keys()):
+                    fid.write('SCALARS {} float 1 \n'.format(key))
+                    fid.write('LOOKUP_TABLE default\n')
+                    for cell in range(0, num_of_elt):
+                        fid.write('{:f}\n'.format(-9999.999))
+                    for edge in range(0, num_of_edges):
+                        outval = cleanOutliers(saved_edges[key][edge])
+                        fid.write('{:f}\n'.format(outval))
+    # }}}
+
+
+def cleanOutliers(Val):
+    #paraview does not like NaN, replacing
+    if np.isnan(Val):
+        CleanVal = -9999.999
+    #also checking for very small value that mess up
+    elif (abs(Val) < 1.0e-20):
+        CleanVal = 0.0
+    else:
+        CleanVal = Val
+    return CleanVal
+
+
+class BadDimension(Exception):
+    """The required dimension is not supported yet."""
+
+
+class BadOption(Exception):
+    """The given option does not exist."""
+
+
+class ClipError(Exception):
+    """Error while trying to clip the domain."""
Index: /issm/trunk/src/m/paraview/exportVTU.py
===================================================================
--- /issm/trunk/src/m/paraview/exportVTU.py	(revision 28013)
+++ /issm/trunk/src/m/paraview/exportVTU.py	(revision 28013)
@@ -0,0 +1,723 @@
+import numpy as np
+from base64 import b64encode
+from os import path, remove, mkdir
+from glob import glob
+
+
+def exportVTU(filename, md, *args, enveloppe=False, fmtout="binary", **kwargs):
+    '''
+    vtu export
+    function exportVTU(filename, md)
+    Exports resluts in XML based vtu format for visualisation in Paraview.
+    Hopefully it is based on the treatment for export VTK and only the output part is modified.
+    (only work for triangle and wedges based on their number of nodes)
+
+    Usage:
+    exportVTU('FileName', md)
+    exportVTU('FileName', md, 'geometry', 'mesh')
+    exportVTU('FileName', md, 'geometry', 'mesh', enveloppe = True)
+
+    DirName is the name of the output directory, each timestep then has it
+    own file ('Timestep.vtkX.vtk') with X the number of the output step
+    enveloppe is an option keeping only the enveloppe of the md (it is False by default)
+
+    Options:
+        - clipping : allows to reduce your domain (cliping=[Xmin, Xmax, Ymin, Ymax])
+        - coarsetime : output one timestep every X (coarsetime=X, with X an integer)
+        - singletime : output only timestep X (singletime=X, with X an integer or -1 for last)
+
+    TODO: - make time easily accessible
+
+    Basile de Fleurian:
+    '''
+    #verbosity of the code, 0 is no messages, 5 is chatty
+    verbose = 0
+
+    #first check if the user asked for some options to be applied
+    for key in kwargs.keys():
+        if key not in ['clipping', 'coarsetime', 'singletime']:
+            raise BadOption('Provided option "{}" is not supported possibilities are : {}'.format(key, ['cliping', 'coarsetime', 'singletime']))
+
+    if 'coarsetime' in kwargs.keys() and 'singletime' in kwargs.keys():
+        raise BadOption("You can't specify both 'coarsetime' and 'singletime'")
+
+    # File checking and creation {{{
+    Dir = path.basename(filename)
+    if path.exists(filename):
+        print(('File {} allready exist'.format(filename)))
+        newname = input('Give a new name or "delete" to replace: ')
+        if newname == 'delete':
+            filelist = glob(filename + '/* ')
+            for oldfile in filelist:
+                remove(oldfile)
+        else:
+            print(('New file name is {}'.format(newname)))
+            filename = newname
+            mkdir(filename)
+    else:
+        mkdir(filename)
+
+    # }}}
+
+    # make an alias for results {{{
+    if verbose > 3:
+        print('Getting accessory variables')
+    res_struct = md.results
+    moving_mesh = False
+    if(type(res_struct) != list):
+        #Getting all the solutions of the md
+        solnames = dict.keys(res_struct.__dict__)
+        num_of_timesteps = 1
+        #%building solutionstructure
+        for solution in solnames:
+            #looking for multiple time steps
+            try:
+                if len(res_struct.__dict__[solution]) > num_of_timesteps:
+                    num_of_timesteps = len(res_struct.__dict__[solution])
+                    num_of_timesteps = int(num_of_timesteps)
+                    #If Suface is in the resluts we considet that we have a moving mesh
+                    if 'Surface' in dict.keys(res_struct.__dict__[solution][0].__dict__):
+                        moving_mesh = True
+            except TypeError:
+                continue
+    else:
+        num_of_timesteps = 1
+    # }}}
+
+    # get the mesh related variables {{{
+    if verbose > 3:
+        print('Now treating  the mesh')
+    #first get the general things
+    dim = int(md.mesh.domaintype()[0])
+    every_nodes = md.mesh.numberofvertices
+    every_cells = md.mesh.numberofelements
+    try:
+        every_edges = md.mesh.numberofedges
+    except AttributeError:
+        #3D meshes do not have edges
+        every_edges = 0
+
+    if np.shape(md.mesh.elements)[1] == 3 or enveloppe:
+        point_per_elt = 3
+        celltype = 5  #triangles
+    elif np.shape(md.mesh.elements)[1] == 6:
+        point_per_elt = 6
+        celltype = 13  #wedges
+    else:
+        raise BadDimension('exportVTU does not support your element type')
+
+    #only keep the envelope and not the bulk of the results.
+    if enveloppe:  #Treating enveloppe{{{
+        if dim == 3:
+            mesh_alti = '0'
+            is_enveloppe = np.logical_or(md.mesh.vertexonbase, md.mesh.vertexonsurface)
+            enveloppe_index = np.where(is_enveloppe)[0]
+            convert_index = np.nan * np.ones(np.shape(md.mesh.x))
+            convert_index = np.asarray([[i, np.where(enveloppe_index == i)[0][0]] for i, val in enumerate(convert_index) if any(enveloppe_index == i)])
+
+            num_of_points = np.size(enveloppe_index)
+            points = np.column_stack((md.mesh.x[enveloppe_index],
+                                      md.mesh.y[enveloppe_index],
+                                      md.mesh.z[enveloppe_index]))
+
+            num_of_elt = np.size(np.where(np.isnan(md.mesh.lowerelements))) + np.size(np.where(np.isnan(md.mesh.upperelements)))
+            connect = md.mesh.elements[np.where(is_enveloppe[md.mesh.elements - 1])].reshape(int(num_of_elt), 3) - 1
+            for elt in range(0, num_of_elt):
+                connect[elt, 0] = convert_index[np.where(convert_index == connect[elt, 0])[0], 1][0]
+                connect[elt, 1] = convert_index[np.where(convert_index == connect[elt, 1])[0], 1][0]
+                connect[elt, 2] = convert_index[np.where(convert_index == connect[elt, 2])[0], 1][0]
+
+            num_of_edges = every_edges  #looks like edges is only defined on the 2d mesh
+            if num_of_edges > 0:
+                edges = md.mesh.edges[:, 0:2].reshape(int(num_of_edges), 2) - 1
+
+        else:
+            raise BadDimension("exportVTU can't get an enveloppe for  dimension {}".format(dim))
+    # }}}
+
+    else:  #treating mesh{{{
+        #we get all the mesh, mainly defining dummies
+        num_of_elt = every_cells
+        connect = md.mesh.elements - 1
+        num_of_edges = every_edges
+        if num_of_edges > 0:
+            edges = md.mesh.edges[:, 0:2].reshape(int(num_of_edges), 2) - 1
+        enveloppe_index = np.arange(0, np.size(md.mesh.x))
+        num_of_points = every_nodes
+        if dim == 2:
+            mesh_alti = input('''This is a 2D model, what should be the 3rd dimension of the mesh :
+                                        1 : md.geometry.surface
+                                        2 : md.geometry.base
+                                        3 : md.geometry.bed
+                                        4 : 0
+                                        5 : Custom\n''')
+            if mesh_alti == '1':
+                points = np.column_stack((md.mesh.x, md.mesh.y, md.geometry.surface))
+            elif mesh_alti == '2':
+                points = np.column_stack((md.mesh.x, md.mesh.y, md.geometry.base))
+            elif mesh_alti == '3':
+                points = np.column_stack((md.mesh.x, md.mesh.y, md.geometry.bed))
+            elif mesh_alti == '4':
+                points = np.column_stack((md.mesh.x, md.mesh.y, 0. * md.mesh.x))
+            elif mesh_alti == '5':
+                alti_field = input("Which field should be used as 3rd dimension: ")
+                alti_var = eval(alti_field)
+                if np.shape(np.squeeze(alti_var)) == np.shape(md.mesh.x):
+                    points = np.column_stack((md.mesh.x, md.mesh.y, np.squeeze(alti_var)))
+                else:
+                    raise BadDimension('field given for 3rd dimension should be defined on vertices {} is not.'.format(alti_field))
+            else:
+                points = np.column_stack((md.mesh.x, md.mesh.y, md.geometry.surface))
+        elif dim == 3:
+            mesh_alti = '0'
+            points = np.column_stack((md.mesh.x, md.mesh.y, md.mesh.z))
+        else:
+            raise BadDimension('exportVTU does not support dimension {}'.format(dim))
+    # }}}
+
+    if 'clipping' in kwargs.keys():
+        if kwargs['clipping'] is not None:
+            # first get the boundaries and check them
+            [Xmin, Xmax, Ymin, Ymax] = kwargs['clipping']
+            if Xmin > Xmax:
+                raise ClipError('Xmax ({}) should be larger than Xmin ({})'.format(Xmax, Xmin))
+            if Ymin > Ymax:
+                raise ClipError('Ymax ({}) should be larger than Ymin ({})'.format(Ymax, Ymin))
+            if Xmin > np.nanmax(points[:, 0]) or Xmax < np.nanmin(points[:, 0]):
+                raise ClipError('Your X boundaries [{}, {}] are outside of the model domain [{},{}]'.format(Xmin, Xmax, np.nanmin(points[:, 0]), np.nanmax(points[:, 0])))
+            if Ymin > np.nanmax(points[:, 1]) or Ymax < np.nanmin(points[:, 1]):
+                raise ClipError('Your Y boundaries [{}, {}] are outside of the model domain [{},{}]'.format(Ymin, Ymax, np.nanmin(points[:, 1]), np.nanmax(points[:, 1])))
+
+            #boundaries should be fine lets do stuff
+            InX = np.where(np.logical_and(points[:, 0] >= Xmin, points[:, 0] <= Xmax))
+            InY = np.where(np.logical_and(points[:, 1] >= Ymin, points[:, 1] <= Ymax))
+
+            Isinside = np.zeros(np.shape(points)[0], dtype=bool)
+            clip_convert_index = np.nan * np.ones(np.shape(points)[0])
+
+            #define the vertices that are within clipping window
+            Inclipping = np.intersect1d(InX, InY)
+            Isinside[Inclipping] = True
+            points = points[Inclipping, :]
+            num_of_points = np.shape(points)[0]
+
+            #go thorough the elements and keep those for which one node is in the clipped arrea
+            clipconnect = np.asarray([], dtype=int)
+            for elt in connect:
+                if set(elt).issubset(Inclipping):
+                    clipconnect = np.append(clipconnect, elt, axis=0)
+
+            #reshape
+            num_of_elt = int(np.size(clipconnect) / 3)
+            connect = clipconnect.reshape(num_of_elt, 3)
+
+            clip_convert_index = np.asarray([[i, np.where(Inclipping == i)[0][0]] for i, val in enumerate(clip_convert_index) if any(Inclipping == i)])
+            enveloppe_index = enveloppe_index[clip_convert_index[:, 0]]
+
+            #convert indexing and exclude elements that are partly outside of the region
+            for elt in range(0, num_of_elt):
+                try:
+                    connect[elt, 0] = clip_convert_index[np.where(clip_convert_index == connect[elt, 0])[0], 1][0]
+                except IndexError:
+                    connect[elt, 0] = -1
+                try:
+                    connect[elt, 1] = clip_convert_index[np.where(clip_convert_index == connect[elt, 1])[0], 1][0]
+                except IndexError:
+                    connect[elt, 1] = -1
+                try:
+                    connect[elt, 2] = clip_convert_index[np.where(clip_convert_index == connect[elt, 2])[0], 1][0]
+                except IndexError:
+                    connect[elt, 2] = -1
+
+            connect = connect[np.where(connect != -1)[0], :]
+            num_of_elt = np.shape(connect)[0]
+
+            if num_of_edges > 0:
+                clipedges = np.asarray([], dtype=int)
+                for edge in edges:
+                    if set(edge).issubset(Inclipping):
+                        clipedges = np.append(clipedges, edge, axis=0)
+
+                num_of_edges = int(np.size(clipedges) / 2)
+                edges = clipedges.reshape(num_of_edges, 2)
+
+                for edge in range(0, num_of_edges):
+                    try:
+                        edges[edge, 0] = clip_convert_index[np.where(clip_convert_index == edges[edge, 0])[0], 1][0]
+                    except IndexError:
+                        edges[edge, 0] = -1
+                    try:
+                        edges[edge, 1] = clip_convert_index[np.where(clip_convert_index == edges[edge, 1])[0], 1][0]
+                    except IndexError:
+                        edges[edge, 1] = -1
+                edges = edges[np.where(edges != -1)[0], :]
+                num_of_edges = np.shape(edges)[0]
+
+    # }}}
+
+    # write header and mesh {{{
+    if verbose > 3:
+        print('Now starting to write stuff')
+
+    if 'coarsetime' in kwargs.keys():
+        steplist = range(0, num_of_timesteps, kwargs['coarsetime'])
+    elif 'singletime' in kwargs.keys():
+        steplist = [kwargs['singletime']]
+    else:
+        steplist = range(0, num_of_timesteps)
+
+    for step in steplist:
+        if verbose > 2:
+            print('Writing for step {}'.format(step))
+
+        with open(('{}/{}_{}.vtu').format(filename, Dir, step), 'w+') as fid:
+            fid.write('<?xml version="1.0"?>\n')
+            fid.write('<VTKFile type="UnstructuredGrid" version="1.0" byte_order="LittleEndian">\n')
+            fid.write('  <UnstructuredGrid>\n')
+            fid.write('    <Piece NumberOfPoints="{}"  NumberOfCells="{}">\n'.format(num_of_points, num_of_elt + num_of_edges))
+            tensors = []
+            vectors = []
+            scalars = []
+            for sol in solnames:
+                #getting the  fields in the solution
+                if type(res_struct.__dict__[sol]).__name__ == 'solution':
+                    spe_res_struct = res_struct.__dict__[sol].__getitem__(0)
+                    fieldnames = list(dict.keys(spe_res_struct.__dict__))
+                elif type(res_struct.__dict__[sol]).__name__ in ['solutionstep', 'results']:
+                    spe_res_struct = res_struct.__dict__[sol]
+                    fieldnames = list(dict.keys(spe_res_struct.__dict__))
+                else:
+                    print("WARNING, solution type '{}' is not recognise, exported results might be wrong".format(type(res_struct.__dict__[sol])))
+                    spe_res_struct = res_struct.__dict__[sol]
+                    fieldnames = list(dict.keys(spe_res_struct.__dict__))
+
+                loctensors, locvectors, locscalars = SortFields(fieldnames)
+                tensors.extend(loctensors)
+                vectors.extend(locvectors)
+                scalars.extend(locscalars)
+            for other in args:
+                other_struct = md.__dict__[other]
+                othernames = list(dict.keys(other_struct.__dict__))
+
+                loctensors, locvectors, locscalars = SortFields(othernames)
+                tensors.extend(loctensors)
+                vectors.extend(locvectors)
+                scalars.extend(locscalars)
+
+            fid.write('      <PointData Scalars="{}"'.format(scalars))
+            if len(vectors) > 0:
+                fid.write(' Vectors="{}"'.format(vectors[:-1]))
+            if len(tensors) > 0:
+                fid.write(' Tensors="{}"'.format(tensors[:-2]))
+            fid.write('>\n')
+
+            saved_cells = {}
+            saved_edges = {}
+            saved_const = {}
+            timestep = step
+
+            # }}}
+            # {{{loop over the different solution structures
+            # first check if there are solutions to grab
+            for sol in solnames:
+                treated_res = []
+                #dealing with results on different timesteps
+                try:
+                    if(len(res_struct.__dict__[sol]) > timestep):
+                        timestep = step
+                    else:
+                        timestep = np.size(res_struct.__dict__[sol])
+                except TypeError:
+                    #result as no len() so no timesteps
+                    timestep = 1
+
+                #getting the  fields in the solution
+                if(type(res_struct.__dict__[sol]).__name__ == 'solution'):
+                    spe_res_struct = res_struct.__dict__[sol].__getitem__(timestep)
+                    fieldnames = list(dict.keys(spe_res_struct.__dict__))
+                elif(type(res_struct.__dict__[sol]).__name__ == 'solutionstep'):
+                    spe_res_struct = res_struct.__dict__[sol]
+                    fieldnames = list(dict.keys(spe_res_struct.__dict__))
+                elif(type(res_struct.__dict__[sol]).__name__ == 'results'):  #this is a result without steps
+                    spe_res_struct = res_struct.__dict__[sol]
+                    fieldnames = list(dict.keys(spe_res_struct.__dict__))
+                else:
+                    print("WARNING, solution type '{}' is not recognise, exported results might be wrong".format(type(res_struct.__dict__[sol])))
+                    spe_res_struct = res_struct.__dict__[sol]
+                    fieldnames = list(dict.keys(spe_res_struct.__dict__))
+
+                tensors, vectors, ScalarNames = SortFields(fieldnames)
+
+                #check which field is a real result and print
+                for field in fieldnames:
+                    if field in treated_res:
+                        if verbose > 2:
+                            print("{}.{} is already done".format(sol, field))
+                        continue
+
+                    elif field in vectors:
+                        if verbose > 2:
+                            print("Treating {}.{} as a vector ".format(sol, field))
+                        TreatVector(fid, fmtout, spe_res_struct, sol, field, treated_res, enveloppe_index)
+
+                    elif field in tensors:
+                        if verbose > 2:
+                            print("Treating {}.{} as a tensor ".format(sol, field))
+                        TreatTensor(fid, fmtout, spe_res_struct, sol, field, treated_res, enveloppe_index)
+
+                    else:
+                        if np.size(spe_res_struct.__dict__[field]) == 1:
+                            if verbose > 2:
+                                print("Treating {}.{} as a constant ".format(sol, field))
+                            if field == 'time':
+                                current_time = spe_res_struct.__dict__[field]
+                            saved_const[".".join((sol, field))] = np.squeeze(spe_res_struct.__dict__[field])
+
+                        elif np.size(spe_res_struct.__dict__[field]) == every_nodes:
+                            if verbose > 2:
+                                print("Treating {}.{} as a node variable ".format(sol, field))
+                            TreatScalar(fid, fmtout, spe_res_struct, sol, field, enveloppe_index)
+
+                        elif np.shape(spe_res_struct.__dict__[field])[0] == np.size(spe_res_struct.__dict__[field]) == every_cells:
+                            saved_cells[".".join((sol, field))] = np.squeeze(spe_res_struct.__dict__[field])
+
+                        elif np.shape(spe_res_struct.__dict__[field])[0] == np.size(spe_res_struct.__dict__[field]) == every_edges and num_of_edges > 0:
+                            saved_edges[".".join((sol, field))] = np.squeeze(spe_res_struct.__dict__[field])
+
+                        else:
+                            print("format for field {}.{} is not suported, field is skipped".format(sol, field))
+            # }}}
+            # loop on arguments, if something other than result is asked, do it now {{{
+            for other in args:
+                treated_res = []
+                if verbose > 3:
+                    print("Now treating {}".format(other))
+                other_struct = md.__dict__[other]
+                othernames = list(dict.keys(other_struct.__dict__))
+                tensors, vectors, ScalarNames = SortFields(othernames)
+                for field in othernames:
+                    if field in treated_res:
+                        if verbose > 2:
+                            print("{}.{} is already done".format(other, field))
+                        continue
+                    elif field in vectors:
+                        TreatVector(fid, fmtout, other_struct, other, field, treated_res, enveloppe_index)
+
+                    elif field in tensors:
+                        if verbose > 2:
+                            print("Treating {}.{} as a tensor ".format(sol, field))
+                        TreatTensor(fid, fmtout, other_struct, other, field, treated_res, enveloppe_index)
+                        #now treating fields that are not vectors or tensors
+
+                    else:
+                        if np.size(other_struct.__dict__[field]) == 1:
+                            if verbose > 2:
+                                print("Treating {}.{} as an constant ".format(other, field))
+                            if field == 'time':
+                                current_time = other_struct.__dict__[field]
+                            saved_const[".".join((other, field))] = np.squeeze(other_struct.__dict__[field])
+
+                        elif np.size(other_struct.__dict__[field]) == every_nodes:
+                            if verbose > 2:
+                                print("Treating {}.{} as a node variable ".format(other, field))
+                            TreatScalar(fid, fmtout, other_struct, other, field, enveloppe_index)
+
+                        elif np.shape(other_struct.__dict__[field])[0] == every_nodes + 1:
+                            if verbose > 3:
+                                print("Treating {}.{} as a node forcing variable".format(other, field))
+                            TreatForcing(fid, fmtout, other_struct, other, field, treated_res, enveloppe_index, current_time)
+
+                        elif np.shape(other_struct.__dict__[field])[0] == np.size(other_struct.__dict__[field]) == every_cells:
+                            if verbose > 3:
+                                print("Treating {}.{} as a cell variable".format(other, field))
+                            saved_cells[".".join((other, field))] = np.squeeze(other_struct.__dict__[field])
+
+                        elif np.shape(other_struct.__dict__[field])[0] == np.size(other_struct.__dict__[field]) == every_edges and num_of_edges > 0:
+                            if verbose > 3:
+                                print("Treating {}.{} as an edge variable".format(other, field))
+                            saved_edges[".".join((other, field))] = np.squeeze(other_struct.__dict__[field])
+
+                        else:
+                            print("format for field {}.{} is not suported, field is skipped".format(other, field))
+            fid.write('      </PointData>\n')
+            # }}}
+            # Now writting cell variables {{{
+            if np.size(list(saved_cells.keys())) > 0 or np.size(list(saved_edges.keys())) > 0:
+                cellkeys = list(saved_cells.keys())
+                edgekeys = list(saved_edges.keys())
+                if len(cellkeys) > 0 and len(edgekeys) > 0:
+                    savekeys = list(saved_cells.keys())
+                    savekeys.extend(edgekeys)
+                elif len(cellkeys) > 0:
+                    savekeys = cellkeys
+                elif len(edgekeys) > 0:
+                    savekeys = edgekeys
+                if verbose > 3:
+                    print("Saving cell for {}".format(savekeys))
+                fid.write('      <CellData Scalars="{}">\n'.format(savekeys))
+
+            if np.size(list(saved_cells.keys())) > 0:
+                for key in cellkeys:
+                    outval = saved_cells[key]
+                    if num_of_edges > 0:
+                        if fmtout == "binary":
+                            outval = np.append(outval, np.nan * np.ones((num_of_edges)))
+                        else:
+                            outval = np.append(outval, -9999.999 * np.ones((num_of_edges)))
+                    if verbose > 3:
+                        print("writing {} values of type {} for {}".format(len(outval), outval.dtype, key))
+
+                    fid.write('        <DataArray type="Float32" Name="{}" format="{}">\n'.format(key, fmtout))
+                    WriteIt(outval, fid, fmtout)
+                    fid.write('        </DataArray>\n')
+
+            # }}}
+            # Now writting edge variables {{{
+            if np.size(list(saved_edges.keys())) > 0:
+                for key in list(saved_edges.keys()):
+                    if fmtout == "binary":
+                        outval = np.nan * np.ones((num_of_elt))
+                    else:
+                        outval = -9999.999 * np.ones((num_of_elt))
+                    outval = np.append(outval, saved_edges[key])
+                    fid.write('        <DataArray type="Float32" Name="{}" format="{}">\n'.format(key, fmtout))
+                    WriteIt(outval, fid, fmtout)
+                    fid.write('        </DataArray>\n')
+            if np.size(list(saved_cells.keys())) > 0 or np.size(list(saved_edges.keys())) > 0:
+                fid.write('      </CellData>\n')
+            # }}}
+
+            # Now writting constants # {{{
+            if np.size(list(saved_const.keys())) > 0:
+                fid.write('      <FieldData>\n')
+                for key in list(saved_const.keys()):
+                    fid.write('        <DataArray type="Float32" Name="{}" format="{}">\n'.format(key, fmtout))
+                    WriteIt(saved_const[key], fid, fmtout)
+                    fid.write('        </DataArray>\n')
+                fid.write('      </FieldData>\n')
+            # }}}
+
+            #Mesh Treatment and write, it needs to loop to allow variable geometry {{{
+            #updating z for mesh evolution
+            if moving_mesh and mesh_alti == '1':
+                points[:, 2] = np.squeeze(res_struct.__dict__['TransientSolution'][step].__dict__['Surface'][enveloppe_index])
+            elif moving_mesh and mesh_alti == '2':
+                points[:, 2] = np.squeeze(res_struct.__dict__['TransientSolution'][step].__dict__['Base'][enveloppe_index])
+
+            #Now write points locations
+            fid.write('      <Points>\n')
+            fid.write('        <DataArray type="Float32" Name="Points" NumberOfComponents="3" format="{}">\n'.format(fmtout))
+            WriteIt(points, fid, fmtout)
+            fid.write('        </DataArray>\n')
+            fid.write('      </Points>\n')
+
+            #cells are a combination of element and edges
+            # we need node conectivity offsets and types
+            #offsets is the cummulative index of the last elemant of each cell (1 indexed)
+            flat_elt = connect.flatten()
+            elt_offset = np.arange(0, num_of_elt * point_per_elt, point_per_elt, dtype=np.int64) + point_per_elt
+            elt_type = celltype * np.ones((num_of_elt), dtype=np.uint8)
+            if num_of_edges > 0:
+                flat_edges = edges.flatten()
+                flat_cells = np.hstack((flat_elt, flat_edges))
+                edge_offset = np.arange(0, num_of_edges * 2, 2) + 2 + elt_offset[-1]
+                cell_offset = np.hstack((elt_offset, edge_offset))
+                edge_type = 3 * np.ones((num_of_edges), dtype=np.uint8)
+                cell_type = np.hstack((elt_type, edge_type))
+            else:
+                flat_cells = flat_elt
+                cell_offset = elt_offset
+                cell_type = elt_type
+
+            if verbose > 3:
+                print("""writing mesh structure:
+                                  connectivity of shape {}
+                                  cell offset of shape {}
+                                  cell types of shape{}""".format(np.shape(flat_cells), np.shape(cell_offset), np.shape(cell_type)))
+            #write cells Informations
+            fid.write('      <Cells>\n')
+            fid.write('        <DataArray type="Int64" Name="connectivity" format="{}">\n'.format(fmtout))
+            WriteIt(flat_cells, fid, fmtout)
+            fid.write('        </DataArray>\n')
+            fid.write('        <DataArray type="Int64" Name="offsets" format="{}">\n'.format(fmtout))
+            WriteIt(cell_offset, fid, fmtout)
+            fid.write('        </DataArray>\n')
+            fid.write('        <DataArray type="UInt8" Name="types" format="{}">\n'.format(fmtout))
+            WriteIt(cell_type, fid, fmtout)
+            fid.write('        </DataArray>\n')
+            fid.write('      </Cells>\n')
+            fid.write('    </Piece>\n')
+            fid.write('  </UnstructuredGrid>\n')
+            fid.write('</VTKFile>\n')
+            # }}}
+
+
+def SortFields(fieldnames):
+    #we check on sizes so there is a slight chance that logs can be picked as results, we remove them to avoid that
+    for trashfield in ['errlog', 'outlog']:
+        if trashfield in fieldnames:
+            fieldnames.remove(trashfield)
+
+    #Sorting scalars, vectors and tensors
+    tensors = [field for field in fieldnames if field[-2:] in ['xx', 'yy', 'xy', 'zz', 'xz', 'yz']]
+    non_tensor = [field for field in fieldnames if field not in tensors]
+    vectors = [field for field in non_tensor if field[-1] in ['x', 'y', 'z']]
+    #get the name of scalar fields remove, vectors, tensors and things that are not proper results
+    scalars = [field for field in fieldnames if field not in tensors + vectors]
+    dump = ["ConvergenceNumSteps", "step", "time"]
+    for trash in dump:
+        try:
+            scalars.remove(trash)
+        except ValueError:
+            [scalars.remove(name) for name in scalars if trash in name]
+            continue
+    #clean up vector and tensors that might be here and should not
+    # we check that at least two of the vector component are here
+    for namelist in [vectors, tensors]:
+        for name in list(namelist):
+            coord = name[-1]
+            if coord == 'x' and name[:-1] + 'y' in namelist:
+                continue
+            elif coord == 'y' and name[:-1] + 'x' in namelist:
+                continue
+            elif coord == 'z' and name[:-1] + 'x' in namelist:
+                continue
+            else:
+                scalars.extend([name])
+                namelist.remove(name)
+    return tensors, vectors, scalars
+
+
+def TreatScalar(fid, fmtout, structure, structname, fieldname, enveloppe_index):
+    array = np.squeeze(structure.__dict__[fieldname][enveloppe_index])
+    fid.write('        <DataArray type="Float32" Name="{}" NumberOfComponents="1" format="{}">\n'.format(".".join((structname, fieldname)), fmtout))
+    WriteIt(array, fid, fmtout)
+    fid.write('        </DataArray>\n')
+
+
+def TreatVector(fid, fmtout, structure, structname, fieldname, treated_res, enveloppe_index):
+    Vxstruct = np.squeeze(structure.__dict__[fieldname[:-1] + 'x'])
+    Vystruct = np.squeeze(structure.__dict__[fieldname[:-1] + 'y'])
+    Vx = Vxstruct[enveloppe_index]
+    Vy = Vystruct[enveloppe_index]
+    treated_res += [fieldname[:-1] + 'x', fieldname[:-1] + 'y']
+    try:
+        Vzstruct = np.squeeze(structure.__dict__[fieldname[:-1] + 'z'])
+        treated_res += [fieldname[:-1] + 'z']
+        Vz = Vzstruct[enveloppe_index]
+    except KeyError:
+        Vz = np.zeros(np.shape(Vx))
+    Vector = (np.vstack((Vx, Vy, Vz)).T).flatten()
+    fid.write('        <DataArray type="Float32" Name="{}" NumberOfComponents="3" format="{}">\n'.format(".".join((structname, fieldname[:-1])), fmtout))
+    WriteIt(Vector, fid, fmtout)
+    fid.write('        </DataArray>\n')
+
+
+def TreatTensor(fid, fmtout, structure, structname, fieldname, treated_res, enveloppe_index):
+    Txxstruct = np.squeeze(structure.__dict__[fieldname[:-2] + 'xx'])
+    Txystruct = np.squeeze(structure.__dict__[fieldname[:-2] + 'xy'])
+    Tyystruct = np.squeeze(structure.__dict__[fieldname[:-2] + 'yy'])
+    treated_res += [fieldname[:-2] + 'xx', fieldname[:-2] + 'xy', fieldname[:-2] + 'yy']
+    Txx = Txxstruct[enveloppe_index]
+    Tyy = Tyystruct[enveloppe_index]
+    Txy = Txystruct[enveloppe_index]
+    try:
+        Tzzstruct = np.squeeze(structure.__dict__[fieldname[:-2] + 'zz'])
+        Txzstruct = np.squeeze(structure.__dict__[fieldname[:-2] + 'xz'])
+        Tyzstruct = np.squeeze(structure.__dict__[fieldname[:-2] + 'yz'])
+        treated_res += [fieldname[:-2] + 'zz', fieldname[:-2] + 'xz', fieldname[:-2] + 'yz']
+        Tzz = Tzzstruct[enveloppe_index]
+        Txz = Txzstruct[enveloppe_index]
+        Tyz = Tyzstruct[enveloppe_index]
+    except KeyError:
+        Tzz = np.zeros(np.shape(Txx))
+        Txz = np.zeros(np.shape(Txx))
+        Tyz = np.zeros(np.shape(Txx))
+
+    Tensor = (np.vstack((Txx, Tyy, Tzz, Txy, Tyz, Txz)).T).flatten()
+    fid.write('        <DataArray type="Float32" Name="{}" NumberOfComponents="6" format="{}">\n'.format(".".join((structname, fieldname[:-1])), fmtout))
+    WriteIt(Tensor, fid, fmtout)
+    fid.write('        </DataArray>\n')
+
+
+def TreatForcing(fid, fmtout, structure, structname, fieldname, treated_res, enveloppe_index, current_time):
+    #we are dealing with a forcing of some kind.
+    forcing_time = structure.__dict__[fieldname][-1, :]
+    if any(forcing_time == current_time):
+        forcing_index = np.where(forcing_time == current_time)
+        forcing_val = structure.__dict__[fieldname][:, forcing_index]
+    elif forcing_time[0] > current_time:
+        forcing_val = structure.__dict__[fieldname][:, 0]
+    elif forcing_time[-1] < current_time:
+        forcing_val = structure.__dict__[fieldname][:, -1]
+    else:
+        forcing_index = np.where(forcing_time < current_time)[-1][-1]
+        delta_time = forcing_time[forcing_index + 1] - forcing_time[forcing_index]  #compute forcing Dt
+        delta_current = current_time - forcing_time[forcing_index]  # time since last forcing
+        ratio = delta_current / delta_time  #compute weighting factor for preceding forcing vallue
+        forcing_evol = (structure.__dict__[fieldname][:, forcing_index + 1] - structure.__dict__[fieldname][:, forcing_index]) * ratio
+        forcing_val = structure.__dict__[fieldname][:, forcing_index] + forcing_evol
+    array = forcing_val[enveloppe_index]
+    # and now write it down
+    fid.write('        <DataArray type="Float32" Name="{}" NumberOfComponents="1" format="{}">\n'.format(".".join((structname, fieldname)), fmtout))
+    WriteIt(array, fid, fmtout)
+    fid.write('        </DataArray>\n')
+
+
+def WriteIt(Data, fid, fmtout):
+    vtu_to_numpy_type = {
+        "Float32": np.dtype(np.float32),
+        "Float64": np.dtype(np.float64),
+        "Int8": np.dtype(np.int8),
+        "Int16": np.dtype(np.int16),
+        "Int32": np.dtype(np.int32),
+        "Int64": np.dtype(np.int64),
+        "UInt8": np.dtype(np.uint8),
+        "UInt16": np.dtype(np.uint16),
+        "UInt32": np.dtype(np.uint32),
+        "UInt64": np.dtype(np.uint64),
+    }
+    if fmtout == 'binary':
+        try:
+            datatype = Data.dtype
+        except AttributeError:
+            datatype = type(Data)
+        if datatype == np.float64:
+            Data = np.float32(Data)
+        try:
+            data_bytes = Data.tobytes()
+        except AttributeError:
+            data_bytes = np.asarray(Data).tobytes()
+        # collect header
+        header = np.array(len(data_bytes), dtype=vtu_to_numpy_type['UInt32'])
+        fid.write(b64encode(header.tobytes() + data_bytes).decode())
+        fid.write('\n')
+        #cell_type.tofile(fid)
+    elif fmtout == 'ascii':
+        np.savetxt(fid, Data, fmt='%g')
+
+
+def cleanOutliers(Val, fmtout):
+    #paraview does not like NaN in ascii files, replacing
+    if np.isnan(Val):
+        if fmtout == 'ascii':
+            CleanVal = -9999.999
+
+    #also checking for very small value that mess up
+    elif (abs(Val) < 1.0e-20):
+        CleanVal = 0.0
+    else:
+        CleanVal = Val
+    return CleanVal
+
+
+class BadDimension(Exception):
+    """The required dimension is not supported yet."""
+
+
+class BadOption(Exception):
+    """The given option does not exist."""
+
+
+class ClipError(Exception):
+    """Error while trying to clip the domain."""
Index: /issm/trunk/src/m/plot/export_gl.py
===================================================================
--- /issm/trunk/src/m/plot/export_gl.py	(revision 28012)
+++ /issm/trunk/src/m/plot/export_gl.py	(revision 28013)
@@ -65,5 +65,5 @@
     model.contourz2 = list(map(lambda r, lat: r * math.sin(math.radians(lat)), R2, contour_lat2))
 
-    #}}}
+    # }}}
     #Deal with mesh and results {{{
     print('getting mesh')
@@ -123,3 +123,3 @@
     print('writing to file')
     writejsfile(directory + databasename + '.js', model, databasename)
-    #}}}
+    # }}}
Index: /issm/trunk/src/m/plot/glstress.m
===================================================================
--- /issm/trunk/src/m/plot/glstress.m	(revision 28013)
+++ /issm/trunk/src/m/plot/glstress.m	(revision 28013)
@@ -0,0 +1,71 @@
+
+%Find Elements that are crossed by the GL
+index = md.mesh.elements;
+pos_gle = find(min(md.mask.ocean_levelset(index),[],2)<0 & max(md.mask.ocean_levelset(index),[],2)>0);
+
+%Recover stresses
+md=mechanicalproperties(md, md.results.StressbalanceSolution.Vx, md.results.StressbalanceSolution.Vy);
+
+%Hilmar's horrible cmap
+cmap = jet(80);
+cmap(60-5:60+5,:) = 0;
+
+%Allocate thetaN
+thetaN  = zeros(size(pos_gle));
+thetaNx = zeros(size(pos_gle));
+thetaNy = zeros(size(pos_gle));
+
+count = 1;
+for el=pos_gle'
+	%Find segment that has 2 grounded nodes in this element
+	pos = find(md.mask.ocean_levelset(index(el,:))>0);
+
+	%Skip element if it has less than 2 grounded nodes
+	if numel(pos)~=2; continue; end
+
+	%Find edge that is grounded
+	x1 = md.mesh.x(index(el,pos(1)));
+	y1 = md.mesh.y(index(el,pos(1)));
+	x2 = md.mesh.x(index(el,pos(2)));
+	y2 = md.mesh.y(index(el,pos(2)));
+	hold on; plot([x1 x2],[y1 y2],'-c');
+
+	%Find the normal
+	if pos(1)==1 && pos(2)==3
+		nx = (y2-y1);
+		ny = -(x2-x1);
+	else
+		nx = -(y2-y1);
+		ny = +(x2-x1);
+	end
+	hold on; plot(mean([x1 x2])+[0 nx],mean([y1 y2])+[0 ny],'-g');
+	n=[nx;ny]/sqrt(nx^2+ny^2);
+
+	%Build sigma_nn
+	tau_xx = md.results.deviatoricstress.xx(el);
+	tau_yy = md.results.deviatoricstress.yy(el);
+	tau_xy = md.results.deviatoricstress.xy(el);
+	R = [2*tau_xx+tau_yy   tau_xy;tau_xy   2*tau_yy+tau_xx];
+	N = n'*R*n;
+
+	%Water stress only
+	H = 0.5*(md.geometry.thickness(index(el,pos(1))) + md.geometry.thickness(index(el,pos(2))));
+	g = md.constants.g;
+	rho_i = md.materials.rho_ice;
+	rho_w = md.materials.rho_water;
+	N0 = 0.5*g*rho_i.*(1-rho_i./rho_w).*H;
+
+	%Plot thetaN
+	thetaN(count)  = N/N0;
+	thetaNx(count) = mean([x1 x2]);
+	thetaNy(count) = mean([y1 y2]);
+	count = count+1;
+end
+
+%Cleanup unused values
+thetaN(count:end) = [];
+thetaNx(count:end) = [];
+thetaNy(count:end) = [];
+
+disp('DONE');
+plot_scatter(thetaNx,thetaNy,thetaN,'caxis',[-.5 1.5],'colormap',cmap,'MarkerSize',10);
Index: /issm/trunk/src/m/plot/plot_basaldrag.m
===================================================================
--- /issm/trunk/src/m/plot/plot_basaldrag.m	(revision 28012)
+++ /issm/trunk/src/m/plot/plot_basaldrag.m	(revision 28013)
@@ -16,15 +16,12 @@
 end
 
-%compute exponents
-s=averaging(md,1./md.friction.p,0);
-r=averaging(md,md.friction.q./md.friction.p,0);
-
-ub_mag=sqrt(md.initialization.vx.^2+md.initialization.vy.^2)/md.constants.yts;
-drag_mag=(max(md.constants.g*(md.materials.rho_ice*md.geometry.thickness+md.materials.rho_water*md.geometry.base),0)).^r.*(md.friction.coefficient).^2.*ub_mag.^s/1000;
+tau_b    = basalstress(md);
+drag_mag = tau_b/1000;
+ub_mag   = sqrt(md.initialization.vx.^2+md.initialization.vy.^2)/md.constants.yts;
 sig=1;
 
 %compute horizontal velocity 
 if strcmpi(type,'basal_drag')
-	ub=sqrt(md.initialization.vx.^2+md.initialization.vy.^2)/md.constants.yts;
+	ub = ub_mag;
 	title_str='Basal drag [kPa]';
 elseif strcmpi(type,'basal_dragx')
Index: /issm/trunk/src/m/plot/plot_coastlines.py
===================================================================
--- /issm/trunk/src/m/plot/plot_coastlines.py	(revision 28012)
+++ /issm/trunk/src/m/plot/plot_coastlines.py	(revision 28013)
@@ -6,5 +6,5 @@
 
 
-def plot_coastlines(mesh, *args): #{{{
+def plot_coastlines(mesh, *args):  # {{{
     # Define coastline #{{{
     coast = np.array([
@@ -9889,5 +9889,5 @@
         coast[:, 1] - 360
     ])
-    #}}}
+    # }}}
 
     if len(args) == 1:
@@ -9920,3 +9920,3 @@
             else:
                 xlim(options.getfieldvalue('xlim', [-180, 180]))
-#}}}
+# }}}
Index: /issm/trunk/src/m/plot/plot_gridded.m
===================================================================
--- /issm/trunk/src/m/plot/plot_gridded.m	(revision 28012)
+++ /issm/trunk/src/m/plot/plot_gridded.m	(revision 28013)
@@ -14,5 +14,8 @@
 if islevelset
 	levelset = getfieldvalue(options,'levelset');
-	[levelset datatype]=processdata(md,levelset,options);
+	options2 = copy(options);
+	options2.removefield('caxis',false);
+	options2.removefield('log',false);
+	[levelset datatype]=processdata(md,levelset,options2);
 end
 
@@ -25,4 +28,14 @@
 xlim=getfieldvalue(options,'xlim',[min(x) max(x)]);
 ylim=getfieldvalue(options,'ylim',[min(y) max(y)]);
+
+isAxis = exist(options, 'axis');
+if isAxis
+	myaxis = getfieldvalue(options,'axis');
+	if ~ischar(myaxis)
+		xlim = [myaxis(1), myaxis(2)];
+		ylim = [myaxis(3), myaxis(4)];
+	end
+end
+
 postx=getfieldvalue(options,'posting',diff(xlim)/1000);
 posty=getfieldvalue(options,'posting',diff(ylim)/1000);
Index: /issm/trunk/src/m/plot/plot_highlightelements.m
===================================================================
--- /issm/trunk/src/m/plot/plot_highlightelements.m	(revision 28012)
+++ /issm/trunk/src/m/plot/plot_highlightelements.m	(revision 28013)
@@ -13,4 +13,5 @@
 [x y z elements is2d isplanet]=processmesh(md,[],options);
 [elementnumbers datatype]=processdata(md,[1:md.mesh.numberofelements]',options);
+edgecolor = getfieldvalue(options,'EdgeColor','black');
 
 %plot
@@ -19,39 +20,39 @@
 	%plot mesh 
 	A=elements(:,1); B=elements(:,2); C=elements(:,3);
-	patch( 'Faces', [A B C], 'Vertices', [x y z],'FaceVertexCData', [1 1 1],'FaceColor','none','EdgeColor','black');
+	patch( 'Faces', [A B C], 'Vertices', [x y z],'FaceVertexCData', [1 1 1],'FaceColor','none','EdgeColor',edgecolor);
 
 	%Highlight
 	pos=getfieldvalue(options,'highlight',[]);
 	A=elements(pos,1); B=elements(pos,2); C=elements(pos,3);
-	patch( 'Faces', [A B C], 'Vertices', [x y z],'FaceVertexCData', [0.9 0.5 0.5],'FaceColor','flat','EdgeColor','black');
+	patch( 'Faces', [A B C], 'Vertices', [x y z],'FaceVertexCData', [0.9 0.5 0.5],'FaceColor','flat','EdgeColor',edgecolor);
 else
 	if size(elements,2)==6, %prisms
 		%plot mesh 
 		A=elements(:,1); B=elements(:,2); C=elements(:,3); D=elements(:,4); E=elements(:,5); F=elements(:,6);
-		patch( 'Faces', [A B C],  'Vertices', [x y z],'FaceVertexCData', [1 1 1],'FaceColor','none','EdgeColor','black');
-		patch( 'Faces', [D E F],  'Vertices', [x y z],'FaceVertexCData', [1 1 1],'FaceColor','none','EdgeColor','black');
-		patch( 'Faces', [A B E D],'Vertices', [x y z],'FaceVertexCData', [1 1 1],'FaceColor','none','EdgeColor','black');
-		patch( 'Faces', [B E F C],'Vertices', [x y z],'FaceVertexCData', [1 1 1],'FaceColor','none','EdgeColor','black');
-		patch( 'Faces', [C A D F],'Vertices', [x y z],'FaceVertexCData', [1 1 1],'FaceColor','none','EdgeColor','black');
+		patch( 'Faces', [A B C],  'Vertices', [x y z],'FaceVertexCData', [1 1 1],'FaceColor','none','EdgeColor',edgecolor);
+		patch( 'Faces', [D E F],  'Vertices', [x y z],'FaceVertexCData', [1 1 1],'FaceColor','none','EdgeColor',edgecolor);
+		patch( 'Faces', [A B E D],'Vertices', [x y z],'FaceVertexCData', [1 1 1],'FaceColor','none','EdgeColor',edgecolor);
+		patch( 'Faces', [B E F C],'Vertices', [x y z],'FaceVertexCData', [1 1 1],'FaceColor','none','EdgeColor',edgecolor);
+		patch( 'Faces', [C A D F],'Vertices', [x y z],'FaceVertexCData', [1 1 1],'FaceColor','none','EdgeColor',edgecolor);
 
 		%Highlight
 		A=elements(pos,1); B=elements(pos,2); C=elements(pos,3); D=elements(pos,4); E=elements(pos,5); F=elements(pos,6);
-		patch( 'Faces', [A B C],  'Vertices', [x y z],'FaceVertexCData', [0.9 0.5 0.5],'FaceColor','flat','EdgeColor','black');
-		patch( 'Faces', [D E F],  'Vertices', [x y z],'FaceVertexCData', [0.9 0.5 0.5],'FaceColor','flat','EdgeColor','black');
-		patch( 'Faces', [A B E D],'Vertices', [x y z],'FaceVertexCData', [0.9 0.5 0.5],'FaceColor','flat','EdgeColor','black');
-		patch( 'Faces', [B E F C],'Vertices', [x y z],'FaceVertexCData', [0.9 0.5 0.5],'FaceColor','flat','EdgeColor','black');
-		patch( 'Faces', [C A D F],'Vertices', [x y z],'FaceVertexCData', [0.9 0.5 0.5],'FaceColor','flat','EdgeColor','black');
+		patch( 'Faces', [A B C],  'Vertices', [x y z],'FaceVertexCData', [0.9 0.5 0.5],'FaceColor','flat','EdgeColor',edgecolor);
+		patch( 'Faces', [D E F],  'Vertices', [x y z],'FaceVertexCData', [0.9 0.5 0.5],'FaceColor','flat','EdgeColor',edgecolor);
+		patch( 'Faces', [A B E D],'Vertices', [x y z],'FaceVertexCData', [0.9 0.5 0.5],'FaceColor','flat','EdgeColor',edgecolor);
+		patch( 'Faces', [B E F C],'Vertices', [x y z],'FaceVertexCData', [0.9 0.5 0.5],'FaceColor','flat','EdgeColor',edgecolor);
+		patch( 'Faces', [C A D F],'Vertices', [x y z],'FaceVertexCData', [0.9 0.5 0.5],'FaceColor','flat','EdgeColor',edgecolor);
 	elseif size(elements,2)==4, %tetras
 		A=elements(:,1); B=elements(:,2); C=elements(:,3); D=elements(:,4);
-		patch( 'Faces',[A B C],'Vertices', [x y z],'FaceVertexCData',zeros(size(x)),'FaceColor','none','EdgeColor','black');
-		patch( 'Faces',[A B D],'Vertices', [x y z],'FaceVertexCData',zeros(size(x)),'FaceColor','none','EdgeColor','black');
-		patch( 'Faces',[B C D],'Vertices', [x y z],'FaceVertexCData',zeros(size(x)),'FaceColor','none','EdgeColor','black');
-		patch( 'Faces',[C A D],'Vertices', [x y z],'FaceVertexCData',zeros(size(x)),'FaceColor','none','EdgeColor','black');
+		patch( 'Faces',[A B C],'Vertices', [x y z],'FaceVertexCData',zeros(size(x)),'FaceColor','none','EdgeColor',edgecolor);
+		patch( 'Faces',[A B D],'Vertices', [x y z],'FaceVertexCData',zeros(size(x)),'FaceColor','none','EdgeColor',edgecolor);
+		patch( 'Faces',[B C D],'Vertices', [x y z],'FaceVertexCData',zeros(size(x)),'FaceColor','none','EdgeColor',edgecolor);
+		patch( 'Faces',[C A D],'Vertices', [x y z],'FaceVertexCData',zeros(size(x)),'FaceColor','none','EdgeColor',edgecolor);
 		%Highlight
 		A=elements(pos,1); B=elements(pos,2); C=elements(pos,3); D=elements(pos,4);
-		patch( 'Faces',[A B C],'Vertices', [x y z],'FaceVertexCData',[0.9 0.5 0.5],'FaceColor','flat','EdgeColor','black');
-		patch( 'Faces',[A B D],'Vertices', [x y z],'FaceVertexCData',[0.9 0.5 0.5],'FaceColor','flat','EdgeColor','black');
-		patch( 'Faces',[B C D],'Vertices', [x y z],'FaceVertexCData',[0.9 0.5 0.5],'FaceColor','flat','EdgeColor','black');
-		patch( 'Faces',[C A D],'Vertices', [x y z],'FaceVertexCData',[0.9 0.5 0.5],'FaceColor','flat','EdgeColor','black');
+		patch( 'Faces',[A B C],'Vertices', [x y z],'FaceVertexCData',[0.9 0.5 0.5],'FaceColor','flat','EdgeColor',edgecolor);
+		patch( 'Faces',[A B D],'Vertices', [x y z],'FaceVertexCData',[0.9 0.5 0.5],'FaceColor','flat','EdgeColor',edgecolor);
+		patch( 'Faces',[B C D],'Vertices', [x y z],'FaceVertexCData',[0.9 0.5 0.5],'FaceColor','flat','EdgeColor',edgecolor);
+		patch( 'Faces',[C A D],'Vertices', [x y z],'FaceVertexCData',[0.9 0.5 0.5],'FaceColor','flat','EdgeColor',edgecolor);
 	else
 		error('Not supported');
Index: /issm/trunk/src/m/plot/plot_manager.m
===================================================================
--- /issm/trunk/src/m/plot/plot_manager.m	(revision 28012)
+++ /issm/trunk/src/m/plot/plot_manager.m	(revision 28013)
@@ -188,5 +188,4 @@
 if exist(options,'asymsubplot')
 	id=getfieldvalue(options,'asymsubplot',i);
-	subplot(nlines,ncols,id);
 	subplotmodel(nlines,ncols,id,options);
 else
Index: /issm/trunk/src/m/plot/plot_overlay.m
===================================================================
--- /issm/trunk/src/m/plot/plot_overlay.m	(revision 28012)
+++ /issm/trunk/src/m/plot/plot_overlay.m	(revision 28013)
@@ -42,4 +42,9 @@
 	xlim=getfieldvalue(options,'xlim',[min(x) max(x)])/getfieldvalue(options,'unit',1);
 	ylim=getfieldvalue(options,'ylim',[min(y) max(y)])/getfieldvalue(options,'unit',1);
+	if exist(options, 'axis');
+		myaxis = getfieldvalue(options,'axis');
+		xlim = [myaxis(1), myaxis(2)];
+		ylim = [myaxis(3), myaxis(4)];
+	end
 	options=addfielddefault(options,'xlim',xlim);
 	options=addfielddefault(options,'ylim',ylim);
@@ -48,17 +53,20 @@
 contrast = getfieldvalue(options,'contrast',1);  
 radar    = md.radaroverlay.pwr;
-if size(radar,3)>1,
-	disp('WARNING: color image converted to greyscale intensity image');
-	if strcmp(class(radar),'uint8'),
-		radar=double(sum(radar,3))/(255*3);
-	else
-		radar=sum(radar,3)/3;
+
+if ~radaronly
+	if size(radar,3)>1,
+		disp('WARNING: color image converted to greyscale intensity image');
+		if strcmp(class(radar),'uint8'),
+			radar=double(sum(radar,3))/(255*3);
+		else
+			radar=sum(radar,3)/3;
+		end
 	end
+	if getfieldvalue(options,'backgroundbtw',0)
+		radar(find(radar==0))=1; %Change background from black to white
+	end
+	radar = radar.^(contrast);
+	radar = radar./max(radar(:));
 end
-if getfieldvalue(options,'backgroundbtw',0)
-	radar(find(radar==0))=1; %Change background from black to white
-end
-radar = radar.^(contrast);
-radar = radar./max(radar(:));
 
 if getfieldvalue(options,'backgroundbtw',0)
@@ -101,5 +109,5 @@
 
 %Special colormaps that require hsv treatment
-colorm=getfieldvalue(options,'colormap','Rignot');
+colorm=getfieldvalue(options,'colormap','parula');
 if strcmpi(colorm,'Rignot') | strcmpi(colorm,'Seroussi') | strcmpi(colorm,'redblue')
 	if strcmpi(colorm,'Rignot'),
@@ -133,18 +141,22 @@
 	image_rgb=hsv2rgb(image_hsv);
 else
-	colorm = getcolormap(options);
-	len    = size(colorm,1);
+	if radaronly
+		image_rgb = radar;
+	else
+		colorm = getcolormap(options);
+		len    = size(colorm,1);
 
-	ind = ceil((len-1)*(data_grid-data_min)/(data_max - data_min + eps) +1);
-	ind(find(ind>len))=len;
-	image_rgb=zeros(size(data_grid,1),size(data_grid,2),3);
-	r=colorm(:,1); image_rgb(:,:,1)=r(ind); clear r;
-	g=colorm(:,2); image_rgb(:,:,2)=g(ind); clear g;
-	b=colorm(:,3); image_rgb(:,:,3)=b(ind); clear b;
+		ind = ceil((len-1)*(data_grid-data_min)/(data_max - data_min + eps) +1);
+		ind(find(ind>len))=len;
+		image_rgb=zeros(size(data_grid,1),size(data_grid,2),3);
+		r=colorm(:,1); image_rgb(:,:,1)=r(ind); clear r;
+		g=colorm(:,2); image_rgb(:,:,2)=g(ind); clear g;
+		b=colorm(:,3); image_rgb(:,:,3)=b(ind); clear b;
 
-	%Now add radarmap
-	r = image_rgb(:,:,1).*radar;  r(data_nan) = radar(data_nan);  image_rgb(:,:,1) = r;  clear r;
-	g = image_rgb(:,:,2).*radar;  g(data_nan) = radar(data_nan);  image_rgb(:,:,2) = g;  clear g;
-	b = image_rgb(:,:,3).*radar;  b(data_nan) = radar(data_nan);  image_rgb(:,:,3) = b;  clear b;
+		%Now add radarmap
+		r = image_rgb(:,:,1).*radar;  r(data_nan) = radar(data_nan);  image_rgb(:,:,1) = r;  clear r;
+		g = image_rgb(:,:,2).*radar;  g(data_nan) = radar(data_nan);  image_rgb(:,:,2) = g;  clear g;
+		b = image_rgb(:,:,3).*radar;  b(data_nan) = radar(data_nan);  image_rgb(:,:,3) = b;  clear b;
+	end
 end
 
@@ -172,3 +184,3 @@
 options=addfielddefault(options,'axis','equal off'); % default axis
 applyoptions(md,data,options);
-drawnow
+%drawnow
Index: /issm/trunk/src/m/plot/plot_scatter.m
===================================================================
--- /issm/trunk/src/m/plot/plot_scatter.m	(revision 28012)
+++ /issm/trunk/src/m/plot/plot_scatter.m	(revision 28013)
@@ -62,5 +62,5 @@
 		ylim = h.Limits;
 	end
-	palette=colormap();
+	palette=jet();%colormap();
 	numcolors=size(palette,1);
 	levels=round_ice(linspace(ylim(1),ylim(2),numcolors+1),2);
@@ -107,4 +107,7 @@
 end
 
+%Stop MATLAB's default interactivity
+disableDefaultInteractivity(gca);
+
 if ~alreadyplot,
 	% format the colorbar
Index: /issm/trunk/src/m/plot/plot_transient_movie.m
===================================================================
--- /issm/trunk/src/m/plot/plot_transient_movie.m	(revision 28012)
+++ /issm/trunk/src/m/plot/plot_transient_movie.m	(revision 28013)
@@ -74,5 +74,8 @@
 		range = [Inf -Inf];
 		for i=steps
-			[data datatype]=processdata(md,results(i).(field),options);
+			if isfield(results(i), 'MeshElements')
+				options=changefieldvalue(options,'amr', i);
+			end
+			[data datatype]=processdata(md,results(i).(field), options);
 			range(1) = min(range(1),min(data));
 			range(2) = max(range(2),max(data));
@@ -80,5 +83,4 @@
 		options=addfielddefault(options,'caxis',range);
 	end
-
 
 	%Process mesh once for all
@@ -91,4 +93,10 @@
 
 		if ~isempty(results(i).(field)),
+			%Process mesh if necessary
+			if isfield(results(i), 'MeshElements')
+				options=changefieldvalue(options,'amr', i);
+				[x y z elements is2d isplanet]=processmesh(md,results(i).(field),options);
+			end
+
 			%process data
 			[data datatype]=processdata(md,results(i).(field),options);
@@ -114,5 +122,9 @@
 			if exist(options,'icefront')
 				if dimension(md.mesh)==2
-					contours=isoline(md, results(i).MaskIceLevelset,'output','matrix');
+					if exist(options, 'amr')
+						contours=isoline(md, results(i).MaskIceLevelset,'output','matrix', 'amr', results(i));
+					else
+						contours=isoline(md, results(i).MaskIceLevelset,'output','matrix');
+					end
 				else
 					ice = project2d(md, results(i).MaskIceLevelset, 1);
Index: /issm/trunk/src/m/plot/processmesh.m
===================================================================
--- /issm/trunk/src/m/plot/processmesh.m	(revision 28012)
+++ /issm/trunk/src/m/plot/processmesh.m	(revision 28013)
@@ -104,4 +104,10 @@
 end
 
+%Quiver plot for elements?
+if size(data,2)>1 && size(data,1)==size(elements,1)
+	x = mean(x(elements),2);
+	y = mean(y(elements),2);
+end
+
 if isa(md,'planet'),
 	isplanet=1;
Index: /issm/trunk/src/m/plot/radarpower.m
===================================================================
--- /issm/trunk/src/m/plot/radarpower.m	(revision 28012)
+++ /issm/trunk/src/m/plot/radarpower.m	(revision 28013)
@@ -100,28 +100,58 @@
 	%}}}
 else %user provided image {{{
+
 	%user provided an image. check we also have overlay_xlim and overlay_ylim  options, to know what range of coordinates the image covers.
-	if (~exist(options,'overlay_xlim') | ~exist(options,'overlay_xlim')| ~exist(options,'overlay_xposting')| ~exist(options,'overlay_yposting')),
-		error('radarpower error message: please provide overlay_xlim, overlay_ylim, overlay_xposting and overlay_yposting options together with overlay_image option');
+	filename = getfieldvalue(options,'overlay_image');
+	[filepath,name,ext] = fileparts(filename);
+	if ~exist(filename)
+		error([filename ' not found']);
 	end
-	overlay_image=getfieldvalue(options,'overlay_image');
-	overlay_xlim=getfieldvalue(options,'overlay_xlim');
-	overlay_ylim=getfieldvalue(options,'overlay_ylim');
-	overlay_xposting=getfieldvalue(options,'overlay_xposting');
-	overlay_yposting=getfieldvalue(options,'overlay_yposting');
 
-	sizex=floor((x1-x0)/overlay_xposting);
-	sizey=floor((y1-y0)/overlay_yposting);
-	topleftx=floor((x0-overlay_xlim(1))/overlay_xposting); % x min
-	toplefty=floor((overlay_ylim(2)-y1)/overlay_yposting); % y max
+	%Is it a geotiff?
+	if strcmp(ext,'.tiff') || strcmp(ext,'.tif')
 
-	%Read and crop file
-	disp('Warning: expecting coordinates in polar stereographic (Std Latitude: 70ºN Meridian: 45º)');
-	im=imread(overlay_image);
-	%adjust contrast and brightness
-	%im=imadjust(im,[a b],[c d]);
-	im=im(toplefty:toplefty+sizey,topleftx:topleftx+sizex);
-	md.radaroverlay.pwr=double(flipud(im));
-	md.radaroverlay.x=(x0:(x1-x0)/(size(md.radaroverlay.pwr,2)-1):x1);
-	md.radaroverlay.y=(y0:(y1-y0)/(size(md.radaroverlay.pwr,1)-1):y1);
+		%Crop image from xylim
+		tempfilename='./temp.tif';
+		eval(['!gdal_translate -quiet -projwin ' num2str(x0) ' ' num2str(y1) ' ' num2str(x1) ' ' num2str(y0) ' ' filename ' ' tempfilename]);
+
+		%Read in temp.tif:
+		im=imread('temp.tif','TIFF');
+		%adjust contrast and brightness
+		%im=imadjust(im,[a b],[c d]);
+		pixelskip=max(1,ceil(posting/((x1-x0)/(size(im,2)))));
+		%md.radaroverlay.pwr=double(flipud(im(1:pixelskip:end,1:pixelskip:end,:)));
+		md.radaroverlay.pwr=double(im(1:pixelskip:end,1:pixelskip:end,:))/255;
+		md.radaroverlay.x=x0:(x1-x0)/(size(md.radaroverlay.pwr,2)-1):x1;
+		md.radaroverlay.y=y1:-(y1-y0)/(size(md.radaroverlay.pwr,1)-1):y0;
+
+		%Erase image or keep it?
+		if ~getfieldvalue(options,'keep_image',0),
+			delete(tempfilename);
+		end
+	else
+		if (~exist(options,'overlay_xlim') | ~exist(options,'overlay_xlim')| ~exist(options,'overlay_xposting')| ~exist(options,'overlay_yposting')),
+			error('radarpower error message: please provide overlay_xlim, overlay_ylim, overlay_xposting and overlay_yposting options together with overlay_image option');
+		end
+		overlay_xlim=getfieldvalue(options,'overlay_xlim');
+		overlay_ylim=getfieldvalue(options,'overlay_ylim');
+		overlay_xposting=getfieldvalue(options,'overlay_xposting');
+		overlay_yposting=getfieldvalue(options,'overlay_yposting');
+		overlay_image=getfieldvalue(options,'overlay_image');
+
+		sizex=floor((x1-x0)/overlay_xposting);
+		sizey=floor((y1-y0)/overlay_yposting);
+		topleftx=floor((x0-overlay_xlim(1))/overlay_xposting); % x min
+		toplefty=floor((overlay_ylim(2)-y1)/overlay_yposting); % y max
+
+		%Read and crop file
+		disp('Warning: expecting coordinates in polar stereographic (Std Latitude: 70ºN Meridian: 45º)');
+		im=imread(overlay_image);
+		%adjust contrast and brightness
+		%im=imadjust(im,[a b],[c d]);
+		im=im(toplefty:toplefty+sizey,topleftx:topleftx+sizex);
+		md.radaroverlay.pwr=double(flipud(im));
+		md.radaroverlay.x=(x0:(x1-x0)/(size(md.radaroverlay.pwr,2)-1):x1);
+		md.radaroverlay.y=(y0:(y1-y0)/(size(md.radaroverlay.pwr,1)-1):y1);
+	end
 end %}}}
 
Index: /issm/trunk/src/m/qmu/helpers.py
===================================================================
--- /issm/trunk/src/m/qmu/helpers.py	(revision 28012)
+++ /issm/trunk/src/m/qmu/helpers.py	(revision 28013)
@@ -6,12 +6,12 @@
 
 class struct(object):
-    """STRUCT class definition - An empty struct that can be assigned arbitrary 
+    """struct class definition - An empty struct that can be assigned arbitrary 
     attributes
     """
-    def __init__(self): #{{{
+    def __init__(self):  # {{{
         pass
-    #}}}
-
-    def __repr__(self): #{{{
+    # }}}
+
+    def __repr__(self):  # {{{
         s = ''
         for key, value in self.__dict__.items():
@@ -28,14 +28,14 @@
             s += '\n'
         return s
-    #}}}
-
-    def __len__(self): #{{{
+    # }}}
+
+    def __len__(self):  # {{{
         return len(self.__dict__.keys())
-    #}}}
+    # }}}
 
 
 class Lstruct(list):
-    """An empty struct that can be assigned arbitrary attributes but can also be
-    accesed as a list. Eg. x.y = 'hello', x[:] = ['w', 'o', 'r', 'l', 'd']
+    """An empty struct that can be assigned arbitrary attributes but can also 
+    be accessed as a list. Eg. x.y = 'hello', x[:] = ['w', 'o', 'r', 'l', 'd']
 
     Note that 'x' returns the array and x.__dict__ will only return attributes
@@ -48,5 +48,4 @@
 
     Examples:
-
         x = Lstruct(1, 2, 3, 4) -> [1, 2, 3, 4]
         x.a = 'hello'
@@ -88,6 +87,5 @@
 
 class OrderedStruct(object):
-    """
-    A form of dictionary-like structure that maintains the ordering in which
+    """A form of dictionary-like structure that maintains the ordering in which 
     its fields/attributes and their corresponding values were added.
 
@@ -97,11 +95,11 @@
 
     Example:
-        OrderedDict:  # a bit clumsy to use and look at
+        OrderedDict: # A bit clumsy to use and look at
             x['y'] = 5
 
-        OrderedStruct:  # nicer to look at, and works the same way
+        OrderedStruct: # Nicer to look at, and works the same way
             x.y = 5
             OR
-            x['y'] = 5  # supports OrderedDict-style usage
+            x['y'] = 5 # Supports OrderedDict-style usage
 
     Supports: len(x), str(x), for-loop iteration.
@@ -115,23 +113,23 @@
         x = OrderedStruct('y', 5, 'z', 6)
 
-    note below that the output fields as iterables are always in the same
+    Note below that the output fields as iterables are always in the same
     order as the inputs
 
-    x.keys() -> ['y', 'z']
-    x.values() -> [5, 6]
-    x.items() -> [('y', 6), ('z', 6)]
-    x.__dict__ -> [('y', 6), ('z', 6)]
-    vars(x) -> [('y', 6), ('z', 6)]
-
-    x.y -> 5
-    x['y'] -> 5
-    x.z -> 6
-    x['z'] -> 6
-
-    for i in x:  # same as x.items()
-        print i
-     ->
-    ('x', 5)
-    ('y', 6)
+        x.keys() -> ['y', 'z']
+        x.values() -> [5, 6]
+        x.items() -> [('y', 6), ('z', 6)]
+        x.__dict__ -> [('y', 6), ('z', 6)]
+        vars(x) -> [('y', 6), ('z', 6)]
+
+        x.y -> 5
+        x['y'] -> 5
+        x.z -> 6
+        x['z'] -> 6
+
+        for i in x:  # same as x.items()
+            print i
+        ->
+        ('x', 5)
+        ('y', 6)
 
     Note: to access internal fields use dir(x) (input fields will be included,
@@ -178,9 +176,9 @@
             return _v[pos]
         except ValueError:
-            # not in keys, not a valid attribute, raise error
+            # Not in keys, not a valid attribute, raise error
             raise AttributeError('Attribute "' + str(attr) + '" does not exist.')
 
     def __getattribute__(self, attr):
-        # re-route calls to vars(x) and x.__dict__
+        # Re-route calls to vars(x) and x.__dict__
         if attr == '__dict__':
             return OrderedDict(list(self.items()))
@@ -211,8 +209,7 @@
 
     def __copy__(self):
-        """
-        shallow copy, hard copies of trivial attributes,
-        references to structures like lists/OrderedDicts
-        unless redefined as an entirely different structure
+        """shallow copy, hard copies of trivial attributes, references to 
+        structures like lists/OrderedDicts unless redefined as an entirely 
+        different structure
         """
         newInstance = type(self)()
@@ -222,6 +219,5 @@
 
     def __deepcopy__(self, memo=None):
-        """
-        hard copy of all attributes
+        """hard copy of all attributes
         same thing but call deepcopy recursively
         technically not how it should be done,
@@ -256,15 +252,19 @@
 
 def isempty(x):
-    """
-    returns true if object is +/-infinity, NaN, None, '', has length 0, or is
-    an array/matrix composed only of such components (includes mixtures of
+    """Returns true if object is +/-infinity, NaN, None, '', has length 0, or 
+    is an array/matrix composed only of such components (includes mixtures of
     "empty" types)
     """
 
-    if type(x) in [list, np.ndarray, tuple]:
+    if type(x) is list:
+        if len(x) == 0:
+            return True
+
+    if type(x) in [np.ndarray, tuple]:
         if np.size(x) == 0:
             return True
 
-    # if anything in that array/matrix is not empty, the whole thing is not empty
+    if type(x) in [list, np.ndarray, tuple]:
+        # If anything in the array/matrix is not empty, the whole thing is not empty
         try:
             x = np.concatenate(x)
@@ -274,5 +274,5 @@
             if not isempty(i):
                 return False
-    # the array isn't empty but is full of "empty" type objects, so return True
+        # The array isn't empty but is full of "empty" type objects, so return True
         return True
 
@@ -282,5 +282,5 @@
         return True
 
-    # type may not be understood by numpy, in which case it definitely is NOT NaN or infinity
+    # Type may not be understood by NumPy, in which case it definitely is NOT NaN or infinity
     try:
         if np.isnan(x) or np.isinf(x):
@@ -289,11 +289,11 @@
         pass
 
-    # if all of that fails, then it is not empty
+    # If all of the above fails, then it is not empty
     return False
 
 
 def fieldnames(x, ignore_internals=True):
-    """
-    returns a list of fields of x
+    """Returns a list of fields of x
+
     ignore_internals ignores all fieldnames starting with '_' and is True by
     default
@@ -308,6 +308,6 @@
 
 def isfield(x, y, ignore_internals=True):
-    """
-    is y is a field of x?
+    """Returns True if y is a field of x
+
     ignore_internals ignores all fieldnames starting with '_' and is True by
     default
@@ -317,17 +317,15 @@
 
 def fileparts(x):
-    """
-    given:   "path/path/.../file_name.ext"
-    returns: [path, file_name, ext] (list of strings)
+    """given:   "path/path/.../file_name.ext", returns: [path, file_name, ext] (list of strings)
     """
     try:
-        a = x[:x.rindex('/')]  #path
-        b = x[x.rindex('/') + 1:]  #full filename
-    except ValueError:  #no path provided
+        a = x[:x.rindex('/')] # Path
+        b = x[x.rindex('/') + 1:] # Full filename
+    except ValueError: # No path provided
         a = ''
         b = x
     try:
-        c, d = b.split('.')  #file name, extension
-    except ValueError:  #no extension provided
+        c, d = b.split('.') # File name, extension
+    except ValueError: # No extension provided
         return [a, b, '']
     return [a, c, '.' + d]
@@ -349,5 +347,5 @@
     result = str(args[0])
     for i in range(len(args[1:])):
-        # if last argument wasn't empty, add a '/' between it and the next argument
+        # If last argument wasn't empty, add a '/' between it and the next argument
         if len(args[i]) != 0:
             result += '/' + str(args[i + 1])
@@ -358,10 +356,8 @@
 
 def findline(fidi, s):
-    """
-    returns full first line containing s (as a string), or None
-
-    Note: will include any newlines or tabs that occur in that line,
-    use str(findline(f, s)).strip() to remove these, str() in case result is
-    None
+    """returns full first line containing s (as a string), or None
+
+    Note: will include any newlines or tabs that occur in that line, use 
+    str(findline(f, s)).strip() to remove these, str() in case result is None
     """
     for line in fidi:
@@ -372,7 +368,6 @@
 
 def empty_nd_list(shape, filler=0., as_numpy_ndarray=False):
-    """
-    returns a python list of the size/shape given (shape must be int or tuple)
-    the list will be filled with the optional second argument
+    """Returns a python list of the size/shape given (shape must be int or 
+    tuple) the list will be filled with the optional second argument
 
     filler is 0.0 by default
Index: /issm/trunk/src/m/qmu/preqmu.py
===================================================================
--- /issm/trunk/src/m/qmu/preqmu.py	(revision 28012)
+++ /issm/trunk/src/m/qmu/preqmu.py	(revision 28013)
@@ -81,5 +81,5 @@
                     raise RuntimeError('preqmu error message: one of the expanded responses has more values than the number of partitions')
         numresponses = numresponses + np.size(vars(responses)[field_name])
-    #}}}
+    # }}}
 
     # Create in file for Dakota
@@ -114,5 +114,5 @@
         else:
             responsedescriptors.append(fieldresponses.descriptor)
-    #}}}
+    # }}}
 
     # Build a list of variable partitions
Index: /issm/trunk/src/m/shp/shp2exp.m
===================================================================
--- /issm/trunk/src/m/shp/shp2exp.m	(revision 28012)
+++ /issm/trunk/src/m/shp/shp2exp.m	(revision 28013)
@@ -62,4 +62,8 @@
 		pos = find(~isnan(x) & ~isnan(y));
 		idx=find(diff(pos)~=1);
+		if numel(idx)==0
+			disp(['Skipping Line ' num2str(i)]);
+			continue;
+		end
 		A=[idx(1);diff(idx);numel(pos)-idx(end)];
 		Cx=mat2cell(x(pos),A,1);
Index: /issm/trunk/src/m/shp/shpread.py
===================================================================
--- /issm/trunk/src/m/shp/shpread.py	(revision 28012)
+++ /issm/trunk/src/m/shp/shpread.py	(revision 28013)
@@ -10,5 +10,5 @@
 
 
-def shpread(filename, *args): #{{{
+def shpread(filename, *args):  # {{{
     """SHPREAD - read a shapefile and build a list of shapes
 
@@ -140,3 +140,3 @@
 
     return Structs
-#}}}
+# }}}
Index: /issm/trunk/src/m/shp/shpwrite.py
===================================================================
--- /issm/trunk/src/m/shp/shpwrite.py	(revision 28012)
+++ /issm/trunk/src/m/shp/shpwrite.py	(revision 28013)
@@ -5,5 +5,5 @@
 
 
-def shpwrite(shp, filename): #{{{
+def shpwrite(shp, filename):  # {{{
     '''
     SHPREAD - write a shape file from a contour structure
@@ -59,3 +59,3 @@
         sf.record(str(i))
     sf.close()
-#}}}
+# }}}
Index: /issm/trunk/src/m/solve/loadresultsfromcluster.py
===================================================================
--- /issm/trunk/src/m/solve/loadresultsfromcluster.py	(revision 28012)
+++ /issm/trunk/src/m/solve/loadresultsfromcluster.py	(revision 28013)
@@ -17,5 +17,5 @@
 
 def loadresultsfromcluster(md, *args):  #{{{
-    """LOADRESULTSFROMCLUSTER - load results of solution sequence from cluster
+    """loadresultsfromcluster - load results of solution sequence from cluster
 
     Usage:
Index: /issm/trunk/src/m/solve/marshall.py
===================================================================
--- /issm/trunk/src/m/solve/marshall.py	(revision 28012)
+++ /issm/trunk/src/m/solve/marshall.py	(revision 28013)
@@ -5,5 +5,5 @@
 
 def marshall(md):
-    """MARSHALL - outputs a compatible binary file from @model md, for certain solution type.
+    """marshall - outputs a compatible binary file from @model md, for certain solution type.
 
     The routine creates a compatible binary file from @model md
@@ -15,5 +15,5 @@
 
     if md.verbose.solution:
-        print("marshalling file {}.bin".format(md.miscellaneous.name))
+        print('marshalling file \'{}\'.bin'.format(md.miscellaneous.name))
 
     # Open file for binary writing
@@ -21,5 +21,5 @@
         fid = open(md.miscellaneous.name + '.bin', 'wb')
     except IOError as e:
-        raise IOError("marshall error message: could not open '%s.bin' file for binary writing. Due to: ".format(md.miscellaneous.name), e)
+        print('marshall error message: could not open \'{}.bin\' file for binary writing due to: {}'.format(md.miscellaneous.name, e))
 
     fields = md.properties()
@@ -32,5 +32,5 @@
         # Check that current field is an object
         if not hasattr(getattr(md, field), 'marshall'):
-            raise TypeError("field '{}' is not an object.".format(field))
+            raise TypeError('field \'{}\' is not an object.'.format(field))
 
         # Marshall current object
@@ -46,5 +46,5 @@
 
     except IOError as e:
-        print('marshall error message: could not close file \'{}.bin\' due to:'.format(md.miscellaneous.name), e)
+        print('marshall error message: could not close \'{}.bin\' file for binary writing due to: {}'.format(md.miscellaneous.name, e))
 
     # Uncomment the following to make a copy of the binary input file for 
Index: /issm/trunk/src/m/solve/outbinread.m
===================================================================
--- /issm/trunk/src/m/solve/outbinread.m	(revision 28012)
+++ /issm/trunk/src/m/solve/outbinread.m	(revision 28013)
@@ -133,4 +133,8 @@
 	elseif strcmp(fieldname,'TotalSmb'),
 		field = field/10.^12*yts; %(GigaTon/year)
+	elseif strcmp(fieldname,'TotalMelt'),
+		field = field/10.^12*yts; %(GigaTon/year)
+	elseif strcmp(fieldname,'TotalRefreeze'),
+		field = field/10.^12*yts; %(GigaTon/year)
 	elseif strcmp(fieldname,'SmbMassBalance'),
 		field = field*yts;
Index: /issm/trunk/src/m/solve/parseresultsfromdisk.m
===================================================================
--- /issm/trunk/src/m/solve/parseresultsfromdisk.m	(revision 28012)
+++ /issm/trunk/src/m/solve/parseresultsfromdisk.m	(revision 28013)
@@ -241,4 +241,8 @@
 	elseif strcmp(fieldname,'VyAverage'),
 		field = field*yts;
+	elseif strcmp(fieldname,'VxDebris'),
+		field = field*yts;
+	elseif strcmp(fieldname,'VyDebris'),
+		field = field*yts;
 	elseif strcmp(fieldname,'BasalforcingsGroundediceMeltingRate'),
 		field = field*yts;
@@ -260,4 +264,8 @@
 		field = field/10.^12*yts; %(GigaTon/year)
 	elseif strcmp(fieldname,'TotalSmbScaled'),
+		field = field/10.^12*yts; %(GigaTon/year)
+	elseif strcmp(fieldname,'TotalSmbMelt'),
+		field = field/10.^12*yts; %(GigaTon/year)
+	elseif strcmp(fieldname,'TotalSmbRefreeze'),
 		field = field/10.^12*yts; %(GigaTon/year)
 	elseif strcmp(fieldname,'GroundinglineMassFlux'),
Index: /issm/trunk/src/m/solve/parseresultsfromdisk.py
===================================================================
--- /issm/trunk/src/m/solve/parseresultsfromdisk.py	(revision 28012)
+++ /issm/trunk/src/m/solve/parseresultsfromdisk.py	(revision 28013)
@@ -12,5 +12,5 @@
         #saveres = parseresultsfromdiskioserialsequential(md, filename)
     return saveres
-#}}}
+# }}}
 
 
@@ -192,4 +192,8 @@
         elif fieldname == 'VyAverage':
             field = field * yts
+        elif fieldname == 'VxDebris':
+            field = field * yts
+        elif fieldname == 'VyDebris':
+            field = field * yts
         elif fieldname == 'BasalforcingsGroundediceMeltingRate':
             field = field * yts
@@ -211,4 +215,8 @@
             field = field / pow(10.0, 12) * yts # (GigaTon/year)
         elif fieldname == 'TotalSmbScaled':
+            field = field / pow(10.0, 12) * yts # (GigaTon/year)
+        elif fieldname == 'TotalSmbMelt':
+            field = field / pow(10.0, 12) * yts # (GigaTon/year)
+        elif fieldname == 'TotalSmbRefreeze':
             field = field / pow(10.0, 12) * yts # (GigaTon/year)
         elif fieldname == 'GroundinglineMassFlux':
@@ -345,5 +353,5 @@
 # }}}
 
-def addfieldtorecord(a, descr): #{{{
+def addfieldtorecord(a, descr):  # {{{
     if a.dtype.fields is None:
         raise ValueError('\'a\' must be a structured numpy array')
@@ -353,3 +361,3 @@
 
     return b
-#}}}
+# }}}
Index: /issm/trunk/src/m/solve/solve.py
===================================================================
--- /issm/trunk/src/m/solve/solve.py	(revision 28012)
+++ /issm/trunk/src/m/solve/solve.py	(revision 28013)
@@ -11,5 +11,5 @@
 
 def solve(md, solutionstring, *args):
-    """SOLVE - apply solution sequence for this model
+    """solve - apply solution sequence for this model
 
     Usage:
@@ -98,5 +98,5 @@
     if options.getfieldvalue('checkconsistency', 'yes') == 'yes':
         if md.verbose.solution:
-            print("checking model consistency")
+            print('checking model consistency')
         ismodelselfconsistent(md)
 
@@ -112,5 +112,5 @@
             if options.getfieldvalue('runtimename', True):
                 c = datetime.now()
-                md.private.runtimename = "%s-%02i-%02i-%04i-%02i-%02i-%02i-%i" % (md.miscellaneous.name, c.month, c.day, c.year, c.hour, c.minute, c.second, os.getpid())
+                md.private.runtimename = '%s-%02i-%02i-%04i-%02i-%02i-%02i-%i' % (md.miscellaneous.name, c.month, c.day, c.year, c.hour, c.minute, c.second, os.getpid())
             else:
                 md.private.runtimename = md.miscellaneous.name
Index: /issm/trunk/src/m/solve/solveslm.py
===================================================================
--- /issm/trunk/src/m/solve/solveslm.py	(revision 28012)
+++ /issm/trunk/src/m/solve/solveslm.py	(revision 28013)
@@ -10,8 +10,8 @@
 
 def solveslm(slm, solutionstringi, *args):
-    """SOLVESLM - apply solution sequence for this sealevel model
+    """solveslm - apply solution sequence for this sealevel model
 
     Usage:
-        slm=solveslm(slm,solutionstring,varargin)
+        slm = solveslm(slm,solutionstring,varargin)
         where varargin is a lit of paired arguments of string OR enums
 
Index: /issm/trunk/src/m/solve/waitonlock.m
===================================================================
--- /issm/trunk/src/m/solve/waitonlock.m	(revision 28012)
+++ /issm/trunk/src/m/solve/waitonlock.m	(revision 28013)
@@ -51,7 +51,6 @@
 			command = [command ' -i ' cluster.idfile];
 		end
-		port=0;
 		if isprop(cluster,'port') && cluster.port,
-			command = [command ' -p ' num2str(port) ' localhost'];
+			command = [command ' -p ' num2str(cluster.port) ' localhost'];
 		else,
 			command = [command ' ' cluster.name];
Index: /issm/trunk/src/m/solve/waitonlock.py
===================================================================
--- /issm/trunk/src/m/solve/waitonlock.py	(revision 28012)
+++ /issm/trunk/src/m/solve/waitonlock.py	(revision 28013)
@@ -60,7 +60,6 @@
             if isprop(cluster, 'idfile') and cluster.idfile != '':
                 command += ' -i {}'.format(cluster.idfile)
-            port = 0
             if isprop(cluster, 'port') and cluster.port:
-                command += ' -p {} localhost'.format(port);
+                command += ' -p {} localhost'.format(cluster.port);
             else:
                 command += ' {}'.format(cluster.name)
Index: /issm/trunk/src/m/solvers/bcgslbjacobioptions.m
===================================================================
--- /issm/trunk/src/m/solvers/bcgslbjacobioptions.m	(revision 28012)
+++ /issm/trunk/src/m/solvers/bcgslbjacobioptions.m	(revision 28013)
@@ -8,4 +8,4 @@
 solverOptions.ksp_type=getfieldvalue(options, 'ksp_type','bcgsl');
 solverOptions.pc_type=getfieldvalue(options, 'pc_type',  'bjacobi');
-solverOptions.ksp_max_it=getfieldvalue(options,'ksp_max_it',100);
-solverOptions.ksp_rtol=getfieldvalue(options,'ksp_rtol',1e-15);
+solverOptions.ksp_max_it=getfieldvalue(options,'ksp_max_it',300);
+solverOptions.ksp_rtol=getfieldvalue(options,'ksp_rtol',1e-13);
Index: /issm/trunk/src/m/solvers/bcgslbjacobioptions.py
===================================================================
--- /issm/trunk/src/m/solvers/bcgslbjacobioptions.py	(revision 28012)
+++ /issm/trunk/src/m/solvers/bcgslbjacobioptions.py	(revision 28013)
@@ -11,6 +11,6 @@
     solverOptions['ksp_type'] = options.getfieldvalue('ksp_type', 'bcgsl')
     solverOptions['pc_type'] = options.getfieldvalue('pc_type', 'bjacobi')
-    solverOptions['ksp_max_it'] = options.getfieldvalue('ksp_max_it', 100)
-    solverOptions['ksp_rtol'] = options.getfieldvalue('ksp_rtol', 1e-15)
+    solverOptions['ksp_max_it'] = options.getfieldvalue('ksp_max_it', 300)
+    solverOptions['ksp_rtol'] = options.getfieldvalue('ksp_rtol', 1e-13)
 
     return solverOptions
Index: /issm/trunk/src/wrappers/BamgMesher/BamgMesher.cpp
===================================================================
--- /issm/trunk/src/wrappers/BamgMesher/BamgMesher.cpp	(revision 28012)
+++ /issm/trunk/src/wrappers/BamgMesher/BamgMesher.cpp	(revision 28013)
@@ -40,5 +40,5 @@
 	WriteData(BAMGMESHOUT,bamgmesh_out);
 
-	/*Free ressources: */
+	/*Free resources: */
 	delete bamgopts;
 	delete bamggeom_in;
Index: /issm/trunk/src/wrappers/BamgMesher/BamgMesher.js
===================================================================
--- /issm/trunk/src/wrappers/BamgMesher/BamgMesher.js	(revision 28012)
+++ /issm/trunk/src/wrappers/BamgMesher/BamgMesher.js	(revision 28013)
@@ -242,5 +242,5 @@
     var return_array=[bamgmeshout, bamggeomout];
 
-    /*Free ressources: */
+    /*Free resources: */
     Module._free(pVerticesSize_mesh_out);
     Module._free(pVertices_mesh_out);
Index: /issm/trunk/src/wrappers/Chaco/Chaco.cpp
===================================================================
--- /issm/trunk/src/wrappers/Chaco/Chaco.cpp	(revision 28012)
+++ /issm/trunk/src/wrappers/Chaco/Chaco.cpp	(revision 28013)
@@ -66,5 +66,5 @@
 	WriteData(ASSGN_OUT,assignment,nvtxs);
 
-	/*Free ressources:*/
+	/*Free resources:*/
 	xDelete<short>(assignment); 
 	xDelete<double>(goal);
Index: /issm/trunk/src/wrappers/ContourToMesh/ContourToMesh.js
===================================================================
--- /issm/trunk/src/wrappers/ContourToMesh/ContourToMesh.js	(revision 28012)
+++ /issm/trunk/src/wrappers/ContourToMesh/ContourToMesh.js	(revision 28013)
@@ -80,5 +80,5 @@
 	/*}}}*/
 
-	/*Free ressources: */
+	/*Free resources: */
 	Module._free(pin_nod); 
 	Module._free(pin_nel); 
Index: /issm/trunk/src/wrappers/DistanceToMaskBoundary/DistanceToMaskBoundary.cpp
===================================================================
--- /issm/trunk/src/wrappers/DistanceToMaskBoundary/DistanceToMaskBoundary.cpp	(revision 28012)
+++ /issm/trunk/src/wrappers/DistanceToMaskBoundary/DistanceToMaskBoundary.cpp	(revision 28013)
@@ -49,5 +49,5 @@
 	WriteData(DISTANCE,distance,nods);
 
-	/*Free ressources: */
+	/*Free resources: */
 	xDelete<double>(x);
 	xDelete<double>(y);
Index: /issm/trunk/src/wrappers/ElementConnectivity/ElementConnectivity.js
===================================================================
--- /issm/trunk/src/wrappers/ElementConnectivity/ElementConnectivity.js	(revision 28012)
+++ /issm/trunk/src/wrappers/ElementConnectivity/ElementConnectivity.js	(revision 28013)
@@ -38,5 +38,5 @@
 	/*}}}*/
 
-	/*Free ressources: */
+	/*Free resources: */
 	Module._free(pconnectivity); 
 	Module._free(connectivitylinear); 
Index: /issm/trunk/src/wrappers/InterpFromGridToMesh/InterpFromGridToMesh.cpp
===================================================================
--- /issm/trunk/src/wrappers/InterpFromGridToMesh/InterpFromGridToMesh.cpp	(revision 28012)
+++ /issm/trunk/src/wrappers/InterpFromGridToMesh/InterpFromGridToMesh.cpp	(revision 28013)
@@ -54,9 +54,9 @@
 
 	/*Input datasets: */
-	FetchData(&x,&x_rows,NULL,XHANDLE);
-	FetchData(&y,&y_rows,NULL,YHANDLE);
+	FetchData(&x,&x_rows,XHANDLE);
+	FetchData(&y,&y_rows,YHANDLE);
 	FetchData(&data,&data_rows,&data_cols,DATAHANDLE);
-	FetchData(&x_mesh,&x_mesh_rows,NULL,XMESHHANDLE);
-	FetchData(&y_mesh,&y_mesh_rows,NULL,YMESHHANDLE);
+	FetchData(&x_mesh,&x_mesh_rows,XMESHHANDLE);
+	FetchData(&y_mesh,&y_mesh_rows,YMESHHANDLE);
 	FetchData(&default_value,DEFAULTHANDLE);
 
Index: /issm/trunk/src/wrappers/InterpFromMeshToGrid/InterpFromMeshToGrid.cpp
===================================================================
--- /issm/trunk/src/wrappers/InterpFromMeshToGrid/InterpFromMeshToGrid.cpp	(revision 28012)
+++ /issm/trunk/src/wrappers/InterpFromMeshToGrid/InterpFromMeshToGrid.cpp	(revision 28013)
@@ -62,5 +62,5 @@
 	WriteData(GRIDDATA,griddata,nlines,ncols);
 
-	/*Free ressources: */
+	/*Free resources: */
 	xDelete<int>(index);
 	xDelete<double>(x);
Index: /issm/trunk/src/wrappers/InterpFromMeshToMesh2d/InterpFromMeshToMesh2d.js
===================================================================
--- /issm/trunk/src/wrappers/InterpFromMeshToMesh2d/InterpFromMeshToMesh2d.js	(revision 28012)
+++ /issm/trunk/src/wrappers/InterpFromMeshToMesh2d/InterpFromMeshToMesh2d.js	(revision 28013)
@@ -72,5 +72,5 @@
 	/*}}}*/
 
-	/*Free ressources: */
+	/*Free resources: */
 	Module._free(pdata_interp); 
 
Index: /issm/trunk/src/wrappers/IssmConfig/IssmConfig.js
===================================================================
--- /issm/trunk/src/wrappers/IssmConfig/IssmConfig.js	(revision 28012)
+++ /issm/trunk/src/wrappers/IssmConfig/IssmConfig.js	(revision 28013)
@@ -18,5 +18,5 @@
 	/*}}}*/
 
-	/*Free ressources: */
+	/*Free resources: */
 	Module._free(pvalue); 
 
Index: /issm/trunk/src/wrappers/Kriging/Kriging.cpp
===================================================================
--- /issm/trunk/src/wrappers/Kriging/Kriging.cpp	(revision 28012)
+++ /issm/trunk/src/wrappers/Kriging/Kriging.cpp	(revision 28013)
@@ -64,5 +64,5 @@
 	if(nlhs==2) WriteData(ERROR,error,M_interp,N_interp);
 
-	/*Free ressources: */
+	/*Free resources: */
 	xDelete<double>(x);
 	xDelete<double>(y);
Index: /issm/trunk/src/wrappers/MeshPartition/MeshPartition.cpp
===================================================================
--- /issm/trunk/src/wrappers/MeshPartition/MeshPartition.cpp	(revision 28012)
+++ /issm/trunk/src/wrappers/MeshPartition/MeshPartition.cpp	(revision 28013)
@@ -71,5 +71,5 @@
 	WriteData(NODEPARTITIONING,node_partitioning,numberofvertices);
 
-	/*Free ressources:*/
+	/*Free resources:*/
 	xDelete<int>(elements);
 	xDelete<int>(elements2d);
Index: /issm/trunk/src/wrappers/NodeConnectivity/NodeConnectivity.js
===================================================================
--- /issm/trunk/src/wrappers/NodeConnectivity/NodeConnectivity.js	(revision 28012)
+++ /issm/trunk/src/wrappers/NodeConnectivity/NodeConnectivity.js	(revision 28013)
@@ -34,5 +34,5 @@
 	/*}}}*/
 
-	/*Free ressources: */
+	/*Free resources: */
 	Module._free(pconnectivity); 
 	Module._free(connectivitylinear); 
Index: /issm/trunk/src/wrappers/PropagateFlagsFromConnectivity/PropagateFlagsFromConnectivity.cpp
===================================================================
--- /issm/trunk/src/wrappers/PropagateFlagsFromConnectivity/PropagateFlagsFromConnectivity.cpp	(revision 28012)
+++ /issm/trunk/src/wrappers/PropagateFlagsFromConnectivity/PropagateFlagsFromConnectivity.cpp	(revision 28013)
@@ -38,5 +38,5 @@
 	WriteData(POOLOUT,pool,nel);
 
-	/*Free ressources: */
+	/*Free resources: */
 	xDelete<double>(connectivity);
 	xDelete<double>(pool);
Index: /issm/trunk/src/wrappers/Triangle/Triangle.cpp
===================================================================
--- /issm/trunk/src/wrappers/Triangle/Triangle.cpp	(revision 28012)
+++ /issm/trunk/src/wrappers/Triangle/Triangle.cpp	(revision 28013)
@@ -50,5 +50,5 @@
 	WriteData(SEGMENTMARKERLIST,segmentmarkerlist,nsegs);
 
-	/*free ressources: */
+	/*free resources: */
 	delete domain;
 	delete rifts;
Index: /issm/trunk/src/wrappers/Triangle/Triangle.h
===================================================================
--- /issm/trunk/src/wrappers/Triangle/Triangle.h	(revision 28012)
+++ /issm/trunk/src/wrappers/Triangle/Triangle.h	(revision 28013)
@@ -19,5 +19,5 @@
 #ifdef _HAVE_JAVASCRIPT_MODULES_
 #undef _DO_NOT_LOAD_GLOBALS_ /*only module where this needs to be undefined, so as to 
-							   not include IssmComm several times in the javascript Modle construct.*/
+							   not include IssmComm several times in the JavaScript module construct.*/
 #endif
 
Index: /issm/trunk/src/wrappers/Triangle/Triangle.js
===================================================================
--- /issm/trunk/src/wrappers/Triangle/Triangle.js	(revision 28012)
+++ /issm/trunk/src/wrappers/Triangle/Triangle.js	(revision 28013)
@@ -66,5 +66,5 @@
 	var return_array=[index,x,y,segments,segmentmarkers];
 
-	/*Free ressources: */
+	/*Free resources: */
 	Module._free(pindex); 
 	Module._free(indexlinear); 
Index: /issm/trunk/src/wrappers/javascript/Makefile.am
===================================================================
--- /issm/trunk/src/wrappers/javascript/Makefile.am	(revision 28012)
+++ /issm/trunk/src/wrappers/javascript/Makefile.am	(revision 28013)
@@ -4,16 +4,17 @@
 EXEEXT=$(JAVASCRIPTWRAPPEREXT)
 
-#define prefix (from http://www.gnu.org/software/autoconf/manual/autoconf-2.67/html_node/Defining-Directories.html)
-AM_CPPFLAGS+=  -DISSM_PREFIX='"$(prefix)"'
+# Define prefix (from http://www.gnu.org/software/autoconf/manual/autoconf-2.67/html_node/Defining-Directories.html)
+AM_CPPFLAGS += -DISSM_PREFIX='"$(prefix)"'
 
-js_scripts = ${ISSM_DIR}/src/wrappers/BamgMesher/BamgMesher.js \
-			 ${ISSM_DIR}/src/wrappers/Triangle/Triangle.js \
-			 ${ISSM_DIR}/src/wrappers/NodeConnectivity/NodeConnectivity.js\
-			 ${ISSM_DIR}/src/wrappers/ContourToMesh/ContourToMesh.js\
-			 ${ISSM_DIR}/src/wrappers/ElementConnectivity/ElementConnectivity.js\
-			 ${ISSM_DIR}/src/wrappers/InterpFromGridToMesh/InterpFromGridToMesh.js\
-			 ${ISSM_DIR}/src/wrappers/InterpFromMeshToMesh2d/InterpFromMeshToMesh2d.js\
-			 ${ISSM_DIR}/src/wrappers/IssmConfig/IssmConfig.js\
-			 ${ISSM_DIR}/src/wrappers/Issm/issm.js
+js_scripts = \
+	${ISSM_DIR}/src/wrappers/BamgMesher/BamgMesher.js \
+	${ISSM_DIR}/src/wrappers/Triangle/Triangle.js \
+	${ISSM_DIR}/src/wrappers/NodeConnectivity/NodeConnectivity.js \
+	${ISSM_DIR}/src/wrappers/ContourToMesh/ContourToMesh.js \
+	${ISSM_DIR}/src/wrappers/ElementConnectivity/ElementConnectivity.js \
+	${ISSM_DIR}/src/wrappers/InterpFromGridToMesh/InterpFromGridToMesh.js \
+	${ISSM_DIR}/src/wrappers/InterpFromMeshToMesh2d/InterpFromMeshToMesh2d.js \
+	${ISSM_DIR}/src/wrappers/IssmConfig/IssmConfig.js \
+	${ISSM_DIR}/src/wrappers/Issm/issm.js
 
 bin_SCRIPTS =  issm-prebin.js
@@ -21,27 +22,19 @@
 issm-prebin.js: ${js_scripts}
 	cat ${js_scripts}  > issm-prebin.js
-	
-#javascript io{{{
-if !WINDOWS
+
+# JavaScript I/O{{{
 lib_LTLIBRARIES = libISSMJavascript.la
-else
-noinst_LTLIBRARIES = libISSMJavascript.la
-lib_LTLIBRARIES = 
-endif
 
-io_sources=   ./io/WriteJavascriptData.cpp\
-				./io/FetchJavascriptData.cpp
+io_sources = \
+	./io/WriteJavascriptData.cpp \
+	./io/FetchJavascriptData.cpp
 
-ALLCXXFLAGS= -fPIC -D_WRAPPERS_ $(CXXFLAGS) $(CXXOPTFLAGS) 
+ALLCXXFLAGS= -fPIC -D_WRAPPERS_ $(CXXFLAGS) $(CXXOPTFLAGS)
 
 libISSMJavascript_la_SOURCES = $(io_sources)
 libISSMJavascript_la_CXXFLAGS= $(ALLCXXFLAGS)
 #}}}
-#api io{{{
-if !WINDOWS
+# API I/O{{{
 lib_LTLIBRARIES += libISSMApi.la
-else
-noinst_LTLIBRARIES += libISSMApi.la
-endif
 
 api_sources= ./io/ApiPrintf.cpp
@@ -50,48 +43,52 @@
 libISSMApi_la_CXXFLAGS= $(ALLCXXFLAGS)
 #}}}
-#Wrappers {{{
-bin_PROGRAMS = 		 IssmModule
+# Wrappers {{{
+bin_PROGRAMS = IssmModule
 #}}}
 
 # Dependencies {{{
 
-#Triangle library
-AM_CXXFLAGS =  -DTRILIBRARY -DANSI_DECLARATORS -DNO_TIMER -D_WRAPPERS_
-AM_CXXFLAGS +=  -D_HAVE_JAVASCRIPT_MODULES_ -fPIC
+# Triangle
+AM_CXXFLAGS = -DTRILIBRARY -DANSI_DECLARATORS -DNO_TIMER -D_WRAPPERS_
+AM_CXXFLAGS += -D_HAVE_JAVASCRIPT_MODULES_ -fPIC
 
 deps = ./libISSMJavascript.la ../../c/libISSMModules.la ../../c/libISSMCore.la ./libISSMApi.la
 
-#Optimization flags:
-AM_CXXFLAGS += $(CXXOPTFLAGS) 
+# Optimization flags
+AM_CXXFLAGS += $(CXXOPTFLAGS)
 #}}}
 # Module sources and dependencies {{{
-if !WINDOWS
 libISSMJavascript_la_LIBADD = ./../../c/libISSMCore.la ./../../c/libISSMModules.la $(MPILIB) $(PETSCLIB) $(MKLLIB) $(GSLLIB) $(MATHLIB)
+
+if VERSION
+libISSMJavascript_la_LDFLAGS =
+libISSMApi_la_LDFLAGS =
+else
+libISSMJavascript_la_LDFLAGS = -avoid-version
+libISSMApi_la_LDFLAGS = -avoid-version
 endif
 
 if STANDALONE_LIBRARIES
-libISSMJavascript_la_LDFLAGS = -static 
-deps += $(PETSCLIB) $(TAOLIB) $(M1QN3LIB) $(PLAPACKLIB) $(MUMPSLIB) $(SUPERLULIB) $(SPOOLESLIB) $(TRIANGLELIB) $(SCALAPACKLIB) $(BLACSLIB) $(HYPRELIB) $(SPAILIB) $(PROMETHEUSLIB) $(PASTIXLIB) $(MLLIB) $(DAKOTALIB) $(METISLIB) $(CHACOLIB) $(SCOTCHLIB) $(BLASLAPACKLIB) $(MKLLIB) $(MPILIB) $(MATHLIB) $(GRAPHICSLIB) $(MULTITHREADINGLIB) $(OSLIBS) $(GSLLIB)   $(ADOLCLIB) $(AMPILIB) $(METEOIOLIB) $(SNOWPACKLIB)
+if !MSYS2
+libISSMJavascript_la_LDFLAGS += -static
+libISSMApi_la_LDFLAGS += -static
+endif
+deps += $(DAKOTALIB) $(PETSCLIB) $(MUMPSLIB) $(SCALAPACKLIB) $(BLASLAPACKLIB) $(PARMETISLIB) $(METISLIB) $(HDF5LIB) $(TAOLIB) $(M1QN3LIB) $(SEMICLIB) $(PLAPACKLIB) $(SUPERLULIB) $(SPOOLESLIB) $(TRIANGLELIB) $(BLACSLIB) $(HYPRELIB) $(SPAILIB) $(PROMETHEUSLIB) $(PASTIXLIB) $(MLLIB) $(SCOTCHLIB) $(MKLLIB) $(MPILIB) $(NEOPZLIB) $(MATHLIB) $(GRAPHICSLIB) $(MULTITHREADINGLIB) $(GSLLIB) $(ADOLCLIB) $(AMPILIB) $(METEOIOLIB) $(SNOWPACKLIB) $(PROJLIB) $(OSLIBS)
 endif
 
-if !WINDOWS
-libISSMApi_la_LIBADD = $(MPILIB) $(PETSCLIB) $(GSLLIB) $(MATHLIB) $(MEXLIB)
-endif
+libISSMApi_la_LIBADD = $(PETSCLIB) $(MUMPSLIB) $(SCALAPACKLIB) $(BLASLAPACKLIB) $(PARMETISLIB) $(METISLIB) $(HDF5LIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB) $(PROJLIB) $(MATHLIB)
 
-if STANDALONE_LIBRARIES
-libISSMApi_la_LDFLAGS = -static 
-endif
+IssmModule_SOURCES = \
+	../BamgMesher/BamgMesher.cpp \
+	../Triangle/Triangle.cpp \
+	../NodeConnectivity/NodeConnectivity.cpp \
+	../ContourToMesh/ContourToMesh.cpp \
+	../ElementConnectivity/ElementConnectivity.cpp \
+	../InterpFromGridToMesh/InterpFromGridToMesh.cpp \
+	../InterpFromMeshToMesh2d/InterpFromMeshToMesh2d.cpp \
+	../IssmConfig/IssmConfig.cpp \
+	../Issm/issm.cpp
 
-IssmModule_SOURCES = ../BamgMesher/BamgMesher.cpp \
-                     ../Triangle/Triangle.cpp \
-					 ../NodeConnectivity/NodeConnectivity.cpp\
-					 ../ContourToMesh/ContourToMesh.cpp\
-					 ../ElementConnectivity/ElementConnectivity.cpp\
-					 ../InterpFromGridToMesh/InterpFromGridToMesh.cpp\
-					 ../InterpFromMeshToMesh2d/InterpFromMeshToMesh2d.cpp\
-					 ../IssmConfig/IssmConfig.cpp\
-					 ../Issm/issm.cpp
-
-IssmModule_CXXFLAGS= -fPIC -D_DO_NOT_LOAD_GLOBALS_  --memory-init-file 0 $(AM_CXXFLAGS) $(CXXFLAGS) $(CXXOPTFLAGS) $(COPTFLAGS) -s EXPORTED_FUNCTIONS="['_BamgMesherModule','_TriangleModule','_NodeConnectivityModule','_ContourToMeshModule','_ElementConnectivityModule','_InterpFromGridToMeshModule','_InterpFromMeshToMesh2dModule','_IssmConfigModule','_IssmModule']"  -s DISABLE_EXCEPTION_CATCHING=0 -s ALLOW_MEMORY_GROWTH=1 -s INVOKE_RUN=0
-IssmModule_LDADD = ${deps} $(TRIANGLELIB)  $(GSLLIB)
+IssmModule_CXXFLAGS= -fPIC -D_DO_NOT_LOAD_GLOBALS_  --memory-init-file 0 $(AM_CXXFLAGS) $(CXXFLAGS) $(CXXOPTFLAGS) $(COPTFLAGS) -s EXPORTED_FUNCTIONS="['_BamgMesherModule','_TriangleModule','_NodeConnectivityModule','_ContourToMeshModule','_ElementConnectivityModule','_InterpFromGridToMeshModule','_InterpFromMeshToMesh2dModule','_IssmConfigModule','_IssmModule']" -s DISABLE_EXCEPTION_CATCHING=0 -s ALLOW_MEMORY_GROWTH=1 -s INVOKE_RUN=0
+IssmModule_LDADD = ${deps} $(TRIANGLELIB) $(GSLLIB)
 #}}}
Index: /issm/trunk/src/wrappers/javascript/io/FetchJavascriptData.cpp
===================================================================
--- /issm/trunk/src/wrappers/javascript/io/FetchJavascriptData.cpp	(revision 28012)
+++ /issm/trunk/src/wrappers/javascript/io/FetchJavascriptData.cpp	(revision 28013)
@@ -1,3 +1,3 @@
-/*\file FetchData.cpp:
+/* \file FetchJavascriptData.cpp:
  * \brief: general I/O interface to fetch data in javascript
  */
@@ -22,4 +22,9 @@
 }
 /*}}}*/
+/*FUNCTION FetchData(double* pscalar, double scalar){{{*/
+void FetchData(double* pscalar, double scalar){
+	*pscalar=scalar;
+}
+/*}}}*/
 /*FUNCTION FetchData(int* pinteger, int integer){{{*/
 void FetchData(int* pinteger, int integer){
@@ -27,9 +32,4 @@
 }
 /*}}}*/
-/*FUNCTION FetchData(double* pscalar, double scalar){{{*/
-void FetchData(double* pscalar, double scalar){
-	*pscalar=scalar;
-}
-/*}}}*/
 /*FUNCTION FetchData(double **pvector, double* vectorin, int nods){{{*/
 void FetchData(double** pvector, double* vectorin, int nods){
@@ -49,4 +49,25 @@
 	*pvector=vector;
 	*pnods=nods;
+}
+/*}}}*/
+/*FUNCTION FetchData(double **pmatrix, int* pM, int* matrix, int M, int N){{{*/
+void FetchData(double **pmatrix, int* pM, int* matrixin, int M, int N){
+	double*  outmatrix=NULL;
+	int      outmatrix_rows;
+
+	if(M == 0 || N == 0){
+		/*Nothing to pick up. Just initialize matrix pointer to NULL: */
+		outmatrix_rows=0;
+		outmatrix=NULL;
+	}
+	else if (pmatrix && matrixin){
+		outmatrix_rows=M;
+		outmatrix=xNew<IssmPDouble>(M*N);
+		for(int i=0;i<M*N;i++){outmatrix[i]=(IssmPDouble)matrixin[i];}
+	}
+
+	/*Assign output pointers:*/
+	*pmatrix=outmatrix;
+	if (pM){*pM=outmatrix_rows;}
 }
 /*}}}*/
@@ -62,5 +83,5 @@
 		outmatrix=NULL;
 	}
-    else if (pmatrix && matrixin){
+	else if (pmatrix && matrixin){
 		outmatrix_rows=M;
 		outmatrix_cols=N;
@@ -73,4 +94,24 @@
 	if (pM){*pM=outmatrix_rows;}
 	if (pN){*pN=outmatrix_cols;}
+}
+/*}}}*/
+/*FUNCTION FetchData(double **pmatrix, int* pM, double* matrix, int M, int N){{{*/
+void FetchData(double **pmatrix, int* pM, double* matrixin, int M, int N){
+	double*  outmatrix=NULL;
+	int      outmatrix_rows;
+
+	if(M == 0 || N == 0){
+		/*Nothing to pick up. Just initialize matrix pointer to NULL: */
+		outmatrix_rows=0;
+		outmatrix=NULL;
+	}
+	else if (pmatrix && matrixin){
+		outmatrix_rows=M;
+		outmatrix=xNew<IssmPDouble>(M*N); xMemCpy<IssmPDouble>(outmatrix,matrixin,M*N);
+	}
+
+	/*Assign output pointers:*/
+	*pmatrix=outmatrix;
+	if (pM){*pM=outmatrix_rows;}
 }
 /*}}}*/
@@ -86,5 +127,5 @@
 		outmatrix=NULL;
 	}
-    else if (pmatrix && matrixin){
+	else if (pmatrix && matrixin){
 		outmatrix_rows=M;
 		outmatrix_cols=N;
@@ -96,4 +137,24 @@
 	if (pM){*pM=outmatrix_rows;}
 	if (pN){*pN=outmatrix_cols;}
+}
+/*}}}*/
+/*FUNCTION FetchData(int **pmatrix, int* pM, int* matrix, int M, int N){{{*/
+void FetchData(int **pmatrix, int* pM, int* matrixin, int M, int N){
+	int*     outmatrix=NULL;
+	int      outmatrix_rows;
+
+	if(M == 0 || N == 0){
+		/*Nothing to pick up. Just initialize matrix pointer to NULL: */
+		outmatrix_rows=0;
+		outmatrix=NULL;
+	}
+	else if (pmatrix && matrixin){
+		outmatrix_rows=M;
+		outmatrix=xNew<int>(M*N); xMemCpy<int>(outmatrix,matrixin,M*N);
+	}
+
+	/*Assign output pointers:*/
+	*pmatrix=outmatrix;
+	if (pM){*pM=outmatrix_rows;}
 }
 /*}}}*/
@@ -109,5 +170,5 @@
 		outmatrix=NULL;
 	}
-    else if (pmatrix && matrixin){
+	else if (pmatrix && matrixin){
 		outmatrix_rows=M;
 		outmatrix_cols=N;
@@ -187,28 +248,28 @@
 	BamgOpts *bamgopts      = new BamgOpts();
 
-    /*Parameters*/
-    bamgopts->anisomax	    = anisomax;
-    bamgopts->coeff	        = coeff;
-    bamgopts->cutoff    	= cutoff;
-    bamgopts->errg	        = errg;
-    bamgopts->gradation	    = gradation;
-    bamgopts->Hessiantype	= Hessiantype;
-    bamgopts->maxnbv	    = maxnbv;
-    bamgopts->maxsubdiv	    = maxsubdiv;
-    bamgopts->Metrictype	= Metrictype;
-    bamgopts->nbjacobi	    = nbjacobi;
-    bamgopts->nbsmooth	    = nbsmooth;
-    bamgopts->omega	        = omega;
-    bamgopts->power	        = power;
-    bamgopts->verbose	    = verbose;
-
-    /*Flags*/
-    bamgopts->Crack	        = Crack;
-    bamgopts->KeepVertices	= KeepVertices;
-    bamgopts->splitcorners	= splitcorners;
-
-    /*Metric related*/
-    bamgopts->hmin	        = hmin;
-    bamgopts->hmax       	= hmax;
+	/*Parameters*/
+	bamgopts->anisomax	    = anisomax;
+	bamgopts->coeff	        = coeff;
+	bamgopts->cutoff    	= cutoff;
+	bamgopts->errg	        = errg;
+	bamgopts->gradation	    = gradation;
+	bamgopts->Hessiantype	= Hessiantype;
+	bamgopts->maxnbv	    = maxnbv;
+	bamgopts->maxsubdiv	    = maxsubdiv;
+	bamgopts->Metrictype	= Metrictype;
+	bamgopts->nbjacobi	    = nbjacobi;
+	bamgopts->nbsmooth	    = nbsmooth;
+	bamgopts->omega	        = omega;
+	bamgopts->power	        = power;
+	bamgopts->verbose	    = verbose;
+
+	/*Flags*/
+	bamgopts->Crack	        = Crack;
+	bamgopts->KeepVertices	= KeepVertices;
+	bamgopts->splitcorners	= splitcorners;
+
+	/*Metric related*/
+	bamgopts->hmin	        = hmin;
+	bamgopts->hmax       	= hmax;
 	FetchData(&bamgopts->hminVertices, &bamgopts->hminVerticesSize[0], &bamgopts->hminVerticesSize[1], hminVertices, hminVerticesSize[0], hminVerticesSize[1]);
 	FetchData(&bamgopts->hmaxVertices, &bamgopts->hmaxVerticesSize[0], &bamgopts->hmaxVerticesSize[1], hmaxVertices, hmaxVerticesSize[0], hmaxVerticesSize[1]);
Index: /issm/trunk/src/wrappers/javascript/io/WriteJavascriptData.cpp
===================================================================
--- /issm/trunk/src/wrappers/javascript/io/WriteJavascriptData.cpp	(revision 28012)
+++ /issm/trunk/src/wrappers/javascript/io/WriteJavascriptData.cpp	(revision 28013)
@@ -1,4 +1,4 @@
-/* \file WriteData.c:
- * \brief: general interface for writing data
+/* \file WriteJavascriptData.cpp:
+ * \brief: general I/O interface to fetch data in javascript
  */
 
Index: /issm/trunk/src/wrappers/javascript/io/javascriptio.h
===================================================================
--- /issm/trunk/src/wrappers/javascript/io/javascriptio.h	(revision 28012)
+++ /issm/trunk/src/wrappers/javascript/io/javascriptio.h	(revision 28013)
@@ -1,4 +1,4 @@
-/*\file matlabio.h
- *\brief: I/O for ISSM in matlab mode
+/*\file javascriptio.h
+ *s\brief: I/O for ISSM in javascript mode
  */
 
@@ -32,11 +32,14 @@
 
 void FetchData(char** pstring, char* stringin);
-void FetchData(double* pscalar,double scalar);
+void FetchData(double* pscalar, double scalar);
 void FetchData(int* pinteger,int integer);
 void FetchData(double** pvector, double* vectorin, int nods);
 void FetchData(double** pvector, int* pnods, double* vectorin, int nods);
-void FetchData(double **pmatrix, int* pM, int* pN, int* matrixin, int M, int N);
-void FetchData(double **pmatrix, int* pM, int* pN, double* matrixin, int M, int N);
-void FetchData(int **pmatrix, int* pM, int* pN, int* matrixin, int M, int N);
+void FetchData(double** pmatrix, int* pM, int* matrixin, int M, int N);
+void FetchData(double** pmatrix, int* pM, int* pN, int* matrixin, int M, int N);
+void FetchData(double** pmatrix, int* pM, double* matrixin, int M, int N);
+void FetchData(double** pmatrix, int* pM, int* pN, double* matrixin, int M, int N);
+void FetchData(int** pmatrix, int* pM, int* matrixin, int M, int N);
+void FetchData(int** pmatrix, int* pM, int* pN, int* matrixin, int M, int N);
 void FetchData(Contours** pcontours,double* x, double* y, int nods);
 void FetchData(BamgGeom** pbamggeom, int* VerticesSize, double* Vertices, int* EdgesSize, double* Edges, int* CornersSize, double* Corners, int* RequiredVerticesSize, double* RequiredVertices, int* RequiredEdgesSize, double* RequiredEdges, int* CrackedEdgesSize, double* CrackedEdges, int* SubDomainsSize, double* SubDomains);
Index: /issm/trunk/src/wrappers/matlab/Makefile.am
===================================================================
--- /issm/trunk/src/wrappers/matlab/Makefile.am	(revision 28012)
+++ /issm/trunk/src/wrappers/matlab/Makefile.am	(revision 28013)
@@ -1,3 +1,3 @@
-AM_CPPFLAGS = @NEOPZINCL@ @DAKOTAINCL@ @MATLABINCL@ @PETSCINCL@ @MPIINCL@ @SPOOLESINCL@ @PARMETISINCL@ @METISINCL@ @TRIANGLEINCL@ @CHACOINCL@ @SCOTCHINCL@ @SHAPELIBINCL@ @AMPIINCL@ @ADJOINTMPIINCL@ @MEDIPACKINCL@ @CODIPACKINCL@ @PROJINCL@
+AM_CPPFLAGS = @NEOPZINCL@ @DAKOTAINCL@ @BOOSTINCL@ @MATLABINCL@ @PETSCINCL@ @MPIINCL@ @SPOOLESINCL@ @PARMETISINCL@ @METISINCL@ @TRIANGLEINCL@ @CHACOINCL@ @SCOTCHINCL@ @SHAPELIBINCL@ @AMPIINCL@ @ADJOINTMPIINCL@ @MEDIPACKINCL@ @CODIPACKINCL@ @PROJINCL@
 AUTOMAKE_OPTIONS = subdir-objects
 
@@ -8,10 +8,5 @@
 
 #matlab io{{{
-if !WINDOWS
 lib_LTLIBRARIES = libISSMMatlab.la
-else
-noinst_LTLIBRARIES = libISSMMatlab.la
-lib_LTLIBRARIES =
-endif
 
 io_sources = \
@@ -33,9 +28,5 @@
 #}}}
 #api io{{{
-if !WINDOWS
 lib_LTLIBRARIES += libISSMApi.la
-else
-noinst_LTLIBRARIES += libISSMApi.la
-endif
 
 if !MSYS2
@@ -68,8 +59,8 @@
 	NodeConnectivity_matlab.la \
 	PointCloudFindNeighbors_matlab.la \
+	ProcessRifts_matlab.la \
 	PropagateFlagsFromConnectivity_matlab.la \
-	Triangle_matlab.la \
-	ProcessRifts_matlab.la \
-	Scotch_matlab.la
+	Scotch_matlab.la \
+	Triangle_matlab.la
 
 if CHACO
@@ -90,8 +81,8 @@
 deps = $(MATHLIB)
 
-#Triangle library
+# Triangle
 CXXFLAGS_FOR_TRI = -DTRILIBRARY -DANSI_DECLARATORS -DNO_TIMER
 
-#Matlab part
+# MATLAB
 CXXFLAGS_FOR_MEX = -D_HAVE_MATLAB_MODULES_ $(MEXOPTFLAGS) $(MEXCXXFLAGS)
 CPPFLAGS_FOR_MEX = $(MATLABINCL)
@@ -122,5 +113,8 @@
 AM_LDFLAGS += -Wl,-rpath,'@rpath'
 else
-AM_LDFLAGS += -Wl,-static -Wl,-lbacktrace -Wl,--disable-new-dtags -Wl,-rpath,'$$ORIGIN'
+AM_LDFLAGS += -Wl,-lbacktrace
+if !MSYS2
+AM_LDFLAGS += -Wl,-static -Wl,--disable-new-dtags -Wl,-rpath,'$$ORIGIN'
+endif
 endif
 endif
@@ -133,8 +127,4 @@
 deps += ./libISSMApi.la
 endif
-
-# if WINDOWS
-# deps += $(METISLIB)
-# endif
 
 if ADOLC
@@ -148,11 +138,9 @@
 deps += ${LIBADD_FOR_MEX}
 
-#Optimization flags:
+# Optimization flags
 AM_CXXFLAGS += $(CXXOPTFLAGS)
 #}}}
 # Module sources and dependencies {{{
-if !WINDOWS
 libISSMMatlab_la_LIBADD = ./../../c/libISSMCore.la ./../../c/libISSMModules.la $(PETSCLIB) $(MUMPSLIB) $(SCALAPACKLIB) $(BLASLAPACKLIB) $(PARMETISLIB) $(METISLIB) $(HDF5LIB) $(MPILIB) $(NEOPZLIB) $(MKLLIB) $(GSLLIB) $(PROJLIB) $(MATHLIB) $(MEXLIB)
-endif
 
 if VERSION
@@ -165,12 +153,12 @@
 
 if STANDALONE_LIBRARIES
+if !MSYS2
 libISSMMatlab_la_LDFLAGS += -static
 libISSMApi_la_LDFLAGS += -static
+endif
 deps += $(DAKOTALIB) $(PETSCLIB) $(MUMPSLIB) $(SCALAPACKLIB) $(BLASLAPACKLIB) $(PARMETISLIB) $(METISLIB) $(HDF5LIB) $(TAOLIB) $(M1QN3LIB) $(SEMICLIB) $(PLAPACKLIB) $(SUPERLULIB) $(SPOOLESLIB) $(TRIANGLELIB) $(BLACSLIB) $(HYPRELIB) $(SPAILIB) $(PROMETHEUSLIB) $(PASTIXLIB) $(MLLIB) $(SCOTCHLIB) $(MKLLIB) $(MPILIB) $(NEOPZLIB) $(MATHLIB) $(GRAPHICSLIB) $(MULTITHREADINGLIB) $(GSLLIB) $(ADOLCLIB) $(AMPILIB) $(METEOIOLIB) $(SNOWPACKLIB) $(PROJLIB) $(OSLIBS) ${LIBADD_FOR_MEX}
 endif
 
-if !WINDOWS
 libISSMApi_la_LIBADD = $(PETSCLIB) $(MUMPSLIB) $(SCALAPACKLIB) $(BLASLAPACKLIB) $(PARMETISLIB) $(METISLIB) $(HDF5LIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB) $(PROJLIB) $(MATHLIB) $(MEXLIB)
-endif
 
 BamgConvertMesh_matlab_la_SOURCES = ../BamgConvertMesh/BamgConvertMesh.cpp
Index: /issm/trunk/src/wrappers/matlab/io/FetchMatlabData.cpp
===================================================================
--- /issm/trunk/src/wrappers/matlab/io/FetchMatlabData.cpp	(revision 28012)
+++ /issm/trunk/src/wrappers/matlab/io/FetchMatlabData.cpp	(revision 28013)
@@ -1,4 +1,4 @@
-/*\file FetchData.cpp:
- * \brief: general I/O interface to fetch data in matlab
+/*\file FetchMatlabData.cpp:
+ *\brief: general I/O interface to fetch data in matlab
  */
 
@@ -538,5 +538,5 @@
       for(int i=0;i<cols;i++){
          for(int j=0;j<(jc[i+1]-jc[i]);j++){
-            matrix[rows*ir[count]+i]=pmxmatrix[count];
+            matrix[cols*ir[count]+i]=pmxmatrix[count];
             count++;
          }
Index: /issm/trunk/src/wrappers/matlab/io/WriteMatlabData.cpp
===================================================================
--- /issm/trunk/src/wrappers/matlab/io/WriteMatlabData.cpp	(revision 28012)
+++ /issm/trunk/src/wrappers/matlab/io/WriteMatlabData.cpp	(revision 28013)
@@ -1,4 +1,4 @@
-/* \file WriteData.c:
- * \brief: general interface for writing data
+/*\file WriteMatlabData.c:
+ *\brief: general I/O interface to write data in matlab
  */
 
@@ -295,5 +295,5 @@
 		mxSetPr(dataref,tmatrix_ptr);
 
-		/*Free ressources:*/
+		/*Free resources:*/
 		xDelete<double>(matrix_ptr);
 	}
Index: /issm/trunk/src/wrappers/python/Makefile.am
===================================================================
--- /issm/trunk/src/wrappers/python/Makefile.am	(revision 28012)
+++ /issm/trunk/src/wrappers/python/Makefile.am	(revision 28013)
@@ -1,6 +1,4 @@
 AM_CPPFLAGS = @DAKOTAINCL@ @PETSCINCL@ @MPIINCL@ @SPOOLESINCL@ @PARMETISINCL@ @METISINCL@ @TRIANGLEINCL@ @CHACOINCL@ @SCOTCHINCL@ @SHAPELIBINCL@ @PYTHONINCL@ @PYTHON_NUMPYINCL@ @AMPIINCL@ @ADJOINTMPIINCL@ @MEDIPACKINCL@ @CODIPACKINCL@ @PROJINCL@
 AUTOMAKE_OPTIONS = subdir-objects
-
-EXEEXT=$(PYTHONWRAPPEREXT)
 
 #define prefix (from http://www.gnu.org/software/autoconf/manual/autoconf-2.67/html_node/Defining-Directories.html)
@@ -15,15 +13,25 @@
 	./io/WritePythonData.cpp
 
-ALLCXXFLAGS= -fPIC -D_WRAPPERS_ $(CXXFLAGS) $(CXXOPTFLAGS)
+ALL_CXXFLAGS = -fPIC -D_WRAPPERS_
+
+if MSYS2
+ALL_CXXFLAGS += -D_DO_NOT_LOAD_GLOBALS_ # Cannot have undefined symbols under MSYS2
+ALL_CXXFLAGS += -D_USE_MATH_DEFINES # Need to open fence to math.h definitions when --std=c++ is used (which is the default)
+endif
+
+ALL_CXXFLAGS += $(CXXOPTFLAGS) $(CXXFLAGS)
 
 libISSMPython_la_SOURCES = $(io_sources)
-libISSMPython_la_CXXFLAGS= $(ALLCXXFLAGS)
+libISSMPython_la_CXXFLAGS= ${ALL_CXXFLAGS}
 #}}}
 #api io{{{
 lib_LTLIBRARIES += libISSMApi.la
-api_sources		 = ./io/ApiPrintf.cpp
+
+if !MSYS2
+api_sources= ./io/ApiPrintf.cpp
 
 libISSMApi_la_SOURCES = $(api_sources)
-libISSMApi_la_CXXFLAGS= $(ALLCXXFLAGS)
+libISSMApi_la_CXXFLAGS = ${ALL_CXXFLAGS}
+endif
 #}}}
 #Wrappers {{{
@@ -52,12 +60,26 @@
 endif
 #}}}
-#Flags and libraries {{{
-deps = $(MATHLIB) ${PYTHONLIB}
-
-#Triangle library
-AM_CXXFLAGS = -DTRILIBRARY -DANSI_DECLARATORS -DNO_TIMER -D_WRAPPERS_
-
-#Python part
-AM_LDFLAGS = -module $(PYTHONLINK) -shrext ${EXEEXT} --export-dynamic -rdynamic -no-undefined
+# Dependencies {{{
+deps = $(MATHLIB) $(PYTHONLIB)
+
+# Triangle
+CXXFLAGS_FOR_TRI = -DTRILIBRARY -DANSI_DECLARATORS -DNO_TIMER
+
+# Python
+LDFLAGS_FOR_PY = -module $(PYTHONLINK) -shrext $(PYTHONWRAPPEREXT) -no-undefined
+
+LDFLAGS_FOR_PY += --no-warnings
+CXXFLAGS_FOR_PY = -D_HAVE_PYTHON_MODULES_
+
+if PYTHON3
+CXXFLAGS_FOR_PY += -DNPY_NO_DEPRECATED_API
+endif
+
+AM_CXXFLAGS = ${CXXFLAGS_FOR_TRI} ${CXXFLAGS_FOR_PY}
+AM_LDFLAGS = ${LDFLAGS_FOR_PY}
+
+if !MSYS2
+AM_LDFLAGS += --export-dynamic -rdynamic
+endif
 
 if !VERSION
@@ -76,16 +98,18 @@
 AM_LDFLAGS += -Wl,-rpath,'@rpath'
 else
-AM_LDFLAGS += -Wl,-static -Wl,-lbacktrace -Wl,--disable-new-dtags -Wl,-rpath,'$$ORIGIN'
-endif
-endif
-
-AM_LDFLAGS 	+= --no-warnings
-AM_CXXFLAGS += -D_HAVE_PYTHON_MODULES_ -fPIC
-
-if PYTHON3
-AM_CXXFLAGS += -DNPY_NO_DEPRECATED_API
-endif
-
-deps += ./libISSMPython.la ../../c/libISSMModules.la ../../c/libISSMCore.la ./libISSMApi.la
+AM_LDFLAGS += -Wl,-lbacktrace
+if !MSYS2
+AM_LDFLAGS += -Wl,-static -Wl,--disable-new-dtags -Wl,-rpath,'$$ORIGIN'
+endif
+endif
+endif
+
+AM_CXXFLAGS += -fPIC -D_WRAPPERS_
+
+deps += ./libISSMPython.la ../../c/libISSMModules.la ../../c/libISSMCore.la
+
+if !MSYS2
+deps += ./libISSMApi.la
+endif
 
 if ADOLC
@@ -104,87 +128,89 @@
 
 if STANDALONE_LIBRARIES
+if !MSYS2
 libISSMPython_la_LDFLAGS = -static
 libISSMApi_la_LDFLAGS = -static
+endif
 deps += $(DAKOTALIB) $(PETSCLIB) $(MUMPSLIB) $(SCALAPACKLIB) $(BLASLAPACKLIB) $(PARMETISLIB) $(METISLIB) $(HDF5LIB) $(TAOLIB) $(NEOPZLIB) $(M1QN3LIB) $(SEMICLIB) $(PLAPACKLIB) $(SUPERLULIB) $(SPOOLESLIB) $(TRIANGLELIB) $(BLACSLIB) $(HYPRELIB) $(SPAILIB) $(PROMETHEUSLIB) $(PASTIXLIB) $(MLLIB) $(CHACOLIB) $(SCOTCHLIB) $(MKLLIB) $(MPILIB) $(MATHLIB) $(GRAPHICSLIB) $(MULTITHREADINGLIB) $(GSLLIB) $(ADOLCLIB) $(AMPILIB) $(METEOIOLIB) $(SNOWPACKLIB) $(OSLIBS)
 endif
 
-libISSMApi_la_LIBADD = $(PETSCLIB) $(HDF5LIB) $(BLASLAPACKLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB) $(MATHLIB)
+libISSMApi_la_LIBADD = $(PETSCLIB) $(MUMPSLIB) $(SCALAPACKLIB) $(BLASLAPACKLIB) $(PARMETISLIB) $(METISLIB) $(HDF5LIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB) $(PROJLIB) $(MATHLIB)
 
 BamgConvertMesh_python_la_SOURCES = ../BamgConvertMesh/BamgConvertMesh.cpp
 BamgConvertMesh_python_la_CXXFLAGS = ${AM_CXXFLAGS}
-BamgConvertMesh_python_la_LIBADD = ${deps} $(PETSCLIB) $(HDF5LIB) $(BLASLAPACKLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB)
+BamgConvertMesh_python_la_LIBADD = ${deps} $(PETSCLIB) $(HDF5LIB) $(SCALAPACKLIB) $(BLASLAPACKLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB)
 
 BamgMesher_python_la_SOURCES = ../BamgMesher/BamgMesher.cpp
 BamgMesher_python_la_CXXFLAGS = ${AM_CXXFLAGS}
-BamgMesher_python_la_LIBADD = ${deps} $(PETSCLIB) $(HDF5LIB) $(BLASLAPACKLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB)
+BamgMesher_python_la_LIBADD = ${deps} $(PETSCLIB) $(HDF5LIB) $(SCALAPACKLIB) $(BLASLAPACKLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB)
 
 BamgTriangulate_python_la_SOURCES = ../BamgTriangulate/BamgTriangulate.cpp
 BamgTriangulate_python_la_CXXFLAGS = ${AM_CXXFLAGS}
-BamgTriangulate_python_la_LIBADD = ${deps} $(PETSCLIB) $(HDF5LIB) $(BLASLAPACKLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB)
+BamgTriangulate_python_la_LIBADD = ${deps} $(PETSCLIB) $(HDF5LIB) $(SCALAPACKLIB) $(BLASLAPACKLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB)
 
 if CHACO
 Chaco_python_la_SOURCES = ../Chaco/Chaco.cpp
 Chaco_python_la_CXXFLAGS = ${AM_CXXFLAGS}
-Chaco_python_la_LIBADD = ${deps} $(CHACOLIB) $(PETSCLIB) $(HDF5LIB) $(BLASLAPACKLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB)
+Chaco_python_la_LIBADD = ${deps} $(CHACOLIB) $(PETSCLIB) $(HDF5LIB) $(SCALAPACKLIB) $(BLASLAPACKLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB)
 endif
 
 ContourToMesh_python_la_SOURCES = ../ContourToMesh/ContourToMesh.cpp
 ContourToMesh_python_la_CXXFLAGS = ${AM_CXXFLAGS}
-ContourToMesh_python_la_LIBADD = ${deps} $(PETSCLIB) $(HDF5LIB) $(BLASLAPACKLIB) $(MPILIB) $(MULTITHREADINGLIB) $(NEOPZLIB) $(GSLLIB)
+ContourToMesh_python_la_LIBADD = ${deps} $(PETSCLIB) $(HDF5LIB) $(SCALAPACKLIB) $(BLASLAPACKLIB) $(MPILIB) $(MULTITHREADINGLIB) $(NEOPZLIB) $(GSLLIB)
 
 ContourToNodes_python_la_SOURCES = ../ContourToNodes/ContourToNodes.cpp
 ContourToNodes_python_la_CXXFLAGS = ${AM_CXXFLAGS}
-ContourToNodes_python_la_LIBADD = ${deps} $(PETSCLIB) $(HDF5LIB) $(BLASLAPACKLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB)
+ContourToNodes_python_la_LIBADD = ${deps} $(PETSCLIB) $(HDF5LIB) $(SCALAPACKLIB) $(BLASLAPACKLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB)
 
 ElementConnectivity_python_la_SOURCES = ../ElementConnectivity/ElementConnectivity.cpp
 ElementConnectivity_python_la_CXXFLAGS = ${AM_CXXFLAGS}
-ElementConnectivity_python_la_LIBADD = ${deps} $(PETSCLIB) $(HDF5LIB) $(BLASLAPACKLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB)
+ElementConnectivity_python_la_LIBADD = ${deps} $(PETSCLIB) $(HDF5LIB) $(SCALAPACKLIB) $(BLASLAPACKLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB)
 
 ExpToLevelSet_python_la_SOURCES = ../ExpToLevelSet/ExpToLevelSet.cpp
 ExpToLevelSet_python_la_CXXFLAGS = ${AM_CXXFLAGS}
-ExpToLevelSet_python_la_LIBADD = ${deps} $(PETSCLIB) $(HDF5LIB) $(BLASLAPACKLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB) $(NEOPZLIB)
+ExpToLevelSet_python_la_LIBADD = ${deps} $(PETSCLIB) $(HDF5LIB) $(SCALAPACKLIB) $(BLASLAPACKLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB) $(NEOPZLIB)
 
 InterpFromMesh2d_python_la_SOURCES = ../InterpFromMesh2d/InterpFromMesh2d.cpp
 InterpFromMesh2d_python_la_CXXFLAGS = ${AM_CXXFLAGS}
-InterpFromMesh2d_python_la_LIBADD = ${deps} $(PETSCLIB) $(HDF5LIB) $(BLASLAPACKLIB) $(MPILIB) $(MULTITHREADINGLIB) $(NEOPZLIB) $(GSLLIB)
+InterpFromMesh2d_python_la_LIBADD = ${deps} $(PETSCLIB) $(HDF5LIB) $(SCALAPACKLIB) $(BLASLAPACKLIB) $(MPILIB) $(MULTITHREADINGLIB) $(NEOPZLIB) $(GSLLIB)
 
 InterpFromGridToMesh_python_la_SOURCES = ../InterpFromGridToMesh/InterpFromGridToMesh.cpp
 InterpFromGridToMesh_python_la_CXXFLAGS = ${AM_CXXFLAGS}
-InterpFromGridToMesh_python_la_LIBADD = ${deps} $(PETSCLIB) $(HDF5LIB) $(BLASLAPACKLIB) $(MPILIB) $(MULTITHREADINGLIB) $(NEOPZLIB) $(GSLLIB)
+InterpFromGridToMesh_python_la_LIBADD = ${deps} $(PETSCLIB) $(HDF5LIB) $(SCALAPACKLIB) $(BLASLAPACKLIB) $(MPILIB) $(MULTITHREADINGLIB) $(NEOPZLIB) $(GSLLIB)
 
 InterpFromMeshToGrid_python_la_SOURCES = ../InterpFromMeshToGrid/InterpFromMeshToGrid.cpp
 InterpFromMeshToGrid_python_la_CXXFLAGS = ${AM_CXXFLAGS}
-InterpFromMeshToGrid_python_la_LIBADD = ${deps} $(PETSCLIB) $(HDF5LIB) $(BLASLAPACKLIB) $(MPILIB) $(MULTITHREADINGLIB) $(NEOPZLIB) $(GSLLIB)
+InterpFromMeshToGrid_python_la_LIBADD = ${deps} $(PETSCLIB) $(HDF5LIB) $(SCALAPACKLIB) $(BLASLAPACKLIB) $(MPILIB) $(MULTITHREADINGLIB) $(NEOPZLIB) $(GSLLIB)
 
 InterpFromMeshToMesh2d_python_la_SOURCES = ../InterpFromMeshToMesh2d/InterpFromMeshToMesh2d.cpp
 InterpFromMeshToMesh2d_python_la_CXXFLAGS = ${AM_CXXFLAGS}
-InterpFromMeshToMesh2d_python_la_LIBADD = ${deps} $(PETSCLIB) $(HDF5LIB) $(BLASLAPACKLIB) $(MPILIB) $(MULTITHREADINGLIB) $(GSLLIB)
+InterpFromMeshToMesh2d_python_la_LIBADD = ${deps} $(PETSCLIB) $(HDF5LIB) $(SCALAPACKLIB) $(BLASLAPACKLIB) $(MPILIB) $(MULTITHREADINGLIB) $(GSLLIB)
 
 InterpFromMeshToMesh3d_python_la_SOURCES = ../InterpFromMeshToMesh3d/InterpFromMeshToMesh3d.cpp
 InterpFromMeshToMesh3d_python_la_CXXFLAGS = ${AM_CXXFLAGS}
-InterpFromMeshToMesh3d_python_la_LIBADD = ${deps} $(PETSCLIB) $(HDF5LIB) $(BLASLAPACKLIB) $(MPILIB) $(MULTITHREADINGLIB) $(GSLLIB)
+InterpFromMeshToMesh3d_python_la_LIBADD = ${deps} $(PETSCLIB) $(HDF5LIB) $(SCALAPACKLIB) $(BLASLAPACKLIB) $(MPILIB) $(MULTITHREADINGLIB) $(GSLLIB)
 
 IssmConfig_python_la_SOURCES = ../IssmConfig/IssmConfig.cpp
 IssmConfig_python_la_CXXFLAGS = ${AM_CXXFLAGS}
-IssmConfig_python_la_LIBADD = ${deps} $(DAKOTALIB) $(PETSCLIB) $(HDF5LIB) $(BLASLAPACKLIB) $(MPILIB) $(NEOPZLIB)
+IssmConfig_python_la_LIBADD = ${deps} $(DAKOTALIB) $(PETSCLIB) $(HDF5LIB) $(SCALAPACKLIB) $(BLASLAPACKLIB) $(MPILIB) $(NEOPZLIB)
 
 MeshPartition_python_la_SOURCES = ../MeshPartition/MeshPartition.cpp
 MeshPartition_python_la_CXXFLAGS = ${AM_CXXFLAGS}
-MeshPartition_python_la_LIBADD = ${deps} $(PETSCLIB) $(HDF5LIB) $(METISLIB) $(BLASLAPACKLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB)
+MeshPartition_python_la_LIBADD = ${deps} $(PETSCLIB) $(HDF5LIB) $(METISLIB) $(SCALAPACKLIB) $(BLASLAPACKLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB)
 
 MeshProfileIntersection_python_la_SOURCES = ../MeshProfileIntersection/MeshProfileIntersection.cpp
 MeshProfileIntersection_python_la_CXXFLAGS = ${AM_CXXFLAGS}
-MeshProfileIntersection_python_la_LIBADD = ${deps} $(PETSCLIB) $(HDF5LIB) $(BLASLAPACKLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB)
+MeshProfileIntersection_python_la_LIBADD = ${deps} $(PETSCLIB) $(HDF5LIB) $(SCALAPACKLIB) $(BLASLAPACKLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB)
 
 NodeConnectivity_python_la_SOURCES = ../NodeConnectivity/NodeConnectivity.cpp
 NodeConnectivity_python_la_CXXFLAGS = ${AM_CXXFLAGS}
-NodeConnectivity_python_la_LIBADD = ${deps} $(PETSCLIB) $(HDF5LIB) $(BLASLAPACKLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB)
+NodeConnectivity_python_la_LIBADD = ${deps} $(PETSCLIB) $(HDF5LIB) $(SCALAPACKLIB) $(BLASLAPACKLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB)
 
 Triangle_python_la_SOURCES = ../Triangle/Triangle.cpp
 Triangle_python_la_CXXFLAGS = ${AM_CXXFLAGS}
-Triangle_python_la_LIBADD = ${deps} $(TRIANGLELIB) $(PETSCLIB) $(HDF5LIB) $(BLASLAPACKLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB)
+Triangle_python_la_LIBADD = ${deps} $(TRIANGLELIB) $(PETSCLIB) $(HDF5LIB) $(SCALAPACKLIB) $(BLASLAPACKLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB)
 
 ProcessRifts_python_la_SOURCES = ../ProcessRifts/ProcessRifts.cpp
 ProcessRifts_python_la_CXXFLAGS = ${AM_CXXFLAGS}
-ProcessRifts_python_la_LIBADD = ${deps} $(PETSCLIB) $(HDF5LIB) $(BLASLAPACKLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB)
-#}}}
+ProcessRifts_python_la_LIBADD = ${deps} $(PETSCLIB) $(HDF5LIB) $(SCALAPACKLIB) $(BLASLAPACKLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB)
+#}}}
Index: /issm/trunk/src/wrappers/python/io/FetchPythonData.cpp
===================================================================
--- /issm/trunk/src/wrappers/python/io/FetchPythonData.cpp	(revision 28012)
+++ /issm/trunk/src/wrappers/python/io/FetchPythonData.cpp	(revision 28013)
@@ -1,4 +1,4 @@
-/*\file FetchData.cpp:
- * \brief: general I/O interface to fetch data in python
+/*\file FetchPythonData.cpp:
+ *\brief: general I/O interface to fetch data in python
  */
 
@@ -189,4 +189,5 @@
 	long* lmatrix=NULL;
 	bool* bmatrix=NULL;
+	int* imatrix=NULL;
 	float* smatrix=NULL;
 	int i;
@@ -212,5 +213,4 @@
 				py_matrix=py_matrix2;
 			}
-
 			if (PyArray_TYPE((PyArrayObject*)py_matrix) == NPY_FLOAT) {
 				/*retrieve internal value: */
@@ -247,4 +247,12 @@
 				matrix=xNew<double>(M*N);
 				for(i=0;i<M*N;i++)matrix[i]=(double)bmatrix[i];
+			}
+			else if (PyArray_TYPE((PyArrayObject*)py_matrix) == NPY_INT32) {
+				/*retrieve internal value: */
+				imatrix=(int*)PyArray_DATA((PyArrayObject*)py_matrix);
+
+				/*transform into double matrix: */
+				matrix=xNew<double>(M*N);
+				for(i=0;i<M*N;i++)matrix[i]=(double)imatrix[i];
 			}
 
Index: /issm/trunk/src/wrappers/python/io/WritePythonData.cpp
===================================================================
--- /issm/trunk/src/wrappers/python/io/WritePythonData.cpp	(revision 28012)
+++ /issm/trunk/src/wrappers/python/io/WritePythonData.cpp	(revision 28013)
@@ -1,4 +1,4 @@
-/* \file WriteData.c:
- * \brief: general interface for writing data
+/*\file WritePythonData.cpp:
+ *\brief: general I/O interface to write data in matlab
  */
 
Index: /issm/trunk/test/MITgcm/build.sh
===================================================================
--- /issm/trunk/test/MITgcm/build.sh	(revision 28012)
+++ /issm/trunk/test/MITgcm/build.sh	(revision 28013)
@@ -32,4 +32,8 @@
 			export LD_LIBRARY_PATH="$ISSM_DIR/externalpackages/petsc/install/lib:/dartfs-hpc/admin/opt/el7/intel/compilers_and_libraries_2019.3.199/linux/compiler/lib/intel64:$ISSM_DIR/externalpackages/triangle/install/lib"
 			;;
+		"amundsen")
+			export LDADD="-L$ISSM_DIR/externalpackages/petsc/install/lib -lmpi -lmpifort"
+			$modelpath/../MITgcm/install/tools/genmake2 -mpi -mo $modelpath/../MITgcm/code -rd $modelpath/../MITgcm/install
+			;;
 		*)
 			$modelpath/../MITgcm/install/tools/genmake2 -mpi -mo $modelpath/../MITgcm/code -rd $modelpath/../MITgcm/install
@@ -42,3 +46,11 @@
     make depend
 fi
-make -j 4 &> Makefile.log
+
+#run make command
+STR=`uname -v`
+SUB='ARM64'
+if [[ "$STR" == *"$SUB"* ]]; then
+    arch -arm64 make -j &> Makefile.log
+else
+    make -j 4 &> Makefile.log   
+fi
Index: /issm/trunk/test/MITgcm/build_4003.sh
===================================================================
--- /issm/trunk/test/MITgcm/build_4003.sh	(revision 28012)
+++ /issm/trunk/test/MITgcm/build_4003.sh	(revision 28013)
@@ -42,3 +42,11 @@
     make depend
 fi
-make -j 4 &> Makefile.log
+
+#run make command
+STR=`uname -v`
+SUB='ARM64'
+if [[ "$STR" == *"$SUB"* ]]; then
+    arch -arm64 make -j &> Makefile.log
+else
+    make -j 4 &> Makefile.log   
+fi
Index: /issm/trunk/test/MITgcm/build_remesh.sh
===================================================================
--- /issm/trunk/test/MITgcm/build_remesh.sh	(revision 28013)
+++ /issm/trunk/test/MITgcm/build_remesh.sh	(revision 28013)
@@ -0,0 +1,56 @@
+#!/bin/bash
+#This script compiles and links MITgcm
+
+#recover hostname and model path:
+hostname="$1"
+modelpath="$2"
+
+if [ -e ~/.bashrc ]; then
+    source ~/.bashrc
+fi
+
+# Get MITgcm code, if needed
+if [ ! -d "$modelpath/../MITgcm/install" ]; then
+    cd $modelpath/../MITgcm
+    source install.sh
+    cd $modelpath
+fi
+
+# Create build directory, if needed
+cd $modelpath
+if [ ! -d "build" ]; then mkdir build; fi
+cd build
+
+#create MITgcm makefile for this run, if needed
+if [ ! -f Makefile ]; then
+	case $hostname in
+		"pleiades")
+			$modelpath/../MITgcm/install/tools/genmake2 -of $SLR_DIR/models/ice-ocean/configs/linux_amd64_gfortran+mpi_ice_nas -mo ../code_remesh -rd $modelpath/../MITgcm/install
+			;;
+		"babylon")
+			$modelpath/../MITgcm/install/tools/genmake2 -of $modelpath/../MITgcm/install/tools/build_options/linux_amd64_ifort -mpi -mo $modelpath/../MITgcm/code_remesh -rd $modelpath/../MITgcm/install
+			export LD_LIBRARY_PATH="$ISSM_DIR/externalpackages/petsc/install/lib:/dartfs-hpc/admin/opt/el7/intel/compilers_and_libraries_2019.3.199/linux/compiler/lib/intel64:$ISSM_DIR/externalpackages/triangle/install/lib"
+			;;
+		"amundsen")
+			export LDADD="-L$ISSM_DIR/externalpackages/petsc/install/lib -lmpi -lmpifort"
+			$modelpath/../MITgcm/install/tools/genmake2 -mpi -mo $modelpath/../MITgcm/code_remesh -rd $modelpath/../MITgcm/install
+			;;
+		*)
+			$modelpath/../MITgcm/install/tools/genmake2 -mpi -mo $modelpath/../MITgcm/code_remesh -rd $modelpath/../MITgcm/install
+			;;
+	esac
+fi
+
+#create MITgcm code links for this run, if needed
+if [ ! -f BUILD_INFO.h ]; then
+    make depend
+fi
+
+#run make command
+STR=`uname -v`
+SUB='ARM64'
+if [[ "$STR" == *"$SUB"* ]]; then
+    arch -arm64 make -j &> Makefile.log
+else
+    make -j 4 &> Makefile.log   
+fi
Index: /issm/trunk/test/MITgcm/code_remesh/CPP_EEOPTIONS.h
===================================================================
--- /issm/trunk/test/MITgcm/code_remesh/CPP_EEOPTIONS.h	(revision 28013)
+++ /issm/trunk/test/MITgcm/code_remesh/CPP_EEOPTIONS.h	(revision 28013)
@@ -0,0 +1,161 @@
+CBOP
+C     !ROUTINE: CPP_EEOPTIONS.h
+C     !INTERFACE:
+C     include "CPP_EEOPTIONS.h"
+C
+C     !DESCRIPTION:
+C     *==========================================================*
+C     | CPP\_EEOPTIONS.h                                         |
+C     *==========================================================*
+C     | C preprocessor "execution environment" supporting        |
+C     | flags. Use this file to set flags controlling the        |
+C     | execution environment in which a model runs - as opposed |
+C     | to the dynamical problem the model solves.               |
+C     | Note: Many options are implemented with both compile time|
+C     |       and run-time switches. This allows options to be   |
+C     |       removed altogether, made optional at run-time or   |
+C     |       to be permanently enabled. This convention helps   |
+C     |       with the data-dependence analysis performed by the |
+C     |       adjoint model compiler. This data dependency       |
+C     |       analysis can be upset by runtime switches that it  |
+C     |       is unable to recoginise as being fixed for the     |
+C     |       duration of an integration.                        |
+C     |       A reasonable way to use these flags is to          |
+C     |       set all options as selectable at runtime but then  |
+C     |       once an experimental configuration has been        |
+C     |       identified, rebuild the code with the appropriate  |
+C     |       options set at compile time.                       |
+C     *==========================================================*
+CEOP
+
+#ifndef _CPP_EEOPTIONS_H_
+#define _CPP_EEOPTIONS_H_
+
+C     In general the following convention applies:
+C     ALLOW  - indicates an feature will be included but it may
+C     CAN      have a run-time flag to allow it to be switched
+C              on and off.
+C              If ALLOW or CAN directives are "undef'd" this generally
+C              means that the feature will not be available i.e. it
+C              will not be included in the compiled code and so no
+C              run-time option to use the feature will be available.
+C
+C     ALWAYS - indicates the choice will be fixed at compile time
+C              so no run-time option will be present
+
+C=== Macro related options ===
+C--   Control storage of floating point operands
+C     On many systems it improves performance only to use
+C     8-byte precision for time stepped variables.
+C     Constant in time terms ( geometric factors etc.. )
+C     can use 4-byte precision, reducing memory utilisation and
+C     boosting performance because of a smaller working set size.
+C     However, on vector CRAY systems this degrades performance.
+C     Enable to switch REAL4_IS_SLOW from genmake2 (with LET_RS_BE_REAL4):
+#ifdef LET_RS_BE_REAL4
+#undef REAL4_IS_SLOW
+#else /* LET_RS_BE_REAL4 */
+#define REAL4_IS_SLOW
+#endif /* LET_RS_BE_REAL4 */
+
+C--   Control use of "double" precision constants.
+C     Use D0 where it means REAL*8 but not where it means REAL*16
+#define D0 d0
+
+C=== IO related options ===
+C--   Flag used to indicate whether Fortran formatted write
+C     and read are threadsafe. On SGI the routines can be thread
+C     safe, on Sun it is not possible - if you are unsure then
+C     undef this option.
+#undef FMTFTN_IO_THREAD_SAFE
+
+C--   Flag used to indicate whether Binary write to Local file (i.e.,
+C     a different file for each tile) and read are thread-safe.
+#undef LOCBIN_IO_THREAD_SAFE
+
+C--   Flag to turn off the writing of error message to ioUnit zero
+#undef DISABLE_WRITE_TO_UNIT_ZERO
+
+C--   Alternative formulation of BYTESWAP, faster than
+C     compiler flag -byteswapio on the Altix.
+#undef FAST_BYTESWAP
+
+C--   Flag to turn on old default of opening scratch files with the
+C     STATUS='SCRATCH' option. This method, while perfectly FORTRAN-standard,
+C     caused filename conflicts on some multi-node/multi-processor platforms
+C     in the past and has been replace by something (hopefully) more robust.
+#undef USE_FORTRAN_SCRATCH_FILES
+
+C--   Flag defined for eeboot_minimal.F, eeset_parms.F and open_copy_data_file.F
+C     to write STDOUT, STDERR and scratch files from process 0 only.
+C WARNING: to use only when absolutely confident that the setup is working
+C     since any message (error/warning/print) from any proc <> 0 will be lost.
+#undef SINGLE_DISK_IO
+
+C=== MPI, EXCH and GLOBAL_SUM related options ===
+C--   Flag turns off MPI_SEND ready_to_receive polling in the
+C     gather_* subroutines to speed up integrations.
+#undef DISABLE_MPI_READY_TO_RECEIVE
+
+C--   Control MPI based parallel processing
+CXXX We no longer select the use of MPI via this file (CPP_EEOPTIONS.h)
+CXXX To use MPI, use an appropriate genmake2 options file or use
+CXXX genmake2 -mpi .
+CXXX #undef  ALLOW_USE_MPI
+
+C--   Control use of communication that might overlap computation.
+C     Under MPI selects/deselects "non-blocking" sends and receives.
+#define ALLOW_ASYNC_COMMUNICATION
+#undef  ALLOW_ASYNC_COMMUNICATION
+#undef  ALWAYS_USE_ASYNC_COMMUNICATION
+C--   Control use of communication that is atomic to computation.
+C     Under MPI selects/deselects "blocking" sends and receives.
+#define ALLOW_SYNC_COMMUNICATION
+#undef  ALWAYS_USE_SYNC_COMMUNICATION
+
+C--   Control XY periodicity in processor to grid mappings
+C     Note: Model code does not need to know whether a domain is
+C           periodic because it has overlap regions for every box.
+C           Model assume that these values have been
+C           filled in some way.
+#undef  ALWAYS_PREVENT_X_PERIODICITY
+#undef  ALWAYS_PREVENT_Y_PERIODICITY
+#define CAN_PREVENT_X_PERIODICITY
+#define CAN_PREVENT_Y_PERIODICITY
+
+C--   disconnect tiles (no exchange between tiles, just fill-in edges
+C     assuming locally periodic subdomain)
+#undef DISCONNECTED_TILES
+
+C--   Always cumulate tile local-sum in the same order by applying MPI allreduce
+C     to array of tiles ; can get slower with large number of tiles (big set-up)
+#define GLOBAL_SUM_ORDER_TILES
+
+C--   Alternative way of doing global sum without MPI allreduce call
+C     but instead, explicit MPI send & recv calls. Expected to be slower.
+#undef GLOBAL_SUM_SEND_RECV
+
+C--   Alternative way of doing global sum on a single CPU
+C     to eliminate tiling-dependent roundoff errors. Note: This is slow.
+#undef  CG2D_SINGLECPU_SUM
+
+C=== Other options (to add/remove pieces of code) ===
+C--   Flag to turn on checking for errors from all threads and procs
+C     (calling S/R STOP_IF_ERROR) before stopping.
+#define USE_ERROR_STOP
+
+C--   Control use of communication with other component:
+C     allow to import and export from/to Coupler interface.
+#undef COMPONENT_MODULE
+
+C--   Options used to couple MITgcm and ISSM
+C     Eventually this option can probably be merged with COMPONENT_MODULE
+#define ALLOW_CPL_ISSM
+
+C--   Activate some pieces of code for coupling to GEOS AGCM
+#undef HACK_FOR_GMAO_CPL
+
+#endif /* _CPP_EEOPTIONS_H_ */
+
+#include "CPP_EEMACROS.h"
+
Index: /issm/trunk/test/MITgcm/code_remesh/CPP_OPTIONS.h
===================================================================
--- /issm/trunk/test/MITgcm/code_remesh/CPP_OPTIONS.h	(revision 28013)
+++ /issm/trunk/test/MITgcm/code_remesh/CPP_OPTIONS.h	(revision 28013)
@@ -0,0 +1,141 @@
+#ifndef CPP_OPTIONS_H
+#define CPP_OPTIONS_H
+
+CBOP
+C !ROUTINE: CPP_OPTIONS.h
+C !INTERFACE:
+C #include "CPP_OPTIONS.h"
+
+C !DESCRIPTION:
+C *==================================================================*
+C | main CPP options file for the model:
+C | Control which optional features to compile in model/src code.
+C *==================================================================*
+CEOP
+
+C CPP flags controlling particular source code features
+
+C-- Forcing code options:
+
+C o Shortwave heating as extra term in external_forcing.F
+C Note: this should be a run-time option
+#define SHORTWAVE_HEATING
+
+C o Include/exclude Geothermal Heat Flux at the bottom of the ocean
+#undef ALLOW_GEOTHERMAL_FLUX
+
+C o Allow to account for heating due to friction (and momentum dissipation)
+#undef ALLOW_FRICTION_HEATING
+
+C o Allow mass source or sink of Fluid in the interior
+C   (3-D generalisation of oceanic real-fresh water flux)
+#undef ALLOW_ADDFLUID
+
+C o Include pressure loading code
+#define ATMOSPHERIC_LOADING
+
+C o Include/exclude balancing surface forcing fluxes code
+#undef ALLOW_BALANCE_FLUXES
+
+C o Include/exclude balancing surface forcing relaxation code
+#undef ALLOW_BALANCE_RELAX
+
+C o Include/exclude checking for negative salinity
+#undef CHECK_SALINITY_FOR_NEGATIVE_VALUES
+
+C-- Options to discard parts of the main code:
+
+C o Exclude/allow external forcing-fields load
+C   this allows to read & do simple linear time interpolation of oceanic
+C   forcing fields, if no specific pkg (e.g., EXF) is used to compute them.
+#undef EXCLUDE_FFIELDS_LOAD
+
+C o Include/exclude phi_hyd calculation code
+#define INCLUDE_PHIHYD_CALCULATION_CODE
+
+C-- Vertical mixing code options:
+
+C o Include/exclude call to S/R CONVECT
+#define INCLUDE_CONVECT_CALL
+
+C o Include/exclude call to S/R CALC_DIFFUSIVITY
+#define INCLUDE_CALC_DIFFUSIVITY_CALL
+
+C o Allow full 3D specification of vertical diffusivity
+#undef ALLOW_3D_DIFFKR
+
+C o Allow latitudinally varying BryanLewis79 vertical diffusivity
+#undef ALLOW_BL79_LAT_VARY
+
+C o Exclude/allow partial-cell effect (physical or enhanced) in vertical mixing
+C   this allows to account for partial-cell in vertical viscosity and diffusion,
+C   either from grid-spacing reduction effect or as artificially enhanced mixing
+C   near surface & bottom for too thin grid-cell
+#undef EXCLUDE_PCELL_MIX_CODE
+
+C-- Time-stepping code options:
+
+C o Include/exclude combined Surf.Pressure and Drag Implicit solver code
+#define ALLOW_SOLVE4_PS_AND_DRAG
+
+C o Include/exclude Implicit vertical advection code
+#define INCLUDE_IMPLVERTADV_CODE
+
+C o Include/exclude AdamsBashforth-3rd-Order code
+#undef ALLOW_ADAMSBASHFORTH_3
+
+C-- Model formulation options:
+
+C o Allow/exclude "Exact Convervation" of fluid in Free-Surface formulation
+C   that ensures that d/dt(eta) is exactly equal to - Div.Transport
+#define EXACT_CONSERV
+
+C o Allow the use of Non-Linear Free-Surface formulation
+C   this implies that grid-cell thickness (hFactors) varies with time
+#define NONLIN_FRSURF
+
+C o Include/exclude nonHydrostatic code
+#undef ALLOW_NONHYDROSTATIC
+
+C o Include/exclude GM-like eddy stress in momentum code
+#undef ALLOW_EDDYPSI
+
+C-- Algorithm options:
+
+C o Use Non Self-Adjoint (NSA) conjugate-gradient solver
+#undef ALLOW_CG2D_NSA
+
+C o Include/exclude code for single reduction Conjugate-Gradient solver
+#define ALLOW_SRCG
+
+C o Choices for implicit solver routines solve_*diagonal.F
+C   The following has low memory footprint, but not suitable for AD
+#define SOLVE_DIAGONAL_LOWMEMORY
+C   The following one suitable for AD but does not vectorize
+#undef SOLVE_DIAGONAL_KINNER
+
+C-- Retired code options:
+
+C o Use LONG.bin, LATG.bin, etc., initialization for ini_curviliear_grid.F
+C   Default is to use "new" grid files (OLD_GRID_IO undef) but OLD_GRID_IO
+C   is still useful with, e.g., single-domain curvilinear configurations.
+#undef OLD_GRID_IO
+#define ALLOW_SOLVE4_PS_AND_DRAG
+
+
+C-- Other option files:
+
+C o Execution environment support options
+#include "CPP_EEOPTIONS.h"
+
+C o Include/exclude single header file containing multiple packages options
+C   (AUTODIFF, COST, CTRL, ECCO, EXF ...) instead of the standard way where
+C   each of the above pkg get its own options from its specific option file.
+C   Although this method, inherited from ECCO setup, has been traditionally
+C   used for all adjoint built, work is in progress to allow to use the
+C   standard method also for adjoint built.
+c#ifdef PACKAGES_CONFIG_H
+c# include "ECCO_CPPOPTIONS.h"
+c#endif
+
+#endif /* CPP_OPTIONS_H */
Index: /issm/trunk/test/MITgcm/code_remesh/DIAGNOSTICS_SIZE.h
===================================================================
--- /issm/trunk/test/MITgcm/code_remesh/DIAGNOSTICS_SIZE.h	(revision 28013)
+++ /issm/trunk/test/MITgcm/code_remesh/DIAGNOSTICS_SIZE.h	(revision 28013)
@@ -0,0 +1,28 @@
+C     Diagnostics Array Dimension
+C     ---------------------------
+C     ndiagMax   :: maximum total number of available diagnostics
+C     numlists   :: maximum number of diagnostics list (in data.diagnostics)
+C     numperlist :: maximum number of active diagnostics per list (data.diagnostics)
+C     numLevels  :: maximum number of levels to write    (data.diagnostics)
+C     numDiags   :: maximum size of the storage array for active 2D/3D diagnostics
+C     nRegions   :: maximum number of regions (statistics-diagnostics)
+C     sizRegMsk  :: maximum size of the regional-mask (statistics-diagnostics)
+C     nStats     :: maximum number of statistics (e.g.: aver,min,max ...)
+C     diagSt_size:: maximum size of the storage array for statistics-diagnostics
+C Note : may need to increase "numDiags" when using several 2D/3D diagnostics,
+C  and "diagSt_size" (statistics-diags) since values here are deliberately small.
+      INTEGER    ndiagMax
+      INTEGER    numlists, numperlist, numLevels
+      INTEGER    numDiags
+      INTEGER    nRegions, sizRegMsk, nStats
+      INTEGER    diagSt_size
+      PARAMETER( ndiagMax = 500 )
+      PARAMETER( numlists = 10, numperlist = 50, numLevels=2*Nr )
+      PARAMETER( numDiags = 4*Nr )
+      PARAMETER( nRegions = 0 , sizRegMsk = 1 , nStats = 4 )
+      PARAMETER( diagSt_size = 10*Nr )
+
+
+CEH3 ;;; Local Variables: ***
+CEH3 ;;; mode:fortran ***
+CEH3 ;;; End: ***
Index: /issm/trunk/test/MITgcm/code_remesh/SHELFICE_OPTIONS.h
===================================================================
--- /issm/trunk/test/MITgcm/code_remesh/SHELFICE_OPTIONS.h	(revision 28013)
+++ /issm/trunk/test/MITgcm/code_remesh/SHELFICE_OPTIONS.h	(revision 28013)
@@ -0,0 +1,31 @@
+C     *==========================================================*
+C     | SHELFICE_OPTIONS.h
+C     | o CPP options file for SHELFICE package.
+C     *==========================================================*
+C     | Use this file for selecting options within the SHELFICE
+C     | package.
+C     *==========================================================*
+
+#ifndef SHELFICE_OPTIONS_H
+#define SHELFICE_OPTIONS_H
+#include "PACKAGES_CONFIG.h"
+#include "CPP_OPTIONS.h"
+
+#ifdef ALLOW_SHELFICE
+C     Package-specific Options & Macros go here
+
+C     allow code for simple ISOMIP thermodynamics
+#define ALLOW_ISOMIP_TD
+
+C     allow friction velocity-dependent transfer coefficient
+C     following Holland and Jenkins, JPO, 1999
+#define SHI_ALLOW_GAMMAFRICT
+
+C     allow (vertical) remeshing whenever ocean top thickness factor
+C     exceeds thresholds
+#define ALLOW_SHELFICE_REMESHING
+C     and allow to print message to STDOUT when this happens
+#define SHELFICE_REMESH_PRINT
+
+#endif /* ALLOW_SHELFICE */
+#endif /* SHELFICE_OPTIONS_H */
Index: /issm/trunk/test/MITgcm/code_remesh/SIZE.h
===================================================================
--- /issm/trunk/test/MITgcm/code_remesh/SIZE.h	(revision 28013)
+++ /issm/trunk/test/MITgcm/code_remesh/SIZE.h	(revision 28013)
@@ -0,0 +1,64 @@
+CBOP
+C    !ROUTINE: SIZE.h
+C    !INTERFACE:
+C    include SIZE.h
+C    !DESCRIPTION: \bv
+C     *==========================================================*
+C     | SIZE.h Declare size of underlying computational grid.
+C     *==========================================================*
+C     | The design here supports a three-dimensional model grid
+C     | with indices I,J and K. The three-dimensional domain
+C     | is comprised of nPx*nSx blocks (or tiles) of size sNx
+C     | along the first (left-most index) axis, nPy*nSy blocks
+C     | of size sNy along the second axis and one block of size
+C     | Nr along the vertical (third) axis.
+C     | Blocks/tiles have overlap regions of size OLx and OLy
+C     | along the dimensions that are subdivided.
+C     *==========================================================*
+C     \ev
+C
+C     Voodoo numbers controlling data layout:
+C     sNx :: Number of X points in tile.
+C     sNy :: Number of Y points in tile.
+C     OLx :: Tile overlap extent in X.
+C     OLy :: Tile overlap extent in Y.
+C     nSx :: Number of tiles per process in X.
+C     nSy :: Number of tiles per process in Y.
+C     nPx :: Number of processes to use in X.
+C     nPy :: Number of processes to use in Y.
+C     Nx  :: Number of points in X for the full domain.
+C     Ny  :: Number of points in Y for the full domain.
+C     Nr  :: Number of points in vertical direction.
+CEOP
+      INTEGER sNx
+      INTEGER sNy
+      INTEGER OLx
+      INTEGER OLy
+      INTEGER nSx
+      INTEGER nSy
+      INTEGER nPx
+      INTEGER nPy
+      INTEGER Nx
+      INTEGER Ny
+      INTEGER Nr
+      PARAMETER (
+     &           sNx =  20,
+     &           sNy =  20,
+     &           OLx =   3,
+     &           OLy =   3,
+     &           nSx =   1,
+     &           nSy =   1,
+     &           nPx = 1,
+     &           nPy = 2,
+     &           Nx  = sNx*nSx*nPx,
+     &           Ny  = sNy*nSy*nPy,
+     &           Nr  =  30)
+
+C     MAX_OLX :: Set to the maximum overlap region size of any array
+C     MAX_OLY    that will be exchanged. Controls the sizing of exch
+C                routine buffers.
+      INTEGER MAX_OLX
+      INTEGER MAX_OLY
+      PARAMETER ( MAX_OLX = OLx,
+     &            MAX_OLY = OLy )
+
Index: /issm/trunk/test/MITgcm/code_remesh/SIZE.h.bak
===================================================================
--- /issm/trunk/test/MITgcm/code_remesh/SIZE.h.bak	(revision 28013)
+++ /issm/trunk/test/MITgcm/code_remesh/SIZE.h.bak	(revision 28013)
@@ -0,0 +1,64 @@
+CBOP
+C    !ROUTINE: SIZE.h
+C    !INTERFACE:
+C    include SIZE.h
+C    !DESCRIPTION: \bv
+C     *==========================================================*
+C     | SIZE.h Declare size of underlying computational grid.
+C     *==========================================================*
+C     | The design here supports a three-dimensional model grid
+C     | with indices I,J and K. The three-dimensional domain
+C     | is comprised of nPx*nSx blocks (or tiles) of size sNx
+C     | along the first (left-most index) axis, nPy*nSy blocks
+C     | of size sNy along the second axis and one block of size
+C     | Nr along the vertical (third) axis.
+C     | Blocks/tiles have overlap regions of size OLx and OLy
+C     | along the dimensions that are subdivided.
+C     *==========================================================*
+C     \ev
+C
+C     Voodoo numbers controlling data layout:
+C     sNx :: Number of X points in tile.
+C     sNy :: Number of Y points in tile.
+C     OLx :: Tile overlap extent in X.
+C     OLy :: Tile overlap extent in Y.
+C     nSx :: Number of tiles per process in X.
+C     nSy :: Number of tiles per process in Y.
+C     nPx :: Number of processes to use in X.
+C     nPy :: Number of processes to use in Y.
+C     Nx  :: Number of points in X for the full domain.
+C     Ny  :: Number of points in Y for the full domain.
+C     Nr  :: Number of points in vertical direction.
+CEOP
+      INTEGER sNx
+      INTEGER sNy
+      INTEGER OLx
+      INTEGER OLy
+      INTEGER nSx
+      INTEGER nSy
+      INTEGER nPx
+      INTEGER nPy
+      INTEGER Nx
+      INTEGER Ny
+      INTEGER Nr
+      PARAMETER (
+     &           sNx =  20,
+     &           sNy =  20,
+     &           OLx =   3,
+     &           OLy =   3,
+     &           nSx =   1,
+     &           nSy =   1,
+     &           nPx =   1,
+     &           nPy =   2,
+     &           Nx  = sNx*nSx*nPx,
+     &           Ny  = sNy*nSy*nPy,
+     &           Nr  =  30)
+
+C     MAX_OLX :: Set to the maximum overlap region size of any array
+C     MAX_OLY    that will be exchanged. Controls the sizing of exch
+C                routine buffers.
+      INTEGER MAX_OLX
+      INTEGER MAX_OLY
+      PARAMETER ( MAX_OLX = OLx,
+     &            MAX_OLY = OLy )
+
Index: /issm/trunk/test/MITgcm/code_remesh/cpl_issm.F
===================================================================
--- /issm/trunk/test/MITgcm/code_remesh/cpl_issm.F	(revision 28013)
+++ /issm/trunk/test/MITgcm/code_remesh/cpl_issm.F	(revision 28013)
@@ -0,0 +1,220 @@
+#include "PACKAGES_CONFIG.h"
+#include "CPP_OPTIONS.h"
+
+CBOP
+C     !ROUTINE: CPL_ISSM
+C     !INTERFACE:
+      SUBROUTINE CPL_ISSM( myTime, myIter, myThid )
+
+C     !DESCRIPTION: \bv
+C     *==================================================================
+C     | SUBROUTINE cpl_issm
+C     | o Couple MITgcm ocean model with ISSM ice sheet model
+C     *==================================================================
+C     \ev
+
+C     !USES:
+      IMPLICIT NONE
+C     == Global variables ==
+#include "SIZE.h"
+#include "EEPARAMS.h"
+#include "PARAMS.h"
+#include "DYNVARS.h"
+#include "GRID.h"
+#include "FFIELDS.h"
+#include "SHELFICE_OPTIONS.h"
+#include "SHELFICE.h"
+#ifdef ALLOW_EXF
+# include "EXF_OPTIONS.h"
+# include "EXF_FIELDS.h"
+#endif
+
+      LOGICAL  DIFFERENT_MULTIPLE
+      EXTERNAL DIFFERENT_MULTIPLE
+
+C     !LOCAL VARIABLES:
+C     mytime - time counter for this thread (seconds)
+C     myiter - iteration counter for this thread
+C     mythid - thread number for this instance of the routine.
+      _RL     mytime
+      INTEGER myiter, mythid 
+CEOP
+
+#ifdef ALLOW_CPL_ISSM
+#include "EESUPPORT.h"
+      COMMON /CPL_MPI_ID/ mpiMyWid, toissmcomm
+      INTEGER mpiMyWid, toissmcomm, mpiRC
+      INTEGER mpistatus(MPI_STATUS_SIZE)
+      INTEGER i, j, bi, bj, buffsize
+      COMMON /CPL_ISSM_TIME/ CouplingTime
+      _R8 CouplingTime, IceModelTime
+      _R8 xfer_array(Nx,Ny)
+      _R8 local(1:sNx,1:sNy,nSx,nSy)
+      CHARACTER*(MAX_LEN_MBUF) suff
+
+C Initialization steps I1, I2, and I3:
+      IF( myTime .EQ. startTime ) THEN
+
+C   I1. ISSM sends CouplingTime, the interval at which we couple
+         IF( myProcId .EQ. 0 ) THEN
+            _BEGIN_MASTER( myThid )
+            call MPI_Recv(CouplingTime,1,MPI_DOUBLE,0,10001000,
+     &           toissmcomm,mpistatus,mpiRC)
+            _END_MASTER( myThid )
+         ENDIF
+         _BEGIN_MASTER( myThid )
+         CALL MPI_BCAST(CouplingTime,1,MPI_DOUBLE,0,
+     &        MPI_COMM_MODEL,mpiRC)
+         _END_MASTER( myThid )
+C        print*, 'Ocean received CouplingTime: ', CouplingTime
+
+C   I2. MITgcm sends grid size (NX and NY)
+         IF( myProcId .EQ. 0 ) THEN
+            _BEGIN_MASTER( myThid )
+            call MPI_Send(Nx,1,MPI_INT,0,10001003,
+     &           toissmcomm,mpistatus)
+            call MPI_Send(Ny,1,MPI_INT,0,10001004,
+     &           toissmcomm,mpistatus)
+            _END_MASTER( myThid )
+         ENDIF
+
+C   I3. MITgcm sends grid coordinates of center of cells
+C       (longitude -180 <= XC < 180 and latitude YC)
+C     Send longitude East of center of cell
+         DO bj=1,nSy
+            DO bi=1,nSx
+               DO j=1,sNy
+                  DO i=1,sNx
+                     local(i,j,bi,bj) = xC(i,j,bi,bj)
+                  ENDDO
+               ENDDO
+            ENDDO
+         ENDDO
+         CALL BAR2( myThid ) 
+         CALL GATHER_2D_R8( xfer_array, local, Nx, Ny,
+     &        .FALSE., .FALSE., myThid )
+         IF( myProcId .EQ. 0 ) THEN
+            _BEGIN_MASTER( myThid )
+            buffsize = Nx*Ny
+            CALL MPI_SEND(xfer_array,buffsize,MPI_DOUBLE_PRECISION,
+     &           0,10001005,toissmcomm,mpistatus)
+            _END_MASTER( myThid )
+         ENDIF
+         CALL BAR2( myThid )
+C     Send latitude North of center of cell
+         DO bj=1,nSy
+            DO bi=1,nSx
+               DO j=1,sNy
+                  DO i=1,sNx
+                     local(i,j,bi,bj) = yC(i,j,bi,bj)
+                  ENDDO
+               ENDDO
+            ENDDO
+         ENDDO
+         CALL BAR2( myThid ) 
+         CALL GATHER_2D_R8( xfer_array, local, Nx, Ny,
+     &        .FALSE., .FALSE., myThid )
+         IF( myProcId .EQ. 0 ) THEN
+            _BEGIN_MASTER( myThid )
+            buffsize = Nx*Ny
+            CALL MPI_SEND(xfer_array,buffsize,MPI_DOUBLE_PRECISION,
+     &           0,10001006,toissmcomm,mpistatus)
+            _END_MASTER( myThid )
+         ENDIF
+         CALL BAR2( myThid )
+
+      ENDIF
+C End initialization steps I1, I2, and I3.
+
+C Recurring steps C1 and C2:
+      IF( MOD(myTime,CouplingTime) .LT. deltaT/2. ) THEN
+
+C   C1. ISSM sends ice model time IceTimeTag
+         IF( myProcId .EQ. 0 ) THEN
+            _BEGIN_MASTER( myThid )
+            call MPI_Recv(IceModelTime,1,MPI_DOUBLE,0,10001001,
+     &           toissmcomm,mpistatus,mpiRC)
+C           print*, 'Ocean received IceModelTime: ', IceModelTime
+            _END_MASTER( myThid )
+         ENDIF
+
+C   C2. MITgcm sends ocean model time OceanTimeTag
+         IF( myProcId .EQ. 0 ) THEN
+            _BEGIN_MASTER( myThid )
+            call MPI_Send(myTime,1,MPI_DOUBLE,0,10001002,
+     &           toissmcomm,mpistatus)
+            _END_MASTER( myThid )
+         ENDIF
+
+      ENDIF
+C End recurring steps C1 and C2.
+
+C Recurring step C3 except during Initialization:
+C  C3. MITgcm sends
+C      (N-1)*CouplingTime <= OceanModelTime < N*CouplingTime
+C      time-mean melt rate to ISSM
+      IF( myTime .NE. startTime .AND.
+     &     MOD(myTime,CouplingTime) .LT. deltaT/2. ) THEN
+         DO bj=1,nSy
+            DO bi=1,nSx
+               DO j=1,sNy
+                  DO i=1,sNx
+                     local(i,j,bi,bj)=shelficeFreshWaterFlux(i,j,bi,bj)
+                  ENDDO
+               ENDDO
+            ENDDO
+         ENDDO
+         CALL BAR2( myThid ) 
+         CALL GATHER_2D_R8( xfer_array, local, Nx, Ny,
+     &        .FALSE., .FALSE., myThid )
+         IF( myProcId .EQ. 0 ) THEN
+            _BEGIN_MASTER( myThid )
+            buffsize = Nx*Ny
+            CALL MPI_SEND(xfer_array,buffsize,MPI_DOUBLE_PRECISION,
+     &           0,10001007,toissmcomm,mpistatus)
+            _END_MASTER( myThid )
+         ENDIF
+         CALL BAR2( myThid )
+C        print*,'Done Sending shelficeFreshWaterFlux array.'
+         
+      ENDIF
+C End recurring step C3.
+
+C Recurring step C4 except during Termination:
+C  C4. ISSM sends IceModelTime=(N-1)*CouplingTime base to MITgcm
+      IF( myTime .NE. endtime .AND.
+     &     MOD(myTime,CouplingTime) .LT. deltaT/2. ) THEN
+         WRITE(suff,'(I10.10)') myIter
+         CALL WRITE_FLD_XY_RS( 'R_shelfIce1_',suff,R_shelfIce,-1,myThid)
+         IF( myProcId .EQ. 0 ) THEN
+            _BEGIN_MASTER( myThid )         
+            call MPI_Recv(xfer_array,buffsize,MPI_DOUBLE_PRECISION,
+     &           0,10001008,toissmcomm,mpistatus,mpiRC)
+            _END_MASTER( myThid )
+         ENDIF
+         CALL BAR2( myThid ) 
+         CALL SCATTER_2D_R8( xfer_array, local, Nx, Ny,
+     &        .FALSE., .FALSE., myThid )
+         DO bj = myByLo(myThid), myByHi(myThid)
+            DO bi = myBxLo(myThid), myBxHi(myThid)
+               DO j=1,sNy
+                  DO i=1,sNx
+                     IF( local(i,j,bi,bj).LT.9998 ) THEN
+                        R_shelfIce(i,j,bi,bj) = local(i,j,bi,bj)
+                     ELSE
+                        R_shelfIce(i,j,bi,bj) = 0. _d 0
+                     ENDIF
+                  ENDDO
+               ENDDO
+            ENDDO
+         ENDDO
+C- fill in the overlap (+ BARRIER):
+         _EXCH_XY_RS( R_shelfIce, myThid )
+         CALL WRITE_FLD_XY_RS( 'R_shelfIce2_',suff,R_shelfIce,-1,myThid)
+      ENDIF
+C End recurring step C4.
+
+#endif /* ALLOW_CPL_ISSM */
+
+      RETURN
+      END
Index: /issm/trunk/test/MITgcm/code_remesh/do_oceanic_phys.F
===================================================================
--- /issm/trunk/test/MITgcm/code_remesh/do_oceanic_phys.F	(revision 28013)
+++ /issm/trunk/test/MITgcm/code_remesh/do_oceanic_phys.F	(revision 28013)
@@ -0,0 +1,1111 @@
+#include "PACKAGES_CONFIG.h"
+#include "CPP_OPTIONS.h"
+#ifdef ALLOW_MOM_COMMON
+# include "MOM_COMMON_OPTIONS.h"
+#endif
+#ifdef ALLOW_AUTODIFF
+# include "AUTODIFF_OPTIONS.h"
+#endif
+#ifdef ALLOW_CTRL
+# include "CTRL_OPTIONS.h"
+#endif
+#ifdef ALLOW_SALT_PLUME
+# include "SALT_PLUME_OPTIONS.h"
+#endif
+#ifdef ALLOW_ECCO
+# include "ECCO_OPTIONS.h"
+#endif
+
+#ifdef ALLOW_AUTODIFF
+# ifdef ALLOW_GGL90
+#  include "GGL90_OPTIONS.h"
+# endif
+# ifdef ALLOW_GMREDI
+#  include "GMREDI_OPTIONS.h"
+# endif
+# ifdef ALLOW_KPP
+#  include "KPP_OPTIONS.h"
+# endif
+# ifdef ALLOW_SEAICE
+#  include "SEAICE_OPTIONS.h"
+# endif
+# ifdef ALLOW_EXF
+#  include "EXF_OPTIONS.h"
+# endif
+#endif /* ALLOW_AUTODIFF */
+
+CBOP
+C     !ROUTINE: DO_OCEANIC_PHYS
+C     !INTERFACE:
+      SUBROUTINE DO_OCEANIC_PHYS(myTime, myIter, myThid)
+C     !DESCRIPTION: \bv
+C     *==========================================================*
+C     | SUBROUTINE DO_OCEANIC_PHYS
+C     | o Controlling routine for oceanic physics and
+C     |   parameterization
+C     *==========================================================*
+C     | o originally, part of S/R thermodynamics
+C     *==========================================================*
+C     \ev
+
+C     !CALLING SEQUENCE:
+C     DO_OCEANIC_PHYS
+C       |
+C       |-- OBCS_CALC
+C       |
+C       |-- OCN_APPLY_IMPORT
+C       |
+C       |-- FRAZIL_CALC_RHS
+C       |
+C       |-- THSICE_MAIN
+C       |
+C       |-- SEAICE_FAKE
+C       |-- SEAICE_MODEL
+C       |-- SEAICE_COST_SENSI
+C       |
+C       |-- OCN_EXPORT_DATA
+C       |
+C       |-- SHELFICE_THERMODYNAMICS
+C       |
+C       |-- ICEFRONT_THERMODYNAMICS
+C       |
+C       |-- SALT_PLUME_DO_EXCH
+C       |
+C       |-- FREEZE_SURFACE
+C       |
+C       |-- EXTERNAL_FORCING_SURF
+C       |
+C       |-- OBCS_ADJUST
+C       |
+C       |- k loop (Nr:1):
+C       | - DWNSLP_CALC_RHO
+C       | - BBL_CALC_RHO
+C       | - FIND_RHO_2D @ p(k)
+C       | - FIND_RHO_2D @ p(k-1)
+C       | - GRAD_SIGMA
+C       | - CALC_IVDC
+C       | - DIAGS_RHO_L
+C       |- end k loop.
+C       |
+C       |-- CALC_OCE_MXLAYER
+C       |
+C       |-- SALT_PLUME_CALC_DEPTH
+C       |-- SALT_PLUME_VOLFRAC
+C       |-- SALT_PLUME_APPLY
+C       |-- SALT_PLUME_APPLY
+C       |-- SALT_PLUME_FORCING_SURF
+C       |
+C       |-- KPP_CALC
+C       |-- KPP_CALC_DUMMY
+C       |
+C       |-- PP81_CALC
+C       |
+C       |-- KL10_CALC
+C       |
+C       |-- MY82_CALC
+C       |
+C       |-- GGL90_CALC
+C       |
+C       |-- TIMEAVE_SURF_FLUX
+C       |
+C       |-- GMREDI_CALC_TENSOR
+C       |-- GMREDI_CALC_TENSOR_DUMMY
+C       |
+C       |-- DWNSLP_CALC_FLOW
+C       |-- DWNSLP_CALC_FLOW
+C       |
+C       |-- OFFLINE_GET_DIFFUS
+C       |
+C       |-- BBL_CALC_RHS
+C       |
+C       |-- MYPACKAGE_CALC_RHS
+C       |
+C       |-- GMREDI_DO_EXCH
+C       |
+C       |-- KPP_DO_EXCH
+C       |
+C       |-- GGL90_EXCHANGES
+C       |
+C       |-- DIAGS_RHO_G
+C       |-- DIAGS_OCEANIC_SURF_FLUX
+C       |-- SALT_PLUME_DIAGNOSTICS_FILL
+C       |
+C       |-- ECCO_PHYS
+
+C     !USES:
+      IMPLICIT NONE
+C     == Global variables ===
+#include "SIZE.h"
+#include "EEPARAMS.h"
+#include "PARAMS.h"
+#include "GRID.h"
+#include "DYNVARS.h"
+#ifdef ALLOW_TIMEAVE
+# include "TIMEAVE_STATV.h"
+#endif
+#ifdef ALLOW_OFFLINE
+# include "OFFLINE_SWITCH.h"
+#endif
+
+#ifdef ALLOW_AUTODIFF
+# include "AUTODIFF_MYFIELDS.h"
+# ifdef ALLOW_AUTODIFF_TAMC
+#  include "tamc.h"
+# endif
+# include "FFIELDS.h"
+# include "SURFACE.h"
+# include "EOS.h"
+# ifdef ALLOW_GMREDI
+#  include "GMREDI.h"
+# endif
+# ifdef ALLOW_KPP
+#  include "KPP.h"
+# endif
+# ifdef ALLOW_GGL90
+#  include "GGL90.h"
+# endif
+# ifdef ALLOW_EBM
+#  include "EBM.h"
+# endif
+# ifdef ALLOW_EXF
+#  include "ctrl.h"
+#  include "EXF_FIELDS.h"
+#  ifdef ALLOW_BULKFORMULAE
+#   include "EXF_CONSTANTS.h"
+#  endif
+# endif
+# ifdef ALLOW_SEAICE
+#  include "SEAICE_SIZE.h"
+#  include "SEAICE.h"
+#  include "SEAICE_PARAMS.h"
+# endif
+# ifdef ALLOW_THSICE
+#  include "THSICE_VARS.h"
+# endif
+# ifdef ALLOW_SALT_PLUME
+#  include "SALT_PLUME.h"
+# endif
+# ifdef ALLOW_ECCO
+#  ifdef ALLOW_SIGMAR_COST_CONTRIBUTION
+#   include "ecco_cost.h"
+#  endif
+# endif
+#endif /* ALLOW_AUTODIFF */
+
+C     !INPUT/OUTPUT PARAMETERS:
+C     == Routine arguments ==
+C     myTime :: Current time in simulation
+C     myIter :: Current iteration number in simulation
+C     myThid :: Thread number for this instance of the routine.
+      _RL myTime
+      INTEGER myIter
+      INTEGER myThid
+
+C     !LOCAL VARIABLES:
+C     == Local variables
+C     rhoKp1,rhoKm1 :: Density at current level, and @ level minus one
+C     iMin, iMax    :: Ranges and sub-block indices on which calculations
+C     jMin, jMax       are applied.
+C     bi, bj        :: tile indices
+C     msgBuf        :: Temp. for building output string
+C     i,j,k         :: loop indices
+C     kSrf          :: surface index
+      _RL rhoKp1  (1-OLx:sNx+OLx,1-OLy:sNy+OLy)
+      _RL rhoKm1  (1-OLx:sNx+OLx,1-OLy:sNy+OLy)
+      _RL sigmaX  (1-OLx:sNx+OLx,1-OLy:sNy+OLy,Nr)
+      _RL sigmaY  (1-OLx:sNx+OLx,1-OLy:sNy+OLy,Nr)
+      _RL sigmaR  (1-OLx:sNx+OLx,1-OLy:sNy+OLy,Nr)
+      INTEGER iMin, iMax
+      INTEGER jMin, jMax
+      INTEGER bi, bj
+      INTEGER i, j, k, kSrf
+      CHARACTER*(MAX_LEN_MBUF) msgBuf
+      INTEGER doDiagsRho
+      LOGICAL calcGMRedi, calcKPP, calcConvect
+#ifdef ALLOW_DIAGNOSTICS
+      LOGICAL  DIAGNOSTICS_IS_ON
+      EXTERNAL DIAGNOSTICS_IS_ON
+#endif /* ALLOW_DIAGNOSTICS */
+#ifdef ALLOW_AUTODIFF
+      _RL thetaRef
+#endif /* ALLOW_AUTODIFF */
+#ifdef ALLOW_AUTODIFF_TAMC
+      INTEGER act1, act2, act3, act4
+      INTEGER max1, max2, max3
+      INTEGER kkey, itdkey
+#endif
+CEOP
+
+#ifdef ALLOW_AUTODIFF_TAMC
+C--   dummy statement to end declaration part
+      itdkey = 1
+#endif /* ALLOW_AUTODIFF_TAMC */
+
+      kSrf = 1
+      IF ( usingPCoords ) kSrf = Nr
+
+#ifdef ALLOW_DEBUG
+      IF (debugMode) CALL DEBUG_ENTER('DO_OCEANIC_PHYS',myThid)
+#endif
+
+      doDiagsRho = 0
+#ifdef ALLOW_DIAGNOSTICS
+      IF ( useDiagnostics .AND. fluidIsWater ) THEN
+        IF ( DIAGNOSTICS_IS_ON('MXLDEPTH',myThid) )
+     &       doDiagsRho = doDiagsRho + 1
+        IF ( DIAGNOSTICS_IS_ON('DRHODR  ',myThid) )
+     &       doDiagsRho = doDiagsRho + 2
+        IF ( DIAGNOSTICS_IS_ON('WdRHO_P ',myThid) )
+     &       doDiagsRho = doDiagsRho + 4
+        IF ( DIAGNOSTICS_IS_ON('WdRHOdP ',myThid) )
+     &       doDiagsRho = doDiagsRho + 8
+      ENDIF
+#endif /* ALLOW_DIAGNOSTICS */
+
+      calcGMRedi  = useGMRedi
+      calcKPP     = useKPP
+      calcConvect = ivdc_kappa.NE.0.
+#ifdef ALLOW_OFFLINE
+      IF ( useOffLine ) THEN
+        calcGMRedi = useGMRedi .AND. .NOT.offlineLoadGMRedi
+        calcKPP    = useKPP    .AND. .NOT.offlineLoadKPP
+        calcConvect=calcConvect.AND. .NOT.offlineLoadConvec
+      ENDIF
+#endif /* ALLOW_OFFLINE */
+
+#ifdef  ALLOW_OBCS
+      IF (useOBCS) THEN
+C--   Calculate future values on open boundaries
+C--   moved before SEAICE_MODEL call since SEAICE_MODEL needs seaice-obcs fields
+# ifdef ALLOW_AUTODIFF_TAMC
+CADJ STORE theta = comlev1, key=ikey_dynamics, kind=isbyte
+CADJ STORE salt  = comlev1, key=ikey_dynamics, kind=isbyte
+# endif
+# ifdef ALLOW_DEBUG
+       IF (debugMode) CALL DEBUG_CALL('OBCS_CALC',myThid)
+# endif
+       CALL OBCS_CALC( myTime+deltaTClock, myIter+1,
+     I                 uVel, vVel, wVel, theta, salt, myThid )
+      ENDIF
+#endif  /* ALLOW_OBCS */
+
+#ifdef ALLOW_OCN_COMPON_INTERF
+C--    Apply imported data (from coupled interface) to forcing fields
+C jmc: moved here before any freezing/seaice pkg adjustment of surf-fluxes
+      IF ( useCoupler ) THEN
+         CALL OCN_APPLY_IMPORT( .TRUE., myTime, myIter, myThid )
+      ENDIF
+#endif /* ALLOW_OCN_COMPON_INTERF */
+
+#ifdef ALLOW_AUTODIFF
+      DO bj=myByLo(myThid),myByHi(myThid)
+       DO bi=myBxLo(myThid),myBxHi(myThid)
+        DO j=1-OLy,sNy+OLy
+         DO i=1-OLx,sNx+OLx
+          adjustColdSST_diag(i,j,bi,bj) = 0. _d 0
+# ifdef ALLOW_SALT_PLUME
+          saltPlumeDepth(i,j,bi,bj) = 0. _d 0
+          saltPlumeFlux(i,j,bi,bj)  = 0. _d 0
+# endif
+         ENDDO
+        ENDDO
+       ENDDO
+      ENDDO
+#endif /* ALLOW_AUTODIFF */
+
+#ifdef ALLOW_FRAZIL
+      IF ( useFRAZIL ) THEN
+C--   Freeze water in the ocean interior and let it rise to the surface
+CADJ STORE theta = comlev1, key=ikey_dynamics, kind=isbyte
+CADJ STORE salt  = comlev1, key=ikey_dynamics, kind=isbyte
+       CALL FRAZIL_CALC_RHS( myTime, myIter, myThid )
+      ENDIF
+#endif /* ALLOW_FRAZIL */
+
+#if (defined ALLOW_THSICE) && !(defined ALLOW_ATM2D)
+      IF ( useThSIce .AND. fluidIsWater ) THEN
+# ifdef ALLOW_AUTODIFF_TAMC
+#  ifdef ALLOW_SEAICE
+CADJ STORE uice,vice         = comlev1, key=ikey_dynamics, kind=isbyte
+#  endif
+CADJ STORE iceMask,iceHeight = comlev1, key=ikey_dynamics, kind=isbyte
+CADJ STORE snowHeight, Tsrf  = comlev1, key=ikey_dynamics, kind=isbyte
+CADJ STORE Qice1, Qice2      = comlev1, key=ikey_dynamics, kind=isbyte
+CADJ STORE sHeating,snowAge  = comlev1, key=ikey_dynamics, kind=isbyte
+CADJ STORE hocemxl, icflxsw  = comlev1, key=ikey_dynamics, kind=isbyte
+CADJ STORE salt,theta        = comlev1, key=ikey_dynamics, kind=isbyte
+CADJ STORE uvel,vvel         = comlev1, key=ikey_dynamics, kind=isbyte
+CADJ STORE qnet,qsw, empmr   = comlev1, key=ikey_dynamics, kind=isbyte
+CADJ STORE atemp,aqh,precip  = comlev1, key=ikey_dynamics, kind=isbyte
+CADJ STORE swdown,lwdown     = comlev1, key=ikey_dynamics, kind=isbyte
+#  ifdef NONLIN_FRSURF
+CADJ STORE hFac_surfC        = comlev1, key=ikey_dynamics, kind=isbyte
+#  endif
+# endif /* ALLOW_AUTODIFF_TAMC */
+# ifdef ALLOW_DEBUG
+        IF (debugMode) CALL DEBUG_CALL('THSICE_MAIN',myThid)
+# endif
+C--     Step forward Therm.Sea-Ice variables
+C       and modify forcing terms including effects from ice
+        CALL TIMER_START('THSICE_MAIN     [DO_OCEANIC_PHYS]', myThid)
+        CALL THSICE_MAIN( myTime, myIter, myThid )
+        CALL TIMER_STOP( 'THSICE_MAIN     [DO_OCEANIC_PHYS]', myThid)
+      ENDIF
+#endif /* ALLOW_THSICE */
+
+#ifdef ALLOW_SEAICE
+# ifdef ALLOW_AUTODIFF_TAMC
+CADJ STORE qnet  = comlev1, key=ikey_dynamics, kind=isbyte
+CADJ STORE qsw   = comlev1, key=ikey_dynamics, kind=isbyte
+CADJ STORE theta = comlev1, key=ikey_dynamics, kind=isbyte
+CADJ STORE salt  = comlev1, key=ikey_dynamics, kind=isbyte
+CADJ STORE fu,fv = comlev1, key=ikey_dynamics, kind=isbyte
+#if (defined ALLOW_EXF) && (defined ALLOW_ATM_TEMP)
+CADJ STORE evap  = comlev1, key=ikey_dynamics, kind=isbyte
+#endif
+# endif /* ALLOW_AUTODIFF_TAMC */
+#ifdef ALLOW_AUTODIFF_TAMC
+CADJ STORE phiHydLow= comlev1, key=ikey_dynamics, byte=isbyte
+#endif
+      IF ( useSEAICE ) THEN
+# ifdef ALLOW_AUTODIFF_TAMC
+CADJ STORE uvel,vvel         = comlev1, key=ikey_dynamics, kind=isbyte
+CADJ STORE uice,vice         = comlev1, key=ikey_dynamics, kind=isbyte
+#  ifdef ALLOW_EXF
+CADJ STORE atemp,aqh,precip  = comlev1, key=ikey_dynamics, kind=isbyte
+CADJ STORE swdown,lwdown     = comlev1, key=ikey_dynamics, kind=isbyte
+CADJ STORE uwind,vwind       = comlev1, key=ikey_dynamics, kind=isbyte
+#  endif
+#  ifdef SEAICE_VARIABLE_SALINITY
+CADJ STORE hsalt             = comlev1, key=ikey_dynamics, kind=isbyte
+#  endif
+#  ifdef ATMOSPHERIC_LOADING
+CADJ STORE pload, siceload   = comlev1, key=ikey_dynamics, kind=isbyte
+#  endif
+#  ifdef NONLIN_FRSURF
+CADJ STORE recip_hfacc       = comlev1, key=ikey_dynamics, kind=isbyte
+#  endif
+#  ifdef ANNUAL_BALANCE
+CADJ STORE balance_itcount   = comlev1, key=ikey_dynamics, kind=isbyte
+#  endif /* ANNUAL_BALANCE */
+#  ifdef ALLOW_THSICE
+C-- store thSIce vars before advection (called from SEAICE_MODEL) updates them:
+CADJ STORE iceMask,iceHeight = comlev1, key=ikey_dynamics, kind=isbyte
+CADJ STORE snowHeight,hOceMxL= comlev1, key=ikey_dynamics, kind=isbyte
+CADJ STORE Qice1, Qice2      = comlev1, key=ikey_dynamics, kind=isbyte
+#  endif /* ALLOW_THSICE */
+# endif /* ALLOW_AUTODIFF_TAMC */
+# ifdef ALLOW_DEBUG
+        IF (debugMode) CALL DEBUG_CALL('SEAICE_MODEL',myThid)
+# endif
+        CALL TIMER_START('SEAICE_MODEL    [DO_OCEANIC_PHYS]', myThid)
+        CALL SEAICE_MODEL( myTime, myIter, myThid )
+        CALL TIMER_STOP ('SEAICE_MODEL    [DO_OCEANIC_PHYS]', myThid)
+# ifdef ALLOW_AUTODIFF_TAMC
+CADJ STORE tices = comlev1, key=ikey_dynamics, kind=isbyte
+CADJ STORE heff  = comlev1, key=ikey_dynamics, kind=isbyte
+CADJ STORE hsnow = comlev1, key=ikey_dynamics, kind=isbyte
+CADJ STORE area  = comlev1, key=ikey_dynamics, kind=isbyte
+CADJ STORE uIce  = comlev1, key=ikey_dynamics, kind=isbyte
+CADJ STORE vIce  = comlev1, key=ikey_dynamics, kind=isbyte
+# endif
+# ifdef ALLOW_COST
+        CALL SEAICE_COST_SENSI ( myTime, myIter, myThid )
+# endif
+# ifdef ALLOW_AUTODIFF
+      ELSEIF ( SEAICEadjMODE .EQ. -1 ) THEN
+CADJ STORE area = comlev1, key=ikey_dynamics, kind=isbyte
+        CALL SEAICE_FAKE( myTime, myIter, myThid )
+# endif /* ALLOW_AUTODIFF */
+      ENDIF
+#endif /* ALLOW_SEAICE */
+
+#if (defined ALLOW_OCN_COMPON_INTERF) && (defined ALLOW_THSICE)
+C--   After seaice-dyn and advection of pkg/thsice fields,
+C     Export ocean coupling fields to coupled interface (only with pkg/thsice)
+      IF ( useCoupler ) THEN
+# ifdef ALLOW_DEBUG
+        IF (debugMode) CALL DEBUG_CALL('OCN_EXPORT_DATA',myThid)
+# endif
+         CALL TIMER_START('OCN_EXPORT_DATA [DO_OCEANIC_PHYS]', myThid)
+         CALL OCN_EXPORT_DATA( myTime, myIter, myThid )
+         CALL TIMER_STOP ('OCN_EXPORT_DATA [DO_OCEANIC_PHYS]', myThid)
+      ENDIF
+#endif /* ALLOW_OCN_COMPON_INTERF & ALLOW_THSICE */
+
+#ifdef ALLOW_AUTODIFF_TAMC
+CADJ STORE sst, sss          = comlev1, key=ikey_dynamics, kind=isbyte
+CADJ STORE qsw               = comlev1, key=ikey_dynamics, kind=isbyte
+# ifdef ALLOW_SEAICE
+CADJ STORE area              = comlev1, key=ikey_dynamics, kind=isbyte
+# endif
+#endif
+
+#ifdef ALLOW_CPL_ISSM
+      IF ( useCoupler) CALL CPL_ISSM( myTime, myIter, myThid )
+#endif
+
+#ifdef ALLOW_SHELFICE
+      IF ( useShelfIce .AND. fluidIsWater ) THEN
+#ifdef ALLOW_DEBUG
+       IF (debugMode) CALL DEBUG_CALL('SHELFICE_THERMODYNAMICS',myThid)
+#endif
+#ifdef ALLOW_AUTODIFF_TAMC
+CADJ STORE salt, theta       = comlev1, key=ikey_dynamics, kind=isbyte
+CADJ STORE uvel, vvel        = comlev1, key=ikey_dynamics, kind=isbyte
+#endif
+C     compute temperature and (virtual) salt flux at the
+C     shelf-ice ocean interface
+       CALL TIMER_START('SHELFICE_THERMODYNAMICS [DO_OCEANIC_PHYS]',
+     &       myThid)
+       CALL SHELFICE_THERMODYNAMICS( myTime, myIter, myThid )
+       CALL TIMER_STOP( 'SHELFICE_THERMODYNAMICS [DO_OCEANIC_PHYS]',
+     &      myThid)
+      ENDIF
+#endif /* ALLOW_SHELFICE */
+
+#ifdef ALLOW_ICEFRONT
+      IF ( useICEFRONT .AND. fluidIsWater ) THEN
+#ifdef ALLOW_DEBUG
+       IF (debugMode) CALL DEBUG_CALL('ICEFRONT_THERMODYNAMICS',myThid)
+#endif
+C     compute temperature and (virtual) salt flux at the
+C     ice-front ocean interface
+       CALL TIMER_START('ICEFRONT_THERMODYNAMICS [DO_OCEANIC_PHYS]',
+     &       myThid)
+       CALL ICEFRONT_THERMODYNAMICS( myTime, myIter, myThid )
+       CALL TIMER_STOP( 'ICEFRONT_THERMODYNAMICS [DO_OCEANIC_PHYS]',
+     &      myThid)
+      ENDIF
+#endif /* ALLOW_ICEFRONT */
+
+#ifdef ALLOW_SALT_PLUME
+      IF ( useSALT_PLUME ) THEN
+Catn: exchanging saltPlumeFlux:
+        CALL SALT_PLUME_DO_EXCH( myTime, myIter, myThid )
+      ENDIF
+#endif /* ALLOW_SALT_PLUME */
+
+C--   Freeze water at the surface
+      IF ( allowFreezing ) THEN
+#ifdef ALLOW_AUTODIFF_TAMC
+CADJ STORE theta             = comlev1, key=ikey_dynamics, kind=isbyte
+#endif
+        CALL FREEZE_SURFACE( myTime, myIter, myThid )
+      ENDIF
+
+      iMin = 1-OLx
+      iMax = sNx+OLx
+      jMin = 1-OLy
+      jMax = sNy+OLy
+
+C---  Determines forcing terms based on external fields
+C     relaxation terms, etc.
+#ifdef ALLOW_AUTODIFF
+CADJ STORE salt, theta       = comlev1, key=ikey_dynamics, kind=isbyte
+#else  /* ALLOW_AUTODIFF */
+C--   if fluid is not water, by-pass surfaceForcing, find_rho, gmredi
+C     and all vertical mixing schemes, but keep OBCS_CALC
+      IF ( fluidIsWater ) THEN
+#endif /* ALLOW_AUTODIFF */
+#ifdef ALLOW_DEBUG
+      IF (debugMode) CALL DEBUG_CALL('EXTERNAL_FORCING_SURF',myThid)
+#endif
+        CALL EXTERNAL_FORCING_SURF(
+     I             iMin, iMax, jMin, jMax,
+     I             myTime, myIter, myThid )
+
+#ifdef  ALLOW_OBCS
+      IF (useOBCS) THEN
+C--   After all surface fluxes are known apply balancing fluxes and
+C--   apply tidal forcing to open boundaries
+# ifdef ALLOW_DEBUG
+       IF (debugMode) CALL DEBUG_CALL('OBCS_ADJUST',myThid)
+# endif
+       CALL OBCS_ADJUST(
+     I      myTime+deltaTClock, myIter+1, myThid )
+      ENDIF
+#endif  /* ALLOW_OBCS */
+
+#ifdef ALLOW_AUTODIFF_TAMC
+C--   HPF directive to help TAMC
+CHPF$ INDEPENDENT
+#endif /* ALLOW_AUTODIFF_TAMC */
+      DO bj=myByLo(myThid),myByHi(myThid)
+#ifdef ALLOW_AUTODIFF_TAMC
+C--   HPF directive to help TAMC
+CHPF$ INDEPENDENT
+#endif /* ALLOW_AUTODIFF_TAMC */
+       DO bi=myBxLo(myThid),myBxHi(myThid)
+
+#ifdef ALLOW_AUTODIFF_TAMC
+          act1 = bi - myBxLo(myThid)
+          max1 = myBxHi(myThid) - myBxLo(myThid) + 1
+          act2 = bj - myByLo(myThid)
+          max2 = myByHi(myThid) - myByLo(myThid) + 1
+          act3 = myThid - 1
+          max3 = nTx*nTy
+          act4 = ikey_dynamics - 1
+          itdkey = (act1 + 1) + act2*max1
+     &                      + act3*max1*max2
+     &                      + act4*max1*max2*max3
+#endif /* ALLOW_AUTODIFF_TAMC */
+
+C--   Set up work arrays with valid (i.e. not NaN) values
+C     These inital values do not alter the numerical results. They
+C     just ensure that all memory references are to valid floating
+C     point numbers. This prevents spurious hardware signals due to
+C     uninitialised but inert locations.
+        DO k=1,Nr
+         DO j=1-OLy,sNy+OLy
+          DO i=1-OLx,sNx+OLx
+C This is currently used by GMRedi, IVDC, MXL-depth  and Diagnostics
+           sigmaX(i,j,k) = 0. _d 0
+           sigmaY(i,j,k) = 0. _d 0
+           sigmaR(i,j,k) = 0. _d 0
+#if (defined (ALLOW_SIGMAR_COST_CONTRIBUTION) || defined (ALLOW_LEITH_QG))
+           sigmaRfield(i,j,k,bi,bj) = 0. _d 0
+#endif
+          ENDDO
+         ENDDO
+        ENDDO
+
+        DO j=1-OLy,sNy+OLy
+         DO i=1-OLx,sNx+OLx
+          rhoKm1 (i,j)   = 0. _d 0
+          rhoKp1 (i,j)   = 0. _d 0
+         ENDDO
+        ENDDO
+#ifdef ALLOW_AUTODIFF
+cph all the following init. are necessary for TAF
+cph although some of these are re-initialised later.
+        DO k=1,Nr
+         DO j=1-OLy,sNy+OLy
+          DO i=1-OLx,sNx+OLx
+           rhoInSitu(i,j,k,bi,bj) = 0.
+# ifdef ALLOW_GGL90
+           GGL90viscArU(i,j,k,bi,bj)  = 0. _d 0
+           GGL90viscArV(i,j,k,bi,bj)  = 0. _d 0
+           GGL90diffKr(i,j,k,bi,bj)  = 0. _d 0
+# endif /* ALLOW_GGL90 */
+# ifdef ALLOW_SALT_PLUME
+#  ifdef SALT_PLUME_VOLUME
+           SPforcingS(i,j,k,bi,bj) = 0. _d 0
+           SPforcingT(i,j,k,bi,bj) = 0. _d 0
+#  endif
+# endif /* ALLOW_SALT_PLUME */
+          ENDDO
+         ENDDO
+        ENDDO
+#ifdef ALLOW_OFFLINE
+       IF ( calcConvect ) THEN
+#endif
+        DO k=1,Nr
+         DO j=1-OLy,sNy+OLy
+          DO i=1-OLx,sNx+OLx
+           IVDConvCount(i,j,k,bi,bj) = 0.
+          ENDDO
+         ENDDO
+        ENDDO
+#ifdef ALLOW_OFFLINE
+       ENDIF
+       IF ( calcGMRedi ) THEN
+#endif
+# ifdef ALLOW_GMREDI
+        DO k=1,Nr
+         DO j=1-OLy,sNy+OLy
+          DO i=1-OLx,sNx+OLx
+           Kwx(i,j,k,bi,bj)  = 0. _d 0
+           Kwy(i,j,k,bi,bj)  = 0. _d 0
+           Kwz(i,j,k,bi,bj)  = 0. _d 0
+           Kux(i,j,k,bi,bj)  = 0. _d 0
+           Kvy(i,j,k,bi,bj)  = 0. _d 0
+#  ifdef GM_EXTRA_DIAGONAL
+           Kuz(i,j,k,bi,bj)  = 0. _d 0
+           Kvz(i,j,k,bi,bj)  = 0. _d 0
+#  endif
+#  ifdef GM_BOLUS_ADVEC
+           GM_PsiX(i,j,k,bi,bj)  = 0. _d 0
+           GM_PsiY(i,j,k,bi,bj)  = 0. _d 0
+#  endif
+#  ifdef GM_VISBECK_VARIABLE_K
+           VisbeckK(i,j,bi,bj)   = 0. _d 0
+#  endif
+          ENDDO
+         ENDDO
+        ENDDO
+# endif /* ALLOW_GMREDI */
+#ifdef ALLOW_OFFLINE
+       ENDIF
+       IF ( calcKPP ) THEN
+#endif
+# ifdef ALLOW_KPP
+        DO k=1,Nr
+         DO j=1-OLy,sNy+OLy
+          DO i=1-OLx,sNx+OLx
+           KPPdiffKzS(i,j,k,bi,bj)  = 0. _d 0
+           KPPdiffKzT(i,j,k,bi,bj)  = 0. _d 0
+          ENDDO
+         ENDDO
+        ENDDO
+# endif /* ALLOW_KPP */
+#ifdef ALLOW_OFFLINE
+       ENDIF
+#endif
+#endif /* ALLOW_AUTODIFF */
+
+#ifdef ALLOW_AUTODIFF_TAMC
+CADJ STORE theta(:,:,:,bi,bj)  = comlev1_bibj, key=itdkey, kind=isbyte
+CADJ STORE salt (:,:,:,bi,bj)  = comlev1_bibj, key=itdkey, kind=isbyte
+CADJ STORE totphihyd(:,:,:,bi,bj)
+CADJ &                         = comlev1_bibj, key=itdkey, kind=isbyte
+# ifdef ALLOW_KPP
+CADJ STORE uvel (:,:,:,bi,bj)  = comlev1_bibj, key=itdkey, kind=isbyte
+CADJ STORE vvel (:,:,:,bi,bj)  = comlev1_bibj, key=itdkey, kind=isbyte
+# endif
+# ifdef ALLOW_SALT_PLUME
+CADJ STORE saltplumedepth(:,:,bi,bj)
+CADJ &                         = comlev1_bibj, key=itdkey, kind=isbyte
+CADJ STORE saltplumeflux(:,:,bi,bj)
+CADJ &                         = comlev1_bibj, key=itdkey, kind=isbyte
+# endif
+#endif /* ALLOW_AUTODIFF_TAMC */
+
+C--   Always compute density (stored in common block) here; even when it is not
+C     needed here, will be used anyway in calc_phi_hyd (data flow easier this way)
+#ifdef ALLOW_DEBUG
+        IF (debugMode) CALL DEBUG_CALL('FIND_RHO_2D (xNr)',myThid)
+#endif
+#ifdef ALLOW_AUTODIFF
+        IF ( fluidIsWater ) THEN
+#endif /* ALLOW_AUTODIFF */
+#ifdef ALLOW_DOWN_SLOPE
+         IF ( useDOWN_SLOPE ) THEN
+           DO k=1,Nr
+            CALL DWNSLP_CALC_RHO(
+     I                  theta, salt,
+     O                  rhoInSitu(1-OLx,1-OLy,k,bi,bj),
+     I                  k, bi, bj, myTime, myIter, myThid )
+           ENDDO
+         ENDIF
+#endif /* ALLOW_DOWN_SLOPE */
+#ifdef ALLOW_BBL
+         IF ( useBBL ) THEN
+C     pkg/bbl requires in-situ bbl density for depths equal to and deeper than the bbl.
+C     To reduce computation and storage requirement, these densities are stored in the
+C     dry grid boxes of rhoInSitu.  See BBL_CALC_RHO for details.
+           DO k=Nr,1,-1
+            CALL BBL_CALC_RHO(
+     I                  theta, salt,
+     O                  rhoInSitu,
+     I                  k, bi, bj, myTime, myIter, myThid )
+
+           ENDDO
+         ENDIF
+#endif /* ALLOW_BBL */
+         IF ( .NOT. ( useDOWN_SLOPE .OR. useBBL ) ) THEN
+           DO k=1,Nr
+            CALL FIND_RHO_2D(
+     I                iMin, iMax, jMin, jMax, k,
+     I                theta(1-OLx,1-OLy,k,bi,bj),
+     I                salt (1-OLx,1-OLy,k,bi,bj),
+     O                rhoInSitu(1-OLx,1-OLy,k,bi,bj),
+     I                k, bi, bj, myThid )
+           ENDDO
+         ENDIF
+#ifdef ALLOW_AUTODIFF
+        ELSE
+C-        fluid is not water:
+          DO k=1,Nr
+           IF ( select_rStar.GE.1 .OR. selectSigmaCoord.GE.1 ) THEN
+C-    isothermal (theta=const) reference state
+             thetaRef = thetaConst
+           ELSE
+C-    horizontally uniform (tRef) reference state
+             thetaRef = tRef(k)
+           ENDIF
+           DO j=1-OLy,sNy+OLy
+            DO i=1-OLx,sNx+OLx
+             rhoInSitu(i,j,k,bi,bj) =
+     &         ( theta(i,j,k,bi,bj)
+     &              *( salt(i,j,k,bi,bj)*atm_Rq + oneRL )
+     &         - thetaRef )*maskC(i,j,k,bi,bj)
+            ENDDO
+           ENDDO
+          ENDDO
+        ENDIF
+#endif /* ALLOW_AUTODIFF */
+
+#ifdef ALLOW_DEBUG
+        IF (debugMode) THEN
+          WRITE(msgBuf,'(A,2(I4,A))')
+     &         'ENTERING UPWARD K LOOP (bi=', bi, ', bj=', bj,')'
+          CALL DEBUG_MSG(msgBuf(1:43),myThid)
+        ENDIF
+#endif
+
+C--     Start of diagnostic loop
+        DO k=Nr,1,-1
+
+#ifdef ALLOW_AUTODIFF_TAMC
+C? Patrick, is this formula correct now that we change the loop range?
+C? Do we still need this?
+cph kkey formula corrected.
+cph Needed for rhoK, rhoKm1, in the case useGMREDI.
+          kkey = (itdkey-1)*Nr + k
+#endif /* ALLOW_AUTODIFF_TAMC */
+
+c#ifdef ALLOW_AUTODIFF_TAMC
+cCADJ STORE theta(:,:,k,bi,bj) = comlev1_bibj_k, key=kkey,
+cCADJ &     kind = isbyte
+cCADJ STORE salt(:,:,k,bi,bj)  = comlev1_bibj_k, key=kkey,
+cCADJ &     kind = isbyte
+c#endif /* ALLOW_AUTODIFF_TAMC */
+
+C--       Calculate gradients of potential density for isoneutral
+C         slope terms (e.g. GM/Redi tensor or IVDC diffusivity)
+          IF ( calcGMRedi .OR. (k.GT.1 .AND. calcConvect)
+     &         .OR. usePP81 .OR. useKL10
+     &         .OR. useMY82 .OR. useGGL90
+     &         .OR. useSALT_PLUME .OR. doDiagsRho.GE.1 ) THEN
+            IF (k.GT.1) THEN
+#ifdef ALLOW_AUTODIFF_TAMC
+CADJ STORE theta(:,:,k-1,bi,bj) = comlev1_bibj_k, key=kkey,kind=isbyte
+CADJ STORE salt (:,:,k-1,bi,bj) = comlev1_bibj_k, key=kkey,kind=isbyte
+CADJ STORE rhokm1 (bi,bj)       = comlev1_bibj_k, key=kkey,kind=isbyte
+#endif /* ALLOW_AUTODIFF_TAMC */
+             IF ( usingZCoords ) THEN
+              DO j=jMin,jMax
+               DO i=iMin,iMax
+                rhoKp1(i,j) = rhoInSitu(i,j,k,bi,bj)
+               ENDDO
+              ENDDO
+              CALL FIND_RHO_2D(
+     I                 iMin, iMax, jMin, jMax, k,
+     I                 theta(1-OLx,1-OLy,k-1,bi,bj),
+     I                 salt (1-OLx,1-OLy,k-1,bi,bj),
+     O                 rhoKm1,
+     I                 k-1, bi, bj, myThid )
+             ELSE
+              CALL FIND_RHO_2D(
+     I                 iMin, iMax, jMin, jMax, k-1,
+     I                 theta(1-OLx,1-OLy,k,bi,bj),
+     I                 salt (1-OLx,1-OLy,k,bi,bj),
+     O                 rhoKp1,
+     I                 k, bi, bj, myThid )
+              DO j=jMin,jMax
+               DO i=iMin,iMax
+                rhoKm1(i,j) = rhoInSitu(i,j,k-1,bi,bj)
+               ENDDO
+              ENDDO
+             ENDIF
+            ENDIF
+#ifdef ALLOW_DEBUG
+            IF (debugMode) CALL DEBUG_CALL('GRAD_SIGMA',myThid)
+#endif
+            CALL GRAD_SIGMA(
+     I             bi, bj, iMin, iMax, jMin, jMax, k,
+     I             rhoInSitu(1-OLx,1-OLy,k,bi,bj), rhoKm1, rhoKp1,
+     O             sigmaX, sigmaY, sigmaR,
+     I             myThid )
+
+#if (defined (ALLOW_SIGMAR_COST_CONTRIBUTION) || defined (ALLOW_LEITH_QG))
+            DO j=jMin,jMax
+             DO i=iMin,iMax
+              sigmaRfield(i,j,k,bi,bj)=sigmaR(i,j,k)
+             ENDDO
+            ENDDO
+#endif /* ALLOW_SIGMAR_COST_CONTRIBUTION or ALLOW_LEITH_QG */
+
+#ifdef ALLOW_AUTODIFF
+#ifdef GMREDI_WITH_STABLE_ADJOINT
+cgf zero out adjoint fields to stabilize pkg/gmredi adjoint
+cgf -> cuts adjoint dependency from slope to state
+            CALL ZERO_ADJ_LOC( Nr, sigmaX, myThid)
+            CALL ZERO_ADJ_LOC( Nr, sigmaY, myThid)
+            CALL ZERO_ADJ_LOC( Nr, sigmaR, myThid)
+#endif
+#endif /* ALLOW_AUTODIFF */
+          ENDIF
+
+C--       Implicit Vertical Diffusion for Convection
+          IF (k.GT.1 .AND. calcConvect) THEN
+#ifdef ALLOW_DEBUG
+            IF (debugMode) CALL DEBUG_CALL('CALC_IVDC',myThid)
+#endif
+            CALL CALC_IVDC(
+     I        bi, bj, iMin, iMax, jMin, jMax, k,
+     I        sigmaR,
+     I        myTime, myIter, myThid)
+          ENDIF
+
+#ifdef ALLOW_DIAGNOSTICS
+          IF ( doDiagsRho.GE.4 ) THEN
+            CALL DIAGS_RHO_L( doDiagsRho, k, bi, bj,
+     I                        rhoInSitu(1-OLx,1-OLy,1,bi,bj),
+     I                        rhoKm1, wVel,
+     I                        myTime, myIter, myThid )
+          ENDIF
+#endif
+
+C--     end of diagnostic k loop (Nr:1)
+        ENDDO
+
+#ifdef ALLOW_AUTODIFF_TAMC
+CADJ STORE IVDConvCount(:,:,:,bi,bj)
+CADJ &     = comlev1_bibj, key=itdkey, kind=isbyte
+#endif
+
+C--     Diagnose Mixed Layer Depth:
+        IF ( calcGMRedi .OR. MOD(doDiagsRho,2).EQ.1 ) THEN
+          CALL CALC_OCE_MXLAYER(
+     I              rhoInSitu(1-OLx,1-OLy,kSrf,bi,bj), sigmaR,
+     I              bi, bj, myTime, myIter, myThid )
+        ENDIF
+
+#ifdef ALLOW_SALT_PLUME
+        IF ( useSALT_PLUME ) THEN
+          CALL SALT_PLUME_CALC_DEPTH(
+     I              rhoInSitu(1-OLx,1-OLy,kSrf,bi,bj), sigmaR,
+     I              bi, bj, myTime, myIter, myThid )
+#ifdef SALT_PLUME_VOLUME
+          CALL SALT_PLUME_VOLFRAC(
+     I              bi, bj, myTime, myIter, myThid )
+C-- get forcings for kpp
+          CALL SALT_PLUME_APPLY(
+     I              1, bi, bj, recip_hFacC(1-OLx,1-OLy,kSrf,bi,bj),
+     I              theta, 0,
+     I              myTime, myIter, myThid )
+          CALL SALT_PLUME_APPLY(
+     I              2, bi, bj, recip_hFacC(1-OLx,1-OLy,kSrf,bi,bj),
+     I              salt, 0,
+     I              myTime, myIter, myThid )
+C-- need to call this S/R from here to apply just before kpp
+          CALL SALT_PLUME_FORCING_SURF(
+     I              bi, bj, iMin, iMax, jMin, jMax,
+     I              myTime, myIter, myThid )
+#endif /* SALT_PLUME_VOLUME */
+        ENDIF
+#endif /* ALLOW_SALT_PLUME */
+
+#ifdef ALLOW_DIAGNOSTICS
+        IF ( MOD(doDiagsRho,4).GE.2 ) THEN
+          CALL DIAGNOSTICS_FILL (sigmaR, 'DRHODR  ', 0, Nr,
+     &         2, bi, bj, myThid)
+        ENDIF
+#endif /* ALLOW_DIAGNOSTICS */
+
+C--    This is where EXTERNAL_FORCING_SURF(bi,bj) used to be called;
+C      now called earlier, before bi,bj loop.
+
+#ifdef ALLOW_AUTODIFF_TAMC
+cph needed for KPP
+CADJ STORE surfaceForcingU(:,:,bi,bj)
+CADJ &     = comlev1_bibj, key=itdkey, kind=isbyte
+CADJ STORE surfaceForcingV(:,:,bi,bj)
+CADJ &     = comlev1_bibj, key=itdkey, kind=isbyte
+CADJ STORE surfaceForcingS(:,:,bi,bj)
+CADJ &     = comlev1_bibj, key=itdkey, kind=isbyte
+CADJ STORE surfaceForcingT(:,:,bi,bj)
+CADJ &     = comlev1_bibj, key=itdkey, kind=isbyte
+#endif /* ALLOW_AUTODIFF_TAMC */
+
+#ifdef  ALLOW_KPP
+C--     Compute KPP mixing coefficients
+        IF ( calcKPP ) THEN
+#ifdef ALLOW_DEBUG
+          IF (debugMode) CALL DEBUG_CALL('KPP_CALC',myThid)
+#endif
+          CALL TIMER_START('KPP_CALC [DO_OCEANIC_PHYS]', myThid)
+          CALL KPP_CALC(
+     I                  bi, bj, myTime, myIter, myThid )
+          CALL TIMER_STOP ('KPP_CALC [DO_OCEANIC_PHYS]', myThid)
+#if (defined ALLOW_AUTODIFF) && !(defined ALLOW_OFFLINE)
+        ELSE
+          CALL KPP_CALC_DUMMY(
+     I                  bi, bj, myTime, myIter, myThid )
+#endif /* ALLOW_AUTODIFF and not ALLOW_OFFLINE */
+        ENDIF
+#endif  /* ALLOW_KPP */
+
+#ifdef  ALLOW_PP81
+C--     Compute PP81 mixing coefficients
+        IF (usePP81) THEN
+#ifdef ALLOW_DEBUG
+          IF (debugMode) CALL DEBUG_CALL('PP81_CALC',myThid)
+#endif
+          CALL PP81_CALC(
+     I                     bi, bj, sigmaR, myTime, myIter, myThid )
+        ENDIF
+#endif /* ALLOW_PP81 */
+
+#ifdef  ALLOW_KL10
+C--     Compute KL10 mixing coefficients
+        IF (useKL10) THEN
+#ifdef ALLOW_DEBUG
+          IF (debugMode) CALL DEBUG_CALL('KL10_CALC',myThid)
+#endif
+          CALL KL10_CALC(
+     I                     bi, bj, sigmaR, myTime, myIter, myThid )
+        ENDIF
+#endif /* ALLOW_KL10 */
+
+#ifdef  ALLOW_MY82
+C--     Compute MY82 mixing coefficients
+        IF (useMY82) THEN
+#ifdef ALLOW_DEBUG
+          IF (debugMode) CALL DEBUG_CALL('MY82_CALC',myThid)
+#endif
+          CALL MY82_CALC(
+     I                     bi, bj, sigmaR, myTime, myIter, myThid )
+        ENDIF
+#endif /* ALLOW_MY82 */
+
+#ifdef  ALLOW_GGL90
+#ifdef ALLOW_AUTODIFF_TAMC
+CADJ STORE GGL90TKE(:,:,:,bi,bj)
+CADJ &     = comlev1_bibj, key=itdkey, kind=isbyte
+#endif /* ALLOW_AUTODIFF_TAMC */
+C--     Compute GGL90 mixing coefficients
+        IF ( useGGL90 .AND. Nr.GT.1 ) THEN
+#ifdef ALLOW_DEBUG
+          IF (debugMode) CALL DEBUG_CALL('GGL90_CALC',myThid)
+#endif
+          CALL TIMER_START('GGL90_CALC [DO_OCEANIC_PHYS]', myThid)
+          CALL GGL90_CALC(
+     I                     bi, bj, sigmaR, myTime, myIter, myThid )
+          CALL TIMER_STOP ('GGL90_CALC [DO_OCEANIC_PHYS]', myThid)
+        ENDIF
+#endif /* ALLOW_GGL90 */
+
+#ifdef ALLOW_TIMEAVE
+        IF ( taveFreq.GT. 0. _d 0 ) THEN
+          CALL TIMEAVE_SURF_FLUX( bi, bj, myTime, myIter, myThid)
+        ENDIF
+        IF ( taveFreq.GT.0. .AND. calcConvect ) THEN
+          CALL TIMEAVE_CUMULATE(ConvectCountTave, IVDConvCount,
+     I                           Nr, deltaTClock, bi, bj, myThid)
+        ENDIF
+#endif /* ALLOW_TIMEAVE */
+
+#ifdef ALLOW_GMREDI
+#ifdef ALLOW_AUTODIFF_TAMC
+# ifndef GM_EXCLUDE_CLIPPING
+cph storing here is needed only for one GMREDI_OPTIONS:
+cph define GM_BOLUS_ADVEC
+cph keep it although TAF says you dont need to.
+cph but I have avoided the #ifdef for now, in case more things change
+CADJ STORE sigmaX(:,:,:)       = comlev1_bibj, key=itdkey, kind=isbyte
+CADJ STORE sigmaY(:,:,:)       = comlev1_bibj, key=itdkey, kind=isbyte
+CADJ STORE sigmaR(:,:,:)       = comlev1_bibj, key=itdkey, kind=isbyte
+# endif
+#endif /* ALLOW_AUTODIFF_TAMC */
+
+C--     Calculate iso-neutral slopes for the GM/Redi parameterisation
+        IF ( calcGMRedi ) THEN
+#ifdef ALLOW_DEBUG
+          IF (debugMode) CALL DEBUG_CALL('GMREDI_CALC_TENSOR',myThid)
+#endif
+          CALL GMREDI_CALC_TENSOR(
+     I             iMin, iMax, jMin, jMax,
+     I             sigmaX, sigmaY, sigmaR,
+     I             bi, bj, myTime, myIter, myThid )
+#if (defined ALLOW_AUTODIFF) && !(defined ALLOW_OFFLINE)
+        ELSE
+          CALL GMREDI_CALC_TENSOR_DUMMY(
+     I             iMin, iMax, jMin, jMax,
+     I             sigmaX, sigmaY, sigmaR,
+     I             bi, bj, myTime, myIter, myThid )
+#endif /* ALLOW_AUTODIFF and not ALLOW_OFFLINE */
+        ENDIF
+#endif /* ALLOW_GMREDI */
+
+#ifdef ALLOW_DOWN_SLOPE
+        IF ( useDOWN_SLOPE ) THEN
+C--     Calculate Downsloping Flow for Down_Slope parameterization
+         IF ( usingPCoords ) THEN
+          CALL DWNSLP_CALC_FLOW(
+     I                bi, bj, kSurfC, rhoInSitu,
+     I                myTime, myIter, myThid )
+         ELSE
+          CALL DWNSLP_CALC_FLOW(
+     I                bi, bj, kLowC, rhoInSitu,
+     I                myTime, myIter, myThid )
+         ENDIF
+        ENDIF
+#endif /* ALLOW_DOWN_SLOPE */
+
+C--   end bi,bj loops.
+       ENDDO
+      ENDDO
+
+#ifndef ALLOW_AUTODIFF
+C---  if fluid Is Water: end
+      ENDIF
+#endif
+
+#ifdef ALLOW_OFFLINE
+      IF ( useOffLine ) THEN
+#ifdef ALLOW_DEBUG
+        IF (debugMode) CALL DEBUG_CALL('OFFLINE_GET_DIFFUS',myThid)
+#endif /* ALLOW_DEBUG */
+        CALL OFFLINE_GET_DIFFUS( myTime, myIter, myThid )
+      ENDIF
+#endif /* ALLOW_OFFLINE */
+
+#ifdef ALLOW_BBL
+      IF ( useBBL ) THEN
+       CALL BBL_CALC_RHS(
+     I                          myTime, myIter, myThid )
+      ENDIF
+#endif /* ALLOW_BBL */
+
+#ifdef ALLOW_MYPACKAGE
+      IF ( useMYPACKAGE ) THEN
+       CALL MYPACKAGE_CALC_RHS(
+     I                          myTime, myIter, myThid )
+      ENDIF
+#endif /* ALLOW_MYPACKAGE */
+
+#ifdef ALLOW_GMREDI
+      IF ( calcGMRedi ) THEN
+        CALL GMREDI_DO_EXCH( myTime, myIter, myThid )
+      ENDIF
+#endif /* ALLOW_GMREDI */
+
+#ifdef ALLOW_KPP
+      IF ( calcKPP ) THEN
+        CALL KPP_DO_EXCH( myThid )
+      ENDIF
+#endif /* ALLOW_KPP */
+
+#ifdef ALLOW_GGL90
+      IF ( useGGL90 )
+     &  CALL GGL90_EXCHANGES( myThid )
+#endif /* ALLOW_GGL90 */
+
+#ifdef ALLOW_DIAGNOSTICS
+      IF ( fluidIsWater .AND. useDiagnostics ) THEN
+        CALL DIAGS_RHO_G(
+     I                    rhoInSitu, uVel, vVel, wVel,
+     I                    myTime, myIter, myThid )
+      ENDIF
+      IF ( useDiagnostics ) THEN
+        CALL DIAGS_OCEANIC_SURF_FLUX( myTime, myIter, myThid )
+      ENDIF
+      IF ( calcConvect .AND. useDiagnostics ) THEN
+        CALL DIAGNOSTICS_FILL( IVDConvCount, 'CONVADJ ',
+     &                               0, Nr, 0, 1, 1, myThid )
+      ENDIF
+#ifdef ALLOW_SALT_PLUME
+      IF ( useDiagnostics )
+     &      CALL SALT_PLUME_DIAGNOSTICS_FILL(bi,bj,myThid)
+#endif
+#endif
+
+#ifdef ALLOW_DEBUG
+      IF (debugMode) CALL DEBUG_LEAVE('DO_OCEANIC_PHYS',myThid)
+#endif
+
+      RETURN
+      END
Index: /issm/trunk/test/MITgcm/code_remesh/eeboot_minimal.F
===================================================================
--- /issm/trunk/test/MITgcm/code_remesh/eeboot_minimal.F	(revision 28013)
+++ /issm/trunk/test/MITgcm/code_remesh/eeboot_minimal.F	(revision 28013)
@@ -0,0 +1,328 @@
+#include "PACKAGES_CONFIG.h"
+#include "CPP_EEOPTIONS.h"
+
+CBOP
+C     !ROUTINE: EEBOOT_MINIMAL
+
+C     !INTERFACE:
+      SUBROUTINE EEBOOT_MINIMAL( myComm )
+
+C     !DESCRIPTION:
+C     *==========================================================*
+C     | SUBROUTINE EEBOOT\_MINIMAL
+C     | o Set an initial environment that is predictable i.e.
+C     | behaves in a similar way on all machines and stable.
+C     *==========================================================*
+C     | Under MPI this routine calls MPI\_INIT to setup the
+C     | mpi environment ( on some systems the code is running as
+C     | a single process prior to MPI\_INIT, on others the mpirun
+C     | script has already created multiple processes). Until
+C     | MPI\_Init is called it is unclear what state the
+C     | application is in. Once this routine has been run it is
+C     | "safe" to do things like I/O to report erros and to get
+C     | run parameters.
+C     | Note: This routine can also be compiled with CPP
+C     | directives set so that no multi-processing is initialise.
+C     | This is OK and will work fine.
+C     *==========================================================*
+
+C     !USES:
+      IMPLICIT NONE
+C     == Global data ==
+#include "SIZE.h"
+#include "EEPARAMS.h"
+#include "EESUPPORT.h"
+
+C     !ROUTINE ARGUMENTS
+C     == Routine arguments ==
+C     myComm     :: Communicator that is passed down from
+C                   upper level driver (if there is one).
+      INTEGER myComm
+
+C     !FUNCTIONS:
+c     INTEGER  IFNBLNK
+c     EXTERNAL IFNBLNK
+      INTEGER  ILNBLNK
+      EXTERNAL ILNBLNK
+
+C     !LOCAL VARIABLES:
+C     == Local variables ==
+C     myThid     :: Temp. dummy thread number.
+C     fNam       :: Used to build file name for standard and error output.
+C     msgBuf     :: Used to build messages for printing.
+      INTEGER myThid
+#ifdef USE_PDAF
+      CHARACTER*18 fNam
+#else
+      CHARACTER*13 fNam
+#endif /* USE_PDAF */
+      CHARACTER*(MAX_LEN_MBUF) msgBuf
+#ifdef ALLOW_USE_MPI
+C     mpiRC      :: Error code reporting variable used with MPI.
+      INTEGER mpiRC
+      INTEGER mpiIsInitialized
+      LOGICAL doReport
+#if defined(ALLOW_OASIS) || defined(COMPONENT_MODULE)
+      INTEGER mpiMyWId
+#elif defined(ALLOW_NEST2W_COMMON)
+      INTEGER mpiMyWId
+#endif
+#ifdef ALLOW_CPL_ISSM
+      COMMON /CPL_MPI_ID/ mpiMyWid, toissmcomm
+      INTEGER             mpiMyWid, toissmcomm
+      INTEGER my_local_rank,my_local_size, numprocsworld
+      INTEGER status(MPI_STATUS_SIZE)
+#endif /* ALLOW_CPL_ISSM */
+#if defined(ALLOW_NEST_PARENT) || defined(ALLOW_NEST_CHILD)
+      INTEGER mpiMyWId, color
+#endif
+#ifdef USE_PDAF
+      INTEGER mpi_task_id
+      CHARACTER*(14) fmtStr
+#else
+      CHARACTER*(6) fmtStr
+#endif /* USE_PDAF */
+      INTEGER iTmp
+#endif /* ALLOW_USE_MPI */
+CEOP
+
+C--   Default values set to single processor case
+      numberOfProcs = 1
+      myProcId      = 0
+      pidIO         = myProcId
+      myProcessStr  = '------'
+C     Set a dummy value for myThid because we are not multi-threading yet.
+      myThid        = 1
+
+C     Annoyingly there is no universal way to have the usingMPI
+C     parameter work as one might expect. This is because, on some
+C     systems I/O does not work until MPI_Init has been called.
+C     The solution for now is that the parameter below may need to
+C     be changed manually!
+#ifdef ALLOW_USE_MPI
+      usingMPI = .TRUE.
+#else
+      usingMPI = .FALSE.
+#endif
+
+      IF ( .NOT.usingMPI ) THEN
+
+        WRITE(myProcessStr,'(I4.4)') myProcId
+        WRITE(fNam,'(A,A)') 'STDERR.', myProcessStr(1:4)
+        OPEN(errorMessageUnit,FILE=fNam,STATUS='unknown')
+c       WRITE(fNam,'(A,A)') 'STDOUT.', myProcessStr(1:4)
+c       OPEN(standardMessageUnit,FILE=fNam,STATUS='unknown')
+
+#ifdef ALLOW_USE_MPI
+      ELSE
+C--   MPI style multiple-process initialisation
+C--   =========================================
+
+       CALL MPI_Initialized( mpiIsInitialized, mpiRC )
+
+       IF ( mpiIsInitialized .EQ. 0 ) THEN
+C--     Initialise MPI multi-process parallel environment.
+C       On some systems program forks at this point. Others have already
+C       forked within mpirun - now thats an open standard!
+        CALL MPI_INIT( mpiRC )
+        IF ( mpiRC .NE. MPI_SUCCESS ) THEN
+         eeBootError = .TRUE.
+         WRITE(msgBuf,'(A,I5)')
+     &        'EEBOOT_MINIMAL: MPI_INIT return code', mpiRC
+         CALL PRINT_ERROR( msgBuf, myThid )
+         GOTO 999
+        ENDIF
+
+C--     MPI has now been initialized ; now we need to either
+C       ask for a communicator or pretend that we have:
+C       Pretend that we have asked for a communicator
+        MPI_COMM_MODEL = MPI_COMM_WORLD
+
+       ELSE
+C--     MPI was already initialized and communicator has been passed
+C       down from upper level driver
+        MPI_COMM_MODEL = myComm
+
+       ENDIF
+
+       doReport = .FALSE.
+#ifdef USE_PDAF
+C     initialize PDAF
+C     for more output increase second parameter from 1 to 2
+       CALL INIT_PARALLEL_PDAF(0, 1, MPI_COMM_MODEL, MPI_COMM_MODEL,
+     &      mpi_task_id)
+#endif /* USE_PDAF */
+
+#ifdef ALLOW_OASIS
+C      add a 1rst preliminary call EESET_PARAMS to set useOASIS
+C      (needed to decide either to call OASIS_INIT or not)
+       CALL MPI_COMM_RANK( MPI_COMM_WORLD, mpiMyWId, mpiRC )
+       CALL EESET_PARMS ( mpiMyWId, doReport )
+       IF ( useOASIS ) CALL OASIS_INIT(MPI_COMM_MODEL)
+#endif /* ALLOW_OASIS */
+
+#ifdef COMPONENT_MODULE
+C--    Set the running directory
+       CALL MPI_COMM_RANK( MPI_COMM_WORLD, mpiMyWId, mpiRC )
+       CALL SETDIR( mpiMyWId )
+
+C- jmc: test:
+C      add a 1rst preliminary call EESET_PARAMS to set useCoupler
+C      (needed to decide either to call CPL_INIT or not)
+       CALL EESET_PARMS ( mpiMyWId, doReport )
+C- jmc: test end ; otherwise, uncomment next line:
+c      useCoupler = .TRUE.
+
+C--    Ask coupler interface for a communicator
+       IF ( useCoupler) CALL CPL_INIT
+#endif /* COMPONENT_MODULE */
+
+C--    Case with Nest(ing)
+#if defined(ALLOW_NEST_PARENT) || defined(ALLOW_NEST_CHILD)
+C--    Set the running directory
+       CALL MPI_COMM_RANK( MPI_COMM_WORLD, mpiMyWId, mpiRC )
+       CALL SETDIR( mpiMyWId )
+
+C--    Setup Nesting Execution Environment
+       CALL NEST_EEINIT( mpiMyWId, color )
+#endif /* ALLOW_NEST_PARENT | ALLOW_NEST_CHILD */
+
+#ifdef ALLOW_NEST2W_COMMON
+C--    Case with 2-Ways Nest(ing)
+C-     Set the running directory
+       CALL MPI_COMM_RANK( MPI_COMM_WORLD, mpiMyWId, mpiRC )
+       CALL SETDIR( mpiMyWId )
+
+C-     Setup Nesting Execution Environment
+       CALL NEST2W_EEINIT( mpiMyWId )
+       IF ( eeBootError ) GOTO 999
+#endif /* ALLOW_NEST2W_COMMON */
+
+#ifdef ALLOW_CPL_ISSM
+C     add a 1rst preliminary call EESET_PARAMS to set useCoupler
+       CALL MPI_COMM_RANK(MPI_COMM_WORLD, mpiMyWid, mpiRC)
+       CALL EESET_PARMS ( mpiMyWId, doReport )
+
+       IF ( useCoupler ) THEN
+          CALL MPI_COMM_SIZE(MPI_COMM_WORLD, numprocsworld, mpiRC)
+
+c     Split world into sub-communicators for each and every model:*/
+          CALL MPI_COMM_SPLIT(MPI_COMM_WORLD,1,MPIMYWID,
+     &         MPI_COMM_MODEL,mpiRC)
+
+          print*,'Oc My global rank',mpiMyWid
+          print*,'Oc My world size:',numprocsworld
+
+          CALL MPI_INTERCOMM_CREATE(MPI_COMM_MODEL,0,MPI_COMM_WORLD,
+     &         0,0,toissmcomm,mpiRC)
+
+          CALL MPI_COMM_RANK(MPI_COMM_MODEL, my_local_rank, mpiRC)
+          CALL MPI_COMM_SIZE(MPI_COMM_MODEL, my_local_size, mpiRC)
+
+          print*,'Oc My global rank',mpiMyWid,'MyLocal rank: ',
+     &         my_local_rank
+          print*,'Oc My world size:',numprocsworld,'My local size: ',
+     &         my_local_size
+       ENDIF
+#endif /* ALLOW_CPL_ISSM */
+
+C---+----1----+----2----+----3----+----4----+----5----+----6----+----7-|--+----|
+
+C--    Get my process number
+       CALL MPI_COMM_RANK( MPI_COMM_MODEL, mpiMyId, mpiRC )
+       IF ( mpiRC .NE. MPI_SUCCESS ) THEN
+        eeBootError = .TRUE.
+        WRITE(msgBuf,'(A,I5)')
+     &        'EEBOOT_MINIMAL: MPI_COMM_RANK return code', mpiRC
+        CALL PRINT_ERROR( msgBuf, myThid )
+        GOTO 999
+       ENDIF
+       myProcId = mpiMyId
+       iTmp = MAX(4,1 + INT(LOG10(DFLOAT(nPx*nPy))))
+#ifdef USE_PDAF
+       WRITE(fmtStr,'(4(A,I1),A)')
+     &      '(I',iTmp,'.',iTmp,',A1,I',iTmp,'.',iTmp,')'
+       WRITE(myProcessStr,fmtStr) mpi_task_id,'.',myProcId
+#else
+       WRITE(fmtStr,'(2(A,I1),A)') '(I',iTmp,'.',iTmp,')'
+       WRITE(myProcessStr,fmtStr) myProcId
+#endif /* USE_PDAF */
+       iTmp = ILNBLNK( myProcessStr )
+       mpiPidIo = myProcId
+       pidIO    = mpiPidIo
+       IF ( mpiPidIo .EQ. myProcId ) THEN
+#ifdef SINGLE_DISK_IO
+        IF( myProcId .EQ. 0 ) THEN
+#endif
+         WRITE(fNam,'(A,A)') 'STDERR.', myProcessStr(1:iTmp)
+         OPEN(errorMessageUnit,FILE=fNam,STATUS='unknown')
+         WRITE(fNam,'(A,A)') 'STDOUT.', myProcessStr(1:iTmp)
+         OPEN(standardMessageUnit,FILE=fNam,STATUS='unknown')
+#ifdef SINGLE_DISK_IO
+        ELSE
+         OPEN(errorMessageUnit,FILE='/dev/null',STATUS='unknown')
+         standardMessageUnit=errorMessageUnit
+        ENDIF
+        IF( myProcId .EQ. 0 ) THEN
+          WRITE(msgBuf,'(2A)') '** WARNING ** EEBOOT_MINIMAL: ',
+     &     'defined SINGLE_DISK_IO will result in losing'
+          CALL PRINT_MESSAGE( msgBuf, errorMessageUnit,
+     &                        SQUEEZE_RIGHT, myThid )
+          WRITE(msgBuf,'(2A)') '** WARNING ** EEBOOT_MINIMAL: ',
+     &     'any message (error/warning) from any proc <> 0'
+          CALL PRINT_MESSAGE( msgBuf, errorMessageUnit,
+     &                        SQUEEZE_RIGHT, myThid )
+        ENDIF
+#endif
+       ENDIF
+
+#if defined(ALLOW_NEST_PARENT) || defined(ALLOW_NEST_CHILD)
+       WRITE(standardMessageUnit,'(2(A,I6))')
+     &           ' mpiMyWId =', mpiMyWId, ' , color =',color
+#endif /* ALLOW_NEST_PARENT | ALLOW_NEST_CHILD */
+
+C--    Synchronise all processes
+C      Strictly this is superfluous, but by using it we can guarantee to
+C      find out about processes that did not start up.
+       CALL MPI_BARRIER( MPI_COMM_MODEL, mpiRC )
+       IF ( mpiRC .NE. MPI_SUCCESS ) THEN
+        eeBootError = .TRUE.
+        WRITE(msgBuf,'(A,I6)')
+     &        'EEBOOT_MINIMAL: MPI_BARRIER return code', mpiRC
+        CALL PRINT_ERROR( msgBuf, myThid )
+        GOTO 999
+       ENDIF
+
+C--    Get number of MPI processes
+       CALL MPI_COMM_SIZE ( MPI_COMM_MODEL, mpiNProcs, mpiRC )
+       IF ( mpiRC .NE. MPI_SUCCESS ) THEN
+        eeBootError = .TRUE.
+        WRITE(msgBuf,'(A,I6)')
+     &        'EEBOOT_MINIMAL: MPI_COMM_SIZE return code', mpiRC
+        CALL PRINT_ERROR( msgBuf, myThid )
+        GOTO 999
+       ENDIF
+       numberOfProcs = mpiNProcs
+
+#endif /* ALLOW_USE_MPI */
+      ENDIF
+
+C--    Under MPI only allow same number of processes as proc grid size.
+C      Strictly we are allowed more procs but knowing there
+C      is an exact match makes things easier.
+       IF ( numberOfProcs .NE. nPx*nPy ) THEN
+        eeBootError = .TRUE.
+        WRITE(msgBuf,'(2(A,I6))')
+     &  'EEBOOT_MINIMAL: No. of procs=', numberOfProcs,
+     &  ' not equal to nPx*nPy=', nPx*nPy
+        CALL PRINT_ERROR( msgBuf, myThid )
+        GOTO 999
+       ENDIF
+
+#ifdef USE_LIBHPM
+       CALL F_HPMINIT(myProcId, "mitgcmuv")
+#endif
+
+ 999  CONTINUE
+      RETURN
+      END
Index: /issm/trunk/test/MITgcm/code_remesh/eedie.F
===================================================================
--- /issm/trunk/test/MITgcm/code_remesh/eedie.F	(revision 28013)
+++ /issm/trunk/test/MITgcm/code_remesh/eedie.F	(revision 28013)
@@ -0,0 +1,107 @@
+#include "CPP_EEOPTIONS.h"
+#ifdef USE_LIBHPM
+# include "f_hpm.h"
+#endif
+
+CBOP
+      SUBROUTINE EEDIE
+C     *==========================================================*
+C     | SUBROUTINE EEDIE                                         |
+C     | o Close execution "environment", particularly perform    |
+C     |   steps to terminate parallel processing.                |
+C     *==========================================================*
+C     | Note: This routine can also be compiled with CPP         |
+C     | directives set so that no multi-processing is initialised|
+C     | This is OK and should work fine.                         |
+C     *==========================================================*
+      IMPLICIT NONE
+
+C     == Global variables ==
+#include "SIZE.h"
+#include "EEPARAMS.h"
+#include "EESUPPORT.h"
+CEOP
+
+C     == Local variables ==
+C     msgBuf       :: I/O Buffer
+C     nThreadsDone :: Used to count number of completed threads.
+C     I            :: Loop counter.
+      CHARACTER*(MAX_LEN_MBUF) msgBuf
+      INTEGER nThreadsDone
+      INTEGER I
+#ifdef ALLOW_USE_MPI
+C     mpiRC        :: Error code reporting variable used with MPI.
+      INTEGER mpiRC
+#endif /* ALLOW_USE_MPI */
+
+      IF ( eeBootError ) THEN
+C--   Skip ended threads counting if earlier error was found
+        WRITE(msgBuf,'(2A)')
+     &   'EEDIE: earlier error in multi-proc/thread setting'
+        CALL PRINT_ERROR( msgBuf, 1 )
+        fatalError = .TRUE.
+
+      ELSE
+C--   Check that all the threads have ended
+C     No thread should reach this loop before all threads have set
+C     threadIsComplete to TRUE. If they do then either there is a bug
+C     in the code or the behaviour of the parallel compiler directives
+C     are not right for this code. In the latter case different
+C     directives may be available or the compiler itself may have a
+C     bug or you may need a different parallel compiler for main.F
+        nThreadsDone = 0
+        DO I = 1, nThreads
+         IF ( threadIsComplete(I) ) nThreadsDone = nThreadsDone+1
+        ENDDO
+        IF ( nThreadsDone .LT. nThreads ) THEN
+         WRITE(msgBuf,'(A,I5,A)')
+     &    'S/R EEDIE: Only',nThreadsDone,' threads have completed,'
+         CALL PRINT_ERROR( msgBuf, 1 )
+         WRITE(msgBuf,'(A,I5,A)')
+     &    'S/R EEDIE:',nThreads,' are expected for this config !'
+         CALL PRINT_ERROR( msgBuf, 1 )
+         eeEndError = .TRUE.
+         fatalError = .TRUE.
+        ENDIF
+
+C--   end if/else eebootError
+      ENDIF
+
+#ifdef USE_LIBHPM
+      CALL F_HPMTERMINATE(myProcId)
+#endif
+
+C--   Flush IO-unit before MPI termination
+      CALL MDS_FLUSH( errorMessageUnit, 1 )
+c#ifdef ALLOW_USE_MPI
+      CALL MDS_FLUSH( standardMessageUnit, 1 )
+c#endif /* ALLOW_USE_MPI */
+
+#ifdef ALLOW_USE_MPI
+C- Note: since MPI_INIT is always called, better to also always terminate MPI
+C        (even if usingMPI=F) --> comment out test on usingMPI
+c     IF ( usingMPI ) THEN
+
+C--   MPI style multiple-process termination
+C--   ======================================
+#if (defined COMPONENT_MODULE) || (defined ALLOW_CPL_ISSM)
+       IF ( useCoupler) CALL MPI_BARRIER( MPI_COMM_WORLD, mpiRC )
+#endif
+#ifdef ALLOW_OASIS
+       IF ( useOASIS ) CALL OASIS_FINALIZE
+#endif
+       CALL MPI_FINALIZE  ( mpiRC )
+       IF ( mpiRC .NE. MPI_SUCCESS ) THEN
+        eeEndError = .TRUE.
+        fatalError = .TRUE.
+        WRITE(msgBuf,'(A,I5)')
+     &       'S/R FIN_PROCS: MPI_FINALIZE return code',
+     &       mpiRC
+        CALL PRINT_ERROR( msgBuf, 1 )
+       ENDIF
+
+c     ENDIF
+#endif /* ALLOW_USE_MPI */
+
+      RETURN
+      END
Index: /issm/trunk/test/MITgcm/code_remesh/packages.conf
===================================================================
--- /issm/trunk/test/MITgcm/code_remesh/packages.conf	(revision 28013)
+++ /issm/trunk/test/MITgcm/code_remesh/packages.conf	(revision 28013)
@@ -0,0 +1,6 @@
+#-- list of packages (or group of packages) to compile for this experiment:
+gfd
+obcs
+shelfice
+diagnostics
+timeave
Index: /issm/trunk/test/MITgcm/input_remesh/data
===================================================================
--- /issm/trunk/test/MITgcm/input_remesh/data	(revision 28013)
+++ /issm/trunk/test/MITgcm/input_remesh/data	(revision 28013)
@@ -0,0 +1,79 @@
+# ====================
+# | Model parameters |
+# ====================
+#
+# Continuous equation parameters
+ &PARM01
+ Tref = 30*-1.9,
+ Sref = 30*34.4,
+ viscAz=1.E-3,
+ viscAh=600.0,
+ no_slip_sides=.FALSE.,
+ no_slip_bottom=.FALSE.,
+ diffKhT=100.0,
+ diffKzT=5.E-5,
+ diffKhS=100.0,
+ diffKzS=5.E-5,
+ bottomDragQuadratic=2.5E-3,
+ eosType='JMD95Z', 
+ HeatCapacity_cp = 3974.0,
+ rhoConst=1030.,
+ rhoNil=1030.,
+ gravity=9.81,
+ convertFW2Salt = 33.4,
+ rigidLid=.FALSE.,
+ implicitFreeSurface=.TRUE.,
+ exactConserv=.TRUE.,
+ hFacMin=0.05,
+ hFacInf=0.025,
+ nonHydrostatic=.FALSE.,
+ globalfiles = .TRUE.,
+ useSingleCpuIO = .TRUE.,
+ vectorInvariantMomentum = .TRUE.,
+ selectImplicitDrag = 2,  
+ nonlinfreesurf = 4, 
+ &
+
+# Elliptic solver parameters
+ &PARM02
+ cg2dMaxIters=1000,
+ cg2dTargetResidual=1.E-13,
+ cg3dMaxIters=400,
+ cg3dTargetResidual=1.E-13,
+ &
+
+# Time stepping parameters
+ &PARM03
+ niter0=0
+# endTime=2592000.,
+ nTimesteps=4380,
+ deltaT=600.0,
+ abEps=0.1,
+ cAdjFreq = 1.,
+ pChkptFreq=2628000.,
+ chkptFreq=0.,
+ dumpFreq=0.,
+ taveFreq=2628000.,
+ monitorFreq=1200.,
+ monitorSelect=2,
+ &
+
+# Gridding parameters
+ &PARM04
+ usingSphericalPolarGrid=.TRUE.,
+ xgOrigin = 0.0,
+ ygOrigin = -80.0,
+ delX = 20*0.25,
+ delY = 40*0.05,
+ delZ = 30*30.0,
+ &
+
+# Input datasets
+ &PARM05
+ bathyFile       = 'bathymetry.bin',
+# hydrogSaltFile  = 'Salt.bin',
+# hydrogThetaFile = 'Theta.bin',
+# uVelInitFile    = 'Uvel.bin',
+# vVelInitFile    = 'Vvel.bin',
+# pSurfInitFile   = 'Etan.bin',
+ &
Index: /issm/trunk/test/MITgcm/input_remesh/data.diagnostics
===================================================================
--- /issm/trunk/test/MITgcm/input_remesh/data.diagnostics	(revision 28013)
+++ /issm/trunk/test/MITgcm/input_remesh/data.diagnostics	(revision 28013)
@@ -0,0 +1,49 @@
+# Diagnostic Package Choices
+#-----------------
+# for each output-stream:
+#  filename(n) : prefix of the output file name (only 8.c long) for outp.stream n
+#  frequency(n):< 0 : write snap-shot output every |frequency| seconds
+#               > 0 : write time-average output every frequency seconds
+#  timePhase(n)     : write at time = timePhase + multiple of |frequency|
+#  averagingFreq(n) : frequency (in s) for periodic averaging interval
+#  averagingPhase(n): phase     (in s) for periodic averaging interval
+#  repeatCycle(n)   : number of averaging intervals in 1 cycle
+#  levels(:,n) : list of levels to write to file (Notes: declared as REAL)
+#                 when this entry is missing, select all common levels of this list
+#  fields(:,n) : list of diagnostics fields (8.c) (see "available_diagnostics.log"
+#                 file for the list of all available diag. in this particular config)
+#-----------------
+ &DIAGNOSTICS_LIST
+# diag_mnc     = .FALSE.,
+  dumpAtLast   = .FALSE.,
+  fields(1:4,1) = 'ETAN    ','SHIfwFlx',
+                   'SHI_mass','SHIRshel'
+#                  'surForcT','surForcS','TFLUX   ','SFLUX   ','oceFreez',
+#                  'TRELAX  ','SRELAX  ',
+#  fields(1,1)='ETAN'
+   filename(1) = 'surfDiag',
+# frequency(1) =  2592000.,
+# frequency(1) =  86400.,
+  frequency(1) =  86400.,
+ &
+
+#--------------------
+# Parameter for Diagnostics of per level statistics:
+#--------------------
+#  diagSt_mnc (logical): write stat-diags to NetCDF files (default=diag_mnc)
+#  diagSt_regMaskFile : file containing the region-mask to read-in
+#  nSetRegMskFile   : number of region-mask sets within the region-mask file
+#  set_regMask(i)   : region-mask set-index that identifies the region "i"
+#  val_regMask(i)   : region "i" identifier value in the region mask
+#--for each output-stream:
+#  stat_fName(n) : prefix of the output file name (max 80c long) for outp.stream n
+#  stat_freq(n):< 0 : write snap-shot output every |stat_freq| seconds
+#               > 0 : write time-average output every stat_freq seconds
+#  stat_phase(n)    : write at time = stat_phase + multiple of |stat_freq|
+#  stat_region(:,n) : list of "regions" (default: 1 region only=global)
+#  stat_fields(:,n) : list of selected diagnostics fields (8.c) in outp.stream n
+#                (see "available_diagnostics.log" file for the full list of diags)
+#--------------------
+ &DIAG_STATIS_PARMS
+ &
+
Index: /issm/trunk/test/MITgcm/input_remesh/data.obcs
===================================================================
--- /issm/trunk/test/MITgcm/input_remesh/data.obcs	(revision 28013)
+++ /issm/trunk/test/MITgcm/input_remesh/data.obcs	(revision 28013)
@@ -0,0 +1,18 @@
+# ***************
+# Open boundaries
+# ***************
+ &OBCS_PARM01
+ OB_Iwest = 40*1,
+ OB_Ieast = 40*-1,
+#
+ useOBCSprescribe = .TRUE.,
+#
+ OBWsFile = 'OBs.bin',
+ OBWtFile = 'OBt.bin',
+ OBWuFile = 'OBu.bin',
+ OBWvFile = 'zeros.bin',
+ OBEsFile = 'OBs.bin',
+ OBEtFile = 'OBt.bin',
+ OBEuFile = 'OBu.bin',
+ OBEvFile = 'zeros.bin',
+ &
Index: /issm/trunk/test/MITgcm/input_remesh/data.pkg
===================================================================
--- /issm/trunk/test/MITgcm/input_remesh/data.pkg	(revision 28013)
+++ /issm/trunk/test/MITgcm/input_remesh/data.pkg	(revision 28013)
@@ -0,0 +1,5 @@
+# Packages
+ &PACKAGES
+ useShelfIce = .TRUE.,
+ useOBCS     = .TRUE.,
+ &
Index: /issm/trunk/test/MITgcm/input_remesh/data.shelfice
===================================================================
--- /issm/trunk/test/MITgcm/input_remesh/data.shelfice	(revision 28013)
+++ /issm/trunk/test/MITgcm/input_remesh/data.shelfice	(revision 28013)
@@ -0,0 +1,16 @@
+# ===================================
+# | Parameters for SHELFICE package |
+# ===================================
+ &SHELFICE_PARM01
+ SHELFICEconserve = .TRUE.,
+ SHELFICEboundaryLayer = .TRUE.,
+ SHELFICEMassStepping = .TRUE.,
+ SHELFICEtopoFile='icetopo.bin',
+ SHELFICEwriteState = .TRUE.,
+ SHELFICEMassStepping = .TRUE.,
+ SHELFICEremeshFrequency = 600.0,
+ SHELFICEsplitThreshold = 1.12,
+ SHELFICEmergeThreshold = 0.10,
+ SHELFICEmassFile='shelficemass.bin',
+ SHELFICEMassDynTendFile='shelfice_dmdt.bin',
+ &
Index: /issm/trunk/test/MITgcm/input_remesh/eedata
===================================================================
--- /issm/trunk/test/MITgcm/input_remesh/eedata	(revision 28013)
+++ /issm/trunk/test/MITgcm/input_remesh/eedata	(revision 28013)
@@ -0,0 +1,10 @@
+# Example "eedata" file
+# Lines beginning "#" are comments
+# nTx - No. threads per process in X
+# nTy - No. threads per process in Y
+ &EEPARMS
+ useCoupler=.TRUE.,
+ &
+# Note: Some systems use & as the
+# namelist terminator. Other systems
+# use a / character (as shown here).
Index: /issm/trunk/test/MITgcm/input_remesh/eedata_uncoupled
===================================================================
--- /issm/trunk/test/MITgcm/input_remesh/eedata_uncoupled	(revision 28013)
+++ /issm/trunk/test/MITgcm/input_remesh/eedata_uncoupled	(revision 28013)
@@ -0,0 +1,10 @@
+# Example "eedata" file
+# Lines beginning "#" are comments
+# nTx - No. threads per process in X
+# nTy - No. threads per process in Y
+ &EEPARMS
+ useCoupler=.FALSE.,
+ &
+# Note: Some systems use & as the
+# namelist terminator. Other systems
+# use a / character (as shown here).
Index: /issm/trunk/test/MITgcm/install.sh
===================================================================
--- /issm/trunk/test/MITgcm/install.sh	(revision 28012)
+++ /issm/trunk/test/MITgcm/install.sh	(revision 28013)
@@ -3,5 +3,5 @@
 
 # Cleanup
-rm -rf install
+rm -rf build/* checkpoint* install
 
 ################################################################################
Index: /issm/trunk/test/NightlyRun/GetIds.py
===================================================================
--- /issm/trunk/test/NightlyRun/GetIds.py	(revision 28012)
+++ /issm/trunk/test/NightlyRun/GetIds.py	(revision 28013)
@@ -54,5 +54,5 @@
     #
     # NOTE: ID inclusion/exclusion lists will always hit this condition 
-    #       becasue of the way their respective arguments are gathered at the 
+    #       because of the way their respective arguments are gathered at the 
     #       end of __main__ in the call to function runme.
     if type(ids_names) == list and len(ids_names) == 2:
@@ -63,5 +63,5 @@
                 if ':' in i:
                     i_range = i.split(':')
-                    for j in range(int(i_range[0]), int(i_range[1])):
+                    for j in range(int(i_range[0]), int(i_range[1]) + 1):
                         ids_expanded.append(j)
                 else:
Index: /issm/trunk/test/NightlyRun/runme.py
===================================================================
--- /issm/trunk/test/NightlyRun/runme.py	(revision 28012)
+++ /issm/trunk/test/NightlyRun/runme.py	(revision 28013)
@@ -1,10 +1,5 @@
-#!/usr/bin/env python
-#
-# NOTE: Switch to,
-#
-#   #!/usr/bin/python3
-#
-# when making Python 3 default
-#
+#!/usr/bin/env python3
+
+
 import argparse
 import os
@@ -14,4 +9,13 @@
 
 import numpy as np
+
+# Avoid the following error on Jenkins, 
+#
+#   "Unable to init server: Could not connect: Connection refused
+#
+#   (runme.py:28445): Gdk-CRITICAL **: 02:23:15.525: gdk_cursor_new_for_display: assertion 'GDK_IS_DISPLAY (display)' failed"
+#
+import matplotlib
+matplotlib.use('Agg')
 
 try:
@@ -31,5 +35,5 @@
 
 def runme(id=None, exclude=None, benchmark='nightly', procedure='check', output='none', rank=1, numprocs=1):
-    """RUNME - test deck for ISSM nightly runs
+    """runme - test deck for ISSM nightly runs
 
     In a test deck directory (for example, test/NightlyRun) the following
@@ -89,64 +93,67 @@
     exception'; see also jenkins/jenkins.sh). These should be counted as
     failures.
-    - Figure out why the following changes allow for correct partitioning of 
-    test set, but cause an error with ADOL-C build (some test logs are parsed 
-    twice).
     """
 
-    #Get ISSM_DIR variable
+    # Get ISSM_DIR variable
     ISSM_DIR = os.environ['ISSM_DIR']
 
-    #Process options
-    #GET benchmark {{{
+    # Process options
+    # Get benchmark {{{
     if benchmark not in ['all', 'nightly', 'validation', 'adolc', 'eismint', 'ismip', 'mesh', 'slc', 'thermal', 'tranforcing', 'qmu']:
-        print(("runme warning: benchmark '{}' not supported, defaulting to test 'nightly'.".format(benchmark)))
+        print(('runme warning: benchmark \'{}\' not supported, defaulting to test \'nightly\'.'.format(benchmark)))
         benchmark = 'nightly'
     # }}}
-    #GET procedure {{{
+    # Get procedure {{{
     if procedure not in ['check', 'update', 'runFromNC']:
-        print(("runme warning: procedure '{}' not supported, defaulting to test 'check'.".format(procedure)))
+        print(('runme warning: procedure \'{}\' not supported, defaulting to test \'check\'.'.format(procedure)))
         procedure = 'check'
     # }}}
-    #GET output {{{
+    # Get output {{{
     if output not in ['nightly', 'none']:
-        print(("runme warning: output '{}' not supported, defaulting to test 'none'.".format(output)))
+        print(('runme warning: output \'{}\' not supported, defaulting to test \'none\'.'.format(output)))
         output = 'none'
     # }}}
-    #GET RANK and NUMPROCS for multithreaded runs {{{
+    # Get rank and numprocs for multi-threaded runs {{{
     if (numprocs < rank):
         numprocs = 1
     # }}}
-    #GET ids  {{{
+    # Get ids  {{{
     flist = [f for f in os.listdir('.') if re.match('test[0-9]+.py', f)] # File name must follow the format "test<integer>.py"
     list_ids = [int(re.search(r'\d+',f.split('.')[0]).group()) for f in flist] # Retrieve test IDs
     i1, i2 = parallelrange(rank, numprocs, len(list_ids))  # Get tests for this CPU only
     list_ids = list_ids[i1:i2 + 1]
-    if np.size(id) > 0 and id is not None:
+    # Check if we are calling runme as a function with a single integer or string argument and, if so, convert to the proper protocol
+    if isinstance(id, int):
+        id = str(id)
+    if isinstance(id, str):
+        id = [[id], []]
+    if len(id[0]) > 0 or len(id[1]) > 0:
         test_ids = set(GetIds(id)).intersection(set(list_ids))
         benchmark = None
     else:
-        # if no tests are specifically provided, do them all
+        # If no tests are specifically provided, do them all
         test_ids = set(list_ids)
 
     # }}}
-    #GET exclude {{{
+    # Get excluded tests {{{
     exclude_ids = GetIds(exclude)
     test_ids = test_ids.difference(exclude_ids)
+
     # }}}
     if procedure == 'runFromNC':
-        #That is a bamg test
+        # bamg test
         test_ids = test_ids.difference([119, 514])
-        # that is smbGEMB format is weird for the test
+        # smbGEMB format is weird for the test
         test_ids = test_ids.difference([243, 244, 252, 253])
-        #those are amr runs where the index is missing from fieldnames
+        # AMR runs where the index is missing from fieldnames
         test_ids = test_ids.difference([462, 463, 464, 465])
-        #test247 solves for thermal and transient which makes it complex to check
+        # test247 solves for thermal and transient which makes it complex to check
         test_ids = test_ids.difference([247])
-        #test 902 is running two models with different stepping
+        # test 902 is running two models with different stepping
         test_ids = test_ids.difference([902])
-        #I have a size issue in 517 needs investigation
+        # size issue in 517 needs investigation
         test_ids = test_ids.difference([517])
 
-    #Process Ids according to benchmarks {{{
+    # Process IDs according to benchmarks {{{
     if benchmark == 'nightly':
         test_ids = test_ids.intersection(set(range(1, 1000)))
@@ -173,9 +180,8 @@
     test_ids = list(test_ids)
     test_ids.sort()
-    print(test_ids)
-    exit()
-    # }}}
-
-    #Loop over tests and launch sequence
+
+    # }}}
+
+    # Loop over tests and launch sequence
     root = os.getcwd()
     errorcount = 0
@@ -184,5 +190,5 @@
         print(("----------------starting:{}-----------------------".format(id)))
         try:
-            #Execute test
+            # Execute test
             os.chdir(root)
             id_string = IdToName(id)
@@ -193,5 +199,5 @@
                 exec(compile(open('test{}.py'.format(id)).read(), 'test{}.py'.format(id), 'exec'), globals())
 
-            #UPDATE ARCHIVE?
+            # Update archive?
             archive_name = 'Archive' + str(id)
             if procedure == 'update':
@@ -218,13 +224,13 @@
                         solvetype = re.split('Solution', key)[0]
 
-                #we save the results, scrap them and solve.
+                # Save the results, scrap them and solve
                 loaded_res = mdl.results
                 mdl.results = []
                 mdl = solve(mdl, solvetype)
 
-                #we loop on the field_names from the nghtly test
+                # Loop on the field_names from the nightly test
                 for k, fieldname in enumerate(Tmod.field_names):
                     try:
-                        #first look for indexing
+                        # First, look for indexing
                         if re.search(r'\d+$', fieldname):
                             index = int(re.search(r'\d+$', fieldname).group()) - 1
@@ -242,9 +248,9 @@
                             index = 0
 
-                        #Then check if the key exists in the loaded results
+                        # Then, check if the key exists in the loaded results
                         try:
                             reskeys = mdl.results.__dict__[solvetype + 'Solution'][index].__dict__.keys()
                         except TypeError:
-                            # most probably a steady state so no subscripting
+                            # Most likely a steady state so no subscripting
                             reskeys = mdl.results.__dict__[solvetype + 'Solution'].__dict__.keys()
                         if fieldname not in reskeys:
@@ -264,19 +270,19 @@
 
                             if fieldname in namedifs.keys():
-                                #Some fields are not consistent
+                                # Some fields are not consistent
                                 fieldname = namedifs[fieldname]
                             elif any([suf in fieldname for suf in sufixes]):
-                                #some test have loops that mess up with naming
+                                # Some tests have loops that mess up naming
                                 try:
                                     sufix = sufixes[np.squeeze(np.where([suf in fieldname for suf in sufixes]))]
                                 except TypeError:
-                                    #probably severalmatches, we take the last one which should be the good one (Needs to be controled in the list above)
+                                    # Probably several matches; we take the last one which should be the one we're want to run (needs to be controlled in the list above)
                                     sufix = sufixes[np.squeeze(np.where([suf in fieldname for suf in sufixes]))[-1]]
                                 fieldname = fieldname[:re.search(sufix, fieldname).start()]
                             elif fieldname.endswith("P") and index == 1:
-                                #we are looking for P2 but 2 as been considered as an index and so shifted by -1
+                                # Looking for P2 but 2 refers to an index, so shift by -1
                                 fieldname = fieldname[:-1]
                             else:
-                                # could be that the index selected above is part of the name
+                                # Handle case where index selected above is part of the name
                                 fieldname = fieldname + str(index + 1)
                         try:
@@ -284,5 +290,5 @@
                             loaded_field = loaded_res.__dict__[solvetype + 'Solution'][index].__dict__[fieldname]
                         except TypeError:
-                            # most probably a steady state so no subscripting
+                            # Most likely a steady state so no subscripting
                             try:
                                 field = mdl.results.__dict__[solvetype + 'Solution'].__dict__[fieldname]
@@ -296,17 +302,17 @@
 
                         ref = Tmod.field_values[k]
-                        #Get tolerance
+                        # Get tolerance
                         tolerance = Tmod.field_tolerances[k]
-                        #compute differences for the results computed from the nc file
+                        # Compute differences for the results computed from the nc file
                         error_diff = np.amax(np.abs(ref - field), axis=0) / (np.amax(np.abs(ref), axis=0) + float_info.epsilon)
                         if not np.isscalar(error_diff):
                             error_diff = error_diff[0]
 
-                        #compute the differences for the results of the nc file
+                        # Compute the differences for the results of the nc file
                         load_diff = np.amax(np.abs(np.squeeze(ref) - loaded_field), axis=0) / (np.amax(np.abs(np.squeeze(ref)), axis=0) + float_info.epsilon)
                         if not np.isscalar(load_diff):
                             load_diff = load_diff[0]
 
-                        #disp test result
+                        # Display test result
                         if (np.any(error_diff > tolerance) or np.isnan(error_diff)) and (np.any(load_diff > tolerance) or np.isnan(load_diff)):
                             if abs(error_diff - load_diff) < tolerance:
@@ -326,8 +332,8 @@
                         else:
                             print(('SUCCESS difference: {:7.2g} < {:7.2g} test id: {} test name: {} field: {}{}'.format(error_diff, tolerance, id, id_string, fieldname, index + 1)))
-                        #disp only if errors for the results
+                        # Display only if there are errors in the results
 
                     except Exception as message:
-                        #something went wrong, print failure message:
+                        # Something went wrong; print failure message
                         print((format_exc()))
                         if output == 'nightly':
@@ -340,7 +346,7 @@
                             print(('FAILURE difference: N/A test id: {} test name: {} field: {}'.format(id, id_string, fieldname)))
                             raise RuntimeError(message)
-            #ELSE: CHECK TEST
+            # Check test
             else:
-                #load archive
+                # Load archive
                 if os.path.exists(os.path.join('..', 'Archives', archive_name + '.arch')):
                     archive_file = os.path.join('..', 'Archives', archive_name + '.arch')
@@ -350,5 +356,5 @@
                 for k, fieldname in enumerate(field_names):
                     try:
-                        #Get field and tolerance
+                        # Get field and tolerance
                         field = np.array(field_values[k])
                         if len(field.shape) == 1:
@@ -359,5 +365,5 @@
                         tolerance = field_tolerances[k]
 
-                        #compare to archive
+                        # Compare to archive
                         # Matlab uses base 1, so use base 1 in labels
                         archive = np.array(archread(archive_file, archive_name + '_field' + str(k + 1)))
@@ -374,5 +380,5 @@
                             error_diff = error_diff[0]
 
-                        #disp test result
+                        # Display test result
                         if (np.any(error_diff > tolerance) or np.isnan(error_diff)):
                             print(('ERROR   difference: {:7.2g} > {:7.2g} test id: {} test name: {} field: {}'.format(error_diff, tolerance, id, id_string, fieldname)))
@@ -383,5 +389,5 @@
 
                     except Exception as message:
-                        #something went wrong, print failure message:
+                        # Something went wrong; print failure message
                         print((format_exc()))
                         if output == 'nightly':
@@ -396,5 +402,5 @@
 
         except Exception as message:
-            #something went wrong, print failure message:
+            # Something went wrong; print failure message
             print((format_exc()))
             if output == 'nightly':
@@ -425,5 +431,5 @@
             print(("PYTHONSTARTUP file '{}' does not exist.".format(PYTHONSTARTUP)))
 
-        parser = argparse.ArgumentParser(description='RUNME - test deck for ISSM nightly runs')
+        parser = argparse.ArgumentParser(description='runme - test deck for ISSM nightly runs')
         parser.add_argument('-i', '--id', nargs='*', type=str, help='followed by the list of ids requested', default=[])
         parser.add_argument('-in', '--include_name', nargs='*', type=str, help='followed by the list of test names requested', default=[])
Index: /issm/trunk/test/NightlyRun/test2002.py
===================================================================
--- /issm/trunk/test/NightlyRun/test2002.py	(revision 28012)
+++ /issm/trunk/test/NightlyRun/test2002.py	(revision 28013)
@@ -25,5 +25,5 @@
 # Load precomputed mesh
 with open('../Data/SlcTestMesh.pkl', 'rb') as slc_test_mesh_file:
-    md.mesh = pickle.load(slc_test_mesh_file)
+    md.mesh = pickle.load(slc_test_mesh_file, encoding='latin1')
 
 # Geometry for the bed, arbitrary thickness of 100
Index: /issm/trunk/test/NightlyRun/test2003.py
===================================================================
--- /issm/trunk/test/NightlyRun/test2003.py	(revision 28012)
+++ /issm/trunk/test/NightlyRun/test2003.py	(revision 28013)
@@ -15,5 +15,5 @@
 # Load precomputed mesh
 with open('../Data/SlcTestMesh.pkl', 'rb') as slc_test_mesh_file:
-    md.mesh = pickle.load(slc_test_mesh_file)
+    md.mesh = pickle.load(slc_test_mesh_file, encoding='latin1')
 
 # Geometry for the bed, arbitrary thickness of 100
Index: /issm/trunk/test/NightlyRun/test2005.py
===================================================================
--- /issm/trunk/test/NightlyRun/test2005.py	(revision 28012)
+++ /issm/trunk/test/NightlyRun/test2005.py	(revision 28013)
@@ -16,5 +16,5 @@
 # Load precomputed mesh
 with open('../Data/SlcTestMesh.pkl', 'rb') as slc_test_mesh_file:
-    md.mesh = pickle.load(slc_test_mesh_file)
+    md.mesh = pickle.load(slc_test_mesh_file, encoding='latin1')
 
 # Geometry for the bed, arbitrary thickness of 100
Index: /issm/trunk/test/NightlyRun/test2010.py
===================================================================
--- /issm/trunk/test/NightlyRun/test2010.py	(revision 28012)
+++ /issm/trunk/test/NightlyRun/test2010.py	(revision 28013)
@@ -15,5 +15,5 @@
 # Load precomputed mesh
 with open('../Data/SlcTestMesh.pkl', 'rb') as slc_test_mesh_file:
-    md.mesh = pickle.load(slc_test_mesh_file)
+    md.mesh = pickle.load(slc_test_mesh_file, encoding='latin1')
 
 # Geometry for the bed, arbitrary thickness of 100
@@ -124,6 +124,4 @@
 areaice = md.results.TransientSolution.SealevelBarystaticIceArea
 areaice[np.isnan(areaice)] = 0
-print(np.isnan(areaice))
-print(np.sum(areaice))
 loadice = md.results.TransientSolution.SealevelBarystaticIceLoad
 rad_e = md.solidearth.planetradius
Index: /issm/trunk/test/NightlyRun/test2071.m
===================================================================
--- /issm/trunk/test/NightlyRun/test2071.m	(revision 28012)
+++ /issm/trunk/test/NightlyRun/test2071.m	(revision 28013)
@@ -64,5 +64,5 @@
 %Fields and tolerances to track changes
 field_names     ={'LoveH_loading_temporal','LoveK_loading_temporal','LoveL_loading_temporal'};
-field_tolerances={4.0e-7,3.0e-7,8.0e-8};
+field_tolerances={5e-6,5e-5,4e-5};
 field_values={h,k,l};
 
Index: /issm/trunk/test/NightlyRun/test2072.m
===================================================================
--- /issm/trunk/test/NightlyRun/test2072.m	(revision 28012)
+++ /issm/trunk/test/NightlyRun/test2072.m	(revision 28013)
@@ -66,5 +66,5 @@
 
 field_names     ={'LoveH_loading_elastic','LoveK_loading_elastic','LoveL_loading_elastic'};
-field_tolerances={3.0e-6,1.0e-5,6.0e-7};
+field_tolerances={2e-5,2e-5,2e-5};
 field_values={h,k,l};
 
Index: /issm/trunk/test/NightlyRun/test2101.m
===================================================================
--- /issm/trunk/test/NightlyRun/test2101.m	(revision 28012)
+++ /issm/trunk/test/NightlyRun/test2101.m	(revision 28013)
@@ -53,5 +53,5 @@
 %Fields and tolerances to track changes
 field_names     ={'EsaUmotion','EsaNmotion','EsaEmotion'};
-field_tolerances={1e-13,1e-13,1e-13};
+field_tolerances={1e-13,1e-13,2e-13};
 field_values={...
 	(md.results.EsaSolution.EsaUmotion),...
Index: /issm/trunk/test/NightlyRun/test2101.py
===================================================================
--- /issm/trunk/test/NightlyRun/test2101.py	(revision 28012)
+++ /issm/trunk/test/NightlyRun/test2101.py	(revision 28013)
@@ -16,5 +16,5 @@
 # Load precomputed mesh
 with open('../Data/SlcTestMesh.pkl', 'rb') as slc_test_mesh_file:
-    md.mesh = pickle.load(slc_test_mesh_file)
+    md.mesh = pickle.load(slc_test_mesh_file, encoding='latin1')
 
 #define load
@@ -63,5 +63,5 @@
 #Fields and tolerances to track changes
 field_names = ['EsaUmotion', 'EsaNmotion', 'EsaEmotion']
-field_tolerances = [1e-13, 1e-13, 1e-13]
+field_tolerances = [1e-13, 1e-13, 2e-13]
 field_values = [md.results.EsaSolution.EsaUmotion,
                 md.results.EsaSolution.EsaNmotion,
Index: /issm/trunk/test/NightlyRun/test2110.m
===================================================================
--- /issm/trunk/test/NightlyRun/test2110.m	(revision 28012)
+++ /issm/trunk/test/NightlyRun/test2110.m	(revision 28013)
@@ -56,5 +56,5 @@
 field_names     ={'EsaUmotion','EsaXmotion','EsaYmotion',...
 	'EsaStrainratexx','EsaStrainratexy','EsaStrainrateyy','EsaRotationrate'};
-field_tolerances={1e-13,1e-13,1e-13,1e-13,1e-13,1e-13,1e-13};
+field_tolerances={1e-13,2e-12,2e-12,9e-12,8e-12,8e-12,3e-11};
 field_values={...
 	(md.results.EsaSolution.EsaUmotion),...
Index: /issm/trunk/test/NightlyRun/test2110.py
===================================================================
--- /issm/trunk/test/NightlyRun/test2110.py	(revision 28012)
+++ /issm/trunk/test/NightlyRun/test2110.py	(revision 28013)
@@ -68,5 +68,5 @@
 field_names = ['EsaUmotion', 'EsaXmotion', 'EsaYmotion',
                'EsaStrainratexx', 'EsaStrainratexy', 'EsaStrainrateyy', 'EsaRotationrate']
-field_tolerances = [1e-13, 1e-13, 1e-13, 1e-13, 1e-13, 1e-13, 1e-13]
+field_tolerances = [1e-13, 2e-12, 2e-12, 9e-12, 8e-12, 8e-12, 3e-11]
 field_values = [md.results.EsaSolution.EsaUmotion,
                 md.results.EsaSolution.EsaXmotion,
Index: /issm/trunk/test/NightlyRun/test2111.m
===================================================================
--- /issm/trunk/test/NightlyRun/test2111.m	(revision 28012)
+++ /issm/trunk/test/NightlyRun/test2111.m	(revision 28013)
@@ -56,5 +56,5 @@
 %fields and tolerances to track changes {{{
 field_names     ={'EsaUmotion','EsaNmotion','EsaEmotion','EsaXmotion','EsaYmotion'}; 
-field_tolerances={1e-13,1e-13,1e-13,1e-13,1e-13};
+field_tolerances={1e-13,3e-13,3e-13,2e-13,3e-13};
 field_values={...
 	(md.results.EsaSolution.EsaUmotion),...
Index: /issm/trunk/test/NightlyRun/test2111.py
===================================================================
--- /issm/trunk/test/NightlyRun/test2111.py	(revision 28012)
+++ /issm/trunk/test/NightlyRun/test2111.py	(revision 28013)
@@ -66,5 +66,5 @@
 #Fields and tolerances to track changes: {{{
 field_names = ['EsaUmotion', 'EsaNmotion', 'EsaEmotion', 'EsaXmotion', 'EsaYmotion']
-field_tolerances = [1e-13, 1e-13, 1e-13, 1e-13, 1e-13]
+field_tolerances = [1e-13, 3e-13, 3e-13, 2e-13, 3e-13]
 field_values = [md.results.EsaSolution.EsaUmotion,
                 md.results.EsaSolution.EsaNmotion,
Index: /issm/trunk/test/NightlyRun/test2112.m
===================================================================
--- /issm/trunk/test/NightlyRun/test2112.m	(revision 28012)
+++ /issm/trunk/test/NightlyRun/test2112.m	(revision 28013)
@@ -54,5 +54,5 @@
 %fields and tolerances to track changes {{{
 field_names     ={'EsaUmotion','EsaNmotion','EsaEmotion','EsaXmotion','EsaYmotion'}; 
-field_tolerances={1e-13,1e-13,1e-13,1e-13,1e-13};
+field_tolerances={1e-13,4e-13,3e-12,3e-13,3e-13};
 field_values={...
 	(md.results.EsaSolution.EsaUmotion),...
Index: /issm/trunk/test/NightlyRun/test2112.py
===================================================================
--- /issm/trunk/test/NightlyRun/test2112.py	(revision 28012)
+++ /issm/trunk/test/NightlyRun/test2112.py	(revision 28013)
@@ -66,5 +66,5 @@
 #Fields and tolerances to track changes: {{{
 field_names = ['EsaUmotion', 'EsaNmotion', 'EsaEmotion', 'EsaXmotion', 'EsaYmotion']
-field_tolerances = [1e-13, 1e-13, 1e-13, 1e-13, 1e-13]
+field_tolerances = [1e-13, 4e-13, 3e-12, 3e-13, 3e-13]
 field_values = [md.results.EsaSolution.EsaUmotion,
                 md.results.EsaSolution.EsaNmotion,
Index: /issm/trunk/test/NightlyRun/test2113.m
===================================================================
--- /issm/trunk/test/NightlyRun/test2113.m	(revision 28012)
+++ /issm/trunk/test/NightlyRun/test2113.m	(revision 28013)
@@ -55,5 +55,5 @@
 %fields and tolerances to track changes {{{
 field_names     ={'EsaUmotion','EsaNmotion','EsaEmotion'}; 
-field_tolerances={1e-13,1e-13,1e-13};
+field_tolerances={1e-13,2e-13,2e-13};
 field_values={...
 	(md.results.EsaSolution.EsaUmotion),...
Index: /issm/trunk/test/NightlyRun/test2113.py
===================================================================
--- /issm/trunk/test/NightlyRun/test2113.py	(revision 28012)
+++ /issm/trunk/test/NightlyRun/test2113.py	(revision 28013)
@@ -67,5 +67,5 @@
 #Fields and tolerances to track changes: {{{
 field_names = ['EsaUmotion', 'EsaNmotion', 'EsaEmotion']
-field_tolerances = [1e-13, 1e-13, 1e-13]
+field_tolerances = [1e-13, 2e-13, 2e-13]
 field_values = [md.results.EsaSolution.EsaUmotion,
                 md.results.EsaSolution.EsaNmotion,
Index: /issm/trunk/test/NightlyRun/test228.py
===================================================================
--- /issm/trunk/test/NightlyRun/test228.py	(revision 28012)
+++ /issm/trunk/test/NightlyRun/test228.py	(revision 28013)
@@ -18,5 +18,5 @@
 md.timestepping.time_step = 1.
 md.settings.output_frequency = 1
-md.timestepping.final_time = 4.
+md.timestepping.final_time = 4
 
 #Set up transient
Index: /issm/trunk/test/NightlyRun/test243.m
===================================================================
--- /issm/trunk/test/NightlyRun/test243.m	(revision 28012)
+++ /issm/trunk/test/NightlyRun/test243.m	(revision 28013)
@@ -57,5 +57,5 @@
 %Fields and tolerances to track changes
 field_names      ={'Layers','SmbDz','SmbT','SmbD','SmbRe','SmbGdn','SmbGsp','SmbA' ,'SmbEC','SmbMassBalance','SmbMAdd','SmbDzAdd','SmbFAC','SmbMeanSHF','SmbMeanLHF','SmbMeanULW','SmbNetLW','SmbNetSW','SmbAccumulatedMassBalance','SmbAccumulatedRunoff','SmbAccumulatedMelt','SmbAccumulatedEC','SmbAccumulatedPrecipitation','SmbAccumulatedRain','SmbAccumulatedRefreeze','SmbRunoff','SmbMelt','SmbEC','SmbPrecipitation','SmbRain','SmbRefreeze','SmbWAdd'};
-field_tolerances ={1e-12,2e-11,2e-11,2e-11,3e-11,3e-11,3e-11,1e-12,2e-11,1e-12,1e-12,1e-12,2e-11,2e-11,2e-11,1e-11,9e-10,2e-11,1e-11,9e-10,2e-11,5e-10,1e-11,1e-11,1e-11,2e-10,2e-11,1e-11,1e-11,1e-11,1e-11,1e-11};
+field_tolerances ={1e-12,4e-11,2e-11,3e-11,6e-11,8e-11,8e-11,1e-12,5e-11,2e-12,1e-12,1e-12,4e-11,2e-11,5e-11,1e-11,9e-10,2e-11,1e-11,9e-10,2e-11,2e-09,1e-11,1e-11,1e-11,2e-10,2e-11,1e-11,1e-11,1e-11,1e-11,1e-11};
 
 field_values={...
Index: /issm/trunk/test/NightlyRun/test243.py
===================================================================
--- /issm/trunk/test/NightlyRun/test243.py	(revision 28012)
+++ /issm/trunk/test/NightlyRun/test243.py	(revision 28013)
@@ -70,5 +70,5 @@
 #Fields and tolerances to track changes
 field_names = ['Layers', 'SmbDz', 'SmbT', 'SmbD', 'SmbRe', 'SmbGdn', 'SmbGsp', 'SmbA', 'SmbEC', 'SmbMassBalance', 'SmbMAdd', 'SmbDzAdd', 'SmbFAC', 'SmbMeanSHF', 'SmbMeanLHF', 'SmbMeanULW', 'SmbNetLW', 'SmbNetSW', 'SmbAccumulatedMassBalance', 'SmbAccumulatedRunoff', 'SmbAccumulatedMelt', 'SmbAccumulatedEC', 'SmbAccumulatedPrecipitation', 'SmbAccumulatedRain', 'SmbAccumulatedRefreeze', 'SmbRunoff', 'SmbMelt', 'SmbEC', 'SmbPrecipitation', 'SmbRain', 'SmbRefreeze', 'SmbWAdd']
-field_tolerances = [1e-12, 2e-11, 2e-11, 2e-11, 3e-11, 3e-11, 3e-11, 1e-12, 2e-11, 1e-12, 1e-12, 1e-12, 2e-11, 2e-11, 2e-11, 1e-11, 9e-10, 2e-11, 1e-11, 9e-10, 2e-11, 5e-10, 1e-11, 1e-11, 1e-11, 2e-10, 2e-11, 1e-11, 1e-11, 1e-11, 1e-11, 1e-11]
+field_tolerances = [1e-12, 4e-11, 2e-11, 3e-11, 6e-11, 8e-11, 8e-11, 1e-12, 5e-11, 2e-12, 1e-12, 1e-12, 4e-11, 2e-11, 5e-11, 1e-11, 9e-10, 2e-11, 1e-11, 9e-10, 2e-11, 2e-09, 1e-11, 1e-11, 1e-11, 2e-10, 2e-11, 1e-11, 1e-11, 1e-11, 1e-11, 1e-11]
 
 # Shape is different in python solution (fixed using reshape) which can cause test failure
Index: /issm/trunk/test/NightlyRun/test244.m
===================================================================
--- /issm/trunk/test/NightlyRun/test244.m	(revision 28012)
+++ /issm/trunk/test/NightlyRun/test244.m	(revision 28013)
@@ -116,5 +116,5 @@
 end
 field_names     ={'moments'};
-field_tolerances={2e-9};
+field_tolerances={3e-9};
 field_values={...
 	md.results.dakota.moments,...
Index: /issm/trunk/test/NightlyRun/test244.py
===================================================================
--- /issm/trunk/test/NightlyRun/test244.py	(revision 28012)
+++ /issm/trunk/test/NightlyRun/test244.py	(revision 28013)
@@ -143,4 +143,4 @@
 
 field_names = ['moments']
-field_tolerances = [2e-9]
+field_tolerances = [3e-9]
 field_values = [md.results.dakota.moments]
Index: /issm/trunk/test/NightlyRun/test247.py
===================================================================
--- /issm/trunk/test/NightlyRun/test247.py	(revision 28012)
+++ /issm/trunk/test/NightlyRun/test247.py	(revision 28013)
@@ -96,6 +96,6 @@
 
 # Friction
-TEMP = np.zeros((md.mesh.numberofvertices,1))
-TEMP[md.mesh.elements - 1] = md.initialization.temperature[:,0:6].reshape(md.mesh.numberofelements,6,1)
+TEMP = np.zeros((md.mesh.numberofvertices,))
+TEMP[md.mesh.elements - 1] = md.initialization.temperature[:,0:6].reshape(md.mesh.numberofelements,6)
 
 temperature = TEMP
Index: /issm/trunk/test/NightlyRun/test252.m
===================================================================
--- /issm/trunk/test/NightlyRun/test252.m	(revision 28012)
+++ /issm/trunk/test/NightlyRun/test252.m	(revision 28013)
@@ -66,7 +66,7 @@
 	'SmbDz3','SmbT3' ,'SmbD3' ,'SmbRe3','SmbGdn3','SmbGsp3','SmbA3' ,'SmbEC3','SmbMassBalance3','SmbMAdd3','SmbDzAdd3','SmbFAC3',...
 	'SmbDz4','SmbT4' ,'SmbD4' ,'SmbRe4','SmbGdn4','SmbGsp4','SmbA4' ,'SmbEC4','SmbMassBalance4','SmbMAdd4','SmbDzAdd4','SmbFAC4'};
-field_tolerances ={1e-12,1e-12,1e-12,1e-12,1e-12,1e-12,1e-12,1e-12,1e-12,1e-12,1e-12,1e-12,1e-12,...
-                   1e-12,1e-12,1e-11,1e-10,2e-11,1e-11,1e-12,1e-11,1e-12,1e-12,1e-12,1e-11,...
-                   1e-12,1e-12,2e-12,2e-11,4e-11,1e-11,1e-12,2e-11,2e-11,1e-12,1e-12,1e-11,...
+field_tolerances ={1e-12,1e-12,1e-12,1e-12,1e-12,1e-12,1e-12,1e-12,1e-12,7e-12,1e-12,1e-12,1e-12,...
+                   1e-12,4e-12,1e-11,1e-10,2e-11,1e-11,1e-12,2e-11,1e-12,1e-12,1e-12,1e-11,...
+                   1e-12,4e-12,2e-12,2e-11,4e-11,1e-11,1e-12,4e-11,4e-11,1e-12,1e-12,1e-11,...
                    1e-11,1e-11,4e-11,4e-11,2e-11,4e-11,1e-12,3e-12,1e-10,1e-12,1e-12,2e-11};
 
Index: /issm/trunk/test/NightlyRun/test252.py
===================================================================
--- /issm/trunk/test/NightlyRun/test252.py	(revision 28012)
+++ /issm/trunk/test/NightlyRun/test252.py	(revision 28013)
@@ -81,7 +81,7 @@
                'SmbDz3', 'SmbT3', 'SmbD3', 'SmbRe3', 'SmbGdn3', 'SmbGsp3', 'SmbA3', 'SmbEC3', 'SmbMassBalance3', 'SmbMAdd3', 'SmbDzAdd3', 'SmbFAC3',
                'SmbDz4', 'SmbT4', 'SmbD4', 'SmbRe4', 'SmbGdn4', 'SmbGsp4', 'SmbA4', 'SmbEC4', 'SmbMassBalance4', 'SmbMAdd4', 'SmbDzAdd4', 'SmbFAC4']
-field_tolerances = [1e-12, 1e-12, 1e-12, 1e-12, 1e-12, 1e-12, 1e-12, 1e-12, 1e-12, 1e-12, 1e-12, 1e-12, 1e-12,
-                    1e-12, 1e-12, 1e-11, 1e-10, 2e-11, 1e-11, 1e-12, 1e-11, 1e-12, 1e-12, 1e-12, 1e-11,
-                    1e-12, 1e-12, 2e-12, 2e-11, 4e-11, 1e-11, 1e-12, 2e-11, 2e-11, 1e-12, 1e-12, 1e-11,
+field_tolerances = [1e-12, 1e-12, 1e-12, 1e-12, 1e-12, 1e-12, 1e-12, 1e-12, 1e-12, 7e-12, 1e-12, 1e-12, 1e-12,
+                    1e-12, 4e-12, 1e-11, 1e-10, 2e-11, 1e-11, 1e-12, 2e-11, 1e-12, 1e-12, 1e-12, 1e-11,
+                    1e-12, 4e-12, 2e-12, 2e-11, 4e-11, 1e-11, 1e-12, 4e-11, 4e-11, 1e-12, 1e-12, 1e-11,
                     1e-11, 1e-11, 4e-11, 4e-11, 2e-11, 4e-11, 1e-12, 3e-12, 1e-10, 1e-12, 1e-12, 2e-11]
 # Shape is different in python solution (fixed using reshape) which can cause test failure
Index: /issm/trunk/test/NightlyRun/test253.m
===================================================================
--- /issm/trunk/test/NightlyRun/test253.m	(revision 28012)
+++ /issm/trunk/test/NightlyRun/test253.m	(revision 28013)
@@ -67,8 +67,9 @@
 	'SmbDz3','SmbT3' ,'SmbD3' ,'SmbRe3','SmbGdn3','SmbGsp3','SmbA3' ,'SmbEC3','SmbMassBalance3','SmbMAdd3','SmbDzAdd3','SmbFAC3',...
 	'SmbDz4','SmbT4' ,'SmbD4' ,'SmbRe4','SmbGdn4','SmbGsp4','SmbA4' ,'SmbEC4','SmbMassBalance4','SmbMAdd4','SmbDzAdd4','SmbFAC4'};
-field_tolerances ={1e-12,1e-12,1e-12,1e-12,1e-12,1e-12,1e-12,1e-12,1e-12,1e-12,1e-12,1e-12,1e-12,...
-                   1e-12,1e-12,1e-11,1e-10,4e-11,1e-11,1e-12,1e-11,1e-12,1e-12,1e-12,1e-11,...
-                   1e-12,1e-12,2e-12,2e-11,1e-10,1e-11,1e-12,1e-11,1e-11,1e-12,1e-12,1e-11,...
-                   1e-11,1e-11,1e-10,1e-11,1e-12,3e-11,1e-12,4e-12,1e-10,1e-12,1e-12,2e-11};
+field_tolerances ={1e-12,...
+                   1e-12,1e-12,1e-12,1e-12,1e-12,1e-12,1e-12,1e-12,1e-12,1e-12,1e-12,1e-12,...
+                   1e-12,4e-12,1e-11,1e-10,4e-11,1e-11,1e-12,1e-11,1e-12,1e-12,1e-12,1e-11,...
+                   1e-12,4e-12,2e-12,2e-11,1e-10,1e-11,1e-12,1e-11,1e-11,1e-12,1e-12,1e-11,...
+                   1e-11,1e-11,1e-10,1e-11,7e-12,3e-11,1e-12,4e-12,1e-10,1e-12,1e-12,2e-11};
 
 field_values={...
Index: /issm/trunk/test/NightlyRun/test253.py
===================================================================
--- /issm/trunk/test/NightlyRun/test253.py	(revision 28012)
+++ /issm/trunk/test/NightlyRun/test253.py	(revision 28013)
@@ -86,7 +86,7 @@
     1e-12,
     1e-12,1e-12,1e-12,1e-12,1e-12,1e-12,1e-12,1e-12,1e-12,1e-12,1e-12,1e-12,
-    1e-12,1e-12,1e-11,1e-10,4e-11,1e-11,1e-12,1e-11,1e-12,1e-12,1e-12,1e-11,
-    1e-12,1e-12,2e-12,2e-11,1e-10,1e-11,1e-12,1e-11,1e-11,1e-12,1e-12,1e-11,
-    1e-11,1e-11,1e-10,1e-11,1e-12,3e-11,1e-12,4e-12,1e-10,1e-12,1e-12,2e-11
+    1e-12,4e-12,1e-11,1e-10,4e-11,1e-11,1e-12,1e-11,1e-12,1e-12,1e-12,1e-11,
+    1e-12,4e-12,2e-12,2e-11,1e-10,1e-11,1e-12,1e-11,1e-11,1e-12,1e-12,1e-11,
+    1e-11,1e-11,1e-10,1e-11,7e-12,3e-11,1e-12,4e-12,1e-10,1e-12,1e-12,2e-11
 ]
 # Shape is different in python solution (fixed using reshape) which can cause test failure
Index: /issm/trunk/test/NightlyRun/test257.m
===================================================================
--- /issm/trunk/test/NightlyRun/test257.m	(revision 28012)
+++ /issm/trunk/test/NightlyRun/test257.m	(revision 28013)
@@ -41,6 +41,6 @@
 
 md.timestepping.start_time = 0;
-md.timestepping.time_step  = 1;
-md.timestepping.final_time = 7;
+md.timestepping.time_step  = 1/12;
+md.timestepping.final_time = 2;
 md.smb                     = SMBarma();
 md.smb.num_basins          = 3; %number of basins
@@ -55,6 +55,18 @@
 md.smb.arlag_coefs         = [[0.2,0.1,0.05,0.01];[0.4,0.2,-0.2,0.1];[0.4,-0.4,0.1,-0.1]];
 md.smb.malag_coefs         = [1.0;0;0.2];
-md.smb.lapserates          = [0.01,0.0;0.01,-0.01;0.0,-0.01];
-md.smb.elevationbins       = [100;150;100];
+lm0                        = [1e-4*[1,-0.1,-1];1e-6*[1,-0.1,-1];1e-5*[1,-0.1,-1]];
+lm1                        = [1e-4*[2,-0.2,-2];1e-6*[2,-0.2,-2];1e-5*[2,-0.2,-2]];
+lm2                        = [1e-4*[3,-0.3,-3];1e-6*[3,-0.3,-3];1e-5*[3,-0.3,-3]];
+lm3                        = [1e-4*[4,-0.4,-4];1e-6*[4,-0.4,-4];1e-5*[4,-0.4,-4]];
+lm4                        = [1e-4*[5,-0.5,-5];1e-6*[5,-0.5,-5];1e-5*[5,-0.5,-5]];
+lm5                        = [1e-4*[6,-0.6,-6];1e-6*[6,-0.6,-6];1e-5*[6,-0.6,-6]];
+lm6                        = [1e-4*[7,-0.7,-7];1e-6*[7,-0.7,-7];1e-5*[7,-0.7,-7]];
+lm7                        = [1e-4*[8,-0.8,-8];1e-6*[8,-0.8,-8];1e-5*[8,-0.8,-8]];
+lm8                        = [1e-4*[9,-0.9,-9];1e-6*[9,-0.9,-9];1e-5*[9,-0.9,-9]];
+lm9                        = [1e-4*[10,-1,-10];1e-6*[10,-1.0,-10];1e-5*[10,-1.0,-10]];
+lm10                       = [1e-4*[11,-1.1,-11];1e-6*[11,-1.1,-11];1e-5*[11,-1.1,-11]];
+lm11                       = [1e-4*[12,-1.2,-12];1e-6*[12,-1.2,-12];1e-5*[12,-1.2,-12]];
+md.smb.lapserates          = cat(3,lm0,lm1,lm2,lm3,lm4,lm5,lm6,lm7,lm8,lm9,lm10,lm11);
+md.smb.elevationbins       = repmat([100,300;200,400;250,450],1,1,12);
 
 %Stochastic forcing
@@ -79,15 +91,15 @@
 	(md.results.TransientSolution(1).IceVolume),...
 	(md.results.TransientSolution(1).SmbMassBalance),...
-	(md.results.TransientSolution(2).Vx),...
-	(md.results.TransientSolution(2).Vy),...
-	(md.results.TransientSolution(2).Vel),...
-	(md.results.TransientSolution(2).Thickness),...
-	(md.results.TransientSolution(2).IceVolume),...
-	(md.results.TransientSolution(2).SmbMassBalance),...
-	(md.results.TransientSolution(7).Vx),...
-	(md.results.TransientSolution(7).Vy),...
-	(md.results.TransientSolution(7).Vel),...
-	(md.results.TransientSolution(7).Thickness),...
-	(md.results.TransientSolution(7).IceVolume),...
-	(md.results.TransientSolution(7).SmbMassBalance),...
+	(md.results.TransientSolution(12).Vx),...
+	(md.results.TransientSolution(12).Vy),...
+	(md.results.TransientSolution(12).Vel),...
+	(md.results.TransientSolution(12).Thickness),...
+	(md.results.TransientSolution(12).IceVolume),...
+	(md.results.TransientSolution(12).SmbMassBalance),...
+	(md.results.TransientSolution(24).Vx),...
+	(md.results.TransientSolution(24).Vy),...
+	(md.results.TransientSolution(24).Vel),...
+	(md.results.TransientSolution(24).Thickness),...
+	(md.results.TransientSolution(24).IceVolume),...
+	(md.results.TransientSolution(24).SmbMassBalance),...
 	};
Index: /issm/trunk/test/NightlyRun/test257.py
===================================================================
--- /issm/trunk/test/NightlyRun/test257.py	(revision 28012)
+++ /issm/trunk/test/NightlyRun/test257.py	(revision 28013)
@@ -1,4 +1,3 @@
 #Test Name: SquareShelfSMBarma
-from __future__ import division  # TODO: Remove this import after ISSM is updated to use Python 3 by default
 import numpy as np
 from model import *
@@ -46,6 +45,6 @@
 
 md.timestepping.start_time = 0
-md.timestepping.time_step = 1
-md.timestepping.final_time = 8
+md.timestepping.time_step = 1/12
+md.timestepping.final_time = 2
 md.smb = SMBarma()
 md.smb.num_basins = 3  # number of basins
@@ -60,6 +59,21 @@
 md.smb.arlag_coefs = np.array([[0.2, 0.1, 0.05, 0.01], [0.4, 0.2, -0.2, 0.1], [0.4, -0.4, 0.1, -0.1]])
 md.smb.malag_coefs = np.array([[1.0],[0],[0.2]])
-md.smb.lapserates        = np.array([[0.01,0.0],[0.01,-0.01],[0.0,-0.01]])
-md.smb.elevationbins  = np.array([100,150,100]).reshape(md.smb.num_basins,1)
+
+lm0                   = np.array([1e-4*np.array([1,-0.1,-1]),1e-6*np.array([1,-0.1,-1]),1e-5*np.array([1,-0.1,-1])])
+lm1                   = np.array([1e-4*np.array([2,-0.2,-2]),1e-6*np.array([2,-0.2,-2]),1e-5*np.array([2,-0.2,-2])])
+lm2                   = np.array([1e-4*np.array([3,-0.3,-3]),1e-6*np.array([3,-0.3,-3]),1e-5*np.array([3,-0.3,-3])])
+lm3                   = np.array([1e-4*np.array([4,-0.4,-4]),1e-6*np.array([4,-0.4,-4]),1e-5*np.array([4,-0.4,-4])])
+lm4                   = np.array([1e-4*np.array([5,-0.5,-5]),1e-6*np.array([5,-0.5,-5]),1e-5*np.array([5,-0.5,-5])])
+lm5                   = np.array([1e-4*np.array([6,-0.6,-6]),1e-6*np.array([6,-0.6,-6]),1e-5*np.array([6,-0.6,-6])])
+lm6                   = np.array([1e-4*np.array([7,-0.7,-7]),1e-6*np.array([7,-0.7,-7]),1e-5*np.array([7,-0.7,-7])])
+lm7                   = np.array([1e-4*np.array([8,-0.8,-8]),1e-6*np.array([8,-0.8,-8]),1e-5*np.array([8,-0.8,-8])])
+lm8                   = np.array([1e-4*np.array([9,-0.9,-9]),1e-6*np.array([9,-0.9,-9]),1e-5*np.array([9,-0.9,-9])])
+lm9                   = np.array([1e-4*np.array([10,-1.0,-10]),1e-6*np.array([10,-1.0,-10]),1e-5*np.array([10,-1.0,-10])])
+lm10                  = np.array([1e-4*np.array([11,-1.1,-11]),1e-6*np.array([11,-1.1,-11]),1e-5*np.array([11,-1.1,-11])])
+lm11                  = np.array([1e-4*np.array([12,-1.2,-12]),1e-6*np.array([12,-1.2,-12]),1e-5*np.array([12,-1.2,-12])])
+md.smb.lapserates     = np.stack((lm0,lm1,lm2,lm3,lm4,lm5,lm6,lm7,lm8,lm9,lm10,lm11),axis=2)
+ebins                 = np.array([[100,300],[200,400],[250,450]])
+md.smb.elevationbins  = np.stack([ebins for ii in range(12)],axis=2)
+
 
 # Stochastic forcing
@@ -90,15 +104,15 @@
     md.results.TransientSolution[0].IceVolume,
     md.results.TransientSolution[0].SmbMassBalance,
-    md.results.TransientSolution[1].Vx,
-    md.results.TransientSolution[1].Vy,
-    md.results.TransientSolution[1].Vel,
-    md.results.TransientSolution[1].Thickness,
-    md.results.TransientSolution[1].IceVolume,
-    md.results.TransientSolution[1].SmbMassBalance,
-    md.results.TransientSolution[6].Vx,
-    md.results.TransientSolution[6].Vy,
-    md.results.TransientSolution[6].Vel,
-    md.results.TransientSolution[6].Thickness,
-    md.results.TransientSolution[6].IceVolume,
-    md.results.TransientSolution[6].SmbMassBalance
+    md.results.TransientSolution[11].Vx,
+    md.results.TransientSolution[11].Vy,
+    md.results.TransientSolution[11].Vel,
+    md.results.TransientSolution[11].Thickness,
+    md.results.TransientSolution[11].IceVolume,
+    md.results.TransientSolution[11].SmbMassBalance,
+    md.results.TransientSolution[23].Vx,
+    md.results.TransientSolution[23].Vy,
+    md.results.TransientSolution[23].Vel,
+    md.results.TransientSolution[23].Thickness,
+    md.results.TransientSolution[23].IceVolume,
+    md.results.TransientSolution[23].SmbMassBalance
 ]
Index: /issm/trunk/test/NightlyRun/test3201.m
===================================================================
--- /issm/trunk/test/NightlyRun/test3201.m	(revision 28012)
+++ /issm/trunk/test/NightlyRun/test3201.m	(revision 28013)
@@ -81,5 +81,4 @@
 max_params = md.materials.rheology_B; max_params(1:end-1,:) = cuffey(200);
 md.autodiff.independents{1} = independent('name','MaterialsRheologyBbar',...
-	'md_name','md.materials.rheology_B',...
 	'control_size',size(md.materials.rheology_B,2),...
 	'type','vertex',... %Really needed??
Index: /issm/trunk/test/NightlyRun/test3202.m
===================================================================
--- /issm/trunk/test/NightlyRun/test3202.m	(revision 28012)
+++ /issm/trunk/test/NightlyRun/test3202.m	(revision 28013)
@@ -81,5 +81,4 @@
 max_params = md.materials.rheology_B; max_params(1:end-1,:) = cuffey(200);
 md.autodiff.independents{1} = independent('name','MaterialsRheologyBbar',...
-	'md_name','md.materials.rheology_B',...
 	'control_size',size(md.materials.rheology_B,2),...
 	'type','vertex',... %Really needed??
@@ -104,5 +103,5 @@
 %Fields and tolerances to track changes
 field_names     ={'Gradient','Misfit','Rheology'};
-field_tolerances={1e-12,1e-12,1e-12};
+field_tolerances={2e-12,1e-12,1e-12};
 field_values={...
 	(md.results.TransientSolution(1).Gradient1),...
Index: /issm/trunk/test/NightlyRun/test3203.m
===================================================================
--- /issm/trunk/test/NightlyRun/test3203.m	(revision 28012)
+++ /issm/trunk/test/NightlyRun/test3203.m	(revision 28013)
@@ -61,5 +61,4 @@
 max_params = md.materials.rheology_B; max_params(1:end-1,:) = cuffey(200);
 md.autodiff.independents{1} = independent('name','MaterialsRheologyBbar',...
-	'md_name','md.materials.rheology_B',...
 	'control_size',size(md.materials.rheology_B,2),...
 	'type','vertex',... %Really needed??
Index: /issm/trunk/test/NightlyRun/test3204.m
===================================================================
--- /issm/trunk/test/NightlyRun/test3204.m	(revision 28012)
+++ /issm/trunk/test/NightlyRun/test3204.m	(revision 28013)
@@ -81,5 +81,4 @@
 max_params = md.materials.rheology_B; max_params(1:end-1,:) = cuffey(200);
 md.autodiff.independents{1} = independent('name','MaterialsRheologyBbar',...
-	'md_name','md.materials.rheology_B',...
 	'control_size',size(md.materials.rheology_B,2),...
 	'type','vertex',... %Really needed??
Index: /issm/trunk/test/NightlyRun/test3205.m
===================================================================
--- /issm/trunk/test/NightlyRun/test3205.m	(revision 28012)
+++ /issm/trunk/test/NightlyRun/test3205.m	(revision 28013)
@@ -83,5 +83,4 @@
 max_params = md.materials.rheology_B; max_params(1:end-1,:) = cuffey(200);
 md.autodiff.independents{1} = independent('name','MaterialsRheologyBbar',...
-	'md_name','md.materials.rheology_B',...
 	'control_size',size(md.materials.rheology_B,2),...
 	'type','vertex',... %Really needed??
Index: /issm/trunk/test/NightlyRun/test3206.m
===================================================================
--- /issm/trunk/test/NightlyRun/test3206.m	(revision 28013)
+++ /issm/trunk/test/NightlyRun/test3206.m	(revision 28013)
@@ -0,0 +1,162 @@
+%Test Name: SquareShelfTransientCalibrationWithParamcodipack
+
+%Generate observations
+md = model;
+md=triangle(model(),'../Exp/Square.exp',50000.);
+md = setmask(md,'all','');
+md = parameterize(md,'../Par/SquareShelf.par');
+md = setflowequation(md,'SSA','all');
+md.cluster = generic('np',2);
+
+%Create real time series for B
+md.timestepping.interp_forcing = 0;
+md.timestepping.final_time = 2*md.timestepping.time_step;
+md.materials.rheology_B = 1.8e8*ones(md.mesh.numberofvertices,2);
+md.materials.rheology_B(find(md.mesh.x<md.mesh.y),2)=1.4e8;
+md.materials.rheology_B=[md.materials.rheology_B;0.01 2*md.timestepping.time_step];
+
+%Initial values
+md.initialization.vx = zeros(md.mesh.numberofvertices,1);
+md.initialization.vy = zeros(md.mesh.numberofvertices,1);
+md.initialization.pressure = zeros(md.mesh.numberofvertices,1);
+md.initialization.temperature = zeros(md.mesh.numberofvertices,1);
+md.basalforcings.geothermalflux = zeros(md.mesh.numberofvertices,1);
+md.thermal.spctemperature = NaN(md.mesh.numberofvertices,1);
+
+%Param
+md.basalforcings=linearbasalforcings();
+md.basalforcings.deepwater_melting_rate=50.; % m/yr ice equivalent
+md.basalforcings.deepwater_elevation=-500;
+md.basalforcings.upperwater_melting_rate=0; % no melting for zb>=0
+md.basalforcings.upperwater_elevation=0; % sea level
+md.basalforcings.groundedice_melting_rate=zeros(md.mesh.numberofvertices,1); % no melting on grounded ice
+md.basalforcings.perturbation_melting_rate(:)=0;
+md.transient.isthermal = 0;
+
+md = solve(md,'tr');
+
+%Set cost function
+count = 1;
+for i=1:numel(md.results.TransientSolution)
+	vx_obs = md.results.TransientSolution(i).Vx;
+	vy_obs = md.results.TransientSolution(i).Vy;
+	time   = md.results.TransientSolution(i).time;
+	weights= ones(md.mesh.numberofvertices,1);
+
+	md.outputdefinition.definitions{count}=cfsurfacelogvel('name',['LogVelMis' num2str(count)],...
+		'definitionstring',['Outputdefinition' num2str(count)],...
+		'vxobs_string','VxObs','vxobs',vx_obs,...
+		'vyobs_string','VyObs','vyobs',vy_obs,...
+		'weights',weights,'weights_string','WeightsSurfaceObservation',...
+		'datatime',time);
+	md.autodiff.dependents{count} = dependent('name',['Outputdefinition' num2str(count)],'type','scalar','fos_reverse_index',1);
+	count = count+1;
+end
+
+%Deal with vx separately
+vx_obs  = [[md.results.TransientSolution(:).Vx]/md.constants.yts; [md.results.TransientSolution(:).time]];
+weights = [ones(size(vx_obs,1)-1,1); 0];
+md.outputdefinition.definitions{count}=cfsurfacesquaretransient('name',['VxMisfit_Transient'],...
+	'definitionstring',['Outputdefinition' num2str(count)],...
+	'model_string','Vx','observations_string','VxObs',...
+	'observations',vx_obs,'weights',500*weights,'weights_string','WeightsSurfaceObservation');
+md.autodiff.dependents{count} = dependent('name',['Outputdefinition' num2str(count)],'type','scalar','fos_reverse_index',1);
+count = count+1;
+
+vy_obs  = [[md.results.TransientSolution(:).Vy]/md.constants.yts; [md.results.TransientSolution(:).time]];
+md.outputdefinition.definitions{count}=cfsurfacesquaretransient('name',['VyMisfit_Transient'],...
+	'definitionstring',['Outputdefinition' num2str(count)],...
+	'model_string','Vy','observations_string','VyObs',...
+	'observations',vy_obs,'weights',weights,'weights_string','WeightsSurfaceObservation');
+md.autodiff.dependents{count} = dependent('name',['Outputdefinition' num2str(count)],'type','scalar','fos_reverse_index',1);
+count = count+1;
+
+surf_obs  = [[md.results.TransientSolution(:).Surface]; [md.results.TransientSolution(:).time]];
+md.outputdefinition.definitions{count}=cfsurfacesquaretransient('name',['SurfMisfit_Transient'],...
+	'definitionstring',['Outputdefinition' num2str(count)],...
+	'model_string','Surface','observations_string','SurfaceObservation',...
+	'observations',surf_obs,'weights',weights/(md.constants.yts),'weights_string','WeightsSurfaceObservation');
+md.autodiff.dependents{count} = dependent('name',['Outputdefinition' num2str(count)],'type','scalar','fos_reverse_index',1);
+count = count+1;
+
+%Independent
+md.materials.rheology_B(1:end-1,:) = 1.8e8;
+min_params = md.materials.rheology_B; min_params(1:end-1,:) = cuffey(273);
+max_params = md.materials.rheology_B; max_params(1:end-1,:) = cuffey(200);
+md.autodiff.independents{1} = independent('name','MaterialsRheologyBbar',...
+	'control_size',size(md.materials.rheology_B,2),...
+	'type','vertex',... %Really needed??
+	'min_parameters',min_params,...
+	'max_parameters',max_params,...
+	'control_scaling_factor',1e8);
+
+md.basalforcings.deepwater_melting_rate=1.; % m/yr ice equivalent
+field =md.basalforcings.deepwater_melting_rate/md.constants.yts;
+name = 'BasalforcingsDeepwaterMeltingRate';
+scaling = 50/md.constants.yts;
+md.autodiff.independents{2} = independent('name',name,'type','vertex','nods',md.mesh.numberofvertices,...
+	'control_size', size(field,2), 'min_parameters',1e-5*field, 'max_parameters',100*field, 'control_scaling_factor',scaling);
+
+md.inversion=adm1qn3inversion(md.inversion);
+md.inversion.iscontrol=1;
+md.inversion.maxiter=3;
+md.inversion.maxsteps=md.inversion.maxiter;
+md.inversion.dxmin=1e-5;
+md.autodiff.isautodiff=1;
+md.autodiff.driver='fos_reverse';
+md.settings.checkpoint_frequency = 2;
+
+%Go solve!
+md.verbose=verbose(0);
+md=solve(md,'tr');
+
+%Fields and tolerances to track changes
+field_names     ={'Gradient1','Gradient2','Misfit','Rheology','DeepMelt'};
+field_tolerances={1e-10,1e-10,1e-10,1e-10,1e-10};
+field_values={...
+	(md.results.TransientSolution(1).Gradient1),...
+	(md.results.TransientSolution(1).Gradient2),...
+	(md.results.TransientSolution(1).J),...
+	(md.results.TransientSolution(1).MaterialsRheologyBbar),...
+	(md.results.TransientSolution(1).BasalforcingsDeepwaterMeltingRate),...
+	};
+
+
+return;
+%The code below validates the gradient, run only with maxiter=1 above!
+disp('Testing Gradient');
+index = 3;
+dJdB_ad = md.results.TransientSolution(1).Gradient1(index);
+delta=0.001;
+B1=md.materials.rheology_B(index);
+%B1=md.basalforcings.deepwater_melting_rate;
+B0=B1*(1.-delta);
+B2=B1*(1.+delta);
+deltaB=(B2-B0);
+
+list = {}; for i=1:numel(md.outputdefinition.definitions), list{i} = md.autodiff.dependents{i}.name;end
+md.transient.requested_outputs = list;
+md.autodiff.isautodiff=false;
+md.inversion.iscontrol=false;
+md2=md;
+
+%forward
+md=md2;
+md.materials.rheology_B(index)=B0;
+%md.basalforcings.deepwater_melting_rate = B0;
+md=solve(md,'tr');
+J0 = 0;
+for i=1:numel(md.outputdefinition.definitions), eval(['J0 = J0 + md.results.TransientSolution(end).' list{i} ';']); end
+
+%backward
+md=md2;
+md.materials.rheology_B(index)=B2;
+%md.basalforcings.deepwater_melting_rate = B2;
+md=solve(md,'tr');
+J2 = 0;
+for i=1:numel(md.outputdefinition.definitions), eval(['J2 = J2 + md.results.TransientSolution(end).' list{i} ';']); end
+
+%compute resulting derivative
+dJdB_an=(J2-J0)/deltaB;
+
+disp(sprintf('dJ/dB: analytical:  %16.16g\n       using ad:    %16.16g\n',dJdB_an,dJdB_ad));
Index: /issm/trunk/test/NightlyRun/test328.m
===================================================================
--- /issm/trunk/test/NightlyRun/test328.m	(revision 28012)
+++ /issm/trunk/test/NightlyRun/test328.m	(revision 28013)
@@ -5,8 +5,8 @@
 md=setflowequation(md,'SSA','all');
 md.smb = SMBgradients();
-md.smb.b_pos=-100. + 0.00005*md.mesh.x - 0.0001*md.mesh.y;
-md.smb.b_neg=250. + 0.000051*md.mesh.x - 0.00011*md.mesh.y;
+md.smb.b_pos=(-100. + 0.00005*md.mesh.x - 0.0001*md.mesh.y) / 1000. * md.materials.rho_freshwater / md.materials.rho_ice;
+md.smb.b_neg=(250. + 0.000051*md.mesh.x - 0.00011*md.mesh.y) / 1000. * md.materials.rho_freshwater / md.materials.rho_ice;
 md.smb.href=md.geometry.surface;
-md.smb.smbref= 1000. - 0.001*md.mesh.x - 0.005*md.mesh.y;
+md.smb.smbref= (1000. - 0.001*md.mesh.x - 0.005*md.mesh.y) / 1000. * md.materials.rho_freshwater / md.materials.rho_ice;
 md.transient.requested_outputs={'default','TotalSmb'};
 md.cluster=generic('name',oshostname(),'np',3);
Index: /issm/trunk/test/NightlyRun/test328.py
===================================================================
--- /issm/trunk/test/NightlyRun/test328.py	(revision 28012)
+++ /issm/trunk/test/NightlyRun/test328.py	(revision 28013)
@@ -14,9 +14,9 @@
 md = setflowequation(md, 'SSA', 'all')
 md.smb = SMBgradients()
-md.smb.b_pos = -100. + 0.00005 * md.mesh.x - 0.0001 * md.mesh.y
-md.smb.b_neg = 250. + 0.000051 * md.mesh.x - 0.00011 * md.mesh.y
+md.smb.b_pos = (-100. + 0.00005 * md.mesh.x - 0.0001 * md.mesh.y) / 1000. * md.materials.rho_freshwater / md.materials.rho_ice
+md.smb.b_neg = (250. + 0.000051 * md.mesh.x - 0.00011 * md.mesh.y) / 1000. * md.materials.rho_freshwater / md.materials.rho_ice
 md.transient.requested_outputs = ['default', 'TotalSmb']
 md.smb.href = copy.deepcopy(md.geometry.surface)
-md.smb.smbref = 1000. - 0.001 * md.mesh.x - 0.005 * md.mesh.y
+md.smb.smbref= (1000. - 0.001 * md.mesh.x - 0.005 * md.mesh.y) / 1000. * md.materials.rho_freshwater / md.materials.rho_ice
 md.cluster = generic('name', gethostname(), 'np', 3)
 md = solve(md, 'Transient')
Index: /issm/trunk/test/NightlyRun/test329.m
===================================================================
--- /issm/trunk/test/NightlyRun/test329.m	(revision 28012)
+++ /issm/trunk/test/NightlyRun/test329.m	(revision 28013)
@@ -6,8 +6,8 @@
 md=setflowequation(md,'HO','all');
 md.smb = SMBgradients();
-md.smb.b_pos=-100. + 0.00005*md.mesh.x - 0.0001*md.mesh.y;
-md.smb.b_neg=250. + 0.000051*md.mesh.x - 0.00011*md.mesh.y;
+md.smb.b_pos=(-100. + 0.00005*md.mesh.x - 0.0001*md.mesh.y) / 1000. * md.materials.rho_freshwater / md.materials.rho_ice;
+md.smb.b_neg=(250. + 0.000051*md.mesh.x - 0.00011*md.mesh.y) / 1000. * md.materials.rho_freshwater / md.materials.rho_ice;
 md.smb.href=md.geometry.surface;
-md.smb.smbref= 1000. - 0.001*md.mesh.x - 0.005*md.mesh.y;
+md.smb.smbref= (1000. - 0.001*md.mesh.x - 0.005*md.mesh.y) / 1000. * md.materials.rho_freshwater / md.materials.rho_ice;
 md.transient.requested_outputs={'default','TotalSmb'};
 md.cluster=generic('name',oshostname(),'np',3);
Index: /issm/trunk/test/NightlyRun/test329.py
===================================================================
--- /issm/trunk/test/NightlyRun/test329.py	(revision 28012)
+++ /issm/trunk/test/NightlyRun/test329.py	(revision 28013)
@@ -15,8 +15,8 @@
 md = setflowequation(md, 'HO', 'all')
 md.smb = SMBgradients()
-md.smb.b_pos = -100. + 0.00005 * md.mesh.x - 0.0001 * md.mesh.y
-md.smb.b_neg = 250. + 0.000051 * md.mesh.x - 0.00011 * md.mesh.y
+md.smb.b_pos = (-100. + 0.00005 * md.mesh.x - 0.0001 * md.mesh.y) / 1000. * md.materials.rho_freshwater / md.materials.rho_ice
+md.smb.b_neg = (250. + 0.000051 * md.mesh.x - 0.00011 * md.mesh.y) / 1000. * md.materials.rho_freshwater / md.materials.rho_ice
 md.smb.href = copy.deepcopy(md.geometry.surface)
-md.smb.smbref = 1000. - 0.001 * md.mesh.x - 0.005 * md.mesh.y
+md.smb.smbref = (1000. - 0.001 * md.mesh.x - 0.005 * md.mesh.y) / 1000. * md.materials.rho_freshwater / md.materials.rho_ice
 md.transient.requested_outputs = ['default', 'TotalSmb']
 md.cluster = generic('name', gethostname(), 'np', 3)
Index: /issm/trunk/test/NightlyRun/test3481.m
===================================================================
--- /issm/trunk/test/NightlyRun/test3481.m	(revision 28012)
+++ /issm/trunk/test/NightlyRun/test3481.m	(revision 28013)
@@ -28,5 +28,4 @@
 max_parameters(md.mesh.numberofvertices+1,1:2)=[0.75,1.25];
 md.autodiff.independents = {independent('name','FrictionCoefficient',...
-	'md_name','md.friction.coefficient',...
 	'control_size',2,...
 	'type','vertex',...
Index: /issm/trunk/test/NightlyRun/test355.m
===================================================================
--- /issm/trunk/test/NightlyRun/test355.m	(revision 28012)
+++ /issm/trunk/test/NightlyRun/test355.m	(revision 28013)
@@ -23,5 +23,5 @@
 
 %Define initial conditions
-md.initialization.vx = 10^-6*md.constants.yts*ones(md.mesh.numberofvertices,1);
+md.initialization.vx = 1.0e-6*md.constants.yts*ones(md.mesh.numberofvertices,1);
 md.initialization.vy = zeros(md.mesh.numberofvertices,1);
 md.initialization.temperature=(273.-20.)*ones(md.mesh.numberofvertices,1);
@@ -58,4 +58,5 @@
 md.hydrology.bump_height = 1e-1 * ones(md.mesh.numberofvertices,1);
 md.hydrology.sheet_conductivity= 1e-3 * ones(md.mesh.numberofvertices,1);
+md.hydrology.channel_conductivity= 5.e-2 * ones(md.mesh.numberofvertices,1);
 
 % BCs for hydrology
@@ -74,8 +75,8 @@
 	'HydrologySheetThickness4','HydraulicPotential4','ChannelArea4',};
 field_tolerances={...
-	1e-13,1e-13,1e-13,...
-	1e-13,1e-13,2e-13,...
-	1e-13,1e-13,7e-13,...
-	1e-13,1e-13,2e-12};
+	5e-11,2e-08,3e-07,...
+	2e-10,2e-08,4e-07,...
+	3e-10,2e-08,4e-07,...
+	4e-10,1e-08,4e-07};
 field_values={...
 	md.results.TransientSolution(1).HydrologySheetThickness, ...
Index: /issm/trunk/test/NightlyRun/test355.py
===================================================================
--- /issm/trunk/test/NightlyRun/test355.py	(revision 28012)
+++ /issm/trunk/test/NightlyRun/test355.py	(revision 28013)
@@ -10,19 +10,20 @@
 from generic import generic
 
-#create model:
+# Create model
 md = triangle(model(), '../Exp/Square.exp', 50000.)
 md.mesh.x = md.mesh.x / 100
 md.mesh.y = md.mesh.y / 100
-#miscellaneous
-md = setmask(md, '', '')   #everywhere grounded
+md.miscellaneous.name = 'testChannels'
+
+# Miscellaneous
+md = setmask(md, '', '') # Everywhere grounded
 md = setflowequation(md, 'SSA', 'all')
-md.miscellaneous.name = 'testChannels'
-md.stressbalance.maxiter = 2  #Make sure it runs quickly...
+md.stressbalance.maxiter = 2 # Make sure it runs quickly...
 
-#Some constants
+# Some constants
 md.constants.g = 9.8
 md.materials.rho_ice = 910
 
-#Geometry
+# Geometry
 md.geometry.surface = -0.02 * md.mesh.x + 320
 md.geometry.bed = np.zeros((md.mesh.numberofvertices))
@@ -30,5 +31,5 @@
 md.geometry.thickness = md.geometry.surface - md.geometry.bed
 
-#Define initial conditions
+# Define initial conditions
 md.initialization.vx = 1.0e-6 * md.constants.yts * np.ones((md.mesh.numberofvertices))
 md.initialization.vy = np.zeros((md.mesh.numberofvertices))
@@ -37,9 +38,9 @@
 md.initialization.hydraulic_potential = md.materials.rho_ice * md.constants.g * md.geometry.thickness
 
-#Materials
+#cMaterials
 md.materials.rheology_B = (5e-25)**(-1./3.) * np.ones((md.mesh.numberofvertices))
 md.materials.rheology_n = 3. * np.ones((md.mesh.numberofelements))
 
-#Friction
+#cFriction
 md.friction.coefficient = np.zeros((md.mesh.numberofvertices))
 md.friction.p = np.ones((md.mesh.numberofelements))
@@ -47,5 +48,5 @@
 #md.friction.coupling = 0
 
-#Boundary conditions:
+#Bcoundary conditions:
 md = SetIceSheetBC(md)
 
@@ -66,4 +67,5 @@
 md.hydrology.bump_height = 1.e-1 * np.ones((md.mesh.numberofvertices))
 md.hydrology.sheet_conductivity = 1.e-3 * np.ones((md.mesh.numberofvertices))
+md.hydrology.channel_conductivity = 5.e-2 * np.ones((md.mesh.numberofvertices))
 
 # BCs for hydrology
@@ -73,15 +75,15 @@
 
 md.cluster = generic('np', 2)
-md = solve(md, 'Transient')   #or 'tr'
+md = solve(md, 'Transient') # Or 'tr'
 
-#Fields and tolerances to track changes
+# Fields and tolerances to track changes
 field_names = ['HydrologySheetThickness1', 'HydraulicPotential1', 'ChannelArea1',
                'HydrologySheetThickness2', 'HydraulicPotential2', 'ChannelArea2',
                'HydrologySheetThickness3', 'HydraulicPotential3', 'ChannelArea3',
                'HydrologySheetThickness4', 'HydraulicPotential4', 'ChannelArea4']
-field_tolerances = [1e-13, 1e-13, 1e-13,
-                    1e-13, 1e-13, 2e-13,
-                    1e-13, 1e-13, 7e-13,
-                    1e-13, 1e-13, 2e-12]
+field_tolerances = [5e-11, 2e-08, 3e-07,
+                    2e-10, 2e-08, 4e-07,
+                    3e-10, 2e-08, 4e-07,
+                    4e-10, 1e-08, 4e-07]
 field_values = [md.results.TransientSolution[0].HydrologySheetThickness,
                 md.results.TransientSolution[0].HydraulicPotential,
Index: /issm/trunk/test/NightlyRun/test358.m
===================================================================
--- /issm/trunk/test/NightlyRun/test358.m	(revision 28013)
+++ /issm/trunk/test/NightlyRun/test358.m	(revision 28013)
@@ -0,0 +1,43 @@
+%Test Name: SquareSheetConstrainedCMDragRegCoulombSSA2d
+md=triangle(model(),'../Exp/Square.exp',200000.);
+md=setmask(md,'','');
+md=parameterize(md,'../Par/SquareSheetConstrained.par');
+md=setflowequation(md,'SSA','all');
+
+%use Regularized Coulomb's law
+md.friction = frictionregcoulomb();
+md.friction.m = 3.0*ones(md.mesh.numberofelements,1);
+md.friction.u0 = 2000; %m/yr
+md.friction.C = 200*ones(md.mesh.numberofvertices,1);
+	
+%control parameters
+md.inversion.iscontrol=1;
+md.inversion.control_parameters={'FrictionC'};
+md.inversion.min_parameters=1.*ones(md.mesh.numberofvertices,1);
+md.inversion.max_parameters=10000.*ones(md.mesh.numberofvertices,1);
+md.inversion.nsteps=2;
+md.inversion.cost_functions=[102  501];
+md.inversion.cost_functions_coefficients=ones(md.mesh.numberofvertices,2); md.inversion.cost_functions_coefficients(:,2)=2.*10^-7;
+md.inversion.gradient_scaling=3.*ones(md.inversion.nsteps,1);
+md.inversion.maxiter_per_step=2*ones(md.inversion.nsteps,1);
+md.inversion.step_threshold=0.3*ones(md.inversion.nsteps,1);
+md.inversion.vx_obs=md.initialization.vx; md.inversion.vy_obs=md.initialization.vy;
+
+md.verbose = verbose('all');
+md.debug.valgrind = 0;
+
+md.cluster=generic('name',oshostname(),'np',1);
+md=solve(md,'Stressbalance');
+
+%Fields and tolerances to track changes
+field_names     ={'Gradient','Misfits','FrictionC','Pressure','Vel','Vx','Vy'};
+field_tolerances={1e-12,1e-13,1e-13,1e-13,1e-13,1e-13,1e-13,1e-13,1e-13,1e-13};
+field_values={...
+	(md.results.StressbalanceSolution.Gradient1),...
+	(md.results.StressbalanceSolution.J),...
+	(md.results.StressbalanceSolution.FrictionC),...
+	(md.results.StressbalanceSolution.Pressure),...
+	(md.results.StressbalanceSolution.Vel),...
+	(md.results.StressbalanceSolution.Vx),...
+	(md.results.StressbalanceSolution.Vy)
+};
Index: /issm/trunk/test/NightlyRun/test4001.m
===================================================================
--- /issm/trunk/test/NightlyRun/test4001.m	(revision 28012)
+++ /issm/trunk/test/NightlyRun/test4001.m	(revision 28013)
@@ -865,7 +865,8 @@
     'Melting2','Vx3','Vy3','Thickness3','Base3','MaskOceanLevelset3','FloatingiceMeltingRate3',...
     'Melting3','Vx4','Vy4','Thickness4','Base4','MaskOceanLevelset4','FloatingiceMeltingRate4','Melting4'};
-field_tolerances={2e-13,1e-13,1e-13,1e-13,1e-13,1e-13,1e-13,1e-13,...
-    1e-13, 1e-13, 1e-13, 1e-13, 1e-13, 1e-13, 1e-13,...
-    1e-13, 1e-13, 1e-13, 1e-13, 1e-13, 1e-13, 1e-13, 1e-13 };
+field_tolerances={2e-13,1e-13,...
+    8e-06,7e-06,2e-07,2e-08,3e-08,1e-13,1e-13,...
+    8e-06,7e-06,4e-07,3e-08,5e-08,1e-13,6e-08,...
+    8e-06,7e-06,5e-07,4e-08,8e-08,6e-08,3e-07};
 field_values={...
     (md.results(1).TransientSolution(end).Base),...
Index: /issm/trunk/test/NightlyRun/test4002.m
===================================================================
--- /issm/trunk/test/NightlyRun/test4002.m	(revision 28012)
+++ /issm/trunk/test/NightlyRun/test4002.m	(revision 28013)
@@ -999,5 +999,5 @@
         % }}}
 
-    md.transient.isoceancoupling=1;
+    md.transient.isoceancoupling=2;
     md.transient.isgroundingline=0;
     md.groundingline.migration='None';
@@ -1032,7 +1032,8 @@
     'Melting2','Vx3','Vy3','Thickness3','Base3','MaskOceanLevelset3','FloatingiceMeltingRate3',...
     'Melting3','Vx4','Vy4','Thickness4','Base4','MaskOceanLevelset4','FloatingiceMeltingRate4','Melting4'};
-field_tolerances={2e-13,1e-13,1e-13,1e-13,1e-13,1e-13,1e-13,1e-13,...
-    1e-13, 1e-13, 1e-13, 1e-13, 1e-13, 1e-13, 1e-13,...
-    1e-13, 1e-13, 1e-13, 1e-13, 1e-13, 1e-13, 1e-13, 1e-13 };
+field_tolerances={3e-11,1e-13,...
+    9e-06,7e-06,7e-10,5e-11,1e-13,1e-13,1e-13,...
+    9e-06,7e-06,2e-09,7e-11,1e-13,1e-13,1e-13,...
+    9e-06,7e-06,2e-09,9e-11,1e-13,1e-13,1e-13};
 field_values={...
     (md.results.TransientSolution(1).Base),...
Index: /issm/trunk/test/NightlyRun/test4003.m
===================================================================
--- /issm/trunk/test/NightlyRun/test4003.m	(revision 28012)
+++ /issm/trunk/test/NightlyRun/test4003.m	(revision 28013)
@@ -378,5 +378,5 @@
     md.initialization.vel=md.results.TransientSolution(end).Vel;
     md.initialization.pressure=md.results.TransientSolution(end).Pressure;
-    md.transient.isoceancoupling=1;
+    md.transient.isoceancoupling=2;
     md.transient.isgroundingline=0;
     md.masstransport.requested_outputs={'default','BasalforcingsFloatingiceMeltingRate'};
Index: /issm/trunk/test/NightlyRun/test4004.m
===================================================================
--- /issm/trunk/test/NightlyRun/test4004.m	(revision 28012)
+++ /issm/trunk/test/NightlyRun/test4004.m	(revision 28013)
@@ -337,5 +337,5 @@
 
     % {{{ prepare ISSM
-    md.transient.isoceancoupling=1;
+    md.transient.isoceancoupling=2;
     md.transient.isgroundingline=0;
     md.transient.isthermal=0;
Index: /issm/trunk/test/NightlyRun/test4005.m
===================================================================
--- /issm/trunk/test/NightlyRun/test4005.m	(revision 28013)
+++ /issm/trunk/test/NightlyRun/test4005.m	(revision 28013)
@@ -0,0 +1,758 @@
+%Test Name: IceOcean
+%ISSM/MITgcm coupled set-up
+%
+%Script control parameters
+steps=[1:5 7:11];
+final_time=1;
+
+!rm -rf ${ISSM_DIR}/test/MITgcm/install
+!rm -rf ${ISSM_DIR}/test/MITgcm/build/*
+!rm -rf Models
+
+%Organizer
+!mkdir Models
+org=organizer('repository','Models','prefix','IceOcean.','steps',steps);
+
+presentdirectory=pwd;
+
+% {{{ Parameters:
+if perform(org,'Parameters'),
+    Nx=20; %number of longitude cells
+    Ny=40; %number of latitude cells
+    Nz=30; %number of MITgcm vertical cells
+    nPx=1; %number of MITgcm processes to use in x direction
+    nPy=2; %number of MITgcm processes to use in y direction
+    xgOrigin=0; %origin of longitude
+    ygOrigin=-80; %origin of latitude
+    dLong=.25; %longitude grid spacing
+    dLat=.05; %latitude grid spacing
+    delZ=30; %thickness of vertical levels
+    icefront_position_ratio=.75;
+    ice_thickness=100;
+    rho_ice=917;
+    rho_water=1028.14;
+    di=rho_ice/rho_water;
+
+    % MITgcm initial and lateral boundary conditions
+    iniSalt  = 34.4; % initial salinity (PSU)
+    iniTheta = -1.9; % initial potential temperature (deg C)
+    obcSalt  = 34.4; % open boundary salinity (PSU)
+    obcTheta =  1.0; % open boundary potential temperature (deg C)
+    mlDepth  = 120.; % mixed layer depth (m)
+    mlSalt   = 33.4; % open boundary salinity (PSU)
+    mlTheta  = -1.9; % open boundary potential temperature (deg C)
+    obcUvel  = -0.1; % open boundary velocity (m/s)
+
+    MITgcmDeltaT=600; % MITgcm time step in seconds
+    y2s=31536000; % year to seconds conversion, i.e., seconds per year
+
+    % start_time, final_time, and time_step
+    start_time=0; % in decimal years
+    time_step=1/12; % coupling interval in decimal years
+    async_step_MITgcm_multiplier=1/30; % used to reduce run time for MItgcm
+
+    % bedrock/bathymetry
+    hmax=1000;
+    trough_depth=200;
+    deltah=300;
+    sea_level=1095;
+
+    % issm settings:
+    numlayers=10;
+
+    savedata(org, Nx, Ny, nPx, nPy, Nz, dLong, dLat, delZ, xgOrigin, ...
+        ygOrigin, icefront_position_ratio, ice_thickness, rho_ice, ...
+        rho_water, di, hmax, trough_depth, deltah, sea_level, ...
+        iniSalt, iniTheta, obcSalt, obcTheta, mlDepth, mlSalt, ...
+        mlTheta, obcUvel, start_time, time_step, MITgcmDeltaT, y2s,...
+        numlayers,async_step_MITgcm_multiplier);
+end
+% }}}
+% {{{ Bathymetry:
+if perform(org,'Bathymetry'),
+
+    loaddata(org,'Parameters');
+    %create lat,long
+    lat=(ygOrigin+dLat/2):dLat:(ygOrigin+Ny*dLat);
+    long=(xgOrigin+dLong/2):dLong:(xgOrigin+Nx*dLong);
+    [lat long]=meshgrid(lat,long);
+
+    longmin=min(long(:));
+    longmax=max(long(:));
+    latmin=min(lat(:));
+    latmax=max(lat(:));
+
+    %create bedrock/bathymetry:
+    bedrock=zeros(Nx,Ny);
+    bedrock=hmax-deltah*tanh(pi*(2*(lat-latmin)./(latmax-latmin)-1))+ ...
+            trough_depth*cos(2*pi*long./(longmax-longmin));
+
+    %save bathymetry file for MITgcm
+    bathymetry=bedrock-sea_level;
+    savedata(org,lat,long,bathymetry);
+
+end
+% }}}
+% {{{ IceSheetGeometry:
+if perform(org,'IceSheetGeometry'),
+
+    loaddata(org,'Parameters');
+    loaddata(org,'Bathymetry');
+    latmin=min(lat(:));
+    latmax=max(lat(:));
+
+    %put ice_thickness constant layer of ice over the bathymetry, unless it floats:
+    s=size(bathymetry);
+    thickness=ice_thickness*ones(s);
+
+    %figure out ice shelf:
+    pos=find(-di*thickness>bathymetry);
+    iceshelf_mask=zeros(s);
+    iceshelf_mask(pos)=1;
+
+    ice_mask=ones(s);
+    pos=find((lat-latmin)/(latmax-latmin)>(icefront_position_ratio));
+    ice_mask(pos)=0;
+    iceshelf_mask(pos)=0;
+
+    %compute draft of ice shelf:
+    draft=bathymetry;
+    pos=find(iceshelf_mask);
+    draft(pos)=-di*thickness(pos);
+    pos=find(~ice_mask);
+    draft(pos)=0;
+
+    savedata(org,ice_mask,iceshelf_mask,draft,thickness);
+end
+% }}}
+
+%Configure MITgcm
+% {{{ GetMITgcm:
+if perform(org,'GetMITgcm'),
+  system([pwd '/../MITgcm/get_mitgcm.sh']);
+end
+% }}}
+% {{{ BuildMITgcm:
+if perform(org,'BuildMITgcm'),
+
+    %load data:
+    loaddata(org,'Parameters');
+
+    system(['../MITgcm/build_remesh.sh amundsen ' pwd '/../MITgcm']);
+end
+% }}}
+addpath(recursivepath([pwd '/../MITgcm']));
+% {{{ RunUncoupledMITgcm:
+if perform(org,'RunUncoupledMITgcm'),
+
+    %load data:
+    loaddata(org,'Parameters');
+    loaddata(org,'Bathymetry');
+    loaddata(org,'IceSheetGeometry');
+     endtime = round(MITgcmDeltaT * ...
+         floor(time_step*y2s*async_step_MITgcm_multiplier/MITgcmDeltaT));
+
+    % {{{ prepare MITgcm
+    % rename previous run directory and create new one
+    if exist ('run.old')
+        !\rm -rf run.old
+    end
+    if exist ('run')
+        !\mv run run.old
+    end
+    !\mkdir run
+    !\cp ../MITgcm/build/mitgcmuv run
+    !\cp ../MITgcm/input_remesh/* run
+    !\cp ../MITgcm/input_remesh/eedata_uncoupled run/eedata
+
+    %load data:
+    loaddata(org,'Parameters');
+
+    % initial salinity
+    S=iniSalt*ones(Nx,Ny,Nz);
+    writebin('run/Salt.bin',S);
+
+    % initial temperature
+    T=iniTheta*ones(Nx,Ny,Nz);
+    writebin('run/Theta.bin',T);
+
+    % initial velocity
+    Z=zeros(Nx,Ny,Nz);
+    writebin('run/Uvel.bin',Z);
+    writebin('run/Vvel.bin',Z);
+
+    % initial sea surface height
+    Z=zeros(Nx,Ny);
+    writebin('run/Etan.bin',Z);
+
+    % salinity boundary conditions
+    S=obcSalt*ones(Ny,Nz);
+    thk=delZ*ones(Nz,1);
+    bot=cumsum(thk);
+    ik=find(bot<=mlDepth);
+    S(:,ik)=mlSalt;
+    writebin('run/OBs.bin',S);
+
+    % temperature boundary conditions
+    T=obcTheta*ones(Ny,Nz);
+    T(:,ik)=mlTheta;
+    writebin('run/OBt.bin',T);
+
+    % zonal velocity boundary conditions
+    U=obcUvel*ones(Ny,Nz);
+    writebin('run/OBu.bin',U);
+
+    % zero boundary conditions
+    Z=zeros(Ny,Nz);
+    writebin('run/zeros.bin',Z);
+
+    % build parameter file data.obcs
+    fidi=fopen('../MITgcm/input_remesh/data.obcs','r');
+    fido=fopen('run/data.obcs','w');
+    tline = fgetl(fidi);
+    fprintf(fido,'%s\n',tline);
+    while 1
+        tline = fgetl(fidi);
+        if ~ischar(tline), break, end
+        %do the change here:
+        if strcmpi(tline,' OB_Iwest = 40*1,'),
+            fprintf(fido,'%s%i%s\n',' OB_Iwest = ',Ny,'*1,');
+            continue;
+        end
+        if strcmpi(tline,' OB_Ieast = 40*-1,'),
+            fprintf(fido,'%s%i%s\n',' OB_Ieast = ',Ny,'*-1,');
+            continue;
+        end
+        fprintf(fido,'%s\n',tline);
+    end
+    %close  files
+    fclose(fidi);
+    fclose(fido);
+
+    %save bathymetry and bedrock in run directory
+    writebin('run/bathymetry.bin',bathymetry);
+    writebin('run/icetopo.bin',draft);
+	 shelficemass=-draft*rho_water;
+    writebin('run/shelficemass.bin',shelficemass);
+	 dmdt=-5*rho_ice/y2s*ones(size(draft));
+    writebin('run/shelfice_dmdt.bin',dmdt);
+    % }}}
+
+    %start looping:
+    for t=start_time:time_step:final_time,
+        disp(['Year: ' num2str(t)])
+        % {{{ generate MITgcm parameter file data
+        fidi=fopen('../MITgcm/input_remesh/data','r');
+        fido=fopen('run/data','w');
+        tline = fgetl(fidi);
+        fprintf(fido,'%s\n',tline);
+        while 1
+            tline = fgetl(fidi);
+            if ~ischar(tline), break, end
+            %do the change here:
+            if strcmpi(tline,' xgOrigin = 0.0,'),
+                fprintf(fido,'%s%i%s\n',' xgOrigin = ',xgOrigin,',');
+                continue;
+            end
+            if strcmpi(tline,' ygOrigin = -80.0,'),
+                fprintf(fido,'%s%i%s\n',' ygOrigin = ',ygOrigin,',');
+                continue;
+            end
+            if strcmpi(tline,' delX = 20*0.25,'),
+                fprintf(fido,'%s%i*%g%s\n',' delX = ',Nx,dLong,',');
+                continue;
+            end
+            if strcmpi(tline,' delY = 20*0.25,'),
+                fprintf(fido,'%s%i*%g%s\n',' delY = ',Ny,dLat,',');
+                continue;
+            end
+            if strcmpi(tline,' delZ = 30*30.0,'),
+                fprintf(fido,'%s%i*%g%s\n',' delZ = ',Nz,delZ,',');
+                continue;
+            end
+            if strcmpi(tline,' endTime=2592000.,'),
+                fprintf(fido,'%s%i%s\n',' endTime= ',endtime,',');
+                continue;
+            end
+            if strcmpi(tline,' deltaT=1200.0,'),
+                fprintf(fido,'%s%i%s\n',' deltaT= ',MITgcmDeltaT,',');
+                continue;
+            end
+            if strcmpi(tline,' pChkptFreq=2592000.,'),
+                fprintf(fido,'%s%i%s\n',' pChkptFreq= ',endtime,',');
+                continue;
+            end
+            if strcmpi(tline,' taveFreq=2592000.,'),
+                fprintf(fido,'%s%i%s\n',' taveFreq= ',endtime,',');
+                continue;
+            end
+            if strcmpi(tline,' rhoConst=1030.,'),
+                fprintf(fido,'%s%i%s\n',' rhoConst= ',rho_water,',');
+                continue;
+            end
+            if strcmpi(tline,' rhoNil=1030.,'),
+                fprintf(fido,'%s%i%s\n',' rhoNil= ',rho_water,',');
+                continue;
+            end
+            fprintf(fido,'%s\n',tline);
+        end
+        %close  files
+        fclose(fidi);
+        fclose(fido);
+        % }}}
+        % {{{ generate initial MITgcm conditions
+
+        ds=round(endtime/MITgcmDeltaT);
+        if t>start_time
+            % Read pickup file
+            fnm=['run/pickup.' myint2str(ds,10) '.data'];
+            U=readbin(fnm,[Nx Ny Nz],1,'real*8',0);
+            V=readbin(fnm,[Nx Ny Nz],1,'real*8',1);
+            T=readbin(fnm,[Nx Ny Nz],1,'real*8',2);
+            S=readbin(fnm,[Nx Ny Nz],1,'real*8',3);
+            E=readbin(fnm,[Nx Ny],1,'real*8',8*Nz);
+            writebin('run/Salt.bin' ,S);
+            writebin('run/Theta.bin',T);
+            writebin('run/Uvel.bin' ,U);
+            writebin('run/Vvel.bin' ,V);
+            writebin('run/Etan.bin' ,E);
+        end
+
+        % }}}
+        % {{{ system call to run MITgcm
+        cd run
+        eval(['!mpirun -np ' int2str(nPx*nPy) ' ./mitgcmuv']);
+        ts=round((t+time_step)*y2s/MITgcmDeltaT);
+        eval(['!\mv STDERR.0000 STDERR_' myint2str(ts,10) '.data'])
+        eval(['!\mv STDOUT.0000 STDOUT_' myint2str(ts,10) '.data'])
+        eval(['!\cp hFacC.data hFacC_' myint2str(ts,10) '.data'])
+        eval(['!\cp icetopo.bin icetopo_' myint2str(ts,10) '.data'])
+        for fld={'S','T','U','V','Eta', ...
+                 'SHICE_heatFluxtave','SHICE_fwFluxtave'}
+            eval(['!\mv ' fld{1} '.' myint2str(ds,10) '.data ' ...
+                  fld{1} '_' myint2str(ts,10) '.data'])
+        end
+        cd ..
+        % }}}
+    end
+end
+% }}}
+
+%Configure ISSM
+% {{{ CreateMesh:
+if perform(org,'CreateMesh'),
+
+    loaddata(org,'Parameters');
+    loaddata(org,'Bathymetry');
+    loaddata(org,'IceSheetGeometry');
+
+    %create model:
+    md=model();
+
+    %Grab lat,long from MITgcm:
+    lat=lat(:);
+    long=long(:);
+
+    %project lat,long:
+    [x,y]=ll2xy(lat,long,-1);
+
+    index=[];
+    %  C  D
+    %  A  B
+    for j=1:Ny-1,
+        for i=1:Nx-1,
+            A=(j-1)*Nx+i;
+            B=(j-1)*Nx+i+1;
+            C=j*Nx+i;
+            D=j*Nx+i+1;
+            index(end+1,:)=[A B C];
+            index(end+1,:)=[C B D];
+        end
+    end
+
+    %fill mesh and model:
+    md=meshconvert(md,index,x,y);
+    md.mesh.lat=lat;
+    md.mesh.long=long;
+
+    savemodel(org,md);
+
+end
+% }}}
+% {{{ MeshGeometry:
+if perform(org,'MeshGeometry'),
+
+    loaddata(org,'Parameters');
+    loaddata(org,'CreateMesh');
+    loaddata(org,'Bathymetry');
+    loaddata(org,'IceSheetGeometry');
+
+    %transfer to vertices:
+    bathymetry=bathymetry(:);
+    iceshelf_mask=iceshelf_mask(:);
+    ice_mask=ice_mask(:);
+    thickness=thickness(:);
+    draft=draft(:);
+
+    %start filling some of the fields
+    md.geometry.bed=bathymetry;
+    md.geometry.thickness=thickness;
+    md.geometry.base=md.geometry.bed;
+    pos=find(iceshelf_mask); md.geometry.base(pos)=draft(pos);
+    md.geometry.surface=md.geometry.base+md.geometry.thickness;
+
+    %nothing passes icefront:
+    pos=find(~ice_mask);
+    md.geometry.thickness(pos)=1;
+    md.geometry.surface(pos)=(1-di)*md.geometry.thickness(pos);
+    md.geometry.base(pos)=-di*md.geometry.thickness(pos);
+
+    %level sets:
+    md.mask.ocean_levelset=-ones(md.mesh.numberofvertices,1);
+    md.mask.ice_levelset=ones(md.mesh.numberofvertices,1);
+
+    pos=find(ice_mask); md.mask.ice_levelset(pos)=-1;
+    pos=find(~iceshelf_mask & ice_mask); md.mask.ocean_levelset(pos)=1;
+
+    %identify edges of grounded ice:
+    ocean_levelset=md.mask.ocean_levelset;
+    for i=1:md.mesh.numberofelements,
+        m=ocean_levelset(md.mesh.elements(i,:));
+        if abs(sum(m))~=3,
+            pos=find(m==1); md.mask.ocean_levelset(md.mesh.elements(i,pos))=0;
+        end
+    end
+
+    %identify edges of ice:
+    ice_levelset=md.mask.ice_levelset;
+    for i=1:md.mesh.numberofelements,
+        m=ice_levelset(md.mesh.elements(i,:));
+        if abs(sum(m))~=3,
+            pos=find(m==-1); md.mask.ice_levelset(md.mesh.elements(i,pos))=0;
+        end
+    end
+
+    savemodel(org,md);
+end
+% }}}
+% {{{ ParameterizeIce:
+if perform(org,'ParameterizeIce'),
+
+    loaddata(org,'Parameters');
+    loaddata(org,'CreateMesh');
+    loaddata(org,'MeshGeometry');
+
+    %miscellaneous
+    md.miscellaneous.name='test4001';
+
+    %initial velocity:
+    md.initialization.vx=zeros(md.mesh.numberofvertices,1);
+    md.initialization.vy=zeros(md.mesh.numberofvertices,1);
+    md.initialization.vz=zeros(md.mesh.numberofvertices,1);
+
+    %friction:
+    md.friction.coefficient=30*ones(md.mesh.numberofvertices,1);
+    pos=find(md.mask.ocean_levelset<=0);
+    md.friction.coefficient(pos)=0;
+    md.friction.p=ones(md.mesh.numberofelements,1);
+    md.friction.q=ones(md.mesh.numberofelements,1);
+
+    %temperatures and surface mass balance:
+    md.initialization.temperature=(273.15-20)*ones(md.mesh.numberofvertices,1);
+    md.initialization.pressure=md.materials.rho_ice*md.constants.g*(md.geometry.surface-md.geometry.base);
+    md.smb.mass_balance = [1*ones(md.mesh.numberofvertices,1); 1];
+
+    %Flow law
+    md.materials.rheology_B=paterson(md.initialization.temperature);
+    md.materials.rheology_n=3*ones(md.mesh.numberofelements,1);
+    md.damage.D=zeros(md.mesh.numberofvertices,1);
+    md.damage.spcdamage=NaN*ones(md.mesh.numberofvertices,1);
+
+    %the spcs going
+    md.stressbalance.spcvx=NaN*ones(md.mesh.numberofvertices,1);
+    md.stressbalance.spcvy=NaN*ones(md.mesh.numberofvertices,1);
+    md.stressbalance.spcvz=NaN*ones(md.mesh.numberofvertices,1);
+    md.stressbalance.referential=NaN*ones(md.mesh.numberofvertices,6);
+    md.stressbalance.loadingforce=0*ones(md.mesh.numberofvertices,3);
+    md.masstransport.spcthickness=NaN*ones(md.mesh.numberofvertices,1);
+
+    %deal with water:
+    pos=find(md.mask.ice_levelset>0);
+    md.stressbalance.spcvx(pos)=0;
+    md.stressbalance.spcvy(pos)=0;
+    md.stressbalance.spcvz(pos)=0;
+    md.masstransport.spcthickness(pos)=0;
+
+    %get some flux at the ice divide:
+    pos=find(md.mesh.lat==min(md.mesh.lat));
+    md.stressbalance.spcvy(pos)=200;
+
+    %deal with boundaries, excluding icefront:
+    vertex_on_boundary=zeros(md.mesh.numberofvertices,1);
+    vertex_on_boundary(md.mesh.segments(:,1:2))=1;
+    pos=find(vertex_on_boundary & md.mask.ocean_levelset<=0);
+    md.stressbalance.spcvx(pos)=md.initialization.vx(pos);
+    md.stressbalance.spcvy(pos)=md.initialization.vy(pos);
+    md.stressbalance.spcvz(pos)=md.initialization.vz(pos);
+    md.masstransport.spcthickness(pos)=md.geometry.thickness(pos);
+
+    md.basalforcings.groundedice_melting_rate=zeros(md.mesh.numberofvertices,1);
+    md.basalforcings.floatingice_melting_rate=zeros(md.mesh.numberofvertices,1);
+    md.thermal.spctemperature=[md.initialization.temperature; 1]; %impose observed temperature on surface
+    md.basalforcings.geothermalflux=.064*ones(md.mesh.numberofvertices,1);
+
+    %flow equations:
+    md=setflowequation(md,'SSA','all');
+
+    savemodel(org,md);
+end
+% }}}
+% {{{ RunUncoupledISSM:
+if perform(org,'RunUncoupledISSM'),
+
+    loaddata(org,'Parameters');
+    loaddata(org,'ParameterizeIce');
+
+    %timestepping:
+    md.timestepping.final_time=final_time;
+    md.timestepping.time_step=time_step;
+    md.transient.isgroundingline=0;
+    md.transient.isthermal=0;
+    md.groundingline.migration='SubelementMigration';
+    md.groundingline.melt_interpolation='SubelementMelt2';
+    md.groundingline.friction_interpolation='SubelementFriction2';
+
+    md.cluster=generic('name',oshostname(),'np',2);
+    md=solve(md,'Transient');
+
+    savemodel(org,md);
+end
+% }}}
+
+%Run MITgcm/ISSM
+% {{{ RunCoupledMITgcmISSM:
+if perform(org,'RunCoupledMITgcmISSM'),
+
+    %load data:
+    loaddata(org,'Parameters');
+    loaddata(org,'ParameterizeIce');
+    loaddata(org,'Bathymetry');
+    loaddata(org,'IceSheetGeometry');
+	 loadmodel ./Models/IceOcean.RunUncoupledISSM
+	 md.results.TransientSolution=md.results.TransientSolution(end);
+        endtime = round(MITgcmDeltaT * ...
+         floor(time_step*y2s*async_step_MITgcm_multiplier/MITgcmDeltaT));
+
+        % {{{ prepare MITgcm
+        % rename previous run directory and create new one
+        if exist ('run.old')
+            !\rm -rf run.old
+        end
+        if exist ('run')
+            !\mv run run.old
+        end
+        !\mkdir run
+        !\cp ../MITgcm/build/mitgcmuv run
+        !\cp ../MITgcm/input_remesh/* run
+        !\cp ../MITgcm/input_remesh/eedata_uncoupled run/eedata
+
+        %load data:
+        loaddata(org,'Parameters');
+
+        % initial salinity
+        S=iniSalt*ones(Nx,Ny,Nz);
+        writebin('run/Salt.bin',S);
+
+        % initial temperature
+        T=iniTheta*ones(Nx,Ny,Nz);
+        writebin('run/Theta.bin',T);
+
+        % initial velocity
+        Z=zeros(Nx,Ny,Nz);
+        writebin('run/Uvel.bin',Z);
+        writebin('run/Vvel.bin',Z);
+
+        % initial sea surface height
+        Z=zeros(Nx,Ny);
+        writebin('run/Etan.bin',Z);
+
+        % salinity boundary conditions
+        S=obcSalt*ones(Ny,Nz);
+        thk=delZ*ones(Nz,1);
+        bot=cumsum(thk);
+        ik=find(bot<=mlDepth);
+        S(:,ik)=mlSalt;
+        writebin('run/OBs.bin',S);
+
+        % temperature boundary conditions
+        T=obcTheta*ones(Ny,Nz);
+        T(:,ik)=mlTheta;
+        writebin('run/OBt.bin',T);
+
+        % zonal velocity boundary conditions
+        U=obcUvel*ones(Ny,Nz);
+        writebin('run/OBu.bin',U);
+
+        % zero boundary conditions
+        Z=zeros(Ny,Nz);
+        writebin('run/zeros.bin',Z);
+
+        % build parameter file data.obcs
+        fidi=fopen('../MITgcm/input_remesh/data.obcs','r');
+        fido=fopen('run/data.obcs','w');
+        tline = fgetl(fidi);
+        fprintf(fido,'%s\n',tline);
+        while 1
+            tline = fgetl(fidi);
+            if ~ischar(tline), break, end
+            %do the change here:
+            if strcmpi(tline,' OB_Iwest = 40*1,'),
+                fprintf(fido,'%s%i%s\n',' OB_Iwest = ',Ny,'*1,');
+                continue;
+            end
+            if strcmpi(tline,' OB_Ieast = 40*-1,'),
+                fprintf(fido,'%s%i%s\n',' OB_Ieast = ',Ny,'*-1,');
+                continue;
+            end
+            fprintf(fido,'%s\n',tline);
+        end
+        %close  files
+        fclose(fidi);
+        fclose(fido);
+
+        %save bathymetry in MITgcm run directory
+        writebin('run/bathymetry.bin',bathymetry);
+        draft=md.results.TransientSolution(end).Base;
+        pos=find(md.mask.ice_levelset>0); draft(pos)=0;
+        writebin('run/icetopo.bin',draft);
+		  shelficemass=-draft*rho_water;
+		  writebin('run/shelficemass.bin',shelficemass);
+        % }}}
+
+    % {{{ ISSM settings:
+
+    setenv('DYLD_LIBRARY_PATH', '/usr/local/gfortran/lib')
+    %timestepping:
+    md.timestepping.time_step=time_step;
+    md.cluster=generic('name',oshostname(),'np',2);
+    md.results.TransientSolution.Base=md.geometry.base;
+    md.transient.isgroundingline=0;
+    md.transient.isthermal=0;
+    md.groundingline.migration='SubelementMigration';
+    md.groundingline.melt_interpolation='SubelementMelt2';
+    md.groundingline.friction_interpolation='SubelementFriction2';
+
+    % }}}
+
+    %start looping:
+    results=md.results;
+
+    for t=0:time_step:4*time_step
+		 disp(['Year: ' num2str(t)])
+
+		 %Initialize time steps
+		 md.timestepping.final_time=t+time_step;
+		 md.timestepping.time_step=time_step;
+		 md.timestepping.start_time=t;
+
+		 %Calculate dynamic thinning and write 
+		 md.basalforcings.floatingice_melting_rate(:)=0;
+		 md.cluster=generic('name',oshostname(),'np',2);
+		 md=solve(md,'Transient');
+
+		 dmdt_icenodes=(md.results.TransientSolution(1).Thickness-md.geometry.thickness)/(md.timestepping.final_time-md.timestepping.start_time);
+		 dmdt=rho_ice/y2s*reshape(dmdt_icenodes,[Nx,Ny]);
+		 writebin('run/shelfice_dmdt.bin',dmdt);
+		 system(['cp run/shelfice_dmdt.bin run/shelfice_dmdt_' int2str(t) '.bin'])
+
+		 % {{{ system call to run MITgcm
+		 cd run
+       newline = [' niter0 = ' num2str(t*y2s/MITgcmDeltaT)];
+		 command=['!sed "s/.*niter0.*/' newline '/" data > data.temp; mv data.temp data'];
+		 eval(command)
+
+       ds=round(endtime/MITgcmDeltaT);
+       ts=round((t+time_step)*y2s/MITgcmDeltaT);
+                 
+		 eval(['!mpirun -np ' int2str(nPx*nPy) ' ./mitgcmuv']);
+		 eval(['!\mv STDERR.0000 STDERR_' myint2str(t/time_step) '.data'])
+		 eval(['!\mv STDOUT.0000 STDOUT_' myint2str(t/time_step) '.data'])
+		 eval(['!\cp hFacC.data hFacC_' myint2str(t/time_step) '.data'])
+		 eval(['!\cp icetopo.bin icetopo_' myint2str(t/time_step) '.data'])
+%		 for fld={'S','T','U','V','Eta', ...
+%				 'SHICE_heatFluxtave','SHICE_fwFluxtave'}
+%			 eval(['!\mv ' fld{1} '.' myint2str(ds,10) '.data ' ...
+%				 fld{1} '_' myint2str(t,10) '.data'])
+%		 end
+		 cd ..
+		 % }}}
+
+		 system(['cp run/SHICE_fwFluxtave.' myint2str((t+time_step)*y2s/MITgcmDeltaT,10) '.data run/melt.data'])
+%		 system(['cp run/SHICE_fwFluxtave.' myint2str((t+1)*2160,10) '.data run/melt.data'])
+%		 system(['mv run/SHICE_fwFluxtave.' myint2str((t+1)*2160,10) '.data run/melt' int2str(t) '.data'])
+
+       melt=readbin('./run/melt.data',[Nx,Ny]);
+       md.basalforcings.floatingice_melting_rate=-melt(:)*y2s/rho_ice;
+		 md=solve(md,'Transient');
+
+		 %Save results of run with melt
+		 results.TransientSolution(end+1)= md.results.TransientSolution(end);
+		 
+		 base=md.results.TransientSolution(end).Base;
+		 thickness=md.results.TransientSolution(end).Thickness;
+		 md.geometry.base=base;
+		 md.geometry.thickness=thickness;
+		 md.geometry.surface=md.geometry.base+md.geometry.thickness;
+		 md.initialization.vx=md.results.TransientSolution(end).Vx;
+		 md.initialization.vy=md.results.TransientSolution(end).Vy;
+		 md.initialization.vel=md.results.TransientSolution(end).Vel;
+		 md.initialization.pressure=md.results.TransientSolution(end).Pressure;
+
+    end
+
+    md.results=results;
+    savemodel(org,md);
+end
+% }}}
+
+%%Fields and tolerances to track changes
+%fnm=['run/SHICE_fwFluxtave.0000004380.data'];
+%melting_rate_1=readbin(fnm,[Nx Ny]);
+%fnm=['run/SHICE_fwFluxtave.0000008760.data'];
+%melting_rate_2=readbin(fnm,[Nx Ny]);
+%fnm=['run/SHICE_fwFluxtave.0000013140.data'];
+%melting_rate_3=readbin(fnm,[Nx Ny]);
+%fnm=['run/SHICE_fwFluxtave.0000017520.data'];
+%melting_rate_4=readbin(fnm,[Nx Ny]);
+%field_names     ={'Base1','Melting1','Vx2','Vy2','Thickness2','Base2','MaskOceanLevelset2','FloatingiceMeltingRate2',...
+%    'Melting2','Vx3','Vy3','Thickness3','Base3','MaskOceanLevelset3','FloatingiceMeltingRate3',...
+%    'Melting3','Vx4','Vy4','Thickness4','Base4','MaskOceanLevelset4','FloatingiceMeltingRate4','Melting4'};
+%field_tolerances={2e-13,1e-13,...
+%    8e-06,7e-06,2e-07,2e-08,3e-08,1e-13,1e-13,...
+%    8e-06,7e-06,4e-07,3e-08,5e-08,1e-13,6e-08,...
+%    8e-06,7e-06,5e-07,4e-08,8e-08,6e-08,3e-07};
+%field_values={...
+%    (md.results(1).TransientSolution(end).Base),...
+%    (melting_rate_1(:)),...
+%    (md.results(2).TransientSolution(end).Vx),...
+%    (md.results(2).TransientSolution(end).Vy),...
+%    (md.results(2).TransientSolution(end).Thickness),...
+%    (md.results(2).TransientSolution(end).Base),...
+%    (md.results(2).TransientSolution(end).MaskOceanLevelset),...
+%    (md.results(2).TransientSolution(end).FloatingiceMeltingRate),...
+%    (melting_rate_2(:)),...
+%    (md.results(3).TransientSolution(end).Vx),...
+%    (md.results(3).TransientSolution(end).Vy),...
+%    (md.results(3).TransientSolution(end).Thickness),...
+%    (md.results(3).TransientSolution(end).Base),...
+%    (md.results(3).TransientSolution(end).MaskOceanLevelset),...
+%    (md.results(3).TransientSolution(end).FloatingiceMeltingRate),...
+%    (melting_rate_3(:)),...
+%    (md.results(4).TransientSolution(end).Vx),...
+%    (md.results(4).TransientSolution(end).Vy),...
+%    (md.results(4).TransientSolution(end).Thickness),...
+%    (md.results(4).TransientSolution(end).Base),...
+%    (md.results(4).TransientSolution(end).MaskOceanLevelset),...
+%    (md.results(4).TransientSolution(end).FloatingiceMeltingRate),...
+%    (melting_rate_4(:)),...
+%    };
Index: /issm/trunk/test/NightlyRun/test419.m
===================================================================
--- /issm/trunk/test/NightlyRun/test419.m	(revision 28012)
+++ /issm/trunk/test/NightlyRun/test419.m	(revision 28013)
@@ -10,5 +10,5 @@
 %Fields and tolerances to track changes
 field_names     ={'Vx','Vy','Vz','Vel','Pressure'};
-field_tolerances={2e-09,1e-09,1e-09,1e-09,1e-09};
+field_tolerances={2e-09,2e-09,1e-09,2e-09,1e-09};
 field_values={...
 	(md.results.StressbalanceSolution.Vx),...
Index: /issm/trunk/test/NightlyRun/test419.py
===================================================================
--- /issm/trunk/test/NightlyRun/test419.py	(revision 28012)
+++ /issm/trunk/test/NightlyRun/test419.py	(revision 28013)
@@ -19,5 +19,5 @@
 #Fields and tolerances to track changes
 field_names = ['Vx', 'Vy', 'Vz', 'Vel', 'Pressure']
-field_tolerances = [2e-09, 1e-09, 1e-09, 1e-09, 1e-09]
+field_tolerances = [2e-09, 2e-09, 1e-09, 2e-09, 1e-09]
 field_values = [md.results.StressbalanceSolution.Vx,
                 md.results.StressbalanceSolution.Vy,
Index: /issm/trunk/test/NightlyRun/test442.m
===================================================================
--- /issm/trunk/test/NightlyRun/test442.m	(revision 28012)
+++ /issm/trunk/test/NightlyRun/test442.m	(revision 28013)
@@ -55,7 +55,7 @@
 	'Bed3','Surface3','Thickness3','Floatingice3','Vx3','Vy3','Vz3','Pressure3','FloatingiceMeltingrate3',};
 field_tolerances={...
-	2e-11,5e-12,2e-11,1e-11,5e-10,1e-08,6e-10,1e-13,1e-13,...
+	2e-11,5e-12,2e-11,1e-11,5e-10,3e-08,6e-10,1e-13,1e-13,...
 	5e-09,5e-09,5e-09,5e-09,9e-05,9e-05,9e-05,5e-09,1e-13,...
-	8e-09,3e-08,8e-09,5e-09,8e-04,6e-04,2e-09,1e-8,4e-10};
+	1e-08,3e-08,7e-09,2e-07,1e-03,8e-04,2e-09,1e-08,4e-10};
 field_values={...
 	(md.results.TransientSolution(1).Base),...
Index: /issm/trunk/test/NightlyRun/test442.py
===================================================================
--- /issm/trunk/test/NightlyRun/test442.py	(revision 28012)
+++ /issm/trunk/test/NightlyRun/test442.py	(revision 28013)
@@ -65,6 +65,6 @@
                'Bed3', 'Surface3', 'Thickness3', 'Floatingice3', 'Vx3', 'Vy3', 'Vz3', 'Pressure3', 'FloatingiceMeltingrate3']
 field_tolerances = [2e-11, 5e-12, 2e-11, 1e-11, 5e-10, 3e-08, 6e-10, 1e-13, 1e-13,
-                    3e-11, 3e-11, 9e-10, 7e-11, 7e-09, 1e-07, 1e-09, 1e-10, 1e-13,
-                    1e-8, 2e-08, 7e-09, 2e-7, 1e-03, 8e-04, 2e-09, 1e-10, 1e-13]
+                    5e-09, 5e-09, 5e-09, 5e-09, 9e-05, 9e-05, 9e-05, 5e-09, 1e-13,
+                    1e-08, 3e-08, 7e-09, 2e-07, 1e-03, 8e-04, 2e-09, 1e-08, 4e-10]
 field_values = [md.results.TransientSolution[0].Base,
                 md.results.TransientSolution[0].Surface,
Index: /issm/trunk/test/NightlyRun/test509.m
===================================================================
--- /issm/trunk/test/NightlyRun/test509.m	(revision 28012)
+++ /issm/trunk/test/NightlyRun/test509.m	(revision 28013)
@@ -12,5 +12,5 @@
 %Fields and tolerances to track changes
 field_names     ={'Vx','Vy','Vz','Vel','Pressure','Temperature','BasalforcingsGroundediceMeltingRate'};
-field_tolerances={1e-09,1e-09,5e-08,5e-08,1e-09,6e-09,1e-06
+field_tolerances={1e-09,2e-09,5e-08,5e-08,1e-09,7e-09,1e-06
 };
 field_values={...
Index: /issm/trunk/test/NightlyRun/test509.py
===================================================================
--- /issm/trunk/test/NightlyRun/test509.py	(revision 28012)
+++ /issm/trunk/test/NightlyRun/test509.py	(revision 28013)
@@ -21,5 +21,5 @@
 # Fields and tolerances to track changes
 field_names = ['Vx', 'Vy', 'Vz', 'Vel', 'Pressure', 'Temperature', 'BasalforcingsGroundediceMeltingRate']
-field_tolerances = [1e-09, 1e-09, 5e-08, 5e-08, 1e-09, 6e-09, 1e-06]
+field_tolerances = [1e-09, 2e-09, 5e-08, 5e-08, 1e-09, 7e-09, 1e-06]
 field_values = [md.results.SteadystateSolution.Vx,
                 md.results.SteadystateSolution.Vy,
Index: /issm/trunk/test/NightlyRun/test517.m
===================================================================
--- /issm/trunk/test/NightlyRun/test517.m	(revision 28012)
+++ /issm/trunk/test/NightlyRun/test517.m	(revision 28013)
@@ -31,5 +31,5 @@
 %Fields and tolerances to track changes
 field_names     ={'Gradient','Misfits','MaterialsRheologyB','Pressure','Vel','Vx','Vy'};
-field_tolerances={5e-11,5e-11,5e-11,1e-09,1e-11,5e-11,1e-11};
+field_tolerances={6e-11,5e-11,5e-10,1e-09,2e-11,5e-11,2e-11};
 field_values={...
 	(md.results.StressbalanceSolution.Gradient1),...
Index: /issm/trunk/test/NightlyRun/test517.py
===================================================================
--- /issm/trunk/test/NightlyRun/test517.py	(revision 28012)
+++ /issm/trunk/test/NightlyRun/test517.py	(revision 28013)
@@ -42,5 +42,5 @@
 #Fields and tolerances to track changes
 field_names = ['Gradient', 'Misfits', 'MaterialsRheologyB', 'Pressure', 'Vel', 'Vx', 'Vy']
-field_tolerances = [5e-11, 5e-11, 5e-11, 1e-09, 1e-11, 5e-11, 1e-11]
+field_tolerances = [6e-11, 5e-11, 5e-10, 1e-09, 2e-11, 5e-11, 2e-11]
 field_values = [md.results.StressbalanceSolution.Gradient1,
                 md.results.StressbalanceSolution.J,
Index: /issm/trunk/test/NightlyRun/test518.m
===================================================================
--- /issm/trunk/test/NightlyRun/test518.m	(revision 28012)
+++ /issm/trunk/test/NightlyRun/test518.m	(revision 28013)
@@ -11,5 +11,5 @@
 %Fields and tolerances to track changes
 field_names     ={'Vx','Vy','Vel','Pressure','VxSurface','VySurface','VxShear','VyShear','VxBase','VyBase'};
-field_tolerances={1e-13,1e-13,1e-13,1e-13,1e-13,1e-13,1e-13,1e-13,1e-13,1e-13};
+field_tolerances={5e-13,6e-13,6e-13,1e-13,5e-13,6e-13,1e-13,1e-13,5e-13,6e-13};
 field_values={...
    (md.results.StressbalanceSolution.Vx),...
Index: /issm/trunk/test/NightlyRun/test518.py
===================================================================
--- /issm/trunk/test/NightlyRun/test518.py	(revision 28012)
+++ /issm/trunk/test/NightlyRun/test518.py	(revision 28013)
@@ -22,5 +22,5 @@
 #Fields and tolerances to track changes
 field_names = ['Vx', 'Vy', 'Vel', 'Pressure', 'VxSurface', 'VySurface', 'VxShear', 'VyShear', 'VxBase', 'VyBase']
-field_tolerances = [1e-13, 1e-13, 1e-13, 1e-13, 1e-13, 1e-13, 1e-13, 1e-13, 1e-13, 1e-13]
+field_tolerances = [5e-13, 6e-13, 6e-13, 1e-13, 5e-13, 6e-13, 1e-13, 1e-13, 5e-13, 6e-13]
 field_values = [md.results.StressbalanceSolution.Vx,
                 md.results.StressbalanceSolution.Vy,
Index: /issm/trunk/test/NightlyRun/test540.m
===================================================================
--- /issm/trunk/test/NightlyRun/test540.m	(revision 28012)
+++ /issm/trunk/test/NightlyRun/test540.m	(revision 28013)
@@ -34,7 +34,7 @@
 	};
 field_tolerances={...
-	1e-12,2e-12,2e-12,1e-13,1e-13,1e-13,1e-13,1e-13,...
+	1e-12,2e-12,2e-12,1e-13,1e-13,1e-13,1e-13,2e-12,...
 	1e-12,2e-12,2e-12,1e-13,1e-13,...
-	1e-12,1e-12,1e-12,1e-13,1e-13,1e-13,1e-13,1e-12,...
+	1e-12,2e-12,2e-12,1e-13,2e-13,1e-13,1e-13,2e-12,...
 	1e-12,2e-12,2e-12,1e-13,1e-13,...
 	1e-11,1e-11,1e-11,1e-11,1e-11,1e-11,1e-11,1e-9,...
Index: /issm/trunk/test/NightlyRun/test540.py
===================================================================
--- /issm/trunk/test/NightlyRun/test540.py	(revision 28012)
+++ /issm/trunk/test/NightlyRun/test540.py	(revision 28013)
@@ -41,7 +41,7 @@
                'Vx10', 'Vy10', 'Vel10', 'Pressure10', 'Bed10', 'Surface10', 'Thickness10', 'MaskIceLevelset10',
                'IceVolume10', 'IceVolumeAboveFloatation10', 'TotalSmb10', 'TotalGroundedBmb10', 'TotalFloatingBmb10']
-field_tolerances = [1e-12, 2e-12, 2e-12, 1e-13, 1e-13, 1e-13, 1e-13, 1e-13,
+field_tolerances = [1e-12, 2e-12, 2e-12, 1e-13, 1e-13, 1e-13, 1e-13, 2e-12,
                     1e-12, 2e-12, 2e-12, 1e-13, 1e-13,
-                    1e-12, 1e-12, 1e-12, 1e-13, 1e-13, 1e-13, 1e-13, 1e-12,
+                    1e-12, 2e-12, 2e-12, 1e-13, 2e-13, 1e-13, 1e-13, 2e-12,
                     1e-12, 2e-12, 2e-12, 1e-13, 1e-13,
                     1e-11, 1e-11, 1e-11, 1e-11, 1e-11, 1e-11, 1e-11, 1e-9,
Index: /issm/trunk/test/NightlyRun/test541.m
===================================================================
--- /issm/trunk/test/NightlyRun/test541.m	(revision 28012)
+++ /issm/trunk/test/NightlyRun/test541.m	(revision 28013)
@@ -34,5 +34,5 @@
 	};
 field_tolerances={...
-	1e-11,2e-11,2e-11,1e-12,2e-11,6e-12,9e-12,1e-12,...
+	1e-11,2e-11,2e-11,1e-12,2e-11,6e-12,9e-12,2e-12,...
 	1e-11,2e-11,2e-11,2e-9,2e-11,...
 	2e-11,1e-11,1e-11,9e-12,2e-11,3e-11,2e-11,1e-11,...
Index: /issm/trunk/test/NightlyRun/test541.py
===================================================================
--- /issm/trunk/test/NightlyRun/test541.py	(revision 28012)
+++ /issm/trunk/test/NightlyRun/test541.py	(revision 28013)
@@ -41,5 +41,5 @@
                'Vx10', 'Vy10', 'Vel10', 'Pressure10', 'Bed10', 'Surface10', 'Thickness10', 'MaskIceLevelset10',
                'IceVolume10', 'IceVolumeAboveFloatation10', 'TotalSmb10', 'TotalGroundedBmb10', 'TotalFloatingBmb10']
-field_tolerances = [1e-11, 2e-11, 2e-11, 1e-12, 2e-11, 6e-12, 9e-12, 1e-12,
+field_tolerances = [1e-11, 2e-11, 2e-11, 1e-12, 2e-11, 6e-12, 9e-12, 2e-12,
                     1e-11, 2e-11, 2e-11, 2e-9, 2e-11,
                     2e-11, 1e-11, 1e-11, 9e-12, 2e-11, 3e-11, 2e-11, 1e-11,
Index: /issm/trunk/test/NightlyRun/test543.m
===================================================================
--- /issm/trunk/test/NightlyRun/test543.m	(revision 28012)
+++ /issm/trunk/test/NightlyRun/test543.m	(revision 28013)
@@ -1,3 +1,3 @@
-%Test Name: PigTranStochasticforcing
+%Test Name: PigTranRignotarma
 md=triangle(model(),'../Exp/Pig.exp',10000.);
 md=setmask(md,'../Exp/PigShelves.exp','../Exp/PigIslands.exp');
@@ -21,25 +21,10 @@
     end
 end
-% Basin separation default
-idb_df = zeros(md.mesh.numberofelements,1);
-iid1   = find(md.mesh.x<=-1.62e6);
-for ii=1:md.mesh.numberofelements
-    for vertex=1:3
-        if any(iid1==md.mesh.elements(ii,vertex)) %one vertex in basin 1
-            idb_df(ii) = 1;
-        end
-    end
-    if idb_df(ii)==0 %no vertex was found in basin 1
-        idb_df(ii) = 2;
-    end
-end
 % Dimensionalities
 nb_tf    = 2;
-nb_clv   = 2;
-nb_flmlt = 2;
 
 %Calving parameters
 md.mask.ice_levelset = 1e4*(md.mask.ice_levelset + 0.5);
-md.calving.calvingrate = 0.3*ones(md.mesh.numberofvertices,1);
+md.calving.calvingrate = 0*ones(md.mesh.numberofvertices,1);
 md.levelset.spclevelset = NaN(md.mesh.numberofvertices,1);
 md.levelset.migration_max = 10.0; %avoid fast advance/retreat of the front
@@ -67,4 +52,19 @@
 trendsM          = cat(3,trendsMp0,trendsMp1);
 datebreaksM      = [1;1];
+% Subglacial discharge parameters %
+isdischargearma            = 1;
+sd_ar_order                = 1;
+sd_ma_order                = 1;
+sd_num_breaks              = 1;
+sd_num_params              = 2;
+sd_arma_timestep           = 1;
+sd_arlag_coefs             = [0.95;0.95];
+sd_malag_coefs             = [0;0];
+sd_datebreaks              = [1;1];
+sd_monthlyfrac             = [0,0,0,0,0,0,0.5,0.5,0,0,0,0;
+                              0,0,0,0,0,0,0.5,0.5,0,0,0,0];
+sd_const                   = [50000,70000.0;8000,10000.0];
+sd_trend                   = [0,10000;0,0];
+sd_polyparam               = cat(3,sd_const,sd_trend);
 
 md.frontalforcings.num_basins              = nb_tf;
@@ -84,19 +84,33 @@
 md.frontalforcings.monthlyvals_trends      = trendsM;
 md.frontalforcings.monthlyvals_datebreaks  = datebreaksM;
+md.frontalforcings.isdischargearma         = isdischargearma;
+if(isdischargearma==0)
+	md.frontalforcings.subglacial_discharge    = 0.01*ones(md.mesh.numberofvertices,1);
+else
+    md.frontalforcings.sd_num_breaks         = sd_num_breaks;
+    md.frontalforcings.sd_num_params         = sd_num_params;
+    md.frontalforcings.sd_ar_order           = sd_ar_order;
+    md.frontalforcings.sd_ma_order           = sd_ma_order;
+    md.frontalforcings.sd_arma_timestep      = sd_arma_timestep;
+    md.frontalforcings.sd_arlag_coefs        = sd_arlag_coefs;
+    md.frontalforcings.sd_malag_coefs        = sd_malag_coefs;
+    md.frontalforcings.sd_datebreaks         = sd_datebreaks;
+    md.frontalforcings.sd_monthlyfrac        = sd_monthlyfrac;
+    md.frontalforcings.sd_polynomialparams   = sd_polyparam;
+end
 % Floating Ice Melt parameters
-md.basalforcings.floatingice_melting_rate = 0.1*ones(md.mesh.numberofvertices,1);
+md.basalforcings.floatingice_melting_rate = 0*ones(md.mesh.numberofvertices,1);
+
 
 % Covariance matrix
 covtf       = 1e-4*eye(nb_tf);
-covclv      = 1e-1*eye(nb_clv);
-covclv(1,1) = 1/10*covclv(1,1);
-covflmlt    = 0.05*eye(nb_flmlt);
-covglob     = blkdiag(covtf,covclv,covflmlt);
+covsd       = 1e3*eye(nb_tf);
+covglob     = blkdiag(covtf,covsd);
 
 %Stochastic forcing
 md.stochasticforcing.isstochasticforcing = 1;
-md.stochasticforcing.fields              = [{'FrontalForcingsRignotarma'},{'DefaultCalving'},{'FloatingMeltRate'}];
+md.stochasticforcing.fields              = [{'FrontalForcingsRignotarma'},{'FrontalForcingsSubglacialDischargearma'}];
 md.stochasticforcing.defaultdimension    = 2;
-md.stochasticforcing.default_id          = idb_df;
+md.stochasticforcing.default_id          = idb_tf;
 md.stochasticforcing.covariance          = covglob; %global covariance among- and between-fields
 md.stochasticforcing.randomflag          = 0; %determines true/false randomness
@@ -105,5 +119,5 @@
 md.transient.isgroundingline = 1;
 
-md.transient.requested_outputs = {'default','CalvingCalvingrate','CalvingMeltingrate','BasalforcingsFloatingiceMeltingRate'};
+md.transient.requested_outputs = {'default','CalvingMeltingrate'};
 md.cluster=generic('name',oshostname(),'np',2);
 md=solve(md,'Transient');
@@ -111,12 +125,12 @@
 %Fields and tolerances to track changes
 field_names ={...
-   'Vx1' ,'Vy1' ,'Vel1' ,'Thickness1' ,'MaskIceLevelset1' ,'CalvingCalvingrate1' ,'CalvingMeltingrate1' ,'BasalforcingsFloatingiceMeltingRate1',...
-   'Vx2' ,'Vy2' ,'Vel2' ,'Thickness2' ,'MaskIceLevelset2' ,'CalvingCalvingrate2' ,'CalvingMeltingrate2' ,'BasalforcingsFloatingiceMeltingRate2',...
-   'Vx10','Vy10','Vel10','Thickness10','MaskIceLevelset10','CalvingCalvingrate10','CalvingMeltingrate10','BasalforcingsFloatingiceMeltingRate10',...
+   'Vx1' ,'Vy1' ,'Vel1' ,'Thickness1' ,'MaskIceLevelset1' ,'CalvingMeltingrate1' ,...
+   'Vx2' ,'Vy2' ,'Vel2' ,'Thickness2' ,'MaskIceLevelset2' ,'CalvingMeltingrate2' ,...
+   'Vx10','Vy10','Vel10','Thickness10','MaskIceLevelset10','CalvingMeltingrate10',...
    };
 field_tolerances={...
-   1e-11,2e-11,2e-11,1e-11,1e-9,1e-10,1e-10,1e-10,...
-   2e-11,1e-11,1e-11,9e-11,2e-9,1e-10,1e-10,1e-10,...
-   2e-6,1e-6,1e-6,1e-6,5e-6,1e-6,1e-6,1e-6,...
+   1e-11,2e-11,2e-11,1e-11,1e-9,1e-10,...
+   2e-11,1e-11,1e-11,9e-11,2e-9,1e-10,...
+   2e-6,1e-6,1e-6,1e-6,5e-6,1e-6,...
    };
 field_values={...
@@ -126,7 +140,5 @@
    (md.results.TransientSolution(1).Thickness),...
    (md.results.TransientSolution(1).MaskIceLevelset),...
-   (md.results.TransientSolution(1).CalvingCalvingrate),...
    (md.results.TransientSolution(1).CalvingMeltingrate),...
-   (md.results.TransientSolution(1).BasalforcingsFloatingiceMeltingRate),...
    (md.results.TransientSolution(20).Vx),...
    (md.results.TransientSolution(20).Vy),...
@@ -134,7 +146,5 @@
    (md.results.TransientSolution(20).Thickness),...
    (md.results.TransientSolution(20).MaskIceLevelset),...
-   (md.results.TransientSolution(20).CalvingCalvingrate),...
    (md.results.TransientSolution(20).CalvingMeltingrate),...
-   (md.results.TransientSolution(20).BasalforcingsFloatingiceMeltingRate),...
 	(md.results.TransientSolution(40).Vx),...
 	(md.results.TransientSolution(40).Vy),...
@@ -142,6 +152,4 @@
 	(md.results.TransientSolution(40).Thickness),...
 	(md.results.TransientSolution(40).MaskIceLevelset),...
-	(md.results.TransientSolution(40).CalvingCalvingrate),...
 	(md.results.TransientSolution(40).CalvingMeltingrate),...
-	(md.results.TransientSolution(40).BasalforcingsFloatingiceMeltingRate),...
 	};
Index: /issm/trunk/test/NightlyRun/test543.py
===================================================================
--- /issm/trunk/test/NightlyRun/test543.py	(revision 28012)
+++ /issm/trunk/test/NightlyRun/test543.py	(revision 28013)
@@ -1,3 +1,3 @@
-#Test Name: PigTranStochasticforcing
+#Test Name: PigTranRignotarma 
 import numpy as np
 from frontalforcingsrignotarma import frontalforcingsrignotarma
@@ -29,22 +29,10 @@
         for vertex in range(3):
             idb_tf[ii] = 2
-# Basin separation default
-idb_df = np.zeros((md.mesh.numberofelements,))
-iid1 = np.where(md.mesh.x <= -1.62e6)[0]
-for ii in range(md.mesh.numberofelements):
-    for vertex in range(3):
-        if md.mesh.elements[ii][vertex] - 1 in iid1:  # one vertex in basin 1; NOTE: offset because of 1-based vertex indexing
-            idb_df[ii] = 1
-    if idb_df[ii] == 0:  # no vertex was found in basin 1
-        for vertex in range(3):
-            idb_df[ii] = 2
 #Dimensionalities
 nb_tf = 2
-nb_clv  = 2
-nb_flmlt = 2
 
 # Calving parameters
 md.mask.ice_levelset = 1e4 * (md.mask.ice_levelset + 0.5)
-md.calving.calvingrate = 0.3 * np.ones((md.mesh.numberofvertices,))
+md.calving.calvingrate = 0 * np.ones((md.mesh.numberofvertices,))
 md.levelset.spclevelset = np.full((md.mesh.numberofvertices,), np.nan)
 md.levelset.migration_max = 10.0
@@ -72,4 +60,19 @@
 trendsM          = np.transpose(np.stack((trendsMp0,trendsMp1)),(1,2,0)) 
 datebreaksM      = np.array([[1],[1]]) 
+# Subglacial discharge params #
+isdischargearma            = 1
+sd_ar_order                = 1
+sd_ma_order                = 1
+sd_num_breaks              = 1
+sd_num_params              = 2
+sd_arma_timestep           = 1
+sd_arlag_coefs             = np.array([[0.95],[0.95]])
+sd_malag_coefs             = np.array([[0.0],[0.0]])
+sd_datebreaks              = np.array([[1.0],[1.0]])
+sd_monthlyfrac             = np.array([[0,0,0,0,0,0,0.5,0.5,0,0,0,0],[0,0,0,0,0,0,0.5,0.5,0,0,0,0]])
+sd_const                   = np.array([[50000,70000],[8000,10000.0]])
+sd_trend                   = np.array([[0.0,10000],[0,0]])
+sd_polyparam               = np.transpose(np.stack((sd_const,sd_trend)),(1,2,0))
+
 
 md.frontalforcings.num_basins = nb_tf
@@ -89,12 +92,24 @@
 md.frontalforcings.monthlyvals_trends      = trendsM
 md.frontalforcings.monthlyvals_datebreaks  = datebreaksM
+md.frontalforcings.isdischargearma         = isdischargearma
+if(isdischargearma==0):
+   md.frontalforcings.subglacial_discharge    = 0.01*ones(md.mesh.numberofvertices,1)
+else:
+    md.frontalforcings.sd_num_breaks         = sd_num_breaks
+    md.frontalforcings.sd_num_params         = sd_num_params
+    md.frontalforcings.sd_ar_order           = sd_ar_order
+    md.frontalforcings.sd_ma_order           = sd_ma_order
+    md.frontalforcings.sd_arma_timestep      = sd_arma_timestep
+    md.frontalforcings.sd_arlag_coefs        = sd_arlag_coefs
+    md.frontalforcings.sd_malag_coefs        = sd_malag_coefs
+    md.frontalforcings.sd_datebreaks         = sd_datebreaks
+    md.frontalforcings.sd_monthlyfrac        = sd_monthlyfrac
+    md.frontalforcings.sd_polynomialparams   = sd_polyparam
 #Floating Ice Melt parameters
-md.basalforcings.floatingice_melting_rate = 0.1 * np.ones((md.mesh.numberofvertices,))
+md.basalforcings.floatingice_melting_rate = 0.0 * np.ones((md.mesh.numberofvertices,))
 
 #Covariance matrix
 covtf = 1e-4 * np.identity(nb_tf)
-covclv = 1e-1 * np.identity(nb_clv)
-covclv[0, 0] = 1 / 10 * covclv[0, 0]
-covflmlt = 0.05 * np.identity(nb_flmlt)
+covsd = 1e3 * np.identity(nb_tf)
 #covglob          = np.zeros([6,6])
 #covglob[0:2,0:2] = covtf
@@ -103,10 +118,8 @@
 
 #Hard-coding covariance matrix because python is complaining
-covglob = np.array([[1e-4, 0., 0., 0., 0., 0.],
-                    [0., 1e-4, 0., 0., 0., 0.],
-                    [0., 0., 1e-2, 0., 0., 0.],
-                    [0., 0., 0., 1e-1, 0., 0.],
-                    [0., 0., 0., 0., 0.05, 0.],
-                    [0., 0., 0., 0., 0., 0.05]])
+covglob = np.array([[1e-4, 0., 0., 0.],
+                    [0., 1e-4, 0., 0.],
+                    [0., 0., 1e3, 0.],
+                    [0., 0., 0., 1e3]])
 #testchol = np.linalg.cholesky(covglob)
 #print(testchol)
@@ -114,7 +127,7 @@
 # Stochastic forcing
 md.stochasticforcing.isstochasticforcing = 1
-md.stochasticforcing.fields = ['FrontalForcingsRignotarma', 'DefaultCalving', 'FloatingMeltRate']
+md.stochasticforcing.fields = ['FrontalForcingsRignotarma','FrontalForcingsSubglacialDischargearma']
 md.stochasticforcing.defaultdimension = 2
-md.stochasticforcing.default_id = idb_df
+md.stochasticforcing.default_id = idb_tf
 md.stochasticforcing.covariance = covglob  # global covariance among- and between-fields
 md.stochasticforcing.randomflag = 0  # determines true/false randomness
@@ -129,12 +142,12 @@
 # Fields and tolerances to track changes
 field_names = [
-    'Vx1', 'Vy1', 'Vel1', 'Thickness1', 'MaskIceLevelset1', 'CalvingCalvingrate1', 'CalvingMeltingrate1', 'BasalforcingsFloatingiceMeltingRate1',
-    'Vx2', 'Vy2', 'Vel2', 'Thickness2', 'MaskIceLevelset2', 'CalvingCalvingrate2', 'CalvingMeltingrate2', 'BasalforcingsFloatingiceMeltingRate2',
-    'Vx10', 'Vy10', 'Vel10', 'Thickness10', 'MaskIceLevelset10', 'CalvingCalvingrate10', 'CalvingMeltingrate10', 'BasalforcingsFloatingiceMeltingRate10']
+    'Vx1', 'Vy1', 'Vel1', 'Thickness1', 'MaskIceLevelset1', 'CalvingMeltingrate1',
+    'Vx2', 'Vy2', 'Vel2', 'Thickness2', 'MaskIceLevelset2', 'CalvingMeltingrate2',
+    'Vx10', 'Vy10', 'Vel10', 'Thickness10', 'MaskIceLevelset10', 'CalvingMeltingrate10']
 
 field_tolerances = [
-    1e-11, 2e-11, 2e-11, 1e-11, 1e-9, 1e-10, 1e-10, 1e-10,
-    2e-11, 1e-11, 1e-11, 9e-11, 2e-9, 1e-10, 1e-10, 1e-10,
-    2e-6, 1e-6, 1e-6, 1e-6, 5e-6, 1e-6, 1e-6, 1e-6]
+        1e-11,2e-11,2e-11,1e-11,1e-9,1e-10,
+        2e-11,1e-11,1e-11,9e-11,2e-9,1e-10,
+        2e-6,1e-6,1e-6,1e-6,5e-6,1e-6]
 field_values = [
     md.results.TransientSolution[0].Vx,
@@ -143,7 +156,5 @@
     md.results.TransientSolution[0].Thickness,
     md.results.TransientSolution[0].MaskIceLevelset,
-    md.results.TransientSolution[0].CalvingCalvingrate,
     md.results.TransientSolution[0].CalvingMeltingrate,
-    md.results.TransientSolution[0].BasalforcingsFloatingiceMeltingRate,
     md.results.TransientSolution[19].Vx,
     md.results.TransientSolution[19].Vy,
@@ -151,7 +162,5 @@
     md.results.TransientSolution[19].Thickness,
     md.results.TransientSolution[19].MaskIceLevelset,
-    md.results.TransientSolution[19].CalvingCalvingrate,
     md.results.TransientSolution[19].CalvingMeltingrate,
-    md.results.TransientSolution[19].BasalforcingsFloatingiceMeltingRate,
     md.results.TransientSolution[39].Vx,
     md.results.TransientSolution[39].Vy,
@@ -159,5 +168,3 @@
     md.results.TransientSolution[39].Thickness,
     md.results.TransientSolution[39].MaskIceLevelset,
-    md.results.TransientSolution[39].CalvingCalvingrate,
-    md.results.TransientSolution[39].CalvingMeltingrate,
-    md.results.TransientSolution[39].BasalforcingsFloatingiceMeltingRate]
+    md.results.TransientSolution[39].CalvingMeltingrate]
Index: /issm/trunk/test/NightlyRun/test545.m
===================================================================
--- /issm/trunk/test/NightlyRun/test545.m	(revision 28013)
+++ /issm/trunk/test/NightlyRun/test545.m	(revision 28013)
@@ -0,0 +1,127 @@
+%Test Name: PigTranStochasticforcingCovariance
+md=triangle(model(),'../Exp/Pig.exp',10000.);
+md=setmask(md,'../Exp/PigShelves.exp','../Exp/PigIslands.exp');
+md=parameterize(md,'../Par/Pig.par');
+md=setflowequation(md,'SSA','all');
+md.timestepping.start_time = 0;
+md.timestepping.time_step  = 1;
+md.timestepping.final_time = 10;
+
+%Basin separation TF
+idb_tf  = zeros(md.mesh.numberofelements,1);
+iid1    = find(md.mesh.x<=-1.6e6);
+for ii=1:md.mesh.numberofelements
+    for vertex=1:3
+        if any(iid1==md.mesh.elements(ii,vertex)) %one vertex in basin 1
+            idb_tf(ii) = 1;
+        end
+    end
+    if idb_tf(ii)==0 %no vertex was found in basin 1
+        idb_tf(ii) = 2;
+    end
+end
+% Basin separation default
+idb_df = zeros(md.mesh.numberofelements,1);
+iid1   = find(md.mesh.x<=-1.62e6);
+for ii=1:md.mesh.numberofelements
+    for vertex=1:3
+        if any(iid1==md.mesh.elements(ii,vertex)) %one vertex in basin 1
+            idb_df(ii) = 1;
+        end
+    end
+    if idb_df(ii)==0 %no vertex was found in basin 1
+        idb_df(ii) = 2;
+    end
+end
+% Dimensionalities
+nb_tf    = 2;
+nb_clv   = 2;
+nb_flmlt = 2;
+
+%Calving parameters
+md.mask.ice_levelset = 1e4*(md.mask.ice_levelset + 0.5);
+md.calving.calvingrate = 0.3*ones(md.mesh.numberofvertices,1);
+md.levelset.spclevelset = NaN(md.mesh.numberofvertices,1);
+md.levelset.migration_max = 10.0; %avoid fast advance/retreat of the front
+%%% Frontal forcing parameters %%%
+md.frontalforcings=frontalforcingsrignotarma();
+md.frontalforcings.num_basins              = nb_tf;
+md.frontalforcings.basin_id                = idb_tf;
+% Polynomial params %
+md.frontalforcings.num_params        = 1; %only a constant term
+md.frontalforcings.num_breaks        = 0; %no breakpoint
+constval                             = [2.5;0.5];
+md.frontalforcings.polynomialparams  = constval;
+% No monthly effects: do nothing %
+% ARMA model parameters %
+md.frontalforcings.ar_order        = 3;
+md.frontalforcings.ma_order        = 2;
+md.frontalforcings.arma_timestep   = 2; %timestep of the ARMA model [yr]
+md.frontalforcings.arlag_coefs     = [[0.1,-0.1,0.01];[0.2,-0.2,0.1]]; %autoregressive parameters
+md.frontalforcings.malag_coefs     = [[0.1,0.0];[0.0,0.1]]; %moving-average parameters
+% No ARMA model of subglacial discharge: simply specify values at vertices %
+md.frontalforcings.subglacial_discharge = 10*ones(md.mesh.numberofvertices,1);
+
+% Floating Ice Melt parameters
+md.basalforcings.floatingice_melting_rate = 0.1*ones(md.mesh.numberofvertices,1);
+
+
+% Covariance matrix
+covtf       = 1e-4*eye(nb_tf);
+covclv      = 1e-1*eye(nb_clv);
+covclv(1,1) = 1/10*covclv(1,1);
+covflmlt    = 0.05*eye(nb_flmlt);
+covglob     = blkdiag(covtf,covclv,covflmlt);
+
+%Stochastic forcing
+md.stochasticforcing.isstochasticforcing = 1;
+md.stochasticforcing.fields              = [{'FrontalForcingsRignotarma'},{'DefaultCalving'},{'FloatingMeltRate'}];
+md.stochasticforcing.defaultdimension    = 2;
+md.stochasticforcing.default_id          = idb_df;
+md.stochasticforcing.covariance          = covglob; %global covariance among- and between-fields
+md.stochasticforcing.randomflag          = 0; %determines true/false randomness
+
+md.transient.ismovingfront   = 1;
+md.transient.isgroundingline = 1;
+
+md.transient.requested_outputs = {'default','CalvingCalvingrate','CalvingMeltingrate','BasalforcingsFloatingiceMeltingRate'};
+md.cluster=generic('name',oshostname(),'np',2);
+md=solve(md,'Transient');
+
+%Fields and tolerances to track changes
+field_names ={...
+   'Vx1' ,'Vy1' ,'Vel1' ,'Thickness1' , 'MaskIceLevelset1', 'CalvingCalvingrate1', 'CalvingMeltingrate1', 'BasalforcingsFloatingiceMeltingRate1',...
+   'Vx5' ,'Vy5' ,'Vel5' ,'Thickness5' , 'MaskIceLevelset5', 'CalvingCalvingrate5', 'CalvingMeltingrate5', 'BasalforcingsFloatingiceMeltingRate5',...
+   'Vx10' ,'Vy10' ,'Vel10' ,'Thickness10' , 'MaskIceLevelset10', 'CalvingCalvingrate10', 'CalvingMeltingrate10', 'BasalforcingsFloatingiceMeltingRate10',...
+   };
+field_tolerances={...
+   1e-11,2e-11,2e-11,1e-11,1e-9,1e-10,1e-10,1e-10,...
+   2e-11,1e-11,1e-11,9e-11,2e-9,1e-10,1e-10,1e-10,...
+   2e-6,1e-6,1e-6,1e-6,5e-6,1e-6,1e-6,1e-6,...
+   };
+field_values={...
+   (md.results.TransientSolution(1).Vx),...
+   (md.results.TransientSolution(1).Vy),...
+   (md.results.TransientSolution(1).Vel),...
+   (md.results.TransientSolution(1).Thickness),...
+   (md.results.TransientSolution(1).MaskIceLevelset),...
+   (md.results.TransientSolution(1).CalvingCalvingrate),...
+   (md.results.TransientSolution(1).CalvingMeltingrate),...
+   (md.results.TransientSolution(1).BasalforcingsFloatingiceMeltingRate),...
+   (md.results.TransientSolution(5).Vx),...
+   (md.results.TransientSolution(5).Vy),...
+   (md.results.TransientSolution(5).Vel),...
+   (md.results.TransientSolution(5).Thickness),...
+   (md.results.TransientSolution(5).MaskIceLevelset),...
+   (md.results.TransientSolution(5).CalvingCalvingrate),...
+   (md.results.TransientSolution(5).CalvingMeltingrate),...
+   (md.results.TransientSolution(5).BasalforcingsFloatingiceMeltingRate),...
+	(md.results.TransientSolution(10).Vx),...
+	(md.results.TransientSolution(10).Vy),...
+	(md.results.TransientSolution(10).Vel),...
+	(md.results.TransientSolution(10).Thickness),...
+	(md.results.TransientSolution(10).MaskIceLevelset),...
+	(md.results.TransientSolution(10).CalvingCalvingrate),...
+	(md.results.TransientSolution(10).CalvingMeltingrate),...
+	(md.results.TransientSolution(10).BasalforcingsFloatingiceMeltingRate),...
+	};
Index: /issm/trunk/test/NightlyRun/test545.py
===================================================================
--- /issm/trunk/test/NightlyRun/test545.py	(revision 28013)
+++ /issm/trunk/test/NightlyRun/test545.py	(revision 28013)
@@ -0,0 +1,142 @@
+#Test Name: PigTranStochasticforcingCovariance
+import numpy as np
+from frontalforcingsrignotarma import frontalforcingsrignotarma
+from socket import gethostname
+from model import *
+from parameterize import parameterize
+from setflowequation import setflowequation
+from setmask import setmask
+from solve import solve
+from triangle import triangle
+
+
+md = triangle(model(), '../Exp/Pig.exp', 10000)
+md = setmask(md, '../Exp/PigShelves.exp', '../Exp/PigIslands.exp')
+md = parameterize(md, '../Par/Pig.py')
+md = setflowequation(md, 'SSA', 'all')
+md.timestepping.start_time = 0
+md.timestepping.time_step = 1
+md.timestepping.final_time = 10
+
+# Basin separation TF
+idb_tf = np.zeros((md.mesh.numberofelements,))
+iid1 = np.where(md.mesh.x <= -1.6e6)[0]
+for ii in range(md.mesh.numberofelements):
+    for vertex in range(3):
+        if md.mesh.elements[ii][vertex] - 1 in iid1:  # one vertex in basin 1; NOTE: offset because of 1-based vertex indexing
+            idb_tf[ii] = 1
+    if idb_tf[ii] == 0:  # no vertex was found in basin 1
+        for vertex in range(3):
+            idb_tf[ii] = 2
+# Basin separation default
+idb_df = np.zeros((md.mesh.numberofelements,))
+iid1 = np.where(md.mesh.x <= -1.62e6)[0]
+for ii in range(md.mesh.numberofelements):
+    for vertex in range(3):
+        if md.mesh.elements[ii][vertex] - 1 in iid1:  # one vertex in basin 1; NOTE: offset because of 1-based vertex indexing
+            idb_df[ii] = 1
+    if idb_df[ii] == 0:  # no vertex was found in basin 1
+        for vertex in range(3):
+            idb_df[ii] = 2
+#Dimensionalities
+nb_tf = 2
+nb_clv  = 2
+nb_flmlt = 2
+
+# Calving parameters
+md.mask.ice_levelset = 1e4 * (md.mask.ice_levelset + 0.5)
+md.calving.calvingrate = 0.3 * np.ones((md.mesh.numberofvertices,))
+md.levelset.spclevelset = np.full((md.mesh.numberofvertices,), np.nan)
+md.levelset.migration_max = 10.0
+### Frontal forcing parameters ###
+md.frontalforcings = frontalforcingsrignotarma()
+md.frontalforcings.num_basins = nb_tf
+md.frontalforcings.basin_id = idb_tf
+# Polynomial params #
+md.frontalforcings.num_params       = 1 #only a constant term
+md.frontalforcings.num_breaks       = 0 #no breakpoint
+constval                            = np.array([[2.5],[0.5]])
+md.frontalforcings.polynomialparams = np.copy(constval)
+# No monthly effects: do nothing #
+# ARMA model parameters #
+md.frontalforcings.ar_order = 3
+md.frontalforcings.ma_order = 2
+md.frontalforcings.arma_timestep = 2  # timestep of the ARMA model [yr]
+md.frontalforcings.arlag_coefs = np.array([[0.1, -0.1, 0.01], [0.2, -0.2, 0.1]])  # autoregressive parameters
+md.frontalforcings.malag_coefs = np.array([[0.1, 0.0], [0.0, 0.1]])  # moving-average parameters
+# No ARMA model of subglacial discharge: simply specify values at vertices #
+md.frontalforcings.subglacial_discharge = 10 * np.ones((md.mesh.numberofvertices,))
+
+#Floating Ice Melt parameters
+md.basalforcings.floatingice_melting_rate = 0.1 * np.ones((md.mesh.numberofvertices,))
+
+#Covariance matrix
+covtf = 1e-4 * np.identity(nb_tf)
+covclv = 1e-1 * np.identity(nb_clv)
+covclv[0, 0] = 1 / 10 * covclv[0, 0]
+covflmlt = 0.05 * np.identity(nb_flmlt)
+#covglob          = np.zeros([6,6])
+#covglob[0:2,0:2] = covtf
+#covglob[2:4,2:4] = covclv
+#covglob[4:6,4:6] = covflmlt
+
+#Hard-coding covariance matrix because python is complaining
+covglob = np.array([[1e-4, 0., 0., 0., 0., 0.],
+                    [0., 1e-4, 0., 0., 0., 0.],
+                    [0., 0., 1e-2, 0., 0., 0.],
+                    [0., 0., 0., 1e-1, 0., 0.],
+                    [0., 0., 0., 0., 0.05, 0.],
+                    [0., 0., 0., 0., 0., 0.05]])
+#testchol = np.linalg.cholesky(covglob)
+#print(testchol)
+
+# Stochastic forcing
+md.stochasticforcing.isstochasticforcing = 1
+md.stochasticforcing.fields = ['FrontalForcingsRignotarma', 'DefaultCalving', 'FloatingMeltRate']
+md.stochasticforcing.defaultdimension = 2
+md.stochasticforcing.default_id = idb_df
+md.stochasticforcing.covariance = covglob  # global covariance among- and between-fields
+md.stochasticforcing.randomflag = 0  # determines true/false randomness
+
+md.transient.ismovingfront = 1
+md.transient.isgroundingline = 1
+
+md.transient.requested_outputs = ['default', 'CalvingCalvingrate', 'CalvingMeltingrate', 'BasalforcingsFloatingiceMeltingRate']
+md.cluster = generic('name', gethostname(), 'np', 2)
+md = solve(md, 'Transient')
+
+# Fields and tolerances to track changes
+field_names = [
+    'Vx1', 'Vy1', 'Vel1', 'Thickness1', 'MaskIceLevelset1', 'CalvingCalvingrate1', 'CalvingMeltingrate1', 'BasalforcingsFloatingiceMeltingRate1',
+    'Vx5', 'Vy5', 'Vel5', 'Thickness5', 'MaskIceLevelset5', 'CalvingCalvingrate5', 'CalvingMeltingrate5', 'BasalforcingsFloatingiceMeltingRate5',
+    'Vx10', 'Vy10', 'Vel10', 'Thickness10', 'MaskIceLevelset10', 'CalvingCalvingrate10', 'CalvingMeltingrate10', 'BasalforcingsFloatingiceMeltingRate10']
+
+field_tolerances = [
+    1e-11, 2e-11, 2e-11, 1e-11, 1e-9, 1e-10, 1e-10, 1e-10,
+    2e-11, 1e-11, 1e-11, 9e-11, 2e-9, 1e-10, 1e-10, 1e-10,
+    2e-6, 1e-6, 1e-6, 1e-6, 5e-6, 1e-6, 1e-6, 1e-6]
+field_values = [
+    md.results.TransientSolution[0].Vx,
+    md.results.TransientSolution[0].Vy,
+    md.results.TransientSolution[0].Vel,
+    md.results.TransientSolution[0].Thickness,
+    md.results.TransientSolution[0].MaskIceLevelset,
+    md.results.TransientSolution[0].CalvingCalvingrate,
+    md.results.TransientSolution[0].CalvingMeltingrate,
+    md.results.TransientSolution[0].BasalforcingsFloatingiceMeltingRate,
+    md.results.TransientSolution[4].Vx,
+    md.results.TransientSolution[4].Vy,
+    md.results.TransientSolution[4].Vel,
+    md.results.TransientSolution[4].Thickness,
+    md.results.TransientSolution[4].MaskIceLevelset,
+    md.results.TransientSolution[4].CalvingCalvingrate,
+    md.results.TransientSolution[4].CalvingMeltingrate,
+    md.results.TransientSolution[4].BasalforcingsFloatingiceMeltingRate,
+    md.results.TransientSolution[9].Vx,
+    md.results.TransientSolution[9].Vy,
+    md.results.TransientSolution[9].Vel,
+    md.results.TransientSolution[9].Thickness,
+    md.results.TransientSolution[9].MaskIceLevelset,
+    md.results.TransientSolution[9].CalvingCalvingrate,
+    md.results.TransientSolution[9].CalvingMeltingrate,
+    md.results.TransientSolution[9].BasalforcingsFloatingiceMeltingRate]
Index: /issm/trunk/test/NightlyRun/test546.m
===================================================================
--- /issm/trunk/test/NightlyRun/test546.m	(revision 28013)
+++ /issm/trunk/test/NightlyRun/test546.m	(revision 28013)
@@ -0,0 +1,144 @@
+%Test Name: PigTranMultcovStochasticforcings 
+md=triangle(model(),'../Exp/Pig.exp',8000.);
+md=setmask(md,'../Exp/PigShelves.exp','../Exp/PigIslands.exp');
+md=parameterize(md,'../Par/Pig.par');
+md=setflowequation(md,'SSA','all');
+md.timestepping.start_time = 0;
+md.timestepping.time_step  = 1;
+md.timestepping.final_time = 10;
+
+%Basin separation
+idb     = zeros(md.mesh.numberofelements,1);
+iid1    = find(md.mesh.x>=-1.6e6);
+for ii=1:md.mesh.numberofelements
+    for vertex=1:3
+        if any(iid1==md.mesh.elements(ii,vertex)) %one vertex in basin 1
+            idb(ii) = 1;
+        end
+    end
+    if idb(ii)==0 %no vertex was found in basin 1
+        idb(ii) = 2;
+    end
+end
+nb_bas = 2;
+
+%SMB
+numparams               = 1;
+numbreaks               = 0;
+intercept               = [0.5;0.01];
+polynomialparams        = intercept;
+datebreaks              = NaN;
+md.smb                  = SMBarma();
+md.smb.num_basins       = nb_bas; %number of basins
+md.smb.basin_id         = idb; %prescribe basin ID number to elements
+md.smb.num_params       = numparams; %number of parameters in the polynomial
+md.smb.num_breaks       = numbreaks; %number of breakpoints
+md.smb.polynomialparams = polynomialparams;
+md.smb.datebreaks       = datebreaks;
+md.smb.ar_order         = 4;
+md.smb.ma_order         = 4;
+md.smb.arma_timestep    = 2.0; %timestep of the ARMA model [yr]
+md.smb.arlag_coefs      = [[0.2,0.1,0.05,0.01];[0.4,0.2,-0.2,0.1]];
+md.smb.malag_coefs      = [[0.1,0.1,0.2,0.3];[0.5,0.8,1.3,2.4]];
+
+%Calving
+md.mask.ice_levelset           = 1e4*(md.mask.ice_levelset + 0.5);
+md.calving.calvingrate         = 0.1*ones(md.mesh.numberofvertices,1);
+md.levelset.spclevelset        = NaN(md.mesh.numberofvertices,1);
+md.levelset.migration_max      = 10.0;
+md.frontalforcings.meltingrate = zeros(md.mesh.numberofvertices,1);
+
+% Basal forcing implementation
+numparams                         = 2;
+numbreaks                         = 1;
+intercept                         = [3.0,4.0;1.0,0.5];
+trendlin                          = [0.0,0.1;0,0];
+polynomialparams                  = cat(3,intercept,trendlin);
+datebreaks                        = [6;7];
+md.basalforcings                  = linearbasalforcingsarma();
+md.basalforcings.num_basins       = nb_bas; %number of basins
+md.basalforcings.basin_id         = idb; %prescribe basin ID number to elements
+md.basalforcings.polynomialparams = polynomialparams;
+md.basalforcings.datebreaks       = datebreaks;
+md.basalforcings.num_params       = numparams; %number of parameters in the polynomial
+md.basalforcings.num_breaks       = numbreaks; %number of breakpoints
+md.basalforcings.ar_order         = 1;
+md.basalforcings.ma_order         = 1;
+md.basalforcings.arma_timestep    = 1.0; %timestep of the ARMA model [yr]
+md.basalforcings.arlag_coefs      = [0.0;0.1];
+md.basalforcings.malag_coefs      = [0.55;0.34];
+md.basalforcings.deepwater_elevation       = [-1000,-1520];
+md.basalforcings.upperwater_elevation      = [0,-50];
+md.basalforcings.upperwater_melting_rate   = [0.0,0.0];
+md.basalforcings.groundedice_melting_rate  = zeros(md.mesh.numberofvertices,1);
+
+% Covariance matrices
+sdvsmb       = [1,1];
+sdvclv       = [0.1,0.01];
+sdvdwm       = [300,300];
+vecsdv       = [sdvsmb,sdvclv,sdvdwm];
+corrmat      = [1,0,0,0,0,0;
+                0,1,0,0,0,0;
+                0,0,1,0.4,0.1,0.1;
+                0,0,0.4,1,0.1,0.1;
+                0,0,0.1,0.1,1,0.3;
+                0,0,0.1,0.1,0.3,1];
+covglob0     = diag(vecsdv)*corrmat*diag(vecsdv);
+covglob1     = 2*covglob0;
+multcov      = cat(3,covglob0,covglob1);
+tmcov        = [0,5];
+
+% Stochastic forcing
+md.stochasticforcing.isstochasticforcing = 1;
+md.stochasticforcing.fields              = [{'SMBarma'},{'DefaultCalving'},{'BasalforcingsDeepwaterMeltingRatearma'}];
+md.stochasticforcing.defaultdimension    = 2;
+md.stochasticforcing.default_id          = idb;
+md.stochasticforcing.covariance          = multcov; %global covariance among- and between-fields
+md.stochasticforcing.timecovariance      = tmcov; %simulation times when covariance matrix switches
+md.stochasticforcing.randomflag          = 0; %determines true/false randomness
+
+md.transient.ismovingfront     = 1;
+md.transient.requested_outputs = {'default','SmbMassBalance','BasalforcingsFloatingiceMeltingRate','BasalforcingsSpatialDeepwaterMeltingRate'};
+md.transient.isstressbalance = 1;
+md.transient.ismasstransport = 1;
+md.transient.issmb           = 1;
+md.transient.isthermal       = 0;
+md.transient.isgroundingline = 1;
+
+md.cluster=generic('name',oshostname(),'np',2);
+md=solve(md,'Transient');
+
+%Fields and tolerances to track changes
+field_names ={...
+   'Vx1' ,'Vy1' ,'Vel1' ,'Thickness1', 'SmbMassBalance1', 'BasalforcingsFloatingiceMeltingRate1', 'BasalforcingsSpatialDeepwaterMeltingRate1',...
+   'Vx2' ,'Vy2' ,'Vel2' ,'Thickness2', 'SmbMassBalance2' ,'BasalforcingsFloatingiceMeltingRate2', 'BasalforcingsSpatialDeepwaterMeltingRate2',...
+   'Vx3' ,'Vy3' ,'Vel3' ,'Thickness3', 'SmbMassBalance3' ,'BasalforcingsFloatingiceMeltingRate3', 'BasalforcingsSpatialDeepwaterMeltingRate3',...
+   };
+field_tolerances={...
+   1e-11,1e-11,2e-11,1e-11,1e-10,1e-9,1e-10,...
+   1e-11,1e-11,2e-11,9e-11,1e-10,1e-9,1e-10,...
+   2e-10,2e-10,2e-10,1e-10,1e-10,1e-9,1e-10,...
+   };
+field_values={...
+   (md.results.TransientSolution(1).Vx),...
+   (md.results.TransientSolution(1).Vy),...
+   (md.results.TransientSolution(1).Vel),...
+   (md.results.TransientSolution(1).Thickness),...
+   (md.results.TransientSolution(1).SmbMassBalance),...
+   (md.results.TransientSolution(1).BasalforcingsFloatingiceMeltingRate),...
+   (md.results.TransientSolution(1).BasalforcingsSpatialDeepwaterMeltingRate),...
+   (md.results.TransientSolution(5).Vx),...
+   (md.results.TransientSolution(5).Vy),...
+   (md.results.TransientSolution(5).Vel),...
+   (md.results.TransientSolution(5).Thickness),...
+   (md.results.TransientSolution(5).SmbMassBalance),...
+   (md.results.TransientSolution(5).BasalforcingsFloatingiceMeltingRate),...
+   (md.results.TransientSolution(5).BasalforcingsSpatialDeepwaterMeltingRate),...
+	(md.results.TransientSolution(10).Vx),...
+	(md.results.TransientSolution(10).Vy),...
+	(md.results.TransientSolution(10).Vel),...
+	(md.results.TransientSolution(10).Thickness),...
+   (md.results.TransientSolution(10).SmbMassBalance),...
+	(md.results.TransientSolution(10).BasalforcingsFloatingiceMeltingRate),...
+   (md.results.TransientSolution(10).BasalforcingsSpatialDeepwaterMeltingRate),...
+	};
Index: /issm/trunk/test/NightlyRun/test546.py
===================================================================
--- /issm/trunk/test/NightlyRun/test546.py	(revision 28013)
+++ /issm/trunk/test/NightlyRun/test546.py	(revision 28013)
@@ -0,0 +1,156 @@
+#Test Name: PigTranARMAandStochasticforcings
+import numpy as np
+from linearbasalforcingsarma import linearbasalforcingsarma
+from SMBarma import SMBarma
+from stochasticforcing import stochasticforcing
+from socket import gethostname
+from model import *
+from parameterize import parameterize
+from setflowequation import setflowequation
+from setmask import setmask
+from solve import solve
+from triangle import triangle
+
+
+md = triangle(model(), '../Exp/Pig.exp', 8000)
+md = setmask(md, '../Exp/PigShelves.exp', '../Exp/PigIslands.exp')
+md = parameterize(md, '../Par/Pig.py')
+md = setflowequation(md, 'SSA', 'all')
+md.timestepping.start_time = 0
+md.timestepping.time_step = 1
+md.timestepping.final_time = 10
+
+# Basin separation
+idb = np.zeros((md.mesh.numberofelements,))
+iid1 = np.where(md.mesh.x >= -1.6e6)[0]
+for ii in range(md.mesh.numberofelements):
+    for vertex in range(3):
+        if md.mesh.elements[ii][vertex] - 1 in iid1:  # one vertex in basin 1; NOTE: offset because of 1-based vertex indexing
+            idb[ii] = 1
+    if idb[ii] == 0:  # no vertex was found in basin 1
+        for vertex in range(3):
+            idb[ii] = 2
+nb_bas = 2
+
+#SMB
+numparams  = 1;
+numbreaks  = 0;
+intercept     = np.array([[0.5],[0.01]])
+polynomialparams = np.copy(intercept)
+datebreaks = np.nan
+md.smb = SMBarma()
+md.smb.num_basins = nb_bas  # number of basins
+md.smb.basin_id = idb  # prescribe basin ID number to elements;
+md.smb.num_params       = 1*numparams
+md.smb.num_breaks       = 1*numbreaks
+md.smb.polynomialparams = 1*polynomialparams
+md.smb.datebreaks       = 1*datebreaks
+md.smb.ar_order = 4
+md.smb.ma_order = 4
+md.smb.arma_timestep = 2.0  #timestep of the ARMA model [yr]
+md.smb.arlag_coefs = np.array([[0.2,0.1,0.05,0.01],[0.4,0.2,-0.2,0.1]])
+md.smb.malag_coefs = np.array([[0.1,0.1,0.2,0.3],[0.5,0.8,1.3,2.4]])
+
+#Calving
+md.mask.ice_levelset = 1e4*(md.mask.ice_levelset + 0.5)
+md.calving.calvingrate = 0.1*np.ones((md.mesh.numberofvertices,))
+md.levelset.spclevelset = np.full((md.mesh.numberofvertices,), np.nan)
+md.levelset.migration_max = 10.0
+md.frontalforcings.meltingrate = np.zeros((md.mesh.numberofvertices,))
+
+#Basal forcing implementation
+numparams = 2
+numbreaks = 1
+intercept = np.array([[3.0,4.0],[1.0,0.5]])
+trendlin  = np.array([[0.0,0.1],[0,0]])
+polynomialparams = np.transpose(np.stack((intercept,trendlin)),(1,2,0))
+datebreaks = np.array([[6],[7]])
+
+md.basalforcings = linearbasalforcingsarma()
+md.basalforcings.num_basins = nb_bas
+md.basalforcings.basin_id  = idb
+md.basalforcings.const = np.array([[1.0, 2.50]])  # intercept values of DeepwaterMelt in basins [m/yr]
+md.basalforcings.trend  = np.array([[0.2, 0.01]])  # trend values of DeepwaterMelt in basins [m/yr^2]
+md.basalforcings.arma_initialtime = md.timestepping.start_time  # initial time in the ARMA model parameterization [yr]
+md.basalforcings.ar_order  = 1
+md.basalforcings.ma_order  = 1
+md.basalforcings.polynomialparams = 1*polynomialparams;
+md.basalforcings.datebreaks       = 1*datebreaks;
+md.basalforcings.num_params       = 1*numparams
+md.basalforcings.num_breaks       = 1*numbreaks
+md.basalforcings.arma_timestep  = 1.0  # timestep of the ARMA model [yr]
+md.basalforcings.arlag_coefs  = np.array([[0.0], [0.1]])  # autoregressive parameters
+md.basalforcings.malag_coefs  = np.array([[0.55], [0.34]])  # moving-average parameters
+md.basalforcings.deepwater_elevation = np.array([[-1000, -1520]])
+md.basalforcings.upperwater_elevation = np.array([[0, -50]])
+md.basalforcings.upperwater_melting_rate = np.array([[0,0]])
+md.basalforcings.groundedice_melting_rate = np.zeros((md.mesh.numberofvertices,))
+
+#Covariance matrices
+sdvsmb  = np.array([1,1])
+sdvclv  = np.array([0.1,0.01])
+sdvdwm  = np.array([300,300])
+vecsdv  = np.concatenate((sdvsmb,sdvclv,sdvdwm))
+corrmat = np.array([[1.0, 0., 0., 0., 0., 0.],
+                    [0., 1.0, 0., 0., 0., 0.],
+                    [0., 0., 1.0, 0.4, 0.1, 0.1],
+                    [0., 0., 0.4, 1.0, 0.1, 0.1],
+                    [0., 0., 0.1, 0.1, 1.0, 0.3],
+                    [0., 0., 0.1, 0.1, 0.3, 1.0]])
+covglob0 = np.diag(vecsdv) @ corrmat @ np.diag(vecsdv)
+covglob1 = 2*covglob0
+multcov  = np.stack((covglob0,covglob1),axis=2) 
+tmcov    = np.array([[0,5]])
+
+#Stochastic forcing
+md.stochasticforcing.isstochasticforcing = 1
+md.stochasticforcing.fields = ['SMBarma', 'DefaultCalving', 'BasalforcingsDeepwaterMeltingRatearma']
+md.stochasticforcing.defaultdimension = 2
+md.stochasticforcing.default_id = idb
+md.stochasticforcing.covariance = multcov  # global covariance among- and between-fields
+md.stochasticforcing.timecovariance = tmcov  #simulation times when covariance matrix switches 
+md.stochasticforcing.randomflag = 0  # determines true/false randomness
+
+md.transient.ismovingfront = 1
+md.transient.requested_outputs = ['default', 'SmbMassBalance', 'BasalforcingsFloatingiceMeltingRate', 'BasalforcingsSpatialDeepwaterMeltingRate']
+md.transient.isstressbalance = 1
+md.transient.ismasstransport = 1
+md.transient.issmb = 1
+md.transient.isthermal = 0
+md.transient.isgroundingline = 1
+
+md.cluster = generic('name', gethostname(), 'np', 2)
+md = solve(md, 'Transient')
+
+# Fields and tolerances to track changes
+field_names = [
+    'Vx1', 'Vy1', 'Vel1', 'Thickness1', 'SmbMassBalance1', 'BasalforcingsFloatingiceMeltingRate1', 'BasalforcingsSpatialDeepwaterMeltingRate1',
+    'Vx5', 'Vy5', 'Vel5', 'Thickness5', 'SmbMassBalance5', 'BasalforcingsFloatingiceMeltingRate5', 'BasalforcingsSpatialDeepwaterMeltingRate5',
+    'Vx10', 'Vy10', 'Vel10', 'Thickness10', 'SmbMassBalance10', 'BasalforcingsFloatingiceMeltingRate10', 'BasalforcingsSpatialDeepwaterMeltingRate10']
+
+field_tolerances = [
+    1e-11, 1e-11, 2e-11, 1e-11, 1e10, 1e-9, 1e-10,
+    1e-11, 1e-11, 2e-11, 9e-11, 1e10, 1e-9, 1e-10,
+    2e-10, 2e-10, 2e-10, 1e-10, 1e10, 1e-9, 1e-10]
+field_values = [
+    md.results.TransientSolution[0].Vx,
+    md.results.TransientSolution[0].Vy,
+    md.results.TransientSolution[0].Vel,
+    md.results.TransientSolution[0].Thickness,
+    md.results.TransientSolution[0].SmbMassBalance,
+    md.results.TransientSolution[0].BasalforcingsFloatingiceMeltingRate,
+    md.results.TransientSolution[0].BasalforcingsSpatialDeepwaterMeltingRate,
+    md.results.TransientSolution[4].Vx,
+    md.results.TransientSolution[4].Vy,
+    md.results.TransientSolution[4].Vel,
+    md.results.TransientSolution[4].Thickness,
+    md.results.TransientSolution[4].SmbMassBalance,
+    md.results.TransientSolution[4].BasalforcingsFloatingiceMeltingRate,
+    md.results.TransientSolution[4].BasalforcingsSpatialDeepwaterMeltingRate,
+    md.results.TransientSolution[9].Vx,
+    md.results.TransientSolution[9].Vy,
+    md.results.TransientSolution[9].Vel,
+    md.results.TransientSolution[9].Thickness,
+    md.results.TransientSolution[9].SmbMassBalance,
+    md.results.TransientSolution[9].BasalforcingsFloatingiceMeltingRate,
+    md.results.TransientSolution[9].BasalforcingsSpatialDeepwaterMeltingRate]
Index: /issm/trunk/test/NightlyRun/test622.m
===================================================================
--- /issm/trunk/test/NightlyRun/test622.m	(revision 28013)
+++ /issm/trunk/test/NightlyRun/test622.m	(revision 28013)
@@ -0,0 +1,111 @@
+%Test Name:79NorthHydrologyArmapw 
+md=triangle(model(),'../Exp/79North.exp',6000.);
+md=setmask(md,'../Exp/79NorthShelf.exp','');
+md=parameterize(md,'../Par/79North.par');
+md=setflowequation(md,'SSA','all');
+
+%Default friction
+md.friction             = friction();
+md.friction.coefficient = 30*ones(md.mesh.numberofvertices,1);
+md.friction.p           = 1*ones(md.mesh.numberofelements,1);
+md.friction.q           = 1*ones(md.mesh.numberofelements,1);
+
+% Basin separation default
+idb_df = zeros(md.mesh.numberofelements,1);
+iid1   = find(md.mesh.y<=-1.08e6);
+for ii=1:md.mesh.numberofelements
+    for vertex=1:3
+        if any(iid1==md.mesh.elements(ii,vertex)) %one vertex in basin 1
+            idb_df(ii) = 1;
+        end
+    end
+    if idb_df(ii)==0 %no vertex was found in basin 1
+        idb_df(ii) = 2;
+    end
+end
+%%% Covariance matrix %%%
+covPw  = [1e9,0;0,1e9];
+covSMB = [0.1,0;0,0.1];
+covGlob = blkdiag(covPw,covSMB);
+
+%%% Hydrology scheme %%%
+md.hydrology             = hydrologyarmapw();
+md.hydrology.num_basins            = 2;
+md.hydrology.basin_id              = idb_df;
+md.hydrology.monthlyfactors        = 1*ones(md.hydrology.num_basins,12);
+md.hydrology.monthlyfactors(:,1:3) = 0;
+md.hydrology.num_params            = 2; %number of parameters in the polynomial
+md.hydrology.num_breaks            = 2; %number of breakpoints
+termconst                          = [0.5*1e6,0.1*1e6,0.5e6;
+                                      0.5*1e6,0.1*1e6,0.5e6];
+termtrend                          = [1*1e5,0,0.;
+                                      0,1*1e5,0];
+md.hydrology.polynomialparams      = cat(3,termconst,termtrend);
+md.hydrology.datebreaks            = [20,40;20,40];
+md.hydrology.arma_timestep         = 1;
+md.hydrology.ar_order              = 1;
+md.hydrology.ma_order              = 1;
+md.hydrology.arlag_coefs           = [0.98;0.98];
+md.hydrology.malag_coefs           = [0;0];
+
+% SMB
+md.smb = SMBarma();
+md.smb.num_basins            = 2;
+md.smb.basin_id              = idb_df;
+md.smb.num_breaks            = 0;
+md.smb.num_params            = 1;
+md.smb.polynomialparams      = 0*[0.5;0.2];
+md.smb.ar_order              = 1;
+md.smb.ma_order              = 1;
+md.smb.arlag_coefs           = [0;0];
+md.smb.malag_coefs           = [0;0];
+md.smb.arma_timestep         = 1.0;
+
+%%% Stochastic forcing %%%
+md.stochasticforcing.isstochasticforcing = 1;
+md.stochasticforcing.fields              = [{'FrictionWaterPressure'},{'SMBarma'}];
+md.stochasticforcing.defaultdimension    = 2;
+md.stochasticforcing.default_id          = idb_df;
+md.stochasticforcing.covariance          = covGlob; %global covariance
+md.stochasticforcing.stochastictimestep  = 1; %time step of stochastic forcing
+md.stochasticforcing.randomflag          = 0; %determines true/false randomness
+
+md.transient.issmb              = 1;
+md.transient.ismasstransport    = 1;
+md.transient.isstressbalance    = 1;
+md.transient.isthermal          = 0;
+md.transient.isgroundingline    = 0;
+md.transient.ishydrology        = 1;
+
+md.transient.requested_outputs = {'default','SmbMassBalance','FrictionWaterPressure'};
+md.timestepping.start_time = 0;
+md.timestepping.time_step  = 1.0/12;
+md.timestepping.final_time = 2;
+md.cluster=generic('name',oshostname(),'np',3);
+md=solve(md,'Transient');
+
+%Fields and tolerances to track changes
+field_names      = {'Vel1','Thickness1','SmbMassBalance1','FrictionWaterPressure1',...
+                    'Vel12','Thickness12','SmbMassBalance12','FrictionWaterPressure12',...
+                    'Vel24','Thickness24','SmbMassBalance24','FrictionWaterPressure24'};
+field_tolerances={2e-10,2e-10,2e-10,2e-10,...
+                  4e-10,4e-10,4e-10,4e-10,...
+                  8e-10,8e-10,8e-10,8e-10};
+              
+field_values={...
+    (md.results.TransientSolution(1).Vel),...
+    (md.results.TransientSolution(1).Thickness),...
+    (md.results.TransientSolution(1).SmbMassBalance),...
+    (md.results.TransientSolution(1).FrictionWaterPressure),...
+    (md.results.TransientSolution(12).Vel),...
+    (md.results.TransientSolution(12).Thickness),...
+    (md.results.TransientSolution(12).SmbMassBalance),...
+    (md.results.TransientSolution(12).FrictionWaterPressure),...
+    (md.results.TransientSolution(24).Vel),...
+    (md.results.TransientSolution(24).Thickness),...
+    (md.results.TransientSolution(24).SmbMassBalance),...
+    (md.results.TransientSolution(24).FrictionWaterPressure),...
+    };
+
+
+
Index: /issm/trunk/test/NightlyRun/test622.py
===================================================================
--- /issm/trunk/test/NightlyRun/test622.py	(revision 28013)
+++ /issm/trunk/test/NightlyRun/test622.py	(revision 28013)
@@ -0,0 +1,112 @@
+#Test Name:79NorthHydrologyArmapw
+import numpy as np
+
+from socket import gethostname
+from model import *
+from parameterize import *
+from setflowequation import *
+from setmask import *
+from solve import *
+from triangle import *
+from SMBarma import SMBarma
+from hydrologyarmapw import hydrologyarmapw
+
+md = triangle(model(), '../Exp/79North.exp', 6000)
+md = setmask(md, '../Exp/79NorthShelf.exp', '')
+md = parameterize(md, '../Par/79North.py')
+md = setflowequation(md, 'SSA', 'all')
+
+# Default friction
+md.friction = friction()
+md.friction.coefficient = 30 * np.ones(md.mesh.numberofvertices)
+md.friction.p = np.ones((md.mesh.numberofelements))
+md.friction.q = np.ones((md.mesh.numberofelements))
+
+# Basin separation default
+idb_df = np.zeros((md.mesh.numberofelements))
+iid1 = np.where(md.mesh.y <= -1.08e6)[0]
+for ii in range(md.mesh.numberofelements):
+    for vertex in range(3):
+        if md.mesh.elements[ii][vertex] - 1 in iid1:  # one vertex in basin 1; NOTE: offset because of 1-based vertex indexing
+            idb_df[ii] = 1
+    if idb_df[ii] == 0: # no vertex was found in basin 1
+        for vertex in range(3):
+            idb_df[ii] = 2
+# Covariance matrix
+covGlob = np.array([[1e9,0,0,0],[0,1e9,0,0],[0,0,0.1,0],[0,0,0,0.1]])
+
+# Hydrology scheme
+md.hydrology                       = hydrologyarmapw()
+md.hydrology.num_basins            = 2
+md.hydrology.basin_id              = np.copy(idb_df).astype(int)
+md.hydrology.monthlyfactors        = 1 * np.ones((md.hydrology.num_basins,12));
+md.hydrology.monthlyfactors[:,0:3] = 0
+md.hydrology.num_params            = 2 # number of parameters in the polynomial
+md.hydrology.num_breaks            = 2 # number of breakpoints
+termconst                          = np.array([[0.5 * 1e6, 0.1 * 1e6, 0.5e6],[0.5 * 1e6, 0.1 * 1e6, 0.5e6]])
+termtrend                          = np.array([[1 * 1e5, 0, 0],[0, 1 * 1e5, 0]])
+md.hydrology.polynomialparams      = np.transpose(np.stack((termconst, termtrend)), (1, 2, 0))
+md.hydrology.datebreaks            = np.array([[20, 40], [20, 40]])
+md.hydrology.arma_timestep         = 1
+md.hydrology.ar_order              = 1
+md.hydrology.ma_order              = 1
+md.hydrology.arlag_coefs           = np.array([[0.98], [0.98]])
+md.hydrology.malag_coefs           = np.array([[0], [0]])
+
+# SMB
+md.smb                       = SMBarma()
+md.smb.num_basins            = 2
+md.smb.basin_id              = np.copy(idb_df)
+md.smb.num_breaks            = 0
+md.smb.num_params            = 1
+md.smb.polynomialparams      = 0 * np.array([[0.5], [0.2]])
+md.smb.ar_order              = 1
+md.smb.ma_order              = 1
+md.smb.arlag_coefs           = np.array([[0], [0]])
+md.smb.malag_coefs           = np.array([[0], [0]])
+md.smb.arma_timestep         = 1.0
+
+# Stochastic forcing
+md.stochasticforcing.isstochasticforcing = 1
+md.stochasticforcing.fields = ['FrictionWaterPressure','SMBarma']
+md.stochasticforcing.defaultdimension = 2
+md.stochasticforcing.default_id = idb_df
+md.stochasticforcing.covariance = covGlob # global covariance
+md.stochasticforcing.stochastictimestep  = 1; # time step of stochastic forcing
+md.stochasticforcing.randomflag = 0  # determines true/false randomness
+
+md.transient.issmb = 1
+md.transient.ismasstransport = 1
+md.transient.isstressbalance = 1
+md.transient.isthermal = 0
+md.transient.isgroundingline = 0
+md.transient.ishydrology = 1
+
+md.transient.requested_outputs = ['default','SmbMassBalance', 'FrictionWaterPressure']
+md.timestepping.start_time = 0
+md.timestepping.time_step = 1.0/12
+md.timestepping.final_time = 2
+md.cluster = generic('name', gethostname(), 'np', 3)
+md = solve(md, 'Transient')
+
+# Fields and tolerances to track changes
+field_names = ['Vel1',  'Thickness1', 'SmbMassBalance1',  'FrictionWaterPressure1',
+               'Vel12', 'Thickness12','SmbMassBalance12', 'FrictionWaterPressure12',
+               'Vel24', 'Thickness24','SmbMassBalance24', 'FrictionWaterPressure24']
+
+field_tolerances = [2e-10, 2e-10, 2e-10, 2e-10,
+                    4e-10, 4e-10, 4e-10, 4e-10,
+                    8e-10, 8e-10, 8e-10, 8e-10]
+
+field_values = [md.results.TransientSolution[0].Vel,
+                md.results.TransientSolution[0].Thickness,
+                md.results.TransientSolution[0].SmbMassBalance,
+                md.results.TransientSolution[0].FrictionWaterPressure,
+                md.results.TransientSolution[11].Vel,
+                md.results.TransientSolution[11].Thickness,
+                md.results.TransientSolution[11].SmbMassBalance,
+                md.results.TransientSolution[11].FrictionWaterPressure,
+                md.results.TransientSolution[23].Vel,
+                md.results.TransientSolution[23].Thickness,
+                md.results.TransientSolution[23].SmbMassBalance,
+                md.results.TransientSolution[23].FrictionWaterPressure]
Index: /issm/trunk/test/NightlyRun/test906.m
===================================================================
--- /issm/trunk/test/NightlyRun/test906.m	(revision 28012)
+++ /issm/trunk/test/NightlyRun/test906.m	(revision 28013)
@@ -85,8 +85,2 @@
 	      md.results.TransientSolution(3).EplHead,...
 	      md.results.TransientSolution(3).SedimentHeadResidual};
-
-for i=1:3
-	disp(md.results.TransientSolution(i).EplHead(1));
-	disp(md.results.TransientSolution(i).HydrologySubTime);
-	disp(md.results.TransientSolution(i).HydrologySubsteps);
-end
