Index: /issm/trunk-jpl/src/m/contrib/defleurian/netCDF/export_netCDF.m
===================================================================
--- /issm/trunk-jpl/src/m/contrib/defleurian/netCDF/export_netCDF.m	(revision 26856)
+++ /issm/trunk-jpl/src/m/contrib/defleurian/netCDF/export_netCDF.m	(revision 26857)
@@ -1,5 +1,5 @@
 function export_netCDF(md,filename)
 %verbosity of the code, 0 is no messages, 5 is chatty
-	verbose = 5;
+	verbose = 0;
 	if exist(filename),
 		delete(filename)
@@ -165,10 +165,12 @@
 
 			elseif isa(Var,'struct')  % structures need special treatment
+
 				if strcmp(groups{i}, 'results'),
 					klasstring='results.results';
 					netcdf.putAtt(groupID,netcdf.getConstant('NC_GLOBAL'),'classtype',klasstring);
+					Listsize= length(md.(groups{i}).(fields{j}));
 					subgroupname=fields{j};
 					subgroupID=netcdf.defGrp(groupID,subgroupname);
-					klasstring = 'results.solutionstep';
+					klasstring='results.solutionstep';
 					netcdf.putAtt(subgroupID,netcdf.getConstant('NC_GLOBAL'),'classtype',klasstring);
 					subfields=fieldnames(md.(groups{i}).(fields{j}));
@@ -178,11 +180,21 @@
 					end
 					for k=1:length(subfields),
-						if verbose > 4,
-							disp(sprintf("=@@=creating var for %s.%s.%s",groups{i}, fields{j}, subfields{k}));
-						end
-						Var = md.(groups{i}).(fields{j}).(subfields{k});
-						[DimSize,DimValue,varid]=CreateVar(ncid,Var,subgroupID,subfields{k},DimSize,DimValue);
-						if ~isempty(varid),
-							FillVar(Var,subgroupID,varid);
+						if ~ismember(subfields{k}, {'errlog', 'outlog'})
+							StackedVar=restable();
+							for l=1:Listsize
+								Var = md.(groups{i}).(fields{j})(l).(subfields{k});
+								lastindex=l;
+								StackedVar=StackedVar.update(Var);
+							end
+							if verbose > 4,
+								disp(sprintf("=@@=creating var for %s.%s.%s",groups{i}, fields{j}, subfields{k}));
+							end
+							StackedVar=StackedVar.finalize(lastindex);
+							%StackedVar=StackedVar';  %transposing to get time as first dimension
+							[DimSize,DimValue,varid]=CreateVar(ncid,StackedVar,subgroupID,subfields{k},DimSize,DimValue);
+							if ~isempty(varid),
+								FillVar(StackedVar,subgroupID,varid);
+							end
+
 						end
 					end
@@ -323,4 +335,7 @@
 			[dims,DimSize,DimValue]=GetDims(ncid,Var,DimSize,DimValue);
 			varid = netcdf.defVar(groupID,field,'NC_CHAR',dims);
+			if numel(Var)>1
+				netcdf.defVarDeflate(groupID,varid,true,true,4);
+			end
 		end
 
@@ -329,12 +344,33 @@
 			[dims,DimSize,DimValue]=GetDims(ncid,Var,DimSize,DimValue);
 			varid = netcdf.defVar(groupID,field,'NC_INT64',dims);
+			if numel(Var)>1
+				netcdf.defVarDeflate(groupID,varid,true,true,4);
+			end
 		else
 			[dims,DimSize,DimValue]=GetDims(ncid,Var,DimSize,DimValue);
 			varid = netcdf.defVar(groupID,field,'NC_DOUBLE',dims);
+			if numel(Var)>1
+				netcdf.defVarDeflate(groupID,varid,true,true,4);
+			end
 		end
 	elseif isa(Var,'cell'),
-		[dims,DimSize,DimValue]=GetDims(ncid,Var,DimSize,DimValue);
-		varid = netcdf.defVar(groupID,field,'NC_CHAR',dims);
-
+		% cells can be a range of things, what are we dealing with here
+		if isempty(Var),
+			netcdf.putAtt(groupID,netcdf.getConstant('NC_GLOBAL'),field,'emptycell');
+			varid=[];
+		else
+			[dims,DimSize,DimValue]=GetDims(ncid,Var,DimSize,DimValue);
+			if isa(Var{1}, 'double'),
+				varid = netcdf.defVar(groupID,field,'NC_DOUBLE',dims);
+				if numel(Var)>1
+					netcdf.defVarDeflate(groupID,varid,true,true,4);
+				end
+			else
+				varid = netcdf.defVar(groupID,field,'NC_CHAR',dims);
+				if numel(Var)>1
+					netcdf.defVarDeflate(groupID,varid,true,true,4);
+				end
+			end
+		end
 	elseif isa(Var,'struct'),
 		if isempty(fieldnames(Var)),
@@ -345,4 +381,7 @@
 			[dims,DimSize,DimValue]=GetDims(ncid,Var,DimSize,DimValue);
 			varid = netcdf.defVar(groupID,field,'NC_CHAR',dims);
+			if numel(Var)>1
+				netcdf.defVarDeflate(groupID,varid,true,true,4);
+			end
 		end
 	else
@@ -370,25 +409,30 @@
 	elseif isa(Var,'char'),  % at this point this should be a character array
 		netcdf.putVar(groupID,varid,Var);
-	elseif isa(Var,'cell'),
-		if ~isempty(Var),
-			if length(Var)==0,
-				netcdf.putVar(groupID,varid,0,9,'emptycell');
+	elseif isa(Var,'cell'),  % there can be a number of things in a cell array
+		for i=1:length(Var),
+			if isa(Var{i},'char')  %for characters we limit the size to 40 for now
+				if length(Var)>1,
+					count=[min(length(Var{i}),40), 1];
+					startpoint=[0 i-1];
+				else
+					count=min(length(Var{i}),40);
+					startpoint=0;
+				end
+
+				if length(Var{i})>40,
+					netcdf.putVar(groupID,varid,startpoint,count,Var{i}(1:40));
+					disp(sprintf('some variable have been truncated'));
+				else
+					netcdf.putVar(groupID,varid,startpoint,count,Var{i});
+				end
+			elseif isa(Var{i},'double')
+				startpoint=[i-1];
+				count=[1 length(Var{i}) ndims(Var{i})]
+				for j=1:ndims(Var{i}),
+					startpoint=[startpoint 0]
+				end
+				netcdf.putVar(groupID,varid,startpoint,count,Var{i});
 			else
-				for i=1:length(Var),
-					if length(Var)>1,
-						count=[min(length(Var{i}),40), 1];
-						startpoint=[0 i-1];
-					else
-						count=min(length(Var{i}),40);
-						startpoint=0;
-					end
-
-					if length(Var{i})>40,
-						netcdf.putVar(groupID,varid,startpoint,count,Var{i}(1:40));
-						disp(sprintf('some variable have been truncated'));
-					else
-						netcdf.putVar(groupID,varid,startpoint,count,Var{i});
-					end
-				end
+				disp(sprintf("WARNING: cell of class %s is not supported.",class(Var{i})))
 			end
 		end
@@ -433,18 +477,26 @@
 function [dims,DimSize,DimValue]=GetDims(ncid,Var,DimSize,DimValue)
 	dims=[];
-	% if isa(Var,'cell'),
-	% 	varsize=size(Var');
-	% else
-	if isa(Var,'struct')
+	celldims=[];
+	dim=ndims(Var);
+	if isa(Var,'struct'),
 		varsize=length(fieldnames(Var));
 	else
 		varsize=size(Var);
-	end
-
-	% dim=sum(varsize>1);
-	dim=ndims(Var);
+		if isa(Var, 'cell')
+			%we add the dimension of the cells themselves,
+			%that will most probably fail if cells have different sizes
+			for i=1:dim,
+				newdim=size(Var{i});
+				if ~ismember(newdim, celldims),
+					celldims=[celldims newdim];
+				end
+			end
+		end
+	end
+	varsize=[varsize celldims];
+	alldim=length(varsize);
 	if dim>0,
-		for i=1:dim,
-			if size(Var, i) >1
+		for i=1:alldim,
+			if size(Var, i)>1 || i>dim,  %we skip dimensions with zero length but want to add dimensions from cells
 				indsize=find(varsize(i)==DimValue);
 				if length(indsize)>0
@@ -460,12 +512,8 @@
 		end
 	end
-	%if we have an empty  cell variable we need to add a stringlength for the no data
-	% if isa(Var,'cell'),
-	% 	if isempty(dims)
-	% 		dims=[DimSize(4).index];
-	% 	else
-	% 		dims=[DimSize(4).index dims];
-	% 	end
-	% end
+	if isa(Var, 'cell') && isa(Var{1}, 'char'),
+		%if we have a cell variable with strings we need to add a stringlength
+		dims=[dims DimSize(4).index];
+	end
 	% struct also need an extra dimension 2, but only if non empty
 	if isa(Var,'struct'),
Index: /issm/trunk-jpl/src/m/contrib/defleurian/netCDF/export_netCDF.py
===================================================================
--- /issm/trunk-jpl/src/m/contrib/defleurian/netCDF/export_netCDF.py	(revision 26856)
+++ /issm/trunk-jpl/src/m/contrib/defleurian/netCDF/export_netCDF.py	(revision 26857)
@@ -13,5 +13,5 @@
 
     def update(self, stepvar):
-        #if we have a scalar we just add it to the en
+        #if we have a scalar we just add it to the end
         #we save the size of the current step for further treatment
         if len(np.shape(stepvar)) == 0:
@@ -21,35 +21,38 @@
         #we save the size of the current step for further treatment
         else:
-            self.sizes.append(len(stepvar))
-            for r in stepvar:
-                self.data.append(r)
+            self.sizes.append([np.shape(stepvar)])
+            stackdat = np.squeeze(stepvar.flatten())
+            self.data = np.hstack((self.data, stackdat))
 
     def finalize(self, rows):
         #we have more scalars than steps, so we have an array
         if len(self.data) > rows:
+            datasize = np.squeeze(self.sizes)
+            maxsize = []
+
+            try:
+                dims = np.arange(np.shape(datasize)[1])
+                for dim in dims:
+                    maxsize.append(np.nanmax(datasize[:, dim]))
+            except IndexError:
+                maxsize.append(np.nanmax(datasize[:]))
+
+            findim = np.insert(maxsize, 0, rows)
+
             #first check if all steps are the same size
-            SameSize = np.sum(np.asarray(self.sizes) - self.sizes[0]) == 0
+            SameSize = np.sum(np.abs(datasize - datasize[0])) == 0
             if SameSize:
                 #same size for all steps, just reshape
-                return np.reshape(self.data, newshape=(rows, int(len(self.data) / rows)))
+                return np.reshape(self.data, newshape=(findim))
             else:
                 #different sizes at each steps, first create a table big enough for the biggest step
                 startpoint = 0
-                datadim = len(np.shape(self.data))
-                if datadim == 1:
-                    outdat = np.nan * np.ones((rows, np.nanmax(self.sizes)))
-                    for step in range(rows):
-                        curlen = self.sizes[step]
-                        outdat[step, :curlen] = self.data[startpoint: startpoint + curlen]
-                        startpoint += curlen
-                elif datadim == 2:
-                    outdat = np.nan * np.ones((rows, np.nanmax(self.sizes), np.shape(self.data)[1]))
-                    for step in range(rows):
-                        curlen = self.sizes[step]
-                        outdat[step, :curlen, :] = self.data[startpoint: startpoint + curlen]
-                        startpoint += curlen
-
-                else:
-                    print("ERROR, reult treatment cant cope with dimensions above 2")
+                outdat = np.nan * np.ones(findim)
+                for step in range(rows):
+                    slicer = [slice(0, d) for d in datasize[step, :]]
+                    slicer = np.insert(slicer, 0, step)
+                    curlen = int(np.prod(datasize[step, :]))
+                    outdat[tuple(slicer)] = np.reshape(self.data[startpoint:startpoint + curlen], newshape=(datasize[step, :]))
+                    startpoint += curlen
                 return outdat
         #as much scalars as stpes (or less) so just one value per step
@@ -60,5 +63,5 @@
 def export_netCDF(md, filename):  # {{{
     #verbosity of the code, 0 is no messages, 5 is chatty
-    verbose = 5
+    verbose = 0
     if path.exists(filename):
         print('File {} allready exist'.format(filename))
@@ -308,4 +311,5 @@
         val_type = type(var)
 
+    print(val_type)
     # grab dimension
     if val_type in [collections.OrderedDict, dict]:
Index: /issm/trunk-jpl/src/m/io/loadmodel.py
===================================================================
--- /issm/trunk-jpl/src/m/io/loadmodel.py	(revision 26856)
+++ /issm/trunk-jpl/src/m/io/loadmodel.py	(revision 26857)
@@ -9,5 +9,5 @@
 
 
-def loadmodel(path, onlylast=False):
+def loadmodel(path, singletime=None):
     """LOADMODEL - load a model
 
@@ -31,5 +31,5 @@
     #       try:
     #recover model on file and name it md
-    struc = loadvars(path, onlylast=onlylast)
+    struc = loadvars(path, singletime=singletime)
     name = [key for key in list(struc.keys())]
     if len(name) > 1:
Index: /issm/trunk-jpl/src/m/io/loadvars.py
===================================================================
--- /issm/trunk-jpl/src/m/io/loadvars.py	(revision 26856)
+++ /issm/trunk-jpl/src/m/io/loadvars.py	(revision 26857)
@@ -9,4 +9,5 @@
 from netCDF4 import Dataset, chartostring
 import numpy as np
+from importlib import import_module
 from model import *
 
@@ -30,5 +31,5 @@
     filename = ''
     nvdict = {}
-    debug = False  #print messages if true
+    debug = False  # print messages if true
 
     if len(args) >= 1 and isinstance(args[0], str):
@@ -52,9 +53,9 @@
         raise TypeError("Unrecognized input arguments.")
 
-    onlylast = False
+    timeindex = False
 
     for key, value in kwargs.items():
-        if key == 'onlylast':
-            onlylast = value
+        if key == 'singletime':
+            timeindex = value
 
     if whichdb(filename):   #We used python pickle for the save
@@ -107,5 +108,5 @@
                     else:
                         #Time dimension is in all the variables so we take that as stepnumber for the results
-                        if onlylast:   #we load only the last result to save on time and memory
+                        if timeindex:   #we load only the last result to save on time and memory
                             nvdict['md'].__dict__[classtree[mod][0]].__dict__[classtree[mod][1]] = [getattr(classtype[mod][1], listtype)()]
                             Tree = nvdict['md'].__dict__[classtree[mod][0]].__dict__[classtree[mod][1]]
@@ -127,5 +128,5 @@
                         Tree = nvdict['md'].__dict__[classtree[mod][0]].__dict__[classtree[mod][1]]
                     else:
-                        if onlylast:   #we load only the last result to save on time and memory
+                        if timeindex:   #we load only the last result to save on time and memory
                             nvdict['md'].__dict__[classtree[mod][0]].__dict__[classtree[mod][1]] = [getattr(classtype[mod][1], listtype)()]
                             Tree = nvdict['md'].__dict__[classtree[mod][0]].__dict__[classtree[mod][1]]
@@ -155,4 +156,9 @@
                         print("WARNING: md.{}.{} is not initialized, hopefully that was done in the main group:".format(classtree[mod][0], classtree[mod][1]))
                     Tree = nvdict['md'].__dict__[classtree[mod][0]].__dict__[classtree[mod][1]]
+            elif classtype[mod][0] == 'SMBgemb.SMBgemb':
+                curclass = NCFile.groups[classtree[mod][0]]
+                modulename = split(r'\.', classtype[mod][0])[0]
+                nvdict['md'].__dict__[mod] = getattr(classtype[mod][1], modulename)(nvdict['md'].__dict__['mesh'], nvdict['md'].__dict__['geometry'])
+                Tree = nvdict['md'].__dict__[classtree[mod][0]]
             else:
                 curclass = NCFile.groups[classtree[mod][0]]
@@ -166,6 +172,9 @@
             #for i in range(0, max(1, len(curclass.groups))):
             if len(curclass.groups) > 0:  #that is presumably only for old style NC where each result step had its own group
-                if onlylast:
-                    groupclass = [curclass.groups[keylist[len(curclass.groups) - 1]]]
+                if timeindex:
+                    if timeindex < 0:
+                        groupclass = [curclass.groups[keylist[len(curclass.groups) - timeindex]]]
+                    else:
+                        groupclass = [curclass.groups[keylist[timeindex]]]
                 else:
                     groupclass = [curclass.groups[key] for key in keylist]
@@ -184,12 +193,17 @@
                         NewFormat = 'Time' in NCFile.dimensions
                         if type(Tree) == list:  # and NewFormat:
-                            if onlylast:
+                            if timeindex:
                                 if NewFormat:
                                     if vardim == 1:
-                                        Tree[0].__dict__[str(var)] = varval[-1].data
+                                        try:
+                                            Tree[0].__dict__[str(var)] = varval[timeindex].data
+                                        except IndexError:
+                                            print('WARNING: No data on index {} for {} reverting to last time.'.format(timeindex, str(var)))
+                                            Tree[0].__dict__[str(var)] = varval[-1].data
+
                                     elif vardim == 2:
-                                        Tree[0].__dict__[str(var)] = varval[-1, :].data
+                                        Tree[0].__dict__[str(var)] = varval[timeindex, :].data
                                     elif vardim == 3:
-                                        Tree[0].__dict__[str(var)] = varval[-1, :, :].data
+                                        Tree[0].__dict__[str(var)] = varval[timeindex, :, :].data
                                     else:
                                         print('table dimension greater than 3 not implemented yet')
@@ -198,5 +212,6 @@
                             else:
                                 if NewFormat:
-                                    incomplete = 'Time' not in varval.dimensions and NewFormat
+                                    print(varval.dimensions)
+                                    incomplete = 'Time' not in varval.dimensions
                                     if incomplete:
                                         chosendim = varval.dimensions[0]
@@ -224,5 +239,5 @@
                         else:
                             if vardim == 0:  #that is a scalar
-                                if str(varval[0]) == '' or str(varval[0]) == '--':  #no value
+                                if str(varval[0]) in['', '--', 'emptycell']:  #no value
                                     Tree.__dict__[str(var)] = []
                                 elif varval[0] == 'True':  #treatin bool
@@ -294,7 +309,4 @@
                         print("      ==> treating attribute {}".format(attr))
                     if attr != 'classtype':  #classtype is for treatment, don't get it back
-                        # attribute = str(attr).swapcase()  #there is a reason for swapcase, no sure what it isanymore
-                        # if attr == 'VARNAME':
-                        #     attribute = 'name'
                         if attr == 'varname':
                             attribute = 'name'
@@ -309,9 +321,12 @@
                                 Tree[0].__dict__[attribute] = str(listclass.getncattr(attr))
                         else:
-                            Tree.__dict__[attribute] = str(listclass.getncattr(attr))
                             if listclass.getncattr(attr) == 'True':
                                 Tree.__dict__[attribute] = True
                             elif listclass.getncattr(attr) == 'False':
                                 Tree.__dict__[attribute] = False
+                            elif listclass.getncattr(attr) == 'emptycell':
+                                Tree.__dict__[attribute] = []
+                            else:
+                                Tree.__dict__[attribute] = str(listclass.getncattr(attr))
                 # }}}
             # }}}
@@ -344,10 +359,10 @@
                     try:
                         modulename = split(r'\.', class_dict[classe][0])[0]
-                        class_dict[classe].append(__import__(modulename))
+                        class_dict[classe].append(import_module(modulename))
                     except ModuleNotFoundError:
                         #submodule probably has a different name
                         modulename = str(getattr(NCData.groups[group].groups[subgroup], 'classtype'))
                         print("WARNING importing {} rather than {}".format(modulename, class_dict[classe][0]))
-                        class_dict[classe].append(__import__(modulename))
+                        class_dict[classe].append(import_module(modulename))
                 class_tree[classe] = [group, subgroup]
         else:
@@ -360,5 +375,5 @@
                         print("WARNING: module {} does not exist anymore and is skipped".format(modulename))
                     else:
-                        class_dict[classe].append(__import__(modulename))
+                        class_dict[classe].append(import_module(modulename))
                         class_tree[classe] = [group, ]
             except AttributeError:
Index: /issm/trunk-jpl/test/NightlyRun/runme.m
===================================================================
--- /issm/trunk-jpl/test/NightlyRun/runme.m	(revision 26856)
+++ /issm/trunk-jpl/test/NightlyRun/runme.m	(revision 26857)
@@ -2,5 +2,5 @@
 %RUNME - test deck for ISSM nightly runs
 %
-%   In a test deck directory (for example, test/NightlyRun) the following 
+%   In a test deck directory (for example, test/NightlyRun) the following
 %   command will launch all existing tests,
 %
@@ -29,4 +29,5 @@
 %                      'update':   update the archive
 %                      'valgrind': check for memory leaks (default value of md.debug.valgrind needs to be changed manually)
+%                      'ncExport': export netCDF file
 %      'stoponerror'   1 or 0
 %
@@ -69,5 +70,5 @@
 %GET procedure {{{
 procedure=getfieldvalue(options,'procedure','check');
-if ~ismember(procedure,{'check','update','valgrind'})
+if ~ismember(procedure,{'check','update','valgrind','ncExport'})
 	disp('runme warning: procedure not supported, defaulting to test ''check''')
 	procedure='check';
@@ -217,4 +218,8 @@
 			end
 
+		%EXPORT to a netCDF file?
+		elseif strcmpi(procedure,'ncExport'),
+			export_netCDF(md, ['test' num2str(id) 'ma.nc'])
+
 		%ELSE: CHECK TEST
 		else,
Index: /issm/trunk-jpl/test/NightlyRun/runme.py
===================================================================
--- /issm/trunk-jpl/test/NightlyRun/runme.py	(revision 26856)
+++ /issm/trunk-jpl/test/NightlyRun/runme.py	(revision 26857)
@@ -10,5 +10,5 @@
 from glob import glob
 import os
-import re
+from re import search, split
 from sys import float_info
 from traceback import format_exc
@@ -26,4 +26,7 @@
 from IdToName import IdToName
 from parallelrange import parallelrange
+from loadmodel import loadmodel
+from solve import solve
+from importlib import import_module
 
 
@@ -31,5 +34,5 @@
     """RUNME - test deck for ISSM nightly runs
 
-    In a test deck directory (for example, test/NightlyRun) the following 
+    In a test deck directory (for example, test/NightlyRun) the following
     command will launch all existing tests,
 
@@ -41,8 +44,8 @@
 
     Options:
-        -i/--id         Followed by the list of ids or (parts of) test names 
+        -i/--id         Followed by the list of ids or (parts of) test names
                         requested
-        -e/--exclude    Ids or (parts of) test names to be excluded (same 
-                        format as id). Does nothing if 'id' is specified with 
+        -e/--exclude    Ids or (parts of) test names to be excluded (same
+                        format as id). Does nothing if 'id' is specified with
                         different values.
         -b/--benchmark  'all'           : (all of the tests)
@@ -60,4 +63,6 @@
         -p/--procedure  'check'         : run the test (default)
                         'update'        : update the archive
+                        'runFromNC'     : run from an existing nc file
+
 
     Usage:
@@ -80,8 +85,8 @@
 
     TODO:
-    - At '#disp test result', make sure precision of output matches that of 
+    - At '#disp test result', make sure precision of output matches that of
     MATLAB.
-    - Check for failures that do not raise exceptions (for example, 'Standard 
-    exception'; see also jenkins/jenkins.sh). These should be counted as 
+    - Check for failures that do not raise exceptions (for example, 'Standard
+    exception'; see also jenkins/jenkins.sh). These should be counted as
     failures.
     """
@@ -97,5 +102,5 @@
     # }}}
     #GET procedure {{{
-    if procedure not in ['check', 'update']:
+    if procedure not in ['check', 'update', 'runFromNC']:
         print(("runme warning: procedure '{}' not supported, defaulting to test 'check'.".format(procedure)))
         procedure = 'check'
@@ -112,5 +117,5 @@
     #GET ids  {{{
     flist = glob('test*.py')  #File name must start with 'test' and must end by '.py' and must be different than 'test.py'
-    list_ids = [int(re.search(r'\d+',file.split('.')[0]).group()) for file in flist if not file == 'test.py'] # Keep test id only (skip 'test' and '.py')
+    list_ids = [int(search(r'\d+',file.split('.')[0]).group()) for file in flist if not file == 'test.py'] # Keep test id only (skip 'test' and '.py')
 
     i1, i2 = parallelrange(rank, numprocs, len(list_ids))  #Get tests for this cpu only
@@ -165,5 +170,9 @@
             os.chdir(root)
             id_string = IdToName(id)
-            exec(compile(open('test{}.py'.format(id)).read(), 'test{}.py'.format(id), 'exec'), globals())
+            print(("----------------runing-----------------------"))
+            if procedure == 'runFromNC':
+                Tmod = import_module('test{}'.format(id))
+            else:
+                exec(compile(open('test{}.py'.format(id)).read(), 'test{}.py'.format(id), 'exec'), globals())
 
             #UPDATE ARCHIVE?
@@ -185,5 +194,53 @@
                     archwrite(archive_file, archive_name + '_field' + str(k + 1), field)
                 print(("File {} saved. \n".format(os.path.join('..', 'Archives', archive_name + '.arch'))))
-
+            elif procedure == 'runFromNC':
+                print(("----------------loadingNC-----------------------"))
+                mdl = loadmodel('test{}ma.nc'.format(id))
+                for key in mdl.results.__dict__.keys():
+                    if 'Solution' in key:
+                        solvetype = split('Solution', key)[0]
+                mdl.results = []
+                mdl = solve(mdl, solvetype)
+                for k, fieldname in enumerate(Tmod.field_names):
+                    try:
+                        if search(r'\d+$', fieldname):
+                            index = int(search(r'\d+$', fieldname).group()) - 1
+                            fieldname = fieldname[:search(r'\d+$', fieldname).start()]
+                        else:
+                            index = 0
+                        #Get field from nc run
+                        try:
+                            field = mdl.results.__dict__[solvetype + 'Solution'][index].__dict__[fieldname]
+                        except KeyError:
+                            print("WARNING: {} does not exist and checking will be skipped".format(fieldname))
+                            continue
+                            #Get reference from std run
+                        ref = Tmod.field_values[k]
+                        #Get tolerance
+                        tolerance = Tmod.field_tolerances[k]
+                        error_diff = np.amax(np.abs(ref - field), axis=0) / (np.amax(np.abs(ref), axis=0) + float_info.epsilon)
+                        if not np.isscalar(error_diff):
+                            error_diff = error_diff[0]
+
+                        #disp test result
+                        if (np.any(error_diff > tolerance) or np.isnan(error_diff)):
+                            print(('ERROR   difference: {:7.2g} > {:7.2g} test id: {} test name: {} field: {}'.format(error_diff, tolerance, id, id_string, fieldname)))
+                            errorcount += 1
+                            erroredtest_list.append(id)
+                        else:
+                            print(('SUCCESS difference: {:7.2g} < {:7.2g} test id: {} test name: {} field: {}'.format(error_diff, tolerance, id, id_string, fieldname)))
+
+                    except Exception as message:
+                        #something went wrong, print failure message:
+                        print((format_exc()))
+                        if output == 'nightly':
+                            fid = open(os.path.join(ISSM_DIR, 'nightlylog', 'pythonerror.log'), 'a')
+                            fid.write('%s' % message)
+                            fid.write('\n------------------------------------------------------------------\n')
+                            fid.close()
+                            print(('FAILURE difference: N/A test id: {} test name: {} field: {}'.format(id, id_string, fieldname)))
+                        else:
+                            print(('FAILURE difference: N/A test id: {} test name: {} field: {}'.format(id, id_string, fieldname)))
+                            raise RuntimeError(message)
             #ELSE: CHECK TEST
             else:
