Index: /issm/trunk-jpl/src/m/netCDF/export_netCDF.m
===================================================================
--- /issm/trunk-jpl/src/m/netCDF/export_netCDF.m	(revision 27793)
+++ /issm/trunk-jpl/src/m/netCDF/export_netCDF.m	(revision 27793)
@@ -0,0 +1,515 @@
+function export_netCDF(md,filename)
+%verbosity of the code, 0 is no messages, 5 is chatty
+	verbose = 5;
+	if exist(filename),
+		delete(filename)
+		% disp(sprintf('File %s already exists', filename));
+		% prompt = 'Give a new name or "delete" to replace: ';
+		% newname = input(prompt,'s');
+		% if strcmp(newname,'delete')
+		% 	delete(filename)
+		% else
+		% 	disp(sprintf('New file name is %s ', newname));
+		% 	filename=newname
+		% end
+	end
+	%open file and write description
+	mode = netcdf.getConstant('NC_NETCDF4');
+	mode = bitor(mode,netcdf.getConstant('NC_NOCLOBBER')); %NOCLOBBER to avoid overwrite
+	ncid = netcdf.create(filename,mode);
+	netcdf.putAtt(ncid,netcdf.getConstant('NC_GLOBAL'),'description',['Results for run ' md.miscellaneous.name]);
+	netcdf.putAtt(ncid,netcdf.getConstant('NC_GLOBAL'),'history',['Created ' datestr(now)]);
+
+	%gather geometry and timestepping as dimensions
+	if isempty(fieldnames(md.results)),
+		%results has no fields so there is no time dimension
+		Duration = 0;
+	else
+		resfields = fieldnames(md.results);
+		Duration = size(md.results.(resfields{1}),2);
+	end
+	if Duration>0,
+		StepNum = Duration;
+	else
+		StepNum=1;
+	end
+	DimSize(1).index=netcdf.defDim(ncid,'Time',StepNum);  %time is the first dimension
+	[DimSize(1).name,DimSize(1).value]=netcdf.inqDim(ncid,DimSize(1).index);
+	DimValue(1)=DimSize(1).value;
+	DimSize(2).index=netcdf.defDim(ncid,'UnLim',netcdf.getConstant('NC_UNLIMITED')); % we add an unlimited dimension if needed
+	[DimSize(2).name,DimSize(2).value]=netcdf.inqDim(ncid,DimSize(2).index);
+	DimValue(2)=DimSize(2).value;
+	% adding mesh related dimensions
+	dimlist=[2 40 md.mesh.numberofelements md.mesh.numberofvertices size(md.mesh.elements,2)];
+	dimnames=["DictDummy" "StringLength" "EltNum" "VertNum" "VertPerElt"];
+	if isprop(md.mesh, 'edges'),
+		dimlist(end+1)=md.mesh.numberofedges;
+		dimnames(end+1)="EdgeNum";
+	else
+		dimlist(end+1)=0;
+		dimnames(end+1)="EdgeNum";
+	end
+	if verbose > 0,
+		disp('===Creating dimensions ===');
+	end
+	%define netcdf dimensions
+	for i=1:length(dimlist)
+		% do not add the dimension if it exists already
+		if sum(dimlist(i) == DimValue) == 0
+			DimSize(i+2).index=netcdf.defDim(ncid,dimnames(i),dimlist(i));
+			[DimSize(i+2).name,DimSize(i+2).value]=netcdf.inqDim(ncid,DimSize(i+2).index);
+			DimValue(i+2)=DimSize(i+2).value;
+		end
+	end
+	issmclasses = fieldnames(md)';  %start from the model field names, ISSM class names are appended below
+	typelist={'half', 'single','double','int8','int16'...
+		  ,'int32','int64','uint8','uint16','uint32'...
+		  ,'uint64','logical','char','string'};  %all basic MATLAB types
+
+	for cl=1:length(issmclasses),
+		if isempty(md.(issmclasses{cl})),
+			disp(sprintf("md.%s is empty and will be left as default",issmclasses{cl}));
+		else
+			subclasses=fieldnames(md.(issmclasses{cl}))';
+			for sc=1:length(subclasses),
+				if sum(strcmp(class(md.(issmclasses{cl}).(subclasses{sc})), typelist)) == 0,
+					issmclasses = [issmclasses class(md.(issmclasses{cl}).(subclasses{sc}))];
+				end
+			end
+		end
+	end
+	%get all model classes and create respective groups
+	groups=fieldnames(md);
+	if verbose > 0,
+		disp('===Creating and populating groups===');
+	end
+	for i=1:length(groups),
+		if verbose >1,
+			disp(sprintf('===Now treating %s===',groups{i}));
+		end
+		if any(strcmp(groups{i}, {'qmu'})),
+			disp('qmu is skipped until it is more stable');
+			continue
+		end
+		if any(strcmp(groups{i},{'radaroverlay'})),
+			disp(sprintf('%s is skipped.',groups{i}));
+			continue
+		end
+		groupID=netcdf.defGrp(ncid,groups{i});
+		%In each group gather the fields of the class
+		if isempty(md.(groups{i})),
+			disp(sprintf("WARNING: md.%s is empty, we skip it.",groups{i}))
+			continue
+		end
+		fields=fieldnames(md.(groups{i}));
+		if isempty(fields),
+			disp(sprintf("WARNING: md.%s as no fields, we skip it.",groups{i}))
+			continue
+		end
+		%looping on fields in each group
+		for j=1:length(fields),
+			Var=md.(groups{i}).(fields{j});
+			%treatment for lists
+			if isa(Var,'cell')
+				Stdlist=false;  %first assume it is not a standard list
+				if length(Var) == 0
+					Stdlist=true;  %It is empty and so standard (for us)
+				else
+					for k=1:length(typelist)
+						if isa(Var{1},typelist{k})
+							Stdlist=true;  %the list holds a basic MATLAB type; otherwise it is probably some exotic ISSM class
+						end
+					end
+				end
+				%print the issm class as a classtype attribute
+				klass = class(md.(groups{i}));
+				klasstring = strcat(klass, '.',klass);
+				netcdf.putAtt(groupID,netcdf.getConstant('NC_GLOBAL'),'classtype',klasstring);
+				if(Stdlist)  % this is a standard or empty list just proceed
+					if verbose > 4,
+						disp(sprintf("=££=creating var for %s.%s with classtype : %s",groups{i}, fields{j}, klasstring))
+					end
+					if ~isempty(Var) && isa(Var{1}, 'char'),  % we have a char array, pad it to a given length
+						Var=char(Var)';
+					end
+					[DimSize,DimValue,varid]=CreateVar(ncid,Var,groupID,fields{j},DimSize,DimValue);
+					if ~isempty(varid),
+						FillVar(Var,groupID,varid);
+					end
+
+				else % this is a list of fields, specific treatment needed (perhaps)
+					if verbose > 4,
+						disp(sprintf("=??=we have a list of fields for %s.%s with classtype : %s",groups{i}, fields{j}, klasstring));
+					end
+					if strcmp(groups{i}, 'outputdefinition'),
+						listsize=length(Var);
+						for k=1:listsize,
+							subgroupname=md.(groups{i}).(fields{j}){k}.definitionstring;
+							subgroupID=netcdf.defGrp(groupID,subgroupname);
+							klass=class(md.(groups{i}).(fields{j}){k});
+							klasstring = strcat(klass, '.',klass);
+							netcdf.putAtt(subgroupID,netcdf.getConstant('NC_GLOBAL'),'classtype',klasstring);
+							subfields=fieldnames(md.(groups{i}).(fields{j}){k});
+							for l=1:length(subfields)
+								if verbose > 4,
+									disp(sprintf("=--=creating var for %s.%s[%i].%s",groups{i}, fields{j}, k, subfields{l}));
+								end
+								Var = md.(groups{i}).(fields{j}){k}.(subfields{l});
+								if sum(numel(Var) == size(Var)) == 0,  %this is a genuine 2D (or higher) array, not a vector, so transpose it
+									Var = Var';
+								end
+								[DimSize,DimValue,varid]=CreateVar(ncid,Var,subgroupID,subfields{l},DimSize,DimValue);
+								if ~isempty(varid),
+									FillVar(Var,subgroupID,varid);
+								end
+							end
+						end
+					else
+						disp(sprintf("WARNING: unknown treatment for md.%s",groups{i}));
+					end
+				end
+			elseif sum(strcmp(class(Var), typelist))==1, %this is a standard matlab class with no subgrouping
+				if verbose > 4,
+					disp(sprintf("====creating var for %s.%s", groups{i}, fields{j}))
+				end
+				klass=class(md.(groups{i}));
+				klasstring = strcat(klass, '.',klass);
+				netcdf.putAtt(groupID,netcdf.getConstant('NC_GLOBAL'),'classgroup',groups{i});
+				netcdf.putAtt(groupID,netcdf.getConstant('NC_GLOBAL'),'classtype',klasstring);
+				if sum(numel(Var) == size(Var)) == 0,  %this is a genuine 2D (or higher) array, not a vector, so transpose it
+					Var = Var';
+				end
+
+				[DimSize,DimValue,varid]=CreateVar(ncid,Var,groupID,fields{j},DimSize,DimValue);
+				if ~isempty(varid),
+					FillVar(Var,groupID,varid);
+				end
+
+
+			elseif isa(Var,'struct')  % structures need special treatment
+				if strcmp(groups{i}, 'results'),
+					klasstring=strcat(groups{i} ,'.', groups{i});
+					netcdf.putAtt(groupID,netcdf.getConstant('NC_GLOBAL'),'classtype',klasstring);
+					Listsize= length(md.(groups{i}).(fields{j}));
+					subgroupname=fields{j};
+					subgroupID=netcdf.defGrp(groupID,subgroupname);
+					klasstring='results.solutionstep';
+					netcdf.putAtt(subgroupID,netcdf.getConstant('NC_GLOBAL'),'classtype',klasstring);
+					subfields=fieldnames(md.(groups{i}).(fields{j}));
+					if isempty(subfields),
+						disp(sprintf("WARNING: md.%s.%s as no subfields, we skip it.",groups{i}, fields{j}));
+						continue
+					end
+					for k=1:length(subfields),
+						if ~ismember(subfields{k}, {'errlog', 'outlog', 'SolutionType'})
+							StackedVar=restable();
+							for l=1:Listsize,
+								Var = md.(groups{i}).(fields{j})(l).(subfields{k});
+								if length(Var) == 0,
+									%Some variables only have data on the first step
+									break
+								end
+								lastindex=l;
+								StackedVar=StackedVar.update(Var);
+							end
+							if verbose > 4,
+								disp(sprintf("=$$=creating var for %s.%s.%s",groups{i}, fields{j}, subfields{k}));
+								disp(sprintf("last index on the list is %i",lastindex));
+							end
+							StackedVar=StackedVar.finalize(lastindex);
+							[DimSize,DimValue,varid]=CreateVar(ncid,StackedVar,subgroupID,subfields{k},DimSize,DimValue);
+							if ~isempty(varid),
+								FillVar(StackedVar,subgroupID,varid);
+							end
+						elseif ismember(subfields{k}, {'SolutionType'})
+							%We just add solution type once as an attribute
+							Var = md.(groups{i}).(fields{j})(1).(subfields{k});
+							[DimSize,DimValue,varid]=CreateVar(ncid,Var,subgroupID,subfields{k},DimSize,DimValue);
+							if ~isempty(varid),
+								FillVar(Var,subgroupID,varid);
+							end
+
+						end
+					end
+				elseif strcmp(groups{i}, 'toolkits'),
+					klasstring=strcat(groups{i} ,'.', groups{i});
+					netcdf.putAtt(groupID,netcdf.getConstant('NC_GLOBAL'),'classtype',klasstring);
+					if verbose > 4,
+						disp(sprintf("=}{=creating var for %s.%s",groups{i}, fields{j}));
+					end
+
+					[DimSize,DimValue,varid]=CreateVar(ncid,Var,groupID,fields{j},DimSize,DimValue);
+					if ~isempty(varid),
+						FillVar(Var,groupID,varid);
+					end
+
+				elseif isempty(fieldnames(md.(groups{i}).(fields{j}))) % this is an empty struct, just treat it as normal
+					klass=class(md.(groups{i}));
+					klasstring = strcat(klass, '.',klass);
+					netcdf.putAtt(groupID,netcdf.getConstant('NC_GLOBAL'),'classtype',klasstring);
+					if verbose > 4,
+						disp(sprintf("=[]=creating var for %s.%s",groups{i}, fields{j}));
+					end
+
+					[DimSize,DimValue,varid]=CreateVar(ncid,Var,groupID,fields{j},DimSize,DimValue);
+					if ~isempty(varid),
+						FillVar(Var,groupID,varid);
+					end
+
+				else
+					disp(sprintf("WARNING, md.%s.%s is not treated as it does not fall in one of the existing cases with class '%s'.",groups{i}, fields{j}, class(md.(groups{i}).(fields{j}))))
+				end
+			elseif sum(strcmp(class(Var), issmclasses)) == 1,  % that is an issm class
+				if strcmp(class(Var), 'solution'),
+					if verbose > 4,
+						disp(sprintf("=$$=creating var for %s.%s",groups{i}, fields{j}))
+						disp("NEED treatment")
+					end
+				elseif strcmp(class(Var), 'dict'),  %there is potential for a dict in Python, not too sure what it translates to here
+					if verbose > 4,
+						disp(sprintf("=WW=creating var for %s.%s",groups{i}, fields{j}))
+						disp("NEED Treatment")
+					end
+
+				else
+					klass=class(md.(groups{i}));
+					klasstring = strcat(klass, '.',klass);
+					netcdf.putAtt(groupID,netcdf.getConstant('NC_GLOBAL'),'classtype',klasstring);
+					subgroupID=netcdf.defGrp(groupID,fields{j});
+					klass=class(md.(groups{i}).(fields{j}));
+					klasstring = strcat(klass, '.',klass);
+					netcdf.putAtt(subgroupID,netcdf.getConstant('NC_GLOBAL'),'classtype',klasstring);
+					subfields=fieldnames(Var);
+					for k=1:length(subfields),
+						if sum(strcmp(subfields{k},["outlog" "errlog"])) == 0,
+							if verbose > 4,
+								disp(sprintf("+==+creating var for %s.%s.%s",groups{i}, fields{j}, subfields{k}))
+							end
+							Var=md.(groups{i}).(fields{j}).(subfields{k});
+							[DimSize,DimValue,varid]=CreateVar(ncid,Var,subgroupID,subfields{k},DimSize,DimValue);
+							if ~isempty(varid),
+								FillVar(Var,subgroupID,varid);
+							end
+						end
+					end
+
+				end
+			else
+				disp(sprintf("WARNING, md.%s.%s is not treated as it does not fall in one of the existing cases with class '%s'.",groups{i}, fields{j}, class(Var)))
+			end
+		end
+	end
+	netcdf.close(ncid);
+end
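+
+% A minimal usage sketch (names are illustrative; assumes an ISSM model "md"
+% exists in the workspace and that 'model.nc' can be created):
+%   md.miscellaneous.name = 'demo';
+%   export_netCDF(md, 'model.nc');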
+
+function [DimSize,DimValue,varid]=CreateVar(ncid,Var,groupID,field,DimSize,DimValue)
+% Grab dimensions
+	varsize=size(Var);
+	varlength=length(Var);
+	% treating scalar strings and logicals as attributes
+	if isa(Var,'logical'),
+		if Var,
+			LogicString='True';
+		else,
+			LogicString='False';
+		end
+		netcdf.putAtt(groupID,netcdf.getConstant('NC_GLOBAL'),field,LogicString);
+		varid=[];
+
+	elseif isa(Var,'char'),
+		if strcmp(field,'name'),  % it looks like netCDF does not like attributes that are called "name"
+			field = 'varname';
+		end
+		if size(Var,1) <= 1  %that is a single string or empty
+			netcdf.putAtt(groupID,netcdf.getConstant('NC_GLOBAL'),field,Var);
+			varid=[];
+		else  % that is a character array
+			[dims,DimSize,DimValue]=GetDims(ncid,Var,DimSize,DimValue);
+			varid = netcdf.defVar(groupID,field,'NC_CHAR',dims);
+			if numel(Var)>1
+				netcdf.defVarDeflate(groupID,varid,true,true,4);
+			end
+		end
+
+	elseif isa(Var,'double'), %dealing with arrays
+		if all(mod(Var, 1) == 0, 'all')  %these values are actually integers
+			[dims,DimSize,DimValue]=GetDims(ncid,Var,DimSize,DimValue);
+			varid = netcdf.defVar(groupID,field,'NC_INT64',dims);
+			if numel(Var)>1
+				netcdf.defVarDeflate(groupID,varid,true,true,4);
+			end
+		else
+			[dims,DimSize,DimValue]=GetDims(ncid,Var,DimSize,DimValue);
+			varid = netcdf.defVar(groupID,field,'NC_DOUBLE',dims);
+			if numel(Var)>1
+				netcdf.defVarDeflate(groupID,varid,true,true,4);
+			end
+		end
+	elseif isa(Var,'cell'),
+		% cells can be a range of things, what are we dealing with here
+		if isempty(Var),
+			netcdf.putAtt(groupID,netcdf.getConstant('NC_GLOBAL'),field,'emptycell');
+			varid=[];
+		else
+			[dims,DimSize,DimValue]=GetDims(ncid,Var,DimSize,DimValue);
+			if isa(Var{1}, 'double'),
+				varid = netcdf.defVar(groupID,field,'NC_DOUBLE',dims);
+				if numel(Var)>1
+					netcdf.defVarDeflate(groupID,varid,true,true,4);
+				end
+			else
+				varid = netcdf.defVar(groupID,field,'NC_CHAR',dims);
+				if numel(Var)>1
+					netcdf.defVarDeflate(groupID,varid,true,true,4);
+				end
+			end
+		end
+	elseif isa(Var,'struct'),
+		if isempty(fieldnames(Var)),
+			netcdf.putAtt(groupID,netcdf.getConstant('NC_GLOBAL'),field,'emptystruct');
+			varid=[];
+		else
+			%Start by getting the structure fields and size
+			[dims,DimSize,DimValue]=GetDims(ncid,Var,DimSize,DimValue);
+			varid = netcdf.defVar(groupID,field,'NC_CHAR',dims);
+			if numel(Var)>1
+				netcdf.defVarDeflate(groupID,varid,true,true,4);
+			end
+		end
+	else
+		disp(sprintf('no support for class %s of field %s',class(Var),field));
+		varid=[];
+	end
+	return
+end
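+
+% Note on the convention above: CreateVar returns an empty varid when the value
+% was written as a group attribute instead of a variable (logicals, scalar
+% strings, empty cells or structs); callers test ~isempty(varid) before FillVar.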
+
+
+function FillVar(Var,groupID,varid)
+% Grab dimensions
+	varsize=size(Var);
+	varlength=length(Var);
+	% treating scalar string or bool as attribute
+	if isa(Var,'double'), %dealing with arrays
+		if all(mod(Var, 1) == 0, 'all')  %those are actually integers,
+			Var = int64(Var);
+		end
+		if length(Var)==0,
+			netcdf.putVar(groupID,varid,NaN);
+		else
+			netcdf.putVar(groupID,varid,Var);
+		end
+	elseif isa(Var,'char'),  % at this point this should be a character array
+		netcdf.putVar(groupID,varid,Var);
+	elseif isa(Var,'cell'),  % there can be a number of things in a cell array
+		for i=1:length(Var),
+			if isa(Var{i},'char')  %for characters we limit the size to 40 for now
+				if length(Var)>1,
+					count=[min(length(Var{i}),40), 1];
+					startpoint=[0 i-1];
+				else
+					count=min(length(Var{i}),40);
+					startpoint=0;
+				end
+
+				if length(Var{i})>40,
+					netcdf.putVar(groupID,varid,startpoint,count,Var{i}(1:40));
+					disp(sprintf('some variables have been truncated'));
+				else
+					netcdf.putVar(groupID,varid,startpoint,count,Var{i});
+				end
+			elseif isa(Var{i},'double')
+				startpoint=[i-1];
+				count=[1 length(Var{i}) ndims(Var{i})];
+				for j=1:ndims(Var{i}),
+					startpoint=[startpoint 0];
+				end
+				netcdf.putVar(groupID,varid,startpoint,count,Var{i});
+			else
+				disp(sprintf("WARNING: cell of class %s is not supported.",class(Var{i})))
+			end
+		end
+	elseif isa(Var,'struct'),
+		%Start by getting the structure fields and size
+		locfields=fieldnames(Var);
+		for i=1:length(locfields),
+			for j=1:2,
+				if j==1,
+					CharVar=locfields{i}';
+					if length(CharVar)==0
+						CharVar='emptystruct';
+					end
+					startpoint=[0,0,i-1];
+				else
+					if isa(Var.(locfields{i}),'char'),
+						CharVar=Var.(locfields{i})';
+					else
+						CharVar=num2str(Var.(locfields{i}))';
+					end
+					if length(CharVar)==0
+						CharVar='emptystruct';
+					end
+					startpoint=[0,1,i-1];
+				end
+
+				extent=[min(length(CharVar),40), 1, 1];
+				if length(CharVar)>40,
+					netcdf.putVar(groupID,varid,startpoint,extent,CharVar(1:40));
+					disp(sprintf('some variables have been truncated'));
+				else
+					netcdf.putVar(groupID,varid,startpoint,extent,CharVar);
+				end
+			end
+		end
+	else
+		disp(sprintf('no support for class %s',class(Var)));
+	end
+	return
+end
+
+function [dims,DimSize,DimValue]=GetDims(ncid,Var,DimSize,DimValue)
+	dims=[];
+	celldims=[];
+	dim=ndims(Var);
+	if isa(Var,'struct'),
+		varsize=length(fieldnames(Var));
+	else
+		varsize=size(Var);
+		if isa(Var, 'cell')
+			%we add the dimension of the cells themselves,
+			%that will most probably fail if cells have different sizes
+			for i=1:dim,
+				newdim=size(Var{i});
+				if ~ismember(newdim, celldims),
+					celldims=[celldims newdim];
+				end
+			end
+		end
+	end
+	varsize=[varsize celldims];
+	alldim=length(varsize);
+	if dim>0,
+		for i=1:alldim,
+			if size(Var, i)>1 || i>dim || isa(Var, 'struct'),  %we skip singleton dimensions but keep the extra dimensions coming from cells
+				indsize=find(varsize(i)==DimValue);
+				if length(indsize)>0
+					dims=[dims DimSize(indsize).index];
+				else
+					indsize=length(DimSize)+1;
+					DimSize(indsize).index=netcdf.defDim(ncid,['DimNum' num2str(indsize)],varsize(i));
+					[DimSize(indsize).name,DimSize(indsize).value]=netcdf.inqDim(ncid,DimSize(indsize).index);
+					DimValue(indsize)=DimSize(indsize).value;
+					dims=[dims DimSize(indsize).index];
+				end
+			end
+		end
+	end
+	if isa(Var, 'cell') && isa(Var{1}, 'char'),
+		%if we have a cell variable holding strings we need to add a string length dimension
+		dims=[dims DimSize(4).index];
+	end
+	% structs also need an extra dimension of length 2, but only if non-empty
+	if isa(Var,'struct'),
+		dims=[DimSize(4).index DimSize(3).index, dims];
+	end
+end
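+
+% Illustration of the dimension reuse above (sizes are hypothetical): for an
+% array of size [md.mesh.numberofvertices 3], GetDims returns the existing
+% 'VertNum' dimension for the first axis and only defines a new 'DimNumN'
+% dimension of length 3 if no dimension of that length exists yet.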
Index: /issm/trunk-jpl/src/m/netCDF/export_netCDF.py
===================================================================
--- /issm/trunk-jpl/src/m/netCDF/export_netCDF.py	(revision 27793)
+++ /issm/trunk-jpl/src/m/netCDF/export_netCDF.py	(revision 27793)
@@ -0,0 +1,536 @@
+from netCDF4 import Dataset
+import numpy as np
+import numpy.ma as ma
+import time
+import collections
+from inspect import isclass
+from os import path, remove
+
+
+class ResTable:
+    def __init__(self):
+        self.data = []
+        self.sizes = []
+
+    def update(self, stepvar):
+        #if we have a scalar we just add it to the end
+        #we save the size of the current step for further treatment
+        if len(np.shape(stepvar)) == 0:
+            self.sizes.append(1)
+            self.data.append(stepvar)
+        # if it is an array we add the values one by one
+        #we save the size of the current step for further treatment
+        else:
+            self.sizes.append([np.shape(stepvar)])
+            stackdat = np.squeeze(stepvar.flatten())
+            self.data = np.hstack((self.data, stackdat))
+
+    def finalize(self, rows):
+        #we have more scalars than steps, so we have an array
+        if len(self.data) > rows:
+            datasize = np.squeeze(self.sizes)
+            maxsize = []
+            try:
+                dims = np.arange(np.shape(datasize)[1])
+                for dim in dims:
+                    maxsize.append(np.nanmax(datasize[:, dim]))
+            except IndexError:
+                if datasize.ndim == 0:
+                    maxsize.append(datasize)
+                else:
+                    maxsize.append(np.nanmax(datasize[:]))
+            findim = np.insert(maxsize, 0, rows)
+            #first check if all steps are the same size
+            if datasize.ndim == 0:
+                SameSize = True
+            else:
+                SameSize = np.sum(np.abs(datasize - datasize[0])) == 0
+            if SameSize:
+                #same size for all steps, just reshape
+                return np.reshape(self.data, newshape=(findim))
+            else:
+                #different sizes at each step, first create a table big enough for the biggest step
+                startpoint = 0
+                outdat = np.nan * np.ones(findim)
+                for step in range(rows):
+                    #slicer is the data slice in the final array
+                    slicer = [slice(0, d) for d in datasize[step, :]]
+                    slicer = np.insert(slicer, 0, step)
+                    curlen = int(np.prod(datasize[step, :]))
+                    outdat[tuple(slicer)] = np.reshape(self.data[startpoint:startpoint + curlen], newshape=(datasize[step, :]))
+                    startpoint += curlen
+                #outmasked = ma.masked_array(outdat, mask=np.where(np.isnan(outdat), 1, 0))
+                return outdat
+        #as many scalars as steps (or fewer), so just one value per step
+        else:
+            return np.squeeze(np.asarray(self.data))
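+
+# A minimal ResTable usage sketch (shapes are illustrative, one array per step):
+#     tab = ResTable()
+#     for step in range(3):
+#         tab.update(np.ones((5,)) * step)
+#     stacked = tab.finalize(3)  # -> array of shape (3, 5)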
+
+
+def export_netCDF(md, filename):  # {{{
+    #verbosity of the code, 0 is no messages, 5 is chatty
+    verbose = 0
+    if path.exists(filename):
+        print('File {} already exists'.format(filename))
+        newname = input('Give a new name or "delete" to replace: ')
+        if newname == 'delete':
+            remove(filename)
+        else:
+            print(('New file name is {}'.format(newname)))
+            filename = newname
+    #create file and define it
+    NCData = Dataset(filename, 'w', format='NETCDF4')
+    NCData.description = 'Results for run ' + md.miscellaneous.name
+    NCData.history = 'Created ' + time.ctime(time.time())
+    # define netCDF dimensions
+    #grab time from Transient if it exists
+    try:
+        StepNum = len(md.results.TransientSolution)
+    except AttributeError:  #no transient so just one timestep
+        StepNum = 1
+    except TypeError:  #this is not a result class, so there are no results in there
+        StepNum = 0
+    TimeDim = NCData.createDimension('Time', StepNum)  # time is first
+    DimDict = {len(TimeDim): 'Time'}
+    dimindex = 1
+    UnlimDim = NCData.createDimension('Unlim', None)  # unlimited dimension if needed
+    DimDict[len(UnlimDim)] = 'Inf'  #store a plain string, not a set
+    dimindex = 2
+    #add mesh related dimensions that we know are needed
+    dimlist = [2, 40, md.mesh.numberofelements, md.mesh.numberofvertices, np.shape(md.mesh.elements)[1]]
+    dimnames = ['DictDummy', 'StringLength', 'EltNum', 'VertNum', 'VertPerElt']
+    try:
+        dimlist = dimlist + [md.mesh.numberofedges]
+        dimnames = dimnames + ['EdgeNum']
+    except AttributeError:
+        #no edges on this mesh, we fix it at 0
+        dimlist += [0]
+        dimnames += ['EdgeNum']
+    if verbose > 0:
+        print('===Creating dimensions ===')
+    for i, newdim in enumerate(dimlist):
+        if newdim not in list(DimDict.keys()):
+            dimindex += 1
+            NewDim = NCData.createDimension(dimnames[i], newdim)
+            DimDict[len(NewDim)] = dimnames[i]
+
+    typelist = [bool, str, int, float, complex,
+                collections.OrderedDict,
+                np.int64, np.ndarray, np.float64]
+
+    # get all model classes and create respective groups
+    groups = dict.keys(md.__dict__)
+    if verbose > 0:
+        print('===Creating and populating groups===')
+    for group in groups:
+        if verbose > 1:
+            print('===Now treating {}==='.format(group))
+        if group == 'qmu':
+            print("qmu is skipped until it is more stable")
+            continue
+        NCgroup = NCData.createGroup(str(group))
+        # In each group gather the fields of the class
+        try:
+            fields = dict.keys(md.__dict__[group].__dict__)
+        except AttributeError:
+            print("WARNING: md.{} as no fields, we skip it.".format(group))
+            continue
+        # looping on fields in each group
+        for field in fields:
+            Var = md.__dict__[group].__dict__[field]
+            # Special treatment for list fields
+            if type(Var) == list:
+                StdList = False
+                if len(Var) == 0:
+                    StdList = True  #this is an empty list
+                else:
+                    #returns False for exotic types (typically results)
+                    StdList = type(Var[0]) in typelist
+                klass = type(md.__dict__[group]).__module__ + '.' + type(md.__dict__[group]).__name__
+                NCgroup.__setattr__('classtype', klass)
+                if StdList:  # this is a standard or empty list just proceed
+                    if verbose > 4:
+                        print("=££=creating var for {}.{} with classtype : {}".format(group, field, klass))
+                    Var = SqueezeVar(Var)
+                    DimDict, ncvar = CreateVar(NCData, Var, field, NCgroup, DimDict)
+                    if ncvar is not None:
+                        FillVar(ncvar, Var)
+                else:  # this is a list of fields, specific treatment needed (usually results or outputdefinitions)
+                    if verbose > 4:
+                        print("=??=we have a list of fields for {}.{} with classtype : {}".format(group, field, klass))
+                    Listsize = len(Var)
+                    if group == 'results':  #for results we reshape the data along time rather than subgrouping
+                        Subgroup = NCgroup.createGroup(str(field))
+                        try:
+                            #take the class of the first element to define nc class and get the list of variables
+                            klass = type(md.__dict__[group].__dict__[field][0]).__module__ + '.' + type(md.__dict__[group].__dict__[field][0]).__name__
+                            Subgroup.__setattr__('classtype', klass)
+                            subfields = dict.keys(md.__dict__[group].__dict__[field][0].__dict__)
+                        except (IndexError, AttributeError):
+                            klass = type(md.__dict__[group].__dict__[field]).__module__ + '.' + type(md.__dict__[group].__dict__[field]).__name__
+                            Subgroup.__setattr__('classtype', klass)
+                            subfields = dict.keys(md.__dict__[group].__dict__[field].__getitem__(0))
+                        for subfield in subfields:
+                            if subfield not in ['errlog', 'outlog']:
+                                StackedVar = ResTable()
+                                #loop over the steps of the result to stack the values and track the last valid index
+                                for listindex in range(0, Listsize):
+                                    try:
+                                        Var = md.__dict__[group].__dict__[field].__getitem__(listindex).__dict__[subfield]
+                                        lastindex = listindex + 1
+                                    except AttributeError:
+                                        Var = md.__dict__[group].__dict__[field].__getitem__(listindex)[subfield]
+                                    except KeyError:
+                                        #Some fields only exist for the first step
+                                        lastindex = listindex
+                                        continue
+                                    #Add the subfield at the current step
+                                    Var = SqueezeVar(Var)
+                                    StackedVar.update(Var)
+                                if verbose > 4:
+                                    print("=@@=creating var for {}.{}.{}".format(group, field, subfield))
+                                    print("last index of the list is {}".format(lastindex))
+                                StackedVar = SqueezeVar(StackedVar.finalize(int(lastindex)))
+                                DimDict, ncvar = CreateVar(NCData, StackedVar, subfield, Subgroup, DimDict)
+                                #and fill it up
+                                if ncvar is not None:
+                                    FillVar(ncvar, StackedVar)
+                    elif group == 'outputdefinition':  #for outputdefinition we keep a subgroup format
+                        for listindex in range(0, Listsize):
+                            Subgroupname = str(md.__dict__[group].__dict__[field][listindex].definitionstring)
+                            Subgroup = NCgroup.createGroup(Subgroupname)
+                            klass = type(md.__dict__[group].__dict__[field][listindex]).__module__ + '.' + type(md.__dict__[group].__dict__[field][listindex]).__name__
+                            Subgroup.__setattr__('classtype', klass)
+                            subfields = dict.keys(md.__dict__[group].__dict__[field][listindex].__dict__)
+                            for subfield in subfields:
+                                Var = md.__dict__[group].__dict__[field].__getitem__(listindex).__dict__[subfield]
+                                Var = SqueezeVar(Var)
+                                if verbose > 4:
+                                    print("=--=creating var for {}.{}[{}].{}".format(group, field, listindex, subfield))
+                                DimDict, ncvar = CreateVar(NCData, Var, subfield, Subgroup, DimDict)
+                                #and fill it up
+                                if ncvar is not None:
+                                    FillVar(ncvar, Var)
+                    else:
+                        print("WARNING: unknown treatment for md.{}".format(group))
+            # No subgroup, we directly treat the variable
+            elif type(md.__dict__[group].__dict__[field]) in typelist or field == 'bamg':
+                klass = type(md.__dict__[group]).__module__ + '.' + type(md.__dict__[group]).__name__
+                NCgroup.__setattr__('classtype', klass)
+                Var = md.__dict__[group].__dict__[field]
+                Var = SqueezeVar(Var)
+                if verbose > 4:
+                    print("====creating var for {}.{}".format(group, field))
+                DimDict, ncvar = CreateVar(NCData, Var, field, NCgroup, DimDict)
+                if ncvar is not None:
+                    FillVar(ncvar, Var)
+            # empty field, do nothing
+            elif md.__dict__[group].__dict__[field] is None:
+                print('field md.{}.{} is None'.format(group, field))
+            # if it is a masked array
+            elif type(md.__dict__[group].__dict__[field]) is np.ma.core.MaskedArray:
+                klass = type(md.__dict__[group]).__module__ + '.' + type(md.__dict__[group]).__name__
+                NCgroup.__setattr__('classtype', klass)
+                Var = md.__dict__[group].__dict__[field].data
+                Var = SqueezeVar(Var)
+                if verbose > 4:
+                    print("=++=creating var for {}.{}".format(group, field))
+                DimDict, ncvar = CreateVar(NCData, Var, field, NCgroup, DimDict)
+                if ncvar is not None:
+                    FillVar(ncvar, Var)
+            # this is an issm class
+            elif isclass(type(md.__dict__[group].__dict__[field])):
+                if type(md.__dict__[group].__dict__[field]).__name__ == 'solution':
+                    #for results we reshape the data along time rather than subgrouping
+                    Listsize = len(md.__dict__[group].__dict__[field])
+                    Subgroup = NCgroup.createGroup(str(field))
+                    try:
+                        #take the class of the first element to define nc class and get the list of variables
+                        klass = type(md.__dict__[group].__dict__[field][0]).__module__ + '.' + type(md.__dict__[group].__dict__[field][0]).__name__
+                        Subgroup.__setattr__('classtype', klass)
+                        subfields = dict.keys(md.__dict__[group].__dict__[field][0].__dict__)
+                    except (IndexError, AttributeError):
+                        klass = type(md.__dict__[group].__dict__[field]).__module__ + '.' + type(md.__dict__[group].__dict__[field]).__name__
+                        Subgroup.__setattr__('classtype', klass)
+                        subfields = dict.keys(md.__dict__[group].__dict__[field].__getitem__(0))
+                    for subfield in subfields:
+                        if subfield not in ['errlog', 'outlog']:
+                            StackedVar = ResTable()
+                            for listindex in range(0, Listsize):
+                                try:
+                                    Var = md.__dict__[group].__dict__[field].__getitem__(listindex).__dict__[subfield]
+                                    lastindex = listindex + 1
+                                except AttributeError:
+                                    Var = md.__dict__[group].__dict__[field].__dict__[subfield]
+                                    lastindex = listindex
+                                except KeyError:
+                                    #Some fields only exist for the first step
+                                    lastindex = listindex
+                                    break
+                                Var = SqueezeVar(Var)
+                                StackedVar.update(Var)
+                            if verbose > 4:
+                                print("=$$=creating var for {}.{}.{}".format(group, field, subfield))
+                                print("last index of the list is {}".format(lastindex))
+                            StackedVar = SqueezeVar(StackedVar.finalize(int(lastindex)))
+                            DimDict, ncvar = CreateVar(NCData, StackedVar, subfield, Subgroup, DimDict)
+                            #and fill it up
+                            if ncvar is not None:
+                                FillVar(ncvar, StackedVar)
+                elif type(md.__dict__[group].__dict__[field]).__name__ == 'dict':
+                    # designed for a dict in dummy but might be used elsewhere
+                    # there is no subgroup
+                    klass = type(md.__dict__[group]).__module__ + '.' + type(md.__dict__[group]).__name__
+                    NCgroup.__setattr__('classtype', klass)
+                    Var = md.__dict__[group].__dict__[field]
+                    Var = SqueezeVar(Var)
+                    if verbose > 4:
+                        print("=WW=creating var for {}.{}".format(group, field))
+                    DimDict, ncvar = CreateVar(NCData, Var, field, NCgroup, DimDict)
+                    if ncvar is not None:
+                        FillVar(ncvar, Var)
+                else:
+                    klass = type(md.__dict__[group]).__module__ + '.' + type(md.__dict__[group]).__name__
+                    NCgroup.__setattr__('classtype', klass)
+                    Subgroup = NCgroup.createGroup(str(field))
+                    klass = type(md.__dict__[group].__dict__[field]).__module__ + '.' + type(md.__dict__[group].__dict__[field]).__name__
+                    Subgroup.__setattr__('classtype', klass)
+                    subfields = dict.keys(md.__dict__[group].__dict__[field].__dict__)
+                    for subfield in subfields:
+                        if str(subfield) not in ['errlog', 'outlog']:
+                            Var = md.__dict__[group].__dict__[field].__dict__[subfield]
+                            Var = SqueezeVar(Var)
+                            if verbose > 4:
+                                print("+==+creating var for {}.{}.{}".format(group, field, subfield))
+                            DimDict, ncvar = CreateVar(NCData, Var, subfield, Subgroup, DimDict)
+                            if ncvar is not None:
+                                FillVar(ncvar, Var)
+            else:
+                print("WARNING, md.{}.{} is not treated as it does not fall in one of the existing cases.".format(group, field))
+
+    NCData.close()
+
+# }}}
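+
+# A minimal usage sketch (names are illustrative; assumes an ISSM model "md"):
+#     md.miscellaneous.name = 'demo'
+#     export_netCDF(md, 'model.nc')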
+
+
+def CreateVar(NCData, var, field, Group, DimDict, *SupDim):  # {{{
+    #=================================================================
+    # Define the variables
+    #=================================================================
+    # grab type
+    try:
+        val_type = str(var.dtype)
+        if val_type.startswith('<U'):
+            val_type = 'stringarray'
+    except AttributeError:
+        val_type = type(var)
+
+    # grab dimension
+    if val_type in [collections.OrderedDict, dict]:
+        val_shape = len(var.keys())
+        val_dim = 2
+    else:
+        val_shape = np.shape(var)
+        val_dim = np.shape(val_shape)[0]
+    TypeDict = {float: 'f8',
+                'float64': 'f8',
+                np.float64: 'f8',
+                int: 'i8',
+                'int64': 'i8',
+                np.int64: 'i8',
+                str: str,
+                dict: str}
+
+    # Now define and fill up variable
+    # treating scalar strings and bools as attributes
+    if val_type in [str, bool]:
+        if field == 'name':  # it looks like netCDF does not like attributes that are called "name"
+            field = 'varname'
+        Group.__setattr__(str(field), str(var))
+        ncvar = None
+    # numpy array of strings
+    elif val_type == "stringarray":
+        #if all strings are the same set it as an attribute
+        try:
+            samestring = all(var == var[0])
+        except IndexError:
+            #Only one string
+            samestring = True
+        if samestring:
+            if field == 'name':
+                field = 'varname'
+            try:
+                Group.__setattr__(str(field), str(var[0]))
+            except IndexError:
+                Group.__setattr__(str(field), str(var))
+            ncvar = None
+        else:
+            dimensions, DimDict = GetDim(NCData, val_shape, val_type, DimDict, val_dim)
+            ncvar = Group.createVariable(str(field), str, dimensions=dimensions, zlib=True)
+    # treating list as string table
+    elif val_type == list:
+        # try to get the type from the first element
+        try:
+            nctype = TypeDict[type(var[0])]
+        except IndexError:
+            nctype = str  # most probably an empty list; default to str in that case
+
+        if val_shape in [(), (0,), 0]:
+            ncvar = Group.createVariable(str(field), nctype, zlib=True)
+        else:
+            dimensions, DimDict = GetDim(NCData, val_shape, val_type, DimDict, val_dim)
+            ncvar = Group.createVariable(str(field), nctype, dimensions=dimensions, zlib=True)
+    # treating dicts as string tables
+    elif val_type in [collections.OrderedDict, dict]:
+        if val_shape in [(), (0,), 0]:
+            ncvar = Group.createVariable(str(field), str, zlib=True)
+        else:
+            dimensions, DimDict = GetDim(NCData, val_shape, val_type, DimDict, val_dim)
+            ncvar = Group.createVariable(str(field), str, dimensions=dimensions, zlib=True)
+    # treating bool as integers
+    elif val_type == 'bool':
+        if val_shape in [(), (0,), 0]:
+            ncvar = Group.createVariable(str(field), int, zlib=True)
+        else:
+            dimensions, DimDict = GetDim(NCData, val_shape, val_type, DimDict, val_dim)
+            ncvar = Group.createVariable(str(field), int, dimensions=dimensions, zlib=True)
+    # Now dealing with doubles, we convert them to int if possible
+    elif val_type in [float, 'float64', np.float64]:
+        try:
+            #check if we are integer and under C long overflow also skip empty arrays
+            IsInt = np.sum(np.mod(var, 1)) == 0 and np.all(abs(var) < 2147483647) and len(var) > 0
+        except TypeError:
+            #check if we are integer and under C long overflow
+            IsInt = np.mod(var, 1) == 0 and abs(var) < 2147483647
+        if IsInt:
+            val_type = 'int64'
+        if val_shape in [(), (0,), 0] and not SupDim:
+            ncvar = Group.createVariable(str(field), TypeDict[val_type], zlib=True)
+        else:
+            dimensions, DimDict = GetDim(NCData, val_shape, val_type, DimDict, val_dim)
+            if SupDim:
+                dimensions = SupDim + dimensions
+            ncvar = Group.createVariable(str(field), TypeDict[val_type], dimensions=dimensions, zlib=True)
+    elif val_type in [int, 'int64']:
+        if val_shape in [(), (0,), 0] and not SupDim:
+            ncvar = Group.createVariable(str(field), TypeDict[val_type], zlib=True)
+        else:
+            dimensions, DimDict = GetDim(NCData, val_shape, val_type, DimDict, val_dim)
+            if SupDim:
+                dimensions = SupDim + dimensions
+            ncvar = Group.createVariable(str(field), TypeDict[val_type], dimensions=dimensions, zlib=True)
+    else:
+        print(('WARNING type "{}" is unknown for "{}.{}"'.format(val_type, Group.name, field)))
+        ncvar = None
+    return DimDict, ncvar
+# }}}
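+
+# Note on the convention above: CreateVar returns ncvar=None when the value was
+# stored as a group attribute (scalar strings, bools, uniform string arrays);
+# callers check "if ncvar is not None" before calling FillVar.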
+
+
+def FillVar(ncvar, invar, *UnlimIndex):  # {{{
+    #=================================================================
+    # Define the variables
+    #=================================================================
+    # grab type
+    try:
+        val_type = str(invar.dtype)
+        if val_type.startswith('<U'):
+            val_type = 'stringarray'
+    except AttributeError:
+        val_type = type(invar)
+    # grab dimension
+    if val_type in [collections.OrderedDict, dict]:
+        val_shape = len(invar)
+    else:
+        val_shape = np.shape(invar)
+
+    # Now fill up variable
+    # treating list as string table
+    if val_type == list:
+        if val_shape == 0:
+            ncvar = []
+        else:
+            for elt in range(0, val_shape[0]):
+                ncvar[elt] = invar[elt]
+    # writing string table
+    elif val_type == "stringarray":
+        for elt in range(0, val_shape[0]):
+            ncvar[elt] = invar[elt]
+    # treating bool tables as string tables
+    elif val_type == 'bool':
+        for elt in range(0, val_shape[0]):
+            ncvar[elt] = int(invar[elt])  #str(invar[elt])
+    # treating dictionaries as tables of strings
+    elif val_type in [collections.OrderedDict, dict]:
+        for elt, key in enumerate(dict.keys(invar)):
+            ncvar[elt, 0] = key
+            ncvar[elt, 1] = str(invar[key])  # converting to str to avoid potential problems
+    # Now dealing with numeric variables
+    elif val_type in [float, 'float64', np.float64, int, 'int64']:
+        try:
+            nan_val = np.isnan(invar)
+            if nan_val.all():
+                naned = 'NaN'
+            else:
+                naned = invar
+        except TypeError:  # type does not accept nan, keep the value of the variable
+            naned = invar
+
+        if UnlimIndex:
+            if len(val_shape) == 0:
+                ncvar[UnlimIndex] = naned
+            elif len(val_shape) == 1:
+                ncvar[UnlimIndex, :] = naned
+            elif len(val_shape) == 2:
+                ncvar[UnlimIndex, :, :] = naned
+            elif len(val_shape) == 3:
+                ncvar[UnlimIndex, :, :, :] = naned
+            else:
+                print('WARNING: dimension not supported')
+        else:
+            ncvar[:] = naned
+    else:
+        print(('WARNING type "{}" is unknown'.format(val_type)))
+    return
+# }}}
+
+
+def GetDim(NCData, val_shape, val_type, DimDict, val_dim):  #{{{
+    # ============================================================================
+    # retrieve the dimension tuple from the dimension dictionary
+    # ============================================================================
+    output = []
+    if val_type in [collections.OrderedDict, dict]:  # dealing with a dictionary
+        try:
+            output = [str(DimDict[val_shape])]  # first try to get the corresponding dimension if it exists
+            output = output + [DimDict[2]]  # 'DictDummy' has length 2 for the key/value pairs of a dict
+        except KeyError:
+            index = len(DimDict) + 1  # if the dimension does not exist, increment naming
+            NewDim = NCData.createDimension('DimNum' + str(index), val_shape)  # create dimension
+            DimDict[len(NewDim)] = 'DimNum' + str(index)  # and update the dimension dictionary
+            output = [str(DimDict[val_shape])] + [DimDict[2]]  # now proceed with the shape of the value
+    else:
+        # loop on dimensions
+        for dim in range(0, val_dim):  # loop on the dimensions
+            try:
+                output = output + [str(DimDict[val_shape[dim]])]  # test if the dimension already exists
+            except KeyError:  # if not create it
+                if (val_shape[dim]) > 0:
+                    index = len(DimDict) + 1
+                    NewDim = NCData.createDimension('DimNum' + str(index), (val_shape[dim]))
+                    DimDict[len(NewDim)] = 'DimNum' + str(index)
+                    output = output + [str(DimDict[val_shape[dim]])]
+    return tuple(output), DimDict
+# }}}
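+
+# DimDict maps a dimension length to its netCDF dimension name, for instance
+# (values illustrative) {1: 'Time', 0: 'Inf', 2: 'DictDummy', 40: 'StringLength'},
+# so any array sharing a size with an existing dimension reuses it.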
+
+
+def SqueezeVar(Var):  # {{{
+    vardim = len(np.shape(Var))
+    if vardim > 1:
+        Var = np.squeeze(Var)
+
+    return Var
+# }}}
+
+
+def grow(self, row):  # {{{
+    # np.append returns a new array, so assign the result back to self.data
+    self.data = np.append(self.data, row)
+# }}}
Index: /issm/trunk-jpl/src/m/netCDF/read_netCDF.m
===================================================================
--- /issm/trunk-jpl/src/m/netCDF/read_netCDF.m	(revision 27793)
+++ /issm/trunk-jpl/src/m/netCDF/read_netCDF.m	(revision 27793)
@@ -0,0 +1,110 @@
+function self=read_netCDF(filename)
+
+	% Different types in the netcdf standard are:
+	%   2 for char
+	%   4 for integer
+	%   6 for doubles	
+
+	ncid=netcdf.open(filename,'NC_NOWRITE');
+	groupIDs=netcdf.inqGrps(ncid);%retrieve group IDs
+	self=model;
+	%loop on groups
+	for i=1:length(groupIDs)
+		whichclass = netcdf.getAtt(groupIDs(i),netcdf.getConstant('NC_GLOBAL'),'classtype');
+		groupName = netcdf.inqGrpName(groupIDs(i));		
+		%results needs a special treatment as it is a structure
+		if strcmp(whichclass,'results'),
+			subgroupIDs=netcdf.inqGrps(groupIDs(i));%retrieve group IDs
+			%define the model structure
+			self=setfield(self,groupName,struct);
+			for j=1:length(subgroupIDs)
+				subclass = netcdf.getAtt(subgroupIDs(j),netcdf.getConstant('NC_GLOBAL'),'classtype');
+				self.results=setfield(self.results,subclass,struct);
+				[ndims nvar natts]=netcdf.inq(subgroupIDs(j));
+				varIDs=netcdf.inqVarIDs(subgroupIDs(j));
+				%first loop on the group attributes
+				for k=1:natts,
+					attname = netcdf.inqAttName(subgroupIDs(j),netcdf.getConstant('NC_GLOBAL'),k-1);
+					[xtype,attlen] = netcdf.inqAtt(subgroupIDs(j),netcdf.getConstant('NC_GLOBAL'),attname);
+					disp(sprintf('In %s, Treating attribute %s of type %i',subclass,attname,xtype));
+					%classtype has done its job, no need to keep it any more
+					if ~strcmp(attname,'classtype'),
+						attval=netcdf.getAtt(subgroupIDs(j),netcdf.getConstant('NC_GLOBAL'),attname);
+						if strcmp(attval,'False'),
+							self.(groupName).(subclass).(attname)=false;
+						elseif strcmp(attval,'True')
+							self.(groupName).(subclass).(attname)=true;
+						else
+							self.(groupName).(subclass).(attname)=attval;
+						end
+					end
+				end
+				%now loop on the variables in the group
+				count=0;
+				for k=1:length(varIDs),
+					[varname, xtype, varDimIDs, varAtts] =netcdf.inqVar(subgroupIDs(j),varIDs(k));
+					disp(sprintf('In %s, Treating variable %s of type %i',whichclass,varname,xtype));
+					%time dimension seems to be last in our construction
+					for l=1:length(varDimIDs),
+						[dimname, dimlen] = netcdf.inqDim(ncid,varDimIDs(l));
+						count(l)=dimlen;
+					end
+					startpoint=zeros(size(varDimIDs));
+					timestep=count(end);
+					count(end)=1;
+					for l=1:timestep,
+						data=netcdf.getVar(subgroupIDs(j),varIDs(k),startpoint,count);
+						self.(groupName).(subclass)(l).(varname)=data;
+						startpoint(end)=startpoint(end)+1;
+						self.(groupName).(subclass)(l).('errlog')='';
+						self.(groupName).(subclass)(l).('outlog')='';
+						self.(groupName).(subclass)(l).('SolutionType')=subclass;
+					end
+					count=0;
+				end
+			end
+		else,
+			%define the model structure
+			self.(groupName)=eval(whichclass);
+			varIDs=netcdf.inqVarIDs(groupIDs(i));
+			[ndims nvar natts]=netcdf.inq(groupIDs(i));
+			%first loop on the group attributes
+			for j=1:natts,
+				attname = netcdf.inqAttName(groupIDs(i),netcdf.getConstant('NC_GLOBAL'),j-1);
+				[xtype,attlen] = netcdf.inqAtt(groupIDs(i),netcdf.getConstant('NC_GLOBAL'),attname);
+				disp(sprintf('In %s, Treating attribute %s of type %i',whichclass,attname,xtype));
+				%classtype has done its job, no need to keep it any more
+				if ~strcmp(attname,'classtype'),
+					attval=netcdf.getAtt(groupIDs(i),netcdf.getConstant('NC_GLOBAL'),attname);
+					if strcmp(attval,'False'),
+						self.(groupName).(attname)=false;
+					elseif strcmp(attval,'True')
+						self.(groupName).(attname)=true;
+					else
+						self.(groupName).(attname)=attval;
+					end
+				end
+			end
+			%now loop on the variables in the group
+			for j=1:length(varIDs),
+				[varname, xtype, varDimIDs, varAtts] =netcdf.inqVar(groupIDs(i),varIDs(j));
+				disp(sprintf('In %s, Treating variable %s of type %i',whichclass,varname,xtype));			
+				%if the value is a single string, we need to transpose it (cross-check with the Python file is necessary)
+				if xtype==2
+					varval=netcdf.getVar(groupIDs(i),varIDs(j))';
+					varval=cellstr(varval)';
+					if strcmp(varval{1},'emptystruct'),
+						self.(groupName).(varname)=struct;
+					elseif strcmp(varval{1},'emptycell'),
+						self.(groupName).(varname)=cell(0,0);
+					else
+						self.(groupName).(varname)=varval;
+					end
+				else
+					self.(groupName).(varname)=netcdf.getVar(groupIDs(i),varIDs(j));
+				end
+			end
+		end
+	end
+	netcdf.close(ncid)
+end
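+
+% A minimal usage sketch (assumes 'model.nc' was produced by export_netCDF):
+%   md2 = read_netCDF('model.nc');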
Index: /issm/trunk-jpl/src/m/netCDF/read_netCDF.py
===================================================================
--- /issm/trunk-jpl/src/m/netCDF/read_netCDF.py	(revision 27793)
+++ /issm/trunk-jpl/src/m/netCDF/read_netCDF.py	(revision 27793)
@@ -0,0 +1,22 @@
+from netCDF4 import Dataset
+from os import path
+
+
+def netCDFRead(filename):
+    def walktree(data):
+        keys = list(data.groups.keys())
+        yield keys
+        for key in keys:
+            for children in walktree(data.groups[str(key)]):
+                yield children
+
+    if path.exists(filename):
+        print(('Opening {} for reading '.format(filename)))
+        NCData = Dataset(filename, 'r')
+        class_dict = {}
+
+        for children in walktree(NCData):
+            for child in children:
+                class_dict[str(child)] = str(getattr(NCData.groups[str(child)], 'classtype') + '()')
+
+        print(class_dict)
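+
+# A minimal usage sketch; unlike the MATLAB reader this prototype only walks the
+# groups and prints the classtype mapping, it does not rebuild a model object:
+#     netCDFRead('model.nc')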
Index: /issm/trunk-jpl/src/m/netCDF/restable.m
===================================================================
--- /issm/trunk-jpl/src/m/netCDF/restable.m	(revision 27793)
+++ /issm/trunk-jpl/src/m/netCDF/restable.m	(revision 27793)
@@ -0,0 +1,60 @@
+classdef restable
+	properties(SetAccess=public)
+		data=[];
+		sizes=[];
+	end
+	methods
+		function self = update(self, stepvar)
+			if length(stepvar) == 1
+				%if we have a scalar we just add it to the end
+				%we save the size of the current step for further treatment
+				self.sizes=[self.sizes;1];
+				self.data=[self.data;stepvar];
+			else
+				% if it is an array we add the values one by one
+				%we save the size of the current step for further treatment
+				self.sizes=[self.sizes;fliplr(size(stepvar))];
+				%we need to transpose to follow the indexing
+				flatdata=reshape(stepvar', 1, []);
+				self.data=[self.data,flatdata];
+			end
+		end
+		function outdat = finalize(self, rows)
+			if length(self.data)>rows,
+				if size(self.sizes, 1)==1,
+					%just one step, data don't need treatment
+					outdat=self.data;
+				else,
+					%we have more scalars than steps, so we have an array
+					maxsize=[];
+					for d=1:size(self.sizes,2)
+						maxsize=[maxsize,max(self.sizes(:,d))];
+					end
+					findim=[maxsize, rows];
+					%first check if all steps are the same size
+					SameSize = sum(sum(abs(self.sizes - self.sizes(1, :))))==0;  %use abs so opposite differences cannot cancel, as in the Python version
+					if SameSize,
+						%same size for all steps, just reshape
+						outdat=reshape(self.data, findim);
+					else,
+						%different sizes at each step, first create a table big enough for the biggest step
+						startpoint=1;
+						datadim=ndims(self.data);
+						outdat=nan(findim);
+						for r=1:rows
+							curlen = prod(self.sizes(r, :));
+							outdat(1:self.sizes(r,1), 1:self.sizes(r,2), r) = reshape(self.data(startpoint:startpoint+ ...
+													  curlen-1),self.sizes(r,:));
+							startpoint = startpoint+curlen;
+						end
+
+					end
+				end,
+			else,
+				%as many scalars as steps (or fewer) so just one value per step
+				outdat=self.data;
+
+			end
+		end
+	end
+end
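+
+% A minimal usage sketch mirroring the Python ResTable (sizes illustrative):
+%   tab = restable();
+%   for step = 1:3
+%       tab = tab.update(ones(5,1)*step);
+%   end
+%   stacked = tab.finalize(3);  % -> 1x5x3 array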
Index: /issm/trunk-jpl/src/m/paraview/exportVTK.m
===================================================================
--- /issm/trunk-jpl/src/m/paraview/exportVTK.m	(revision 27793)
+++ /issm/trunk-jpl/src/m/paraview/exportVTK.m	(revision 27793)
@@ -0,0 +1,197 @@
+function exportVTK(filename,model,varargin)
+%EXPORTVTK -  vtk export
+%
+%   function exportVTK(filename,model)
+%   creates a directory with the vtk files for displays in paraview
+%   (only works for triangles and wedges, detected by their number of nodes)
+%   By default only the results are exported, you can add whichever
+%   field you need as a string:
+%   add 'geometry' to export md.geometry
+%
+%   USAGE:
+%      exportVTK(filename,model,varargin)
+%
+%   EXAMPLE:
+%      exportVTK('ResultSimulation1',md)
+
+[path,name,ext]=fileparts(filename);
+separator=filesep;
+mkdir(filename);
+
+%get the element related variables
+if dimension(model.mesh)==2,
+	points=[model.mesh.x model.mesh.y zeros(model.mesh.numberofvertices,1)];
+else
+	points=[model.mesh.x model.mesh.y model.mesh.z];
+end
+
+[num_of_points,dim]=size(points);
+[num_of_elt]=size(model.mesh.elements,1);
+[point_per_elt]=size(model.mesh.elements,2);
+
+%Select the type of element function of the number of nodes per elements
+if point_per_elt==3;
+	celltype=5; %triangles
+elseif point_per_elt==6;
+	celltype=13; %wedges
+else
+	error('Your element definition is not supported\n');
+end
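+%VTK legacy cell type codes used above: 5 = VTK_TRIANGLE, 13 = VTK_WEDGE.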
+
+%this is the result structure
+res_struct=model.results;
+%checking for results
+if (length(fields(res_struct))>0);
+	%Getting all the solutions of the model
+	solnames=fields(res_struct);
+	num_of_sols=length(solnames);
+	num_of_timesteps=1;
+	%building solution structure 
+	for i=1:num_of_sols
+		sol_struct{i}=res_struct.(solnames{i});
+		%looking for multiple time steps
+		if(size(sol_struct{i},2)>num_of_timesteps);
+			num_of_timesteps=size(sol_struct{i},2);
+			if isa(model.timestepping,'timesteppingadaptive')
+				disp('Warning: timesteppingadaptive is not fully supported!');
+			elseif isa(model.timestepping,'timestepping')
+				outstep=model.timestepping.time_step*model.settings.output_frequency;
+			else
+				error('timestepping class not supported!');
+			end
+		end
+	end
+else
+	num_of_timesteps=1;
+end
+for step=1:num_of_timesteps;
+	
+	timestep=step;
+
+	fid = fopen(strcat(path,filesep,name,filesep,'timestep.vtk',int2str(timestep),'.vtk'),'w+');
+	fprintf(fid,'# vtk DataFile Version 2.0 \n');
+	fprintf(fid,'Data for run %s \n',model.miscellaneous.name);
+	fprintf(fid,'ASCII \n');
+	fprintf(fid,'DATASET UNSTRUCTURED_GRID \n');
+	
+	fprintf(fid,'POINTS %d float\n',num_of_points);
+	if(dim==3),
+		s='%f %f %f \n';
+	elseif(dim==2),
+		s='%f %f \n';
+	end
+	P=[points zeros(num_of_points,3-dim)];
+	fprintf(fid,s,P');
+	
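+	%legacy VTK format: each CELLS entry is 'npoints id1 id2 ...', hence
+	%the size count of point_per_elt+1 integers per element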
+	fprintf(fid,'CELLS %d %d\n',num_of_elt,num_of_elt*(point_per_elt+1));
+	s=['%d' repmat(' %d',1,point_per_elt) '\n'];
+	fprintf(fid,s,[(point_per_elt)*ones(num_of_elt,1) model.mesh.elements-1]');
+	
+	fprintf(fid,'CELL_TYPES %d\n',num_of_elt);
+	s='%d\n';
+	fprintf(fid,s,celltype*ones(num_of_elt,1));
+	fprintf(fid,'POINT_DATA %s \n',num2str(num_of_points));
+
+	%loop over the different solution structures
+	if (exist('num_of_sols'));
+		for j=1:num_of_sols
+			%dealing with results on different timesteps
+			if(size(sol_struct{j},2)>timestep),
+				timestep = step;
+			else
+				timestep = size(sol_struct{j},2);
+			end
+			
+			%getting the number of fields in the solution
+			fieldnames=fields(sol_struct{j}(timestep));
+			num_of_fields=length(fieldnames);
+			
+			%check which field is a real result and print
+			for k=1:num_of_fields
+				if ((numel(sol_struct{j}(timestep).(fieldnames{k})))==num_of_points);
+					%paraview does not like NaN, replacing
+					nanval=find(isnan(sol_struct{j}(timestep).(fieldnames{k})));
+					sol_struct{j}(timestep).(fieldnames{k})(nanval)=-9999;
+					%also checking for very small values that mess up
+					smallval=(abs(sol_struct{j}(timestep).(fieldnames{k}))<1.0e-20);
+					sol_struct{j}(timestep).(fieldnames{k})(smallval)=0.0;
+					fprintf(fid,'SCALARS %s float 1 \n',fieldnames{k});
+					fprintf(fid,'LOOKUP_TABLE default\n');
+					s='%e\n';
+					fprintf(fid,s,sol_struct{j}(timestep).(fieldnames{k}));
+				end
+			end
+		end
+	end
+	%loop on arguments, if something other than result is asked, do
+	%it now
+	for j= 1:nargin-2
+		res_struct=model.(varargin{j});
+		fieldnames=fields(res_struct);
+		num_of_fields=length(fieldnames);
+		for k=1:num_of_fields
+			if ((numel(res_struct.(fieldnames{k})))==num_of_points);
+				%paraview does not like NaN, replacing
+				nanval=find(isnan(res_struct.(fieldnames{k})));
+				res_struct.(fieldnames{k})(nanval)=-9999;
+				%also checking for very small values that mess up
+				smallval=(abs(res_struct.(fieldnames{k}))<1.0e-20);
+				res_struct.(fieldnames{k})(smallval)=0.0;
+				fprintf(fid,'SCALARS %s float 1 \n',fieldnames{k});
+				fprintf(fid,'LOOKUP_TABLE default\n');
+				s='%e\n';
+				fprintf(fid,s,res_struct.(fieldnames{k}));
+				%check for forcings	
+			elseif (size(res_struct.(fieldnames{k}),1)==num_of_points+1);
+				%paraview does not like NaN, replacing
+				nanval=find(isnan(res_struct.(fieldnames{k})));
+				res_struct.(fieldnames{k})(nanval)=-9999;
+				%also checking for very small values that mess up
+				smallval=(abs(res_struct.(fieldnames{k}))<1.0e-20);
+				res_struct.(fieldnames{k})(smallval)=0.0;
+				if (size(res_struct.(fieldnames{k}),2)==num_of_timesteps),
+					fprintf(fid,'SCALARS %s float 1 \n',fieldnames{k});
+					fprintf(fid,'LOOKUP_TABLE default\n');
+					s='%e\n';
+					fprintf(fid,s,res_struct.(fieldnames{k})(1:end-1,timestep));
+				else,
+					%forcing and results are not on the same timesteps, some treatment is needed
+					fprintf(fid,'SCALARS %s float 1 \n',fieldnames{k});
+					fprintf(fid,'LOOKUP_TABLE default\n');
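+					%forcing times are stored on the last row of the field;
+					%bracket currenttime between two entries and interpolate linearly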
+					index=1;
+					currenttime=((timestep-1)*outstep)+model.timestepping.start_time;
+					while (res_struct.(fieldnames{k})(end,index)<=currenttime),
+						if index==size(res_struct.(fieldnames{k}),2)
+							break
+						end
+						index=index+1;
+					end
+					uptime=res_struct.(fieldnames{k})(end,index);
+					uplim=res_struct.(fieldnames{k})(1:end-1,index);
+					while (res_struct.(fieldnames{k})(end,index)>=currenttime),
+						if index==1
+							break
+						end
+						index=index-1;
+					end
+					lowtime=res_struct.(fieldnames{k})(end,index);
+					lowlim=res_struct.(fieldnames{k})(1:end-1,index);
+					if uptime==currenttime,
+						interp=uplim;
+					elseif lowtime==currenttime,
+						interp=lowlim;
+					else
+						interp=lowlim+(uplim-lowlim)*((currenttime-lowtime)/(uptime-lowtime));
+					end
+					s='%e\n';
+					fprintf(fid,s,interp);
+				end
+			end
+		end
+	end
+	fclose(fid);
+end
Index: /issm/trunk-jpl/src/m/paraview/exportVTK.py
===================================================================
--- /issm/trunk-jpl/src/m/paraview/exportVTK.py	(revision 27793)
+++ /issm/trunk-jpl/src/m/paraview/exportVTK.py	(revision 27793)
@@ -0,0 +1,537 @@
+import numpy as np
+from os import path, remove, mkdir
+from glob import glob
+
+
+def exportVTK(filename, md, *args, enveloppe=False, **kwargs):
+    '''
+    vtk export
+    function exportVTK(filename, md)
+    creates a directory with the vtk files for displays in paraview
+    (only works for triangles and wedges, identified by their number of nodes)
+
+    Usage:
+    exportVTK('DirName', md)
+    exportVTK('DirName', md, 'geometry', 'mesh')
+    exportVTK('DirName', md, 'geometry', 'mesh', enveloppe = True)
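+    exportVTK('DirName', md, clipping=[Xmin, Xmax, Ymin, Ymax])
+    exportVTK('DirName', md, singletime=-1)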
+
+    DirName is the name of the output directory; each timestep then has its
+    own file ('Timestep.vtkX.vtk') with X the number of the output step.
+    enveloppe is an option keeping only the envelope of the md (it is False by default)
+
+    Options:
+        - clipping : allows you to reduce your domain (clipping=[Xmin, Xmax, Ymin, Ymax])
+        - coarsetime : output one timestep every X (coarsetime=X, with X an integer)
+        - singletime : output only timestep X (singletime=X, with X an integer or -1 for last)
+
+    TODO: - make time easily accessible
+
+    Basile de Fleurian:
+    '''
+    #verbosity of the code, 0 is no messages, 5 is chatty
+    verbose = 0
+
+    print("""
+    =========================================
+    #     A                                 #
+    #    / \      exportVTK is now obsolete #
+    #   / | \     You should use exportVTU  #
+    #  /  |  \    faster, smaller files     #
+    # /   o   \   and more capabilities     #
+    # ---------                             #
+    =========================================
+    """)
+
+    for key in kwargs.keys():
+        if key not in ['clipping', 'coarsetime', 'singletime']:
+            raise BadOption('Provided option "{}" is not supported, possibilities are: {}'.format(key, ['clipping', 'coarsetime', 'singletime']))
+
+    if 'coarsetime' in kwargs.keys() and 'singletime' in kwargs.keys():
+        raise BadOption("You can't specify both 'coarsetime' and 'singletime'")
+
+    # File checking and creation {{{
+    Dir = path.basename(filename)
+    Path = filename[:-len(Dir)]
+    if path.exists(filename):
+        print('File {} already exists'.format(filename))
+        newname = input('Give a new name or "delete" to replace: ')
+        if newname == 'delete':
+            filelist = glob(filename + '/*')
+            for oldfile in filelist:
+                remove(oldfile)
+        else:
+            print('New file name is {}'.format(newname))
+            filename = newname
+            mkdir(filename)
+    else:
+        mkdir(filename)
+    # }}}
+
+    # this is the result structure {{{
+    if verbose > 3:
+        print('Getting accessory variables')
+    res_struct = md.results
+    moving_mesh = False
+    if(type(res_struct) != list):
+        #Getting all the solutions of the md
+        solnames = dict.keys(res_struct.__dict__)
+        num_of_sols = len(solnames)
+        num_of_timesteps = 1
+        #building solution structure
+        for solution in solnames:
+            #looking for multiple time steps
+            try:
+                if len(res_struct.__dict__[solution]) > num_of_timesteps:
+                    num_of_timesteps = len(res_struct.__dict__[solution])
+                    num_of_timesteps = int(num_of_timesteps)
+                    if 'Surface' in dict.keys(res_struct.__dict__[solution][0].__dict__):
+                        moving_mesh = True
+            except TypeError:
+                continue
+    else:
+        num_of_timesteps = 1
+    # }}}
+
+    # get the element related variables {{{
+    if verbose > 3:
+        print('Now treating the mesh')
+    #first get the general things
+    dim = int(md.mesh.domaintype()[0])
+    every_nodes = md.mesh.numberofvertices
+    every_cells = md.mesh.numberofelements
+    try:
+        every_edges = md.mesh.numberofedges
+    except AttributeError:
+        #3D meshes do not have edges
+        every_edges = 0
+
+    if np.shape(md.mesh.elements)[1] == 3 or enveloppe:
+        point_per_elt = 3
+        celltype = 5  #triangles
+    elif np.shape(md.mesh.elements)[1] == 6:
+        point_per_elt = 6
+        celltype = 13  #wedges
+    else:
+        raise BadDimension('exportVTK does not support your element type')
+
+    #only keep the envelope and not the bulk of the results.
+    if enveloppe:
+        if dim == 3:
+            mesh_alti = '1'
+            is_enveloppe = np.logical_or(md.mesh.vertexonbase, md.mesh.vertexonsurface)
+            enveloppe_index = np.where(is_enveloppe)[0]
+            convert_index = np.nan * np.ones(np.shape(md.mesh.x))
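+            #build a two-column lookup [full-mesh index, enveloppe index] for
+            #every vertex that belongs to the enveloppe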
+            convert_index = np.asarray([[i, np.where(enveloppe_index == i)[0][0]] for i, val in enumerate(convert_index) if any(enveloppe_index == i)])
+
+            num_of_points = np.size(enveloppe_index)
+            points = np.column_stack((md.mesh.x[enveloppe_index],
+                                      md.mesh.y[enveloppe_index],
+                                      md.mesh.z[enveloppe_index]))
+
+            num_of_elt = np.size(np.where(np.isnan(md.mesh.lowerelements))) + np.size(np.where(np.isnan(md.mesh.upperelements)))
+            connect = md.mesh.elements[np.where(is_enveloppe[md.mesh.elements - 1])].reshape(int(num_of_elt), 3) - 1
+            for elt in range(0, num_of_elt):
+                connect[elt, 0] = convert_index[np.where(convert_index == connect[elt, 0])[0], 1][0]
+                connect[elt, 1] = convert_index[np.where(convert_index == connect[elt, 1])[0], 1][0]
+                connect[elt, 2] = convert_index[np.where(convert_index == connect[elt, 2])[0], 1][0]
+
+            num_of_edges = every_edges  #looks like edges is only defined on the 2d mesh
+            if num_of_edges > 0:
+                edges = md.mesh.edges[:, 0:2].reshape(int(num_of_edges), 2) - 1
+
+        else:
+            raise BadDimension("exportVTK can't get an enveloppe for  dimension {}".format(dim))
+
+    else:
+        #we get all the mesh, mainly defining dummies
+        num_of_elt = every_cells
+        connect = md.mesh.elements - 1
+        num_of_edges = every_edges
+        if num_of_edges > 0:
+            edges = md.mesh.edges[:, 0:2].reshape(int(num_of_edges), 2) - 1
+        enveloppe_index = np.arange(0, np.size(md.mesh.x))
+        num_of_points = every_nodes
+        if dim == 2:
+            mesh_alti = input('''This is a 2D model, what should be the 3rd dimension of the mesh :
+                                        1 : md.geometry.surface
+                                        2 : md.geometry.base
+                                        3 : md.geometry.bed
+                                        4 : 0
+                                        5 : Custom\n''')
+            if mesh_alti == '1':
+                points = np.column_stack((md.mesh.x, md.mesh.y, md.geometry.surface))
+            elif mesh_alti == '2':
+                points = np.column_stack((md.mesh.x, md.mesh.y, md.geometry.base))
+            elif mesh_alti == '3':
+                points = np.column_stack((md.mesh.x, md.mesh.y, md.geometry.bed))
+            elif mesh_alti == '4':
+                points = np.column_stack((md.mesh.x, md.mesh.y, 0. * md.mesh.x))
+            elif mesh_alti == '5':
+                alti_field = input("Which field should be used as 3rd dimension: ")
+                alti_var = eval(alti_field)
+                if np.shape(np.squeeze(alti_var)) == np.shape(md.mesh.x):
+                    points = np.column_stack((md.mesh.x, md.mesh.y, np.squeeze(alti_var)))
+                else:
+                    raise BadDimension('field given for 3rd dimension should be defined on vertices {} is not.'.format(alti_field))
+            else:
+                points = np.column_stack((md.mesh.x, md.mesh.y, md.geometry.surface))
+        elif dim == 3:
+            mesh_alti = '1'
+            points = np.column_stack((md.mesh.x, md.mesh.y, md.mesh.z))
+        else:
+            raise BadDimension('exportVTK does not support dimension {}'.format(dim))
+
+    if 'clipping' in kwargs.keys():
+        if kwargs['clipping'] is not None:
+            # first get the boundaries and check them
+            [Xmin, Xmax, Ymin, Ymax] = kwargs['clipping']
+            if Xmin > Xmax:
+                raise ClipError('Xmax ({}) should be larger than Xmin ({})'.format(Xmax, Xmin))
+            if Ymin > Ymax:
+                raise ClipError('Ymax ({}) should be larger than Ymin ({})'.format(Ymax, Ymin))
+            if Xmin > np.nanmax(points[:, 0]) or Xmax < np.nanmin(points[:, 0]):
+                raise ClipError('Your X boundaries [{}, {}] are outside of the model domain [{},{}]'.format(Xmin, Xmax, np.nanmin(points[:, 0]), np.nanmax(points[:, 0])))
+            if Ymin > np.nanmax(points[:, 1]) or Ymax < np.nanmin(points[:, 1]):
+                raise ClipError('Your Y boundaries [{}, {}] are outside of the model domain [{},{}]'.format(Ymin, Ymax, np.nanmin(points[:, 1]), np.nanmax(points[:, 1])))
+
+            #boundaries should be fine lets do stuff
+            InX = np.where(np.logical_and(points[:, 0] >= Xmin, points[:, 0] <= Xmax))
+            InY = np.where(np.logical_and(points[:, 1] >= Ymin, points[:, 1] <= Ymax))
+
+            Isinside = np.zeros(np.shape(points)[0], dtype=bool)
+            clip_convert_index = np.nan * np.ones(np.shape(points)[0])
+
+            #define the vertices that are within clipping window
+            Inclipping = np.intersect1d(InX, InY)
+            Isinside[Inclipping] = True
+            points = points[Inclipping, :]
+            num_of_points = np.shape(points)[0]
+
+            #go through the elements and keep those whose nodes are all within the clipping window
+            clipconnect = np.asarray([], dtype=int)
+            for elt in connect:
+                if set(elt).issubset(Inclipping):
+                    clipconnect = np.append(clipconnect, elt, axis=0)
+
+            #reshape
+            num_of_elt = int(np.size(clipconnect) / 3)
+            connect = clipconnect.reshape(num_of_elt, 3)
+
+            clip_convert_index = np.asarray([[i, np.where(Inclipping == i)[0][0]] for i, val in enumerate(clip_convert_index) if any(Inclipping == i)])
+            enveloppe_index = enveloppe_index[clip_convert_index[:, 0]]
+
+            #convert indexing and exclude elements that are partly outside of the region
+            for elt in range(0, num_of_elt):
+                try:
+                    connect[elt, 0] = clip_convert_index[np.where(clip_convert_index == connect[elt, 0])[0], 1][0]
+                except IndexError:
+                    connect[elt, 0] = -1
+                try:
+                    connect[elt, 1] = clip_convert_index[np.where(clip_convert_index == connect[elt, 1])[0], 1][0]
+                except IndexError:
+                    connect[elt, 1] = -1
+                try:
+                    connect[elt, 2] = clip_convert_index[np.where(clip_convert_index == connect[elt, 2])[0], 1][0]
+                except IndexError:
+                    connect[elt, 2] = -1
+
+            connect = connect[np.where(connect != -1)[0], :]
+            num_of_elt = np.shape(connect)[0]
+
+            if num_of_edges > 0:
+                clipedges = np.asarray([], dtype=int)
+                for edge in edges:
+                    if set(edge).issubset(Inclipping):
+                        clipedges = np.append(clipedges, edge, axis=0)
+
+                num_of_edges = int(np.size(clipedges) / 2)
+                edges = clipedges.reshape(num_of_edges, 2)
+
+                for edge in range(0, num_of_edges):
+                    try:
+                        edges[edge, 0] = clip_convert_index[np.where(clip_convert_index == edges[edge, 0])[0], 1][0]
+                    except IndexError:
+                        edges[edge, 0] = -1
+                    try:
+                        edges[edge, 1] = clip_convert_index[np.where(clip_convert_index == edges[edge, 1])[0], 1][0]
+                    except IndexError:
+                        edges[edge, 1] = -1
+                edges = edges[np.where(edges != -1)[0], :]
+                num_of_edges = np.shape(edges)[0]
+
+    # }}}
+
+    # write header and mesh {{{
+    if verbose > 3:
+        print('Now starting to write stuff')
+
+    if 'coarsetime' in kwargs.keys():
+        steplist = range(0, num_of_timesteps, kwargs['coarsetime'])
+    elif 'singletime' in kwargs.keys():
+        steplist = [kwargs['singletime']]
+    else:
+        steplist = range(0, num_of_timesteps)
+
+    for step in steplist:
+        if verbose > 2:
+            print('Writing for step {}'.format(step))
+        saved_cells = {}
+        saved_edges = {}
+        timestep = step
+        with open((filename + '/Timestep.vtk' + str(timestep) + '.vtk'), 'w+') as fid:
+            fid.write('# vtk DataFile Version 3.0 \n')
+            fid.write('Data for run {} \n'.format(md.miscellaneous.name))
+            fid.write('ASCII \n')
+            fid.write('DATASET UNSTRUCTURED_GRID \n')
+            fid.write('POINTS {:d} float\n'.format(num_of_points))
+            #updating z for mesh evolution
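+            #vertices follow the base and are stretched vertically by the
+            #relative thickness change of the current step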
+            if moving_mesh and mesh_alti in ['1', '2']:
+                base = np.squeeze(res_struct.__dict__['TransientSolution'][step].__dict__['Base'][enveloppe_index])
+                thick_change_ratio = (np.squeeze(res_struct.__dict__['TransientSolution'][step].__dict__['Thickness'][enveloppe_index]) / md.geometry.thickness[enveloppe_index])
+                above_bed = points[:, 2] - md.geometry.base[enveloppe_index]
+                altitude = base + thick_change_ratio * above_bed
+            else:
+                altitude = points[:, 2]
+            for index, point in enumerate(points):
+                fid.write('{:f} {:f} {:f} \n'.format(point[0], point[1], altitude[index]))
+
+            fid.write('CELLS {:d} {:d}\n'.format((num_of_elt + num_of_edges), num_of_elt  * (point_per_elt + 1) + num_of_edges * 3))
+
+            for elt in range(0, num_of_elt):
+                if celltype == 5:
+                    fid.write('3 {:d} {:d} {:d}\n'.format(connect[elt, 0],
+                                                          connect[elt, 1],
+                                                          connect[elt, 2]))
+                elif celltype == 13:
+                    fid.write('6 {:d} {:d} {:d} {:d} {:d} {:d}\n'.format(connect[elt, 0],
+                                                                         connect[elt, 1],
+                                                                         connect[elt, 2],
+                                                                         connect[elt, 3],
+                                                                         connect[elt, 4],
+                                                                         connect[elt, 5]))
+            for edge in range(0, num_of_edges):
+                fid.write('2 {:d} {:d}\n'.format(edges[edge, 0],
+                                                 edges[edge, 1]))
+
+            fid.write('CELL_TYPES {:d}\n'.format(num_of_elt + num_of_edges))
+            for elt in range(0, num_of_elt):
+                fid.write('{:d}\n'.format(celltype))
+            for edge in range(0, num_of_edges):
+                fid.write('3\n')  #3 is the VTK cell type for lines
+
+            fid.write('POINT_DATA {:s} \n'.format(str(num_of_points)))
+            # }}}
+            # {{{loop over the different solution structures
+            # first check if there are solutions to grab
+            if 'solnames' in locals():
+                for sol in solnames:
+                    treated_res = []
+                    #dealing with results on different timesteps
+                    try:
+                        if(len(res_struct.__dict__[sol]) > timestep):
+                            timestep = step
+                        else:
+                            timestep = np.size(res_struct.__dict__[sol])
+                    except TypeError:
+                        #result has no len() so no timesteps
+                        timestep = 1
+
+                    #getting the  fields in the solution
+                    if(type(res_struct.__dict__[sol]).__name__ == 'solution'):
+                        spe_res_struct = res_struct.__dict__[sol].__getitem__(timestep)
+                        fieldnames = list(dict.keys(spe_res_struct.__dict__))
+                    elif(type(res_struct.__dict__[sol]).__name__ == 'solutionstep'):
+                        spe_res_struct = res_struct.__dict__[sol]
+                        fieldnames = list(dict.keys(spe_res_struct.__dict__))
+                    elif(type(res_struct.__dict__[sol]).__name__ == 'results'):  #this is a result without steps
+                        spe_res_struct = res_struct.__dict__[sol]
+                        fieldnames = list(dict.keys(spe_res_struct.__dict__))
+                    else:
+                        print("WARNING, solution type '{}' is not recognise, exported results might be wrong".format(type(res_struct.__dict__[sol])))
+                        spe_res_struct = res_struct.__dict__[sol]
+                        fieldnames = list(dict.keys(spe_res_struct.__dict__))
+
+                    #Sorting scalars, vectors and tensors
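+                    #suffix heuristic: xx/yy/xy/zz/xz/yz endings mark tensor components,
+                    #a trailing x/y/z marks a vector component (except *Flux fields)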
+                    tensors = [field for field in fieldnames if field[-2:] in ['xx', 'yy', 'xy', 'zz', 'xz', 'yz']]
+                    non_tensor = [field for field in fieldnames if field not in tensors]
+                    vectors = [field for field in non_tensor if field[-1] in ['x', 'y', 'z'] and field[-4:] not in ['Flux']]
+                    #check which field is a real result and print
+                    for field in fieldnames:
+                        if verbose > 2:
+                            print("Treating {}".format(field))
+                        if field in treated_res:
+                            if verbose > 2:
+                                print("{} is already done".format(field))
+                            continue
+                        elif field in vectors:
+                            if verbose > 2:
+                                print("{} is a vector".format(field))
+                            try:
+                                Vxstruct = np.squeeze(spe_res_struct.__dict__[field[:-1] + 'x'])
+                                Vystruct = np.squeeze(spe_res_struct.__dict__[field[:-1] + 'y'])
+                                treated_res += [field[:-1] + 'x', field[:-1] + 'y']
+                                if dim == 3 and field[:-1] + 'z' in fieldnames:
+                                    #some fields like the adjoint are always 2D
+                                    Vzstruct = np.squeeze(spe_res_struct.__dict__[field[:-1] + 'z'])
+                                    treated_res += [field[:-1] + 'z']
+
+                            except KeyError:
+                                #a component is missing, fall back to treating this field as a scalar
+                                fieldnames += [field]
+                                vectors.remove(field)
+                                continue
+
+                            fid.write('VECTORS {} float \n'.format(field[:-1]))
+                            for node in range(0, num_of_points):
+                                Vx = cleanOutliers(Vxstruct[enveloppe_index[node]])
+                                Vy = cleanOutliers(Vystruct[enveloppe_index[node]])
+                                if dim == 3 and field[:-1] + 'z' in fieldnames:
+                                    Vz = cleanOutliers(Vzstruct[enveloppe_index[node]])
+                                    fid.write('{:f} {:f} {:f}\n'.format(Vx, Vy, Vz))
+                                else:
+                                    fid.write('{:f} {:f} {:f}\n'.format(Vx, Vy, 0))
+
+                        elif field in tensors:
+                            if verbose > 2:
+                                print("{} is a tensor".format(field))
+                            try:
+                                Txxstruct = np.squeeze(spe_res_struct.__dict__[field[:-2] + 'xx'])
+                                Txystruct = np.squeeze(spe_res_struct.__dict__[field[:-2] + 'xy'])
+                                Tyystruct = np.squeeze(spe_res_struct.__dict__[field[:-2] + 'yy'])
+                                treated_res += [field[:-2] + 'xx', field[:-2] + 'xy', field[:-2] + 'yy']
+                                if dim == 3:
+                                    Tzzstruct = np.squeeze(spe_res_struct.__dict__[field[:-2] + 'zz'])
+                                    Txzstruct = np.squeeze(spe_res_struct.__dict__[field[:-2] + 'xz'])
+                                    Tyzstruct = np.squeeze(spe_res_struct.__dict__[field[:-2] + 'yz'])
+                                    treated_res += [field[:-2] + 'zz', field[:-2] + 'xz', field[:-2] + 'yz']
+
+                            except KeyError:
+                                #a component is missing, fall back to treating this field as a scalar
+                                fieldnames += [field]
+                                tensors.remove(field)
+                                continue
+
+                            fid.write('TENSORS {} float \n'.format(field[:-2]))
+                            for node in range(0, num_of_points):
+                                Txx = cleanOutliers(Txxstruct[enveloppe_index[node]])
+                                Tyy = cleanOutliers(Tyystruct[enveloppe_index[node]])
+                                Txy = cleanOutliers(Txystruct[enveloppe_index[node]])
+                                if dim == 3:
+                                    Tzz = cleanOutliers(Tzzstruct[enveloppe_index[node]])
+                                    Txz = cleanOutliers(Txzstruct[enveloppe_index[node]])
+                                    Tyz = cleanOutliers(Tyzstruct[enveloppe_index[node]])
+                                    fid.write('{:f} {:f} {:f}\n'.format(Txx, Txy, Txz))
+                                    fid.write('{:f} {:f} {:f}\n'.format(Txy, Tyy, Tyz))
+                                    fid.write('{:f} {:f} {:f}\n'.format(Txz, Tyz, Tzz))
+                                elif dim == 2:
+                                    fid.write('{:f} {:f} {:f}\n'.format(Txx, Txy, 0))
+                                    fid.write('{:f} {:f} {:f}\n'.format(Txy, Tyy, 0))
+                                    fid.write('{:f} {:f} {:f}\n'.format(0, 0, 0))
+                        else:
+                            if np.size(spe_res_struct.__dict__[field]) == 1:
+                                if field == 'time':
+                                    current_time = spe_res_struct.__dict__[field]
+                                #skipping scalars such as integers
+                                continue
+                            elif np.size(spe_res_struct.__dict__[field]) == every_nodes:
+                                fid.write('SCALARS {} float 1 \n'.format(field))
+                                fid.write('LOOKUP_TABLE default\n')
+                                for node in range(0, num_of_points):
+                                    outval = cleanOutliers(np.squeeze(spe_res_struct.__dict__[field][enveloppe_index[node]]))
+                                    fid.write('{:f}\n'.format(outval))
+                            elif np.shape(spe_res_struct.__dict__[field])[0] == np.size(spe_res_struct.__dict__[field]) == every_cells:
+                                saved_cells[field] = np.squeeze(spe_res_struct.__dict__[field])
+                            elif np.shape(spe_res_struct.__dict__[field])[0] == np.size(spe_res_struct.__dict__[field]) == every_edges:
+                                saved_edges[field] = np.squeeze(spe_res_struct.__dict__[field])
+                            else:
+                                print("format for field {}.{} is not suported, field is skipped".format(sol, field))
+            # }}}
+            # loop on arguments, if something other than result is asked, do it now {{{
+            for other in args:
+                other_struct = md.__dict__[other]
+                othernames = (dict.keys(other_struct.__dict__))
+                for field in othernames:
+                    if np.size(other_struct.__dict__[field]) == 1:
+                        #skipping integers
+                        continue
+                    elif np.size(other_struct.__dict__[field]) == every_nodes:
+                        fid.write('SCALARS {} float 1 \n'.format(field))
+                        fid.write('LOOKUP_TABLE default\n')
+                        for node in range(0, num_of_points):
+                            outval = cleanOutliers(other_struct.__dict__[field][enveloppe_index[node]])
+                            fid.write('{:f}\n'.format(outval))
+                    elif np.shape(other_struct.__dict__[field])[0] == every_nodes + 1:
+                        #we are dealing with a forcing of some kind.
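+                        #the last row holds the forcing times; pick the matching
+                        #entry or interpolate linearly between the two bracketing ones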
+                        forcing_time = other_struct.__dict__[field][-1, :]
+                        if any(forcing_time == current_time):
+                            forcing_index = np.where(forcing_time == current_time)
+                            forcing_val = other_struct.__dict__[field][:, forcing_index]
+                        elif forcing_time[0] > current_time:
+                            forcing_val = other_struct.__dict__[field][:, 0]
+                        elif forcing_time[-1] < current_time:
+                            forcing_val = other_struct.__dict__[field][:, -1]
+                        else:
+                            forcing_index = np.where(forcing_time < current_time)[-1][-1]
+                            delta_time = forcing_time[forcing_index + 1] - forcing_time[forcing_index]  #compute forcing Dt
+                            delta_current = current_time - forcing_time[forcing_index]  # time since last forcing
+                            ratio = delta_current / delta_time  #compute weighting factor for preceding forcing value
+                            forcing_evol = (other_struct.__dict__[field][:, forcing_index + 1] - other_struct.__dict__[field][:, forcing_index]) * ratio
+                            forcing_val = other_struct.__dict__[field][:, forcing_index] + forcing_evol
+                        # and now write it down
+                        fid.write('SCALARS {}_{} float 1 \n'.format(other, field))
+                        fid.write('LOOKUP_TABLE default\n')
+                        for node in range(0, num_of_points):
+                            outval = cleanOutliers(forcing_val[enveloppe_index[node]])
+                            fid.write('{:f}\n'.format(outval))
+                    elif np.shape(other_struct.__dict__[field])[0] == np.size(other_struct.__dict__[field]) == every_cells:
+                        saved_cells[field] = other_struct.__dict__[field]
+                    elif np.shape(other_struct.__dict__[field])[0] == np.size(other_struct.__dict__[field]) == every_edges:
+                        saved_edges[field] = other_struct.__dict__[field]
+                    else:
+                        print("format for field {}.{} is not suported, field is skipped".format(other, field))
+                        continue
+            # }}}
+            # Now writing cell variables {{{
+            if np.size(list(saved_cells.keys())) > 0:
+                fid.write('CELL_DATA {:d} \n'.format(num_of_elt + num_of_edges))
+                for key in list(saved_cells.keys()):
+                    fid.write('SCALARS {} float 1 \n'.format(key))
+                    fid.write('LOOKUP_TABLE default\n')
+                    for cell in range(0, num_of_elt):
+                        outval = cleanOutliers(saved_cells[key][cell])
+                        fid.write('{:f}\n'.format(outval))
+                    for edge in range(0, num_of_edges):
+                        fid.write('{:f}\n'.format(-9999.999))
+            # }}}
+            # Now writing edge variables {{{
+            if np.size(list(saved_edges.keys())) > 0:
+                for key in list(saved_edges.keys()):
+                    fid.write('SCALARS {} float 1 \n'.format(key))
+                    fid.write('LOOKUP_TABLE default\n')
+                    for cell in range(0, num_of_elt):
+                        fid.write('{:f}\n'.format(-9999.999))
+                    for edge in range(0, num_of_edges):
+                        outval = cleanOutliers(saved_edges[key][edge])
+                        fid.write('{:f}\n'.format(outval))
+    # }}}
+
+
+def cleanOutliers(Val):
+    #paraview does not like NaN, replacing
+    if np.isnan(Val):
+        CleanVal = -9999.999
+    #also checking for very small values that mess up
+    elif (abs(Val) < 1.0e-20):
+        CleanVal = 0.0
+    else:
+        CleanVal = Val
+    return CleanVal
+
+
+class BadDimension(Exception):
+    """The required dimension is not supported yet."""
+
+
+class BadOption(Exception):
+    """The given option does not exist."""
+
+
+class ClipError(Exception):
+    """Error while trying to clip the domain."""
Index: /issm/trunk-jpl/src/m/paraview/exportVTU.py
===================================================================
--- /issm/trunk-jpl/src/m/paraview/exportVTU.py	(revision 27793)
+++ /issm/trunk-jpl/src/m/paraview/exportVTU.py	(revision 27793)
@@ -0,0 +1,723 @@
+import numpy as np
+from base64 import b64encode
+from os import path, remove, mkdir
+from glob import glob
+
+
+def exportVTU(filename, md, *args, enveloppe=False, fmtout="binary", **kwargs):
+    '''
+    vtu export
+    function exportVTU(filename, md)
+    Exports results in the XML-based vtu format for visualisation in Paraview.
+    It is based on the treatment from exportVTK; only the output part is modified.
+    (only works for triangles and wedges, identified by their number of nodes)
+
+    Usage:
+    exportVTU('FileName', md)
+    exportVTU('FileName', md, 'geometry', 'mesh')
+    exportVTU('FileName', md, 'geometry', 'mesh', enveloppe = True)
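+    exportVTU('FileName', md, singletime=-1, fmtout="ascii")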
+
+    FileName is the name of the output directory; each timestep then has its
+    own file ('FileName_X.vtu') with X the number of the output step.
+    enveloppe is an option keeping only the envelope of the md (it is False by default)
+
+    Options:
+        - clipping : allows you to reduce your domain (clipping=[Xmin, Xmax, Ymin, Ymax])
+        - coarsetime : output one timestep every X (coarsetime=X, with X an integer)
+        - singletime : output only timestep X (singletime=X, with X an integer or -1 for last)
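+        - fmtout : format of the written DataArrays, "binary" (the default,
+          base64-encoded); any other value (e.g. "ascii") falls back to plain-text output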
+
+    TODO: - make time easily accessible
+
+    Basile de Fleurian:
+    '''
+    #verbosity of the code, 0 is no messages, 5 is chatty
+    verbose = 0
+
+    #first check if the user asked for some options to be applied
+    for key in kwargs.keys():
+        if key not in ['clipping', 'coarsetime', 'singletime']:
+            raise BadOption('Provided option "{}" is not supported, possibilities are: {}'.format(key, ['clipping', 'coarsetime', 'singletime']))
+
+    if 'coarsetime' in kwargs.keys() and 'singletime' in kwargs.keys():
+        raise BadOption("You can't specify both 'coarsetime' and 'singletime'")
+
+    # File checking and creation {{{
+    Dir = path.basename(filename)
+    if path.exists(filename):
+        print('File {} already exists'.format(filename))
+        newname = input('Give a new name or "delete" to replace: ')
+        if newname == 'delete':
+            filelist = glob(filename + '/*')
+            for oldfile in filelist:
+                remove(oldfile)
+        else:
+            print('New file name is {}'.format(newname))
+            filename = newname
+            mkdir(filename)
+    else:
+        mkdir(filename)
+
+    # }}}
+
+    # make an alias for results {{{
+    if verbose > 3:
+        print('Getting accessory variables')
+    res_struct = md.results
+    moving_mesh = False
+    if(type(res_struct) != list):
+        #Getting all the solutions of the md
+        solnames = dict.keys(res_struct.__dict__)
+        num_of_timesteps = 1
+        #%building solutionstructure
+        for solution in solnames:
+            #looking for multiple time steps
+            try:
+                if len(res_struct.__dict__[solution]) > num_of_timesteps:
+                    num_of_timesteps = len(res_struct.__dict__[solution])
+                    num_of_timesteps = int(num_of_timesteps)
+                    #If Surface is in the results we consider that we have a moving mesh
+                    if 'Surface' in dict.keys(res_struct.__dict__[solution][0].__dict__):
+                        moving_mesh = True
+            except TypeError:
+                continue
+    else:
+        num_of_timesteps = 1
+    # }}}
+
+    # get the mesh related variables {{{
+    if verbose > 3:
+        print('Now treating the mesh')
+    #first get the general things
+    dim = int(md.mesh.domaintype()[0])
+    every_nodes = md.mesh.numberofvertices
+    every_cells = md.mesh.numberofelements
+    try:
+        every_edges = md.mesh.numberofedges
+    except AttributeError:
+        #3D meshes do not have edges
+        every_edges = 0
+
+    if np.shape(md.mesh.elements)[1] == 3 or enveloppe:
+        point_per_elt = 3
+        celltype = 5  #triangles
+    elif np.shape(md.mesh.elements)[1] == 6:
+        point_per_elt = 6
+        celltype = 13  #wedges
+    else:
+        raise BadDimension('exportVTU does not support your element type')
+
+    #only keep the envelope and not the bulk of the results.
+    if enveloppe:  #Treating enveloppe{{{
+        if dim == 3:
+            mesh_alti = '0'
+            is_enveloppe = np.logical_or(md.mesh.vertexonbase, md.mesh.vertexonsurface)
+            enveloppe_index = np.where(is_enveloppe)[0]
+            convert_index = np.nan * np.ones(np.shape(md.mesh.x))
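+            #build a two-column lookup [full-mesh index, enveloppe index] for
+            #every vertex that belongs to the enveloppe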
+            convert_index = np.asarray([[i, np.where(enveloppe_index == i)[0][0]] for i, val in enumerate(convert_index) if any(enveloppe_index == i)])
+
+            num_of_points = np.size(enveloppe_index)
+            points = np.column_stack((md.mesh.x[enveloppe_index],
+                                      md.mesh.y[enveloppe_index],
+                                      md.mesh.z[enveloppe_index]))
+
+            num_of_elt = np.size(np.where(np.isnan(md.mesh.lowerelements))) + np.size(np.where(np.isnan(md.mesh.upperelements)))
+            connect = md.mesh.elements[np.where(is_enveloppe[md.mesh.elements - 1])].reshape(int(num_of_elt), 3) - 1
+            for elt in range(0, num_of_elt):
+                connect[elt, 0] = convert_index[np.where(convert_index == connect[elt, 0])[0], 1][0]
+                connect[elt, 1] = convert_index[np.where(convert_index == connect[elt, 1])[0], 1][0]
+                connect[elt, 2] = convert_index[np.where(convert_index == connect[elt, 2])[0], 1][0]
+
+            num_of_edges = every_edges  #looks like edges is only defined on the 2d mesh
+            if num_of_edges > 0:
+                edges = md.mesh.edges[:, 0:2].reshape(int(num_of_edges), 2) - 1
+
+        else:
+            raise BadDimension("exportVTU can't get an enveloppe for  dimension {}".format(dim))
+    # }}}
+
+    else:  #treating mesh{{{
+        #we get all the mesh, mainly defining dummies
+        num_of_elt = every_cells
+        connect = md.mesh.elements - 1
+        num_of_edges = every_edges
+        if num_of_edges > 0:
+            edges = md.mesh.edges[:, 0:2].reshape(int(num_of_edges), 2) - 1
+        enveloppe_index = np.arange(0, np.size(md.mesh.x))
+        num_of_points = every_nodes
+        if dim == 2:
+            mesh_alti = input('''This is a 2D model, what should be the 3rd dimension of the mesh :
+                                        1 : md.geometry.surface
+                                        2 : md.geometry.base
+                                        3 : md.geometry.bed
+                                        4 : 0
+                                        5 : Custom\n''')
+            if mesh_alti == '1':
+                points = np.column_stack((md.mesh.x, md.mesh.y, md.geometry.surface))
+            elif mesh_alti == '2':
+                points = np.column_stack((md.mesh.x, md.mesh.y, md.geometry.base))
+            elif mesh_alti == '3':
+                points = np.column_stack((md.mesh.x, md.mesh.y, md.geometry.bed))
+            elif mesh_alti == '4':
+                points = np.column_stack((md.mesh.x, md.mesh.y, 0. * md.mesh.x))
+            elif mesh_alti == '5':
+                alti_field = input("Which field should be used as 3rd dimension: ")
+                alti_var = eval(alti_field)
+                if np.shape(np.squeeze(alti_var)) == np.shape(md.mesh.x):
+                    points = np.column_stack((md.mesh.x, md.mesh.y, np.squeeze(alti_var)))
+                else:
+                    raise BadDimension('field given for 3rd dimension should be defined on vertices {} is not.'.format(alti_field))
+            else:
+                points = np.column_stack((md.mesh.x, md.mesh.y, md.geometry.surface))
+        elif dim == 3:
+            mesh_alti = '0'
+            points = np.column_stack((md.mesh.x, md.mesh.y, md.mesh.z))
+        else:
+            raise BadDimension('exportVTU does not support dimension {}'.format(dim))
+    # }}}
+
+    if 'clipping' in kwargs.keys():
+        if kwargs['clipping'] is not None:
+            # first get the boundaries and check them
+            [Xmin, Xmax, Ymin, Ymax] = kwargs['clipping']
+            if Xmin > Xmax:
+                raise ClipError('Xmax ({}) should be larger than Xmin ({})'.format(Xmax, Xmin))
+            if Ymin > Ymax:
+                raise ClipError('Ymax ({}) should be larger than Ymin ({})'.format(Ymax, Ymin))
+            if Xmin > np.nanmax(points[:, 0]) or Xmax < np.nanmin(points[:, 0]):
+                raise ClipError('Your X boundaries [{}, {}] are outside of the model domain [{},{}]'.format(Xmin, Xmax, np.nanmin(points[:, 0]), np.nanmax(points[:, 0])))
+            if Ymin > np.nanmax(points[:, 1]) or Ymax < np.nanmin(points[:, 1]):
+                raise ClipError('Your Y boundaries [{}, {}] are outside of the model domain [{},{}]'.format(Ymin, Ymax, np.nanmin(points[:, 1]), np.nanmax(points[:, 1])))
+
+            #boundaries should be fine lets do stuff
+            InX = np.where(np.logical_and(points[:, 0] >= Xmin, points[:, 0] <= Xmax))
+            InY = np.where(np.logical_and(points[:, 1] >= Ymin, points[:, 1] <= Ymax))
+
+            Isinside = np.zeros(np.shape(points)[0], dtype=bool)
+            clip_convert_index = np.nan * np.ones(np.shape(points)[0])
+
+            #define the vertices that are within clipping window
+            Inclipping = np.intersect1d(InX, InY)
+            Isinside[Inclipping] = True
+            points = points[Inclipping, :]
+            num_of_points = np.shape(points)[0]
+
+            #go through the elements and keep those whose nodes are all within the clipping window
+            clipconnect = np.asarray([], dtype=int)
+            for elt in connect:
+                if set(elt).issubset(Inclipping):
+                    clipconnect = np.append(clipconnect, elt, axis=0)
+
+            #reshape
+            num_of_elt = int(np.size(clipconnect) / 3)
+            connect = clipconnect.reshape(num_of_elt, 3)
+
+            clip_convert_index = np.asarray([[i, np.where(Inclipping == i)[0][0]] for i, val in enumerate(clip_convert_index) if any(Inclipping == i)])
+            enveloppe_index = enveloppe_index[clip_convert_index[:, 0]]
+
+            #convert indexing and exclude elements that are partly outside of the region
+            for elt in range(0, num_of_elt):
+                try:
+                    connect[elt, 0] = clip_convert_index[np.where(clip_convert_index == connect[elt, 0])[0], 1][0]
+                except IndexError:
+                    connect[elt, 0] = -1
+                try:
+                    connect[elt, 1] = clip_convert_index[np.where(clip_convert_index == connect[elt, 1])[0], 1][0]
+                except IndexError:
+                    connect[elt, 1] = -1
+                try:
+                    connect[elt, 2] = clip_convert_index[np.where(clip_convert_index == connect[elt, 2])[0], 1][0]
+                except IndexError:
+                    connect[elt, 2] = -1
+
+            connect = connect[np.where(connect != -1)[0], :]
+            num_of_elt = np.shape(connect)[0]
+
+            if num_of_edges > 0:
+                clipedges = np.asarray([], dtype=int)
+                for edge in edges:
+                    if set(edge).issubset(Inclipping):
+                        clipedges = np.append(clipedges, edge, axis=0)
+
+                num_of_edges = int(np.size(clipedges) / 2)
+                edges = clipedges.reshape(num_of_edges, 2)
+
+                for edge in range(0, num_of_edges):
+                    try:
+                        edges[edge, 0] = clip_convert_index[np.where(clip_convert_index == edges[edge, 0])[0], 1][0]
+                    except IndexError:
+                        edges[edge, 0] = -1
+                    try:
+                        edges[edge, 1] = clip_convert_index[np.where(clip_convert_index == edges[edge, 1])[0], 1][0]
+                    except IndexError:
+                        edges[edge, 1] = -1
+                edges = edges[np.where(edges != -1)[0], :]
+                num_of_edges = np.shape(edges)[0]
+
+    # }}}
+
+    # write header and mesh {{{
+    if verbose > 3:
+        print('Now starting to write stuff')
+
+    if 'coarsetime' in kwargs.keys():
+        steplist = range(0, num_of_timesteps, kwargs['coarsetime'])
+    elif 'singletime' in kwargs.keys():
+        steplist = [kwargs['singletime']]
+    else:
+        steplist = range(0, num_of_timesteps)
+
+    for step in steplist:
+        if verbose > 2:
+            print('Writing for step {}'.format(step))
+
+        with open(('{}/{}_{}.vtu').format(filename, Dir, step), 'w+') as fid:
+            fid.write('<?xml version="1.0"?>\n')
+            fid.write('<VTKFile type="UnstructuredGrid" version="1.0" byte_order="LittleEndian">\n')
+            fid.write('  <UnstructuredGrid>\n')
+            fid.write('    <Piece NumberOfPoints="{}"  NumberOfCells="{}">\n'.format(num_of_points, num_of_elt + num_of_edges))
+            tensors = []
+            vectors = []
+            scalars = []
+            for sol in solnames:
+                #getting the  fields in the solution
+                if type(res_struct.__dict__[sol]).__name__ == 'solution':
+                    spe_res_struct = res_struct.__dict__[sol].__getitem__(0)
+                    fieldnames = list(dict.keys(spe_res_struct.__dict__))
+                elif type(res_struct.__dict__[sol]).__name__ in ['solutionstep', 'results']:
+                    spe_res_struct = res_struct.__dict__[sol]
+                    fieldnames = list(dict.keys(spe_res_struct.__dict__))
+                else:
+                    print("WARNING, solution type '{}' is not recognise, exported results might be wrong".format(type(res_struct.__dict__[sol])))
+                    spe_res_struct = res_struct.__dict__[sol]
+                    fieldnames = list(dict.keys(spe_res_struct.__dict__))
+
+                loctensors, locvectors, locscalars = SortFields(fieldnames)
+                tensors.extend(loctensors)
+                vectors.extend(locvectors)
+                scalars.extend(locscalars)
+            for other in args:
+                other_struct = md.__dict__[other]
+                othernames = list(dict.keys(other_struct.__dict__))
+
+                loctensors, locvectors, locscalars = SortFields(othernames)
+                tensors.extend(loctensors)
+                vectors.extend(locvectors)
+                scalars.extend(locscalars)
+
+            fid.write('      <PointData Scalars="{}"'.format(scalars))
+            if len(vectors) > 0:
+                fid.write(' Vectors="{}"'.format(vectors[:-1]))
+            if len(tensors) > 0:
+                fid.write(' Tensors="{}"'.format(tensors[:-2]))
+            fid.write('>\n')
+
+            saved_cells = {}
+            saved_edges = {}
+            saved_const = {}
+            timestep = step
+
+            # }}}
+            # {{{loop over the different solution structures
+            # first check if there are solutions to grab
+            for sol in solnames:
+                treated_res = []
+                #dealing with results on different timesteps
+                try:
+                    if(len(res_struct.__dict__[sol]) > timestep):
+                        timestep = step
+                    else:
+                        timestep = np.size(res_struct.__dict__[sol])
+                except TypeError:
+                    #result has no len() so no timesteps
+                    timestep = 1
+
+                #getting the  fields in the solution
+                if(type(res_struct.__dict__[sol]).__name__ == 'solution'):
+                    spe_res_struct = res_struct.__dict__[sol].__getitem__(timestep)
+                    fieldnames = list(dict.keys(spe_res_struct.__dict__))
+                elif(type(res_struct.__dict__[sol]).__name__ == 'solutionstep'):
+                    spe_res_struct = res_struct.__dict__[sol]
+                    fieldnames = list(dict.keys(spe_res_struct.__dict__))
+                elif(type(res_struct.__dict__[sol]).__name__ == 'results'):  #this is a result without steps
+                    spe_res_struct = res_struct.__dict__[sol]
+                    fieldnames = list(dict.keys(spe_res_struct.__dict__))
+                else:
+                    print("WARNING, solution type '{}' is not recognise, exported results might be wrong".format(type(res_struct.__dict__[sol])))
+                    spe_res_struct = res_struct.__dict__[sol]
+                    fieldnames = list(dict.keys(spe_res_struct.__dict__))
+
+                tensors, vectors, ScalarNames = SortFields(fieldnames)
+
+                #check which field is a real result and print
+                for field in fieldnames:
+                    if field in treated_res:
+                        if verbose > 2:
+                            print("{}.{} is already done".format(sol, field))
+                        continue
+
+                    elif field in vectors:
+                        if verbose > 2:
+                            print("Treating {}.{} as a vector ".format(sol, field))
+                        TreatVector(fid, fmtout, spe_res_struct, sol, field, treated_res, enveloppe_index)
+
+                    elif field in tensors:
+                        if verbose > 2:
+                            print("Treating {}.{} as a tensor ".format(sol, field))
+                        TreatTensor(fid, fmtout, spe_res_struct, sol, field, treated_res, enveloppe_index)
+
+                    else:
+                        if np.size(spe_res_struct.__dict__[field]) == 1:
+                            if verbose > 2:
+                                print("Treating {}.{} as a constant ".format(sol, field))
+                            if field == 'time':
+                                current_time = spe_res_struct.__dict__[field]
+                            saved_const[".".join((sol, field))] = np.squeeze(spe_res_struct.__dict__[field])
+
+                        elif np.size(spe_res_struct.__dict__[field]) == every_nodes:
+                            if verbose > 2:
+                                print("Treating {}.{} as a node variable ".format(sol, field))
+                            TreatScalar(fid, fmtout, spe_res_struct, sol, field, enveloppe_index)
+
+                        elif np.shape(spe_res_struct.__dict__[field])[0] == np.size(spe_res_struct.__dict__[field]) == every_cells:
+                            saved_cells[".".join((sol, field))] = np.squeeze(spe_res_struct.__dict__[field])
+
+                        elif np.shape(spe_res_struct.__dict__[field])[0] == np.size(spe_res_struct.__dict__[field]) == every_edges and num_of_edges > 0:
+                            saved_edges[".".join((sol, field))] = np.squeeze(spe_res_struct.__dict__[field])
+
+                        else:
+                            print("format for field {}.{} is not suported, field is skipped".format(sol, field))
+            # }}}
+            # loop on arguments, if something other than result is asked, do it now {{{
+            for other in args:
+                treated_res = []
+                if verbose > 3:
+                    print("Now treating {}".format(other))
+                other_struct = md.__dict__[other]
+                othernames = list(dict.keys(other_struct.__dict__))
+                tensors, vectors, ScalarNames = SortFields(othernames)
+                for field in othernames:
+                    if field in treated_res:
+                        if verbose > 2:
+                            print("{}.{} is already done".format(other, field))
+                        continue
+                    elif field in vectors:
+                        TreatVector(fid, fmtout, other_struct, other, field, treated_res, enveloppe_index)
+
+                    elif field in tensors:
+                        if verbose > 2:
+                            print("Treating {}.{} as a tensor ".format(sol, field))
+                        TreatTensor(fid, fmtout, other_struct, other, field, treated_res, enveloppe_index)
+
+                    #now treating fields that are neither vectors nor tensors
+                    else:
+                        if np.size(other_struct.__dict__[field]) == 1:
+                            if verbose > 2:
+                                print("Treating {}.{} as an constant ".format(other, field))
+                            if field == 'time':
+                                current_time = other_struct.__dict__[field]
+                            saved_const[".".join((other, field))] = np.squeeze(other_struct.__dict__[field])
+
+                        elif np.size(other_struct.__dict__[field]) == every_nodes:
+                            if verbose > 2:
+                                print("Treating {}.{} as a node variable ".format(other, field))
+                            TreatScalar(fid, fmtout, other_struct, other, field, enveloppe_index)
+
+                        elif np.shape(other_struct.__dict__[field])[0] == every_nodes + 1:
+                            if verbose > 3:
+                                print("Treating {}.{} as a node forcing variable".format(other, field))
+                            TreatForcing(fid, fmtout, other_struct, other, field, treated_res, enveloppe_index, current_time)
+
+                        elif np.shape(other_struct.__dict__[field])[0] == np.size(other_struct.__dict__[field]) == every_cells:
+                            if verbose > 3:
+                                print("Treating {}.{} as a cell variable".format(other, field))
+                            saved_cells[".".join((other, field))] = np.squeeze(other_struct.__dict__[field])
+
+                        elif np.shape(other_struct.__dict__[field])[0] == np.size(other_struct.__dict__[field]) == every_edges and num_of_edges > 0:
+                            if verbose > 3:
+                                print("Treating {}.{} as an edge variable".format(other, field))
+                            saved_edges[".".join((other, field))] = np.squeeze(other_struct.__dict__[field])
+
+                        else:
+                            print("format for field {}.{} is not suported, field is skipped".format(other, field))
+            fid.write('      </PointData>\n')
+            # }}}
+            # Now writing cell variables {{{
+            if np.size(list(saved_cells.keys())) > 0 or np.size(list(saved_edges.keys())) > 0:
+                cellkeys = list(saved_cells.keys())
+                edgekeys = list(saved_edges.keys())
+                savekeys = cellkeys + edgekeys
+                if verbose > 3:
+                    print("Saving cell for {}".format(savekeys))
+                fid.write('      <CellData Scalars="{}">\n'.format(savekeys))
+
+            if np.size(list(saved_cells.keys())) > 0:
+                for key in cellkeys:
+                    outval = saved_cells[key]
+                    if num_of_edges > 0:
+                        if fmtout == "binary":
+                            outval = np.append(outval, np.nan * np.ones((num_of_edges)))
+                        else:
+                            outval = np.append(outval, -9999.999 * np.ones((num_of_edges)))
+                    if verbose > 3:
+                        print("writing {} values of type {} for {}".format(len(outval), outval.dtype, key))
+
+                    fid.write('        <DataArray type="Float32" Name="{}" format="{}">\n'.format(key, fmtout))
+                    WriteIt(outval, fid, fmtout)
+                    fid.write('        </DataArray>\n')
+
+            # }}}
+            # Now writing edge variables {{{
+            if np.size(list(saved_edges.keys())) > 0:
+                for key in list(saved_edges.keys()):
+                    if fmtout == "binary":
+                        outval = np.nan * np.ones((num_of_elt))
+                    else:
+                        outval = -9999.999 * np.ones((num_of_elt))
+                    outval = np.append(outval, saved_edges[key])
+                    fid.write('        <DataArray type="Float32" Name="{}" format="{}">\n'.format(key, fmtout))
+                    WriteIt(outval, fid, fmtout)
+                    fid.write('        </DataArray>\n')
+            if np.size(list(saved_cells.keys())) > 0 or np.size(list(saved_edges.keys())) > 0:
+                fid.write('      </CellData>\n')
+            # }}}
+
+            # Now writing constants # {{{
+            if np.size(list(saved_const.keys())) > 0:
+                fid.write('      <FieldData>\n')
+                for key in list(saved_const.keys()):
+                    fid.write('        <DataArray type="Float32" Name="{}" format="{}">\n'.format(key, fmtout))
+                    WriteIt(saved_const[key], fid, fmtout)
+                    fid.write('        </DataArray>\n')
+                fid.write('      </FieldData>\n')
+            # }}}
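+            # Editor's sketch of the FieldData block the code above emits for one
+            # saved constant (key and value here are hypothetical, fmtout "ascii"):
+            #   <FieldData>
+            #     <DataArray type="Float32" Name="TransientSolution.time" format="ascii">
+            #       12.5
+            #     </DataArray>
+            #   </FieldData>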
+
+            #Mesh treatment and write; this stays inside the step loop to allow variable geometry {{{
+            #updating z for mesh evolution
+            if moving_mesh and mesh_alti == '1':
+                points[:, 2] = np.squeeze(res_struct.__dict__['TransientSolution'][step].__dict__['Surface'][enveloppe_index])
+            elif moving_mesh and mesh_alti == '2':
+                points[:, 2] = np.squeeze(res_struct.__dict__['TransientSolution'][step].__dict__['Base'][enveloppe_index])
+
+            #Now write point locations
+            fid.write('      <Points>\n')
+            fid.write('        <DataArray type="Float32" Name="Points" NumberOfComponents="3" format="{}">\n'.format(fmtout))
+            WriteIt(points, fid, fmtout)
+            fid.write('        </DataArray>\n')
+            fid.write('      </Points>\n')
+
+            #cells are a combination of elements and edges
+            #we need node connectivity, offsets, and types
+            #offsets is the cumulative index of the last element of each cell (1 indexed)
+            flat_elt = connect.flatten()
+            elt_offset = np.arange(0, num_of_elt * point_per_elt, point_per_elt, dtype=np.int64) + point_per_elt
+            elt_type = celltype * np.ones((num_of_elt), dtype=np.uint8)
+            if num_of_edges > 0:
+                flat_edges = edges.flatten()
+                flat_cells = np.hstack((flat_elt, flat_edges))
+                edge_offset = np.arange(0, num_of_edges * 2, 2) + 2 + elt_offset[-1]
+                cell_offset = np.hstack((elt_offset, edge_offset))
+                edge_type = 3 * np.ones((num_of_edges), dtype=np.uint8)
+                cell_type = np.hstack((elt_type, edge_type))
+            else:
+                flat_cells = flat_elt
+                cell_offset = elt_offset
+                cell_type = elt_type
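+
+            # Editor's worked example of the arrays built above, for a hypothetical
+            # mesh of 2 triangles plus 1 extra edge (assuming celltype == 5, i.e.
+            # VTK_TRIANGLE; edges are written with type 3, VTK_LINE):
+            #   connect = [[0, 1, 2], [1, 3, 2]], edges = [[0, 3]]
+            #   flat_cells  -> [0, 1, 2, 1, 3, 2, 0, 3]
+            #   cell_offset -> [3, 6, 8]   (cumulative end index of each cell)
+            #   cell_type   -> [5, 5, 3]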
+
+            if verbose > 3:
+                print("""writing mesh structure:
+                                  connectivity of shape {}
+                                  cell offset of shape {}
+                                  cell types of shape {}""".format(np.shape(flat_cells), np.shape(cell_offset), np.shape(cell_type)))
+            #write cell information
+            fid.write('      <Cells>\n')
+            fid.write('        <DataArray type="Int64" Name="connectivity" format="{}">\n'.format(fmtout))
+            WriteIt(flat_cells, fid, fmtout)
+            fid.write('        </DataArray>\n')
+            fid.write('        <DataArray type="Int64" Name="offsets" format="{}">\n'.format(fmtout))
+            WriteIt(cell_offset, fid, fmtout)
+            fid.write('        </DataArray>\n')
+            fid.write('        <DataArray type="UInt8" Name="types" format="{}">\n'.format(fmtout))
+            WriteIt(cell_type, fid, fmtout)
+            fid.write('        </DataArray>\n')
+            fid.write('      </Cells>\n')
+            fid.write('    </Piece>\n')
+            fid.write('  </UnstructuredGrid>\n')
+            fid.write('</VTKFile>\n')
+            # }}}
+
+
+def SortFields(fieldnames):
+    #fields are matched on size, so there is a slight chance that logs get picked up as results; remove them to avoid that
+    for trashfield in ['errlog', 'outlog']:
+        if trashfield in fieldnames:
+            fieldnames.remove(trashfield)
+
+    #Sorting scalars, vectors and tensors
+    tensors = [field for field in fieldnames if field[-2:] in ['xx', 'yy', 'xy', 'zz', 'xz', 'yz']]
+    non_tensor = [field for field in fieldnames if field not in tensors]
+    vectors = [field for field in non_tensor if field[-1] in ['x', 'y', 'z']]
+    #get the names of scalar fields: remove vectors, tensors, and things that are not proper results
+    scalars = [field for field in fieldnames if field not in tensors + vectors]
+    dump = ["ConvergenceNumSteps", "step", "time"]
+    for trash in dump:
+        try:
+            scalars.remove(trash)
+        except ValueError:
+            #no exact match, so remove every field whose name contains the pattern
+            #(iterate over a copy, since removing while iterating skips entries)
+            for name in list(scalars):
+                if trash in name:
+                    scalars.remove(name)
+    #clean up vectors and tensors that might be here and should not be
+    #a component is kept only if at least one other component shares its base name
+    for namelist, suffixes in [(vectors, ['x', 'y', 'z']), (tensors, ['xx', 'yy', 'xy', 'zz', 'xz', 'yz'])]:
+        ncomp = len(suffixes[0])
+        for name in list(namelist):
+            base = name[:-ncomp]
+            siblings = [base + suffix for suffix in suffixes if base + suffix != name]
+            if not any(sibling in namelist for sibling in siblings):
+                scalars.append(name)
+                namelist.remove(name)
+    return tensors, vectors, scalars
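+
+
+# Editor's usage sketch for SortFields; the field names are hypothetical:
+#   tensors, vectors, scalars = SortFields(['Vx', 'Vy', 'Pressure',
+#                                           'StrainRatexx', 'StrainRatexy', 'StrainRateyy'])
+#   -> tensors == ['StrainRatexx', 'StrainRatexy', 'StrainRateyy']
+#      vectors == ['Vx', 'Vy']
+#      scalars == ['Pressure']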
+
+
+def TreatScalar(fid, fmtout, structure, structname, fieldname, enveloppe_index):
+    array = np.squeeze(structure.__dict__[fieldname][enveloppe_index])
+    fid.write('        <DataArray type="Float32" Name="{}" NumberOfComponents="1" format="{}">\n'.format(".".join((structname, fieldname)), fmtout))
+    WriteIt(array, fid, fmtout)
+    fid.write('        </DataArray>\n')
+
+
+def TreatVector(fid, fmtout, structure, structname, fieldname, treated_res, enveloppe_index):
+    Vxstruct = np.squeeze(structure.__dict__[fieldname[:-1] + 'x'])
+    Vystruct = np.squeeze(structure.__dict__[fieldname[:-1] + 'y'])
+    Vx = Vxstruct[enveloppe_index]
+    Vy = Vystruct[enveloppe_index]
+    treated_res += [fieldname[:-1] + 'x', fieldname[:-1] + 'y']
+    try:
+        Vzstruct = np.squeeze(structure.__dict__[fieldname[:-1] + 'z'])
+        treated_res += [fieldname[:-1] + 'z']
+        Vz = Vzstruct[enveloppe_index]
+    except KeyError:
+        Vz = np.zeros(np.shape(Vx))
+    Vector = (np.vstack((Vx, Vy, Vz)).T).flatten()
+    fid.write('        <DataArray type="Float32" Name="{}" NumberOfComponents="3" format="{}">\n'.format(".".join((structname, fieldname[:-1])), fmtout))
+    WriteIt(Vector, fid, fmtout)
+    fid.write('        </DataArray>\n')
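+
+
+# Editor's sketch of the component interleaving in TreatVector, with hypothetical
+# 2-node data (assuming numpy as np):
+#   Vx, Vy, Vz = np.array([1., 2.]), np.array([3., 4.]), np.zeros(2)
+#   (np.vstack((Vx, Vy, Vz)).T).flatten()   # -> [1., 3., 0., 2., 4., 0.]
+# which is the point-major ordering (x0, y0, z0, x1, y1, z1, ...) that VTK expects.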
+
+
+def TreatTensor(fid, fmtout, structure, structname, fieldname, treated_res, enveloppe_index):
+    Txxstruct = np.squeeze(structure.__dict__[fieldname[:-2] + 'xx'])
+    Txystruct = np.squeeze(structure.__dict__[fieldname[:-2] + 'xy'])
+    Tyystruct = np.squeeze(structure.__dict__[fieldname[:-2] + 'yy'])
+    treated_res += [fieldname[:-2] + 'xx', fieldname[:-2] + 'xy', fieldname[:-2] + 'yy']
+    Txx = Txxstruct[enveloppe_index]
+    Tyy = Tyystruct[enveloppe_index]
+    Txy = Txystruct[enveloppe_index]
+    try:
+        Tzzstruct = np.squeeze(structure.__dict__[fieldname[:-2] + 'zz'])
+        Txzstruct = np.squeeze(structure.__dict__[fieldname[:-2] + 'xz'])
+        Tyzstruct = np.squeeze(structure.__dict__[fieldname[:-2] + 'yz'])
+        treated_res += [fieldname[:-2] + 'zz', fieldname[:-2] + 'xz', fieldname[:-2] + 'yz']
+        Tzz = Tzzstruct[enveloppe_index]
+        Txz = Txzstruct[enveloppe_index]
+        Tyz = Tyzstruct[enveloppe_index]
+    except KeyError:
+        Tzz = np.zeros(np.shape(Txx))
+        Txz = np.zeros(np.shape(Txx))
+        Tyz = np.zeros(np.shape(Txx))
+
+    Tensor = (np.vstack((Txx, Tyy, Tzz, Txy, Tyz, Txz)).T).flatten()
+    fid.write('        <DataArray type="Float32" Name="{}" NumberOfComponents="6" format="{}">\n'.format(".".join((structname, fieldname[:-1])), fmtout))
+    WriteIt(Tensor, fid, fmtout)
+    fid.write('        </DataArray>\n')
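+
+
+# Editor's note: the (Txx, Tyy, Tzz, Txy, Tyz, Txz) stacking in TreatTensor matches
+# VTK's 6-component symmetric tensor ordering. A minimal sketch with one node:
+#   Txx, Tyy, Tzz, Txy, Tyz, Txz = 1., 2., 3., 4., 5., 6.
+#   np.vstack((Txx, Tyy, Tzz, Txy, Tyz, Txz)).T.flatten()   # -> [1., 2., 3., 4., 5., 6.]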
+
+
+def TreatForcing(fid, fmtout, structure, structname, fieldname, treated_res, enveloppe_index, current_time):
+    #we are dealing with a forcing of some kind.
+    forcing_time = structure.__dict__[fieldname][-1, :]
+    if any(forcing_time == current_time):
+        forcing_index = np.where(forcing_time == current_time)
+        forcing_val = structure.__dict__[fieldname][:, forcing_index]
+    elif forcing_time[0] > current_time:
+        forcing_val = structure.__dict__[fieldname][:, 0]
+    elif forcing_time[-1] < current_time:
+        forcing_val = structure.__dict__[fieldname][:, -1]
+    else:
+        forcing_index = np.where(forcing_time < current_time)[-1][-1]
+        delta_time = forcing_time[forcing_index + 1] - forcing_time[forcing_index]  #compute forcing Dt
+        delta_current = current_time - forcing_time[forcing_index]  # time since last forcing
+        ratio = delta_current / delta_time  #compute weighting factor for the preceding forcing value
+        forcing_evol = (structure.__dict__[fieldname][:, forcing_index + 1] - structure.__dict__[fieldname][:, forcing_index]) * ratio
+        forcing_val = structure.__dict__[fieldname][:, forcing_index] + forcing_evol
+    array = forcing_val[enveloppe_index]
+    # and now write it down
+    fid.write('        <DataArray type="Float32" Name="{}" NumberOfComponents="1" format="{}">\n'.format(".".join((structname, fieldname)), fmtout))
+    WriteIt(array, fid, fmtout)
+    fid.write('        </DataArray>\n')
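+
+
+# Editor's worked example for the interpolation branch of TreatForcing, with
+# hypothetical numbers: forcing times [0., 10.], forcing values [2., 4.], and
+# current_time = 2.5:
+#   forcing_index = 0, delta_time = 10., delta_current = 2.5, ratio = 0.25
+#   forcing_val = 2. + (4. - 2.) * 0.25 = 2.5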
+
+
+def WriteIt(Data, fid, fmtout):
+    vtu_to_numpy_type = {
+        "Float32": np.dtype(np.float32),
+        "Float64": np.dtype(np.float64),
+        "Int8": np.dtype(np.int8),
+        "Int16": np.dtype(np.int16),
+        "Int32": np.dtype(np.int32),
+        "Int64": np.dtype(np.int64),
+        "UInt8": np.dtype(np.uint8),
+        "UInt16": np.dtype(np.uint16),
+        "UInt32": np.dtype(np.uint32),
+        "UInt64": np.dtype(np.uint64),
+    }
+    if fmtout == 'binary':
+        try:
+            datatype = Data.dtype
+        except AttributeError:
+            datatype = type(Data)
+        if datatype == np.float64:
+            Data = np.float32(Data)
+        try:
+            data_bytes = Data.tobytes()
+        except AttributeError:
+            data_bytes = np.asarray(Data).tobytes()
+        # collect header
+        header = np.array(len(data_bytes), dtype=vtu_to_numpy_type['UInt32'])
+        fid.write(b64encode(header.tobytes() + data_bytes).decode())
+        fid.write('\n')
+    elif fmtout == 'ascii':
+        np.savetxt(fid, Data, fmt='%g')
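+
+
+# Editor's sketch of WriteIt's binary branch (assuming numpy as np and
+# base64.b64encode are imported at the top of this file): each binary DataArray
+# payload is preceded by a UInt32 byte-count header, and the pair is base64
+# encoded as a whole:
+#   payload = np.arange(3, dtype=np.float32).tobytes()   # 12 raw bytes
+#   header = np.uint32(len(payload)).tobytes()           # b'\x0c\x00\x00\x00'
+#   b64encode(header + payload).decode()                 # the text written to the .vtu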
+
+
+def cleanOutliers(Val, fmtout):
+    #paraview does not like NaN in ascii files, replace them
+    if np.isnan(Val):
+        if fmtout == 'ascii':
+            CleanVal = -9999.999
+        else:
+            CleanVal = Val
+    #also zero out very small values that mess up the output
+    elif (abs(Val) < 1.0e-20):
+        CleanVal = 0.0
+    else:
+        CleanVal = Val
+    return CleanVal
+
+
+class BadDimension(Exception):
+    """The required dimension is not supported yet."""
+
+
+class BadOption(Exception):
+    """The given option does not exist."""
+
+
+class ClipError(Exception):
+    """Error while trying to clip the domain."""
