Index: /issm/trunk/src/m/contrib/musselman/read_netCDF.py
===================================================================
--- /issm/trunk/src/m/contrib/musselman/read_netCDF.py	(revision 27882)
+++ /issm/trunk/src/m/contrib/musselman/read_netCDF.py	(revision 27882)
@@ -0,0 +1,319 @@
+# imports
+from netCDF4 import Dataset
+import numpy as np
+import numpy.ma as ma
+from os import path, remove
+from model import *
+import re
+from results import *
+from m1qn3inversion import m1qn3inversion
+from taoinversion import taoinversion
+from collections import OrderedDict
+
+
+
+'''
+Given a NetCDF4 file, this set of functions will perform the following:
+    1. Enter each group of the file.
+    2. For each variable in each group, update an empty model with the variable's data
+    3. Enter nested groups and repeat
+'''
+
+
+# make a model framework to fill that is in the scope of this file
+model_copy = model()
+
+
+def read_netCDF(filename):
+    print('NetCDF42C v1.1.13')
+
+    # check if path exists
+    if path.exists(filename):
+        print('Opening {} for reading'.format(filename))
+
+        # open the given netCDF4 file
+        global NCData   
+        NCData = Dataset(filename, 'r')
+        # remove masks from numpy arrays for easy conversion
+        NCData.set_auto_mask(False)
+    else:
+        print('The file you entered does not exist or cannot be found in the current directory')
+    
+    # continuation of band-aid for results class
+    try:
+        NCData.groups['results']
+        make_results_subclasses()
+    except Exception:
+        pass
+
+    # similarly, we need to check and see if we have an m1qn3inversion class instance
+    try:
+        NCData.groups['inversion']
+        check_inversion_class()
+    except Exception:
+        pass
+    
+    # walk through each group looking for subgroups and variables
+    for group in NCData.groups.keys():
+        if 'debris' in group:
+            pass
+        else:
+            # have to send a custom name to this function: filename.groups['group']
+            name = "NCData.groups['" + str(group) + "']"
+            walk_nested_groups(name)
+    
+    print("Model Successfully Recreated.")
+    return model_copy
+
+
+def make_results_subclasses():
+    '''
+        There are 3 possible subclasses: solution, solutionstep, resultsdakota.
+        In the NetCDF file these are saved as a list of strings. Ie, say there are 2
+        instances of solution under results, StressbalanceSolution and TransientSolution. 
+        In the NetCDF file we would see solution = "StressbalanceSolution", "TransientSolution"
+        To deconstruct this, we need to iteratively assign md.results.StressbalanceSolution = solution()
+        and md.results.TransientSolution = solution() and whatever else.
+    '''
+    # start with the subclasses
+    for subclass in NCData.groups['results'].variables.keys():
+        class_instance = subclass + '()'
+
+        # now handle the instances
+        for instance in NCData.groups['results'].variables[subclass][:]:
+            # this is an ndarray of numpy bytes_ that we have to convert to strings
+            class_instance_name = instance.tobytes().decode('utf-8').strip()
+            # from here we can make new subclasses named as they were in the model saved
+            setattr(model_copy.results, class_instance_name, eval(class_instance))
+            print(f'Successfully created results subclass instance {class_instance} named {class_instance_name}.')
+
+
+def check_inversion_class():
+    # get the name of the inversion class: either inversion or m1qn3inversion or taoinversion
+    inversion_class_is = NCData.groups['inversion'].variables['inversion_class_name'][:][...].tobytes().decode()
+    if inversion_class_is == 'm1qn3inversion':
+        # if it is m1qn3inversion we need to instantiate that class since it's not native to model()
+        model_copy.inversion = m1qn3inversion(model_copy.inversion)
+        print('Conversion successful')
+    elif inversion_class_is == 'taoinversion':
+        # if it is taoinversion we need to instantiate that class since it's not native to model()
+        model_copy.inversion = taoinversion()
+        print('Conversion successful')
+    else: pass
+
+
+
+def walk_nested_groups(group_location_in_file):
+    # first, we enter the group by: filename.groups['group_name']
+    # second we search the current level for variables: filename.groups['group_name'].variables.keys()
+    # at this step we check for multidimensional structure arrays and filter them out
+    # third we get nested group keys by: filename.groups['group_name'].groups.keys()
+    # if a variables exists, copy the data to the model framework by calling copy function
+    # if a nested groups exist, repeat all
+
+    for variable in eval(group_location_in_file + '.variables.keys()'):
+        if variable == 'this_is_a_nested' and 'results' in group_location_in_file:
+            # have to do some string deconstruction to get the name of the class instance/last group from 'NetCDF.groups['group1'].groups['group1.1']'
+            pattern = r"\['(.*?)'\]"
+            matches = re.findall(pattern, group_location_in_file)
+            name_of_struct = matches[-1] #eval(group_location_in_file + ".variables['solution']") 
+            copy_multidimensional_results_struct(group_location_in_file, name_of_struct)
+            istruct = True
+
+        elif variable == 'this_is_a_nested' and 'qmu' in group_location_in_file:
+            print('encountered qmu structure that is not yet supported.')
+            # have to do some string deconstruction to get the name of the class instance/last group from 'NetCDF.groups['group1'].groups['group1.1']'
+            #pattern = r"\['(.*?)'\]"
+            #matches = re.findall(pattern, group_location_in_file)
+            #name_of_struct = matches[-1] #eval(group_location_in_file + ".variables['solution']") 
+            #name_of_struct = eval(group_location_in_file + ".variables['']")
+            #copy_multidimensional_qmu_struct(group_location_in_file, name_of_struct)
+            istruct = True  # must match the 'istruct' flag checked below to skip recursion
+    
+        else:
+            location_of_variable_in_file = group_location_in_file + ".variables['" + str(variable) + "']"
+            # group_location_in_file is like filename.groups['group1'].groups['group1.1'].groups['group1.1.1']
+            # Define the regex pattern to match the groups within brackets
+            pattern = r"\['(.*?)'\]"
+            # Use regex to find all matches and return something like 'group1.group1.1.group1.1.1 ...' where the last value is the name of the variable
+            matches = re.findall(pattern, location_of_variable_in_file)
+            variable_name = matches[-1]
+            location_of_variable_in_model = '.'.join(matches[:-1])
+            copy_variable_data_to_new_model(location_of_variable_in_file, location_of_variable_in_model, variable_name)
+            
+    if 'istruct' in locals():
+        pass
+    else:
+        for nested_group in eval(group_location_in_file + '.groups.keys()'):
+            new_nested_group = group_location_in_file + ".groups['" + str(nested_group) + "']"
+            walk_nested_groups(new_nested_group)
+
+
+
+'''
+    MATLAB has Multidimensional Structure Arrays in 2 known classes: results and qmu.
+    The python classes results.py and qmu.py emulate this MATLAB object in their own
+    unique ways. The functions in this script will assign data to either of these 
+    classes such that the final structure is compatible with its parent class.
+'''
+
+def copy_multidimensional_results_struct(group_location_in_file, name_of_struct):
+    '''
+    A common multidimensional array is the 1xn md.results.TransientSolution object.
+
+    The way that this object emulates the MATLAB mutli-dim. struct. array is with 
+    the solution().steps attr. which is a list of solutionstep() instances
+        The process to recreate is as follows:
+            1. Get instance of solution() with solution variable (the instance is made in make_results_subclasses)
+            2. For each subgroup, create a solutionstep() class instance
+             2a. Populate the instance with the key:value pairs
+             2b. Append the instance to the solution().steps list
+    '''
+    # step 1
+    class_instance_name = name_of_struct
+    
+    # for some reason steps is not already a list
+    setattr(model_copy.results.__dict__[class_instance_name], 'steps', list())
+
+    steps = model_copy.results.__dict__[class_instance_name].steps
+    
+    # step 2
+    layer = 1
+    for subgroup in eval(group_location_in_file + ".groups.keys()"):
+        solutionstep_instance = solutionstep()
+        # step 2a
+        subgroup_location_in_file = group_location_in_file + ".groups['" + subgroup + "']"
+        for key in eval(subgroup_location_in_file + ".variables.keys()"):
+            value = eval(subgroup_location_in_file + ".variables['" + str(key) + "'][:]")
+            setattr(solutionstep_instance, key, value)
+        # step 2b
+        steps.append(solutionstep_instance)
+        print('Successfully saved layer ' + str(layer) + ' to results.' + str(class_instance_name) + ' struct.')
+        layer += 1
+
+    print('Successfully recreated results structure ' + str(class_instance_name))
+
+
+
+def copy_variable_data_to_new_model(location_of_variable_in_file, location_of_variable_in_model, variable_name):
+    # as simple as navigating to the location_of_variable_in_model and setting it equal to the location_of_variable_in_file
+    # NetCDF4 has a property called "_FillValue" that sometimes saves empty lists, so we have to catch those
+    FillValue = -9223372036854775806
+    try:
+        # results band-aid...
+        #print(str(location_of_variable_in_model + '.' + variable_name))
+        if str(location_of_variable_in_model + '.' + variable_name) in ['results.solutionstep', 'results.solution', 'results.resultsdakota']:
+            pass
+        # qmu band-aid
+        elif 'qmu.statistics.method' in str(location_of_variable_in_model + '.' + variable_name):
+            pass
+        # handle any strings:
+        elif 'char' in eval(location_of_variable_in_file + '.dimensions[0]'):
+            setattr(eval('model_copy.' + location_of_variable_in_model), variable_name, eval(location_of_variable_in_file + '[:][...].tobytes().decode()'))
+        # handle ndarrays + lists
+        elif len(eval(location_of_variable_in_file + '[:]'))>1:
+            # check for bool
+            try: # there is only one datatype assigned the attribute 'units' and that is bool, so anything else will go right to except
+                if eval(location_of_variable_in_file + '.units') == 'bool':
+                    setattr(eval('model_copy.' + location_of_variable_in_model), variable_name, np.array(eval(location_of_variable_in_file + '[:]'), dtype = bool))
+                else:
+                    setattr(eval('model_copy.' + location_of_variable_in_model), variable_name, eval(location_of_variable_in_file + '[:]'))
+            except:
+                setattr(eval('model_copy.' + location_of_variable_in_model), variable_name, eval(location_of_variable_in_file + '[:]'))
+        # catch everything else
+        else:
+            # check for FillValue. use try/except because try block will only work on datatypes like int64, float, single element lists/arrays ect and not nd-arrays/n-lists etc
+            try:
+                # this try block will only work on single ints/floats/doubles and will skip to the except block for all other cases
+                var_to_save = eval(location_of_variable_in_file + '[:][0]')  # note the [0] on the end
+                if FillValue == var_to_save:
+                    setattr(eval('model_copy.' + location_of_variable_in_model), variable_name, [])
+                else:
+                    if var_to_save.is_integer():
+                        setattr(eval('model_copy.' + location_of_variable_in_model), variable_name, int(var_to_save))
+                    else:
+                        # we have to convert numpy datatypes to native python types with .item()
+                        setattr(eval('model_copy.' + location_of_variable_in_model), variable_name, var_to_save.item())                        
+            except:
+                    setattr(eval('model_copy.' + location_of_variable_in_model), variable_name, eval(location_of_variable_in_file + '[:].item()'))
+    except AttributeError:
+        copy_variable_data_to_new_model_dict(location_of_variable_in_file, location_of_variable_in_model)
+
+    print('Successfully saved ' + location_of_variable_in_model + '.' + variable_name + ' to model.')
+
+
+
+
+
+
+def copy_variable_data_to_new_model_dict(location_of_variable_in_file, location_of_variable_in_model):
+    # as simple as navigating to the location_of_variable_in_model and setting it equal to the location_of_variable_in_file
+
+    # NetCDF4 has a property called "_FillValue" that sometimes saves empty lists, so we have to catch those
+    FillValue = -9223372036854775806
+
+    # the key will be the last item in the location
+    key = ''.join(location_of_variable_in_model.split('.')[-1])
+
+    # update the location to point to the dict instead of the dict key
+    location_of_variable_in_model = '.'.join(location_of_variable_in_model.split('.')[:-1])
+
+    # verify we're working with a dict:
+    if isinstance(eval('model_copy.' + location_of_variable_in_model), OrderedDict):
+        dict_object = eval('model_copy.' + location_of_variable_in_model)
+        
+        # handle any strings:
+        if 'char' in eval(location_of_variable_in_file + '.dimensions[0]'):
+            data = eval(location_of_variable_in_file + '[:][...].tobytes().decode()')
+            dict_object.update({key: data})
+            
+        # handle ndarrays + lists
+        elif len(eval(location_of_variable_in_file + '[:]'))>1:
+            # check for bool
+            try: # there is only one datatype assigned the attribute 'units' and that is bool, so anything else will go right to except
+                if eval(location_of_variable_in_file + '.units') == 'bool':
+                    data = np.array(eval(location_of_variable_in_file + '[:]'), dtype = bool)
+                    dict_object.update({key: data})
+                else:
+                    data = eval(location_of_variable_in_file + '[:]')
+                    dict_object.update({key: data})
+            except:
+                data = eval(location_of_variable_in_file + '[:]')
+                dict_object.update({key: data})
+        # catch everything else
+        else:
+            # check for FillValue. use try/except because try block will only work on datatypes like int64, float, single element lists/arrays ect and not nd-arrays/n-lists etc
+            try:
+                # this try block will only work on single ints/floats/doubles and will skip to the except block for all other cases
+                if FillValue == eval(location_of_variable_in_file + '[:][0]'):
+                    dict_object.update({key: []})
+                else:
+                    # we have to convert numpy datatypes to native python types with .item()
+                    var_to_save = eval(location_of_variable_in_file + '[:][0]')  # note the [0] on the end
+                    dict_object.update({key:  var_to_save.item()})
+            except:
+                data = eval(location_of_variable_in_file + '[:]')
+                dict_object.update({key: data})
+    else:
+        print(f"Unrecognized object was saved and cannot be reconstructed: {location_of_variable_in_model}")
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
Index: /issm/trunk/src/m/contrib/musselman/read_netCDF_beta.py
===================================================================
--- /issm/trunk/src/m/contrib/musselman/read_netCDF_beta.py	(revision 27881)
+++ 	(revision )
@@ -1,90 +1,0 @@
-# imports
-from netCDF4 import Dataset
-import numpy as np
-import numpy.ma as ma
-from os import path, remove
-from model import *
-import re
-
-
-'''
-Given a NetCDF4 file, this set of functions will perform the following:
-    1. Enter each group of the file.
-    2. For each variable in each group, update an empty model with the variable's data
-'''
-
-
-# make a model framework to fill that is in the scope of this file
-model_copy = model()
-
-
-def read_netCDF(filename):
-    # check if path exists
-    if path.exists(filename):
-        print('Opening {} for reading'.format(filename))
-
-        # open the given netCDF4 file
-        global NCData   
-        NCData = Dataset(filename, 'r')
-        # remove masks from numpy arrays for easy conversion
-        NCData.set_auto_mask(False)
-    
-
-    # read the contents of the groups
-
-    '''
-    this function navigates like: 
-
-    filename.groups.keys() -> filename.groups['group1'] -> 
-    filename.groups['group1'].groups.keys() -> filename.groups['group1'].groups['group1.1'] ->
-    filename.groups['group1'].groups['group1.1'].groups.keys() ->
-    filename.groups['group1'].groups['group1.1'].groups['group1.1.1'] etc. etc.
-    '''
-    # walk through each group looking for subgroups and variables
-    for group in NCData.groups.keys():
-        print('walking ' + str(group))
-        # have to send a custom name to this function: filename.groups['group']
-        name = "NCData.groups['" + str(group) + "']"
-        print('name sent to walker is: ' + name)
-        walk_nested_groups(name)
-    
-    return model_copy
-
-
-def walk_nested_groups(group_location_in_file):
-    # first, we enter the group by: filename.groups['group_name']
-    # second we search the current level for variables: filename.groups['group_name'].variables.keys()
-    # third we get nested group keys by: filename.groups['group_name'].groups.keys()
-    # if the variables exist, copy the data to the model framework by calling a custom function
-    # if the nested groups exist, repeat. 
-
-    for variable in eval(group_location_in_file + '.variables.keys()'):
-        print('got a variable: ' + str(variable))
-        location_of_variable_in_file = group_location_in_file + ".variables['" + str(variable) + "']"
-        # group_location_in_file is like filename.groups['group1'].groups['group1.1'].groups['group1.1.1']
-        # Define the regex pattern to match the groups within brackets
-        pattern = r"\['(.*?)'\]"
-        # Use regex to find all matches and return something like 'group1.group1.1.group1.1.1 ...' where the last value is the name of the variable
-        matches = re.findall(pattern, location_of_variable_in_file)
-        variable_name = matches[-1]
-        location_of_variable_in_model = '.'.join(matches[:-1])
-        copy_variable_data_to_new_model(location_of_variable_in_file, location_of_variable_in_model, variable_name)
-
-    for nested_group in eval(group_location_in_file + '.groups.keys()'):
-        print('got a nested group: ' + nested_group)
-        new_nested_group = group_location_in_file + ".groups['" + str(nested_group) + "']"
-        print('the location of this nested group in the file is: ' + new_nested_group)
-        walk_nested_groups(new_nested_group)
-
-
-
-def copy_variable_data_to_new_model(location_of_variable_in_file, location_of_variable_in_model, variable_name):
-    # this should be as simple as navigating to the location_of_variable_in_model and setting it equal to the location_of_variable_in_file
-    print('adress in file: ' + location_of_variable_in_file)
-    print('adress in model: ' + location_of_variable_in_model)
-    print('the value of the variable is: ')
-    print(eval(location_of_variable_in_file + '[:]'))
-    print('the name of the varialbe is: ' + variable_name)
-    print('the type of the variable is: ' + str(type(eval(location_of_variable_in_file + '[:]'))))
-    setattr(eval('model_copy.' + location_of_variable_in_model), variable_name, eval(location_of_variable_in_file + '[:]'))
-    print('successfully saved var to model')
Index: /issm/trunk/src/m/contrib/musselman/read_netCDF_commit.py
===================================================================
--- /issm/trunk/src/m/contrib/musselman/read_netCDF_commit.py	(revision 27881)
+++ 	(revision )
@@ -1,319 +1,0 @@
-# imports
-from netCDF4 import Dataset
-import numpy as np
-import numpy.ma as ma
-from os import path, remove
-from model import *
-import re
-from results import *
-from m1qn3inversion import m1qn3inversion
-from taoinversion import taoinversion
-from collections import OrderedDict
-
-
-
-'''
-Given a NetCDF4 file, this set of functions will perform the following:
-    1. Enter each group of the file.
-    2. For each variable in each group, update an empty model with the variable's data
-    3. Enter nested groups and repeat
-'''
-
-
-# make a model framework to fill that is in the scope of this file
-model_copy = model()
-
-
-def read_netCDF(filename):
-    print('NetCDF42C v1.1.13')
-
-    # check if path exists
-    if path.exists(filename):
-        print('Opening {} for reading'.format(filename))
-
-        # open the given netCDF4 file
-        global NCData   
-        NCData = Dataset(filename, 'r')
-        # remove masks from numpy arrays for easy conversion
-        NCData.set_auto_mask(False)
-    else:
-        print('The file you entered does not exist or cannot be found in the current directory')
-    
-    # continuation of band-aid for results class
-    try:
-        NCData.groups['results']
-        make_results_subclasses()
-    except:
-        pass
-
-    # similarly, we need to check and see if we have an m1qn3inversion class instance
-    try:
-        NCData.groups['inversion']
-        check_inversion_class()
-    except:
-        pass
-    
-    # walk through each group looking for subgroups and variables
-    for group in NCData.groups.keys():
-        if 'debris' in group:
-            pass
-        else:
-            # have to send a custom name to this function: filename.groups['group']
-            name = "NCData.groups['" + str(group) + "']"
-            walk_nested_groups(name)
-    
-    print("Model Successfully Recreated.")
-    return model_copy
-
-
-def make_results_subclasses():
-    '''
-        There are 3 possible subclasses: solution, solutionstep, resultsdakota.
-        In the NetCDF file these are saved as a list of strings. Ie, say there are 2
-        instances of solution under results, StressbalanceSolution and TransientSolution. 
-        In the NetCDF file we would see solution = "StressbalanceSolution", "TransientSolution"
-        To deconstruct this, we need to iteratively assign md.results.StressbalanceSolution = solution()
-        and md.results.TransientSolution = solution() and whatever else.
-    '''
-    # start with the subclasses
-    for subclass in NCData.groups['results'].variables.keys():
-        class_instance = subclass + '()'
-
-        # now handle the instances
-        for instance in NCData.groups['results'].variables[subclass][:]:
-            # this is an ndarray of numpy bytes_ that we have to convert to strings
-            class_instance_name = instance.tobytes().decode('utf-8').strip()
-            # from here we can make new subclasses named as they were in the model saved
-            setattr(model_copy.results, class_instance_name, eval(class_instance))
-            print(f'Successfully created results subclass instance {class_instance} named {class_instance_name}.')
-
-
-def check_inversion_class():
-    # get the name of the inversion class: either inversion or m1qn3inversion or taoinversion
-    inversion_class_is = NCData.groups['inversion'].variables['inversion_class_name'][:][...].tobytes().decode()
-    if inversion_class_is == 'm1qn3inversion':
-        # if it is m1qn3inversion we need to instantiate that class since it's not native to model()
-        model_copy.inversion = m1qn3inversion(model_copy.inversion)
-        print('Conversion successful')
-    elif inversion_class_is == 'taoinversion':
-        # if it is taoinversion we need to instantiate that class since it's not native to model()
-        model_copy.inversion = taoinverion()
-        print('Conversion successful')
-    else: pass
-
-
-
-def walk_nested_groups(group_location_in_file):
-    # first, we enter the group by: filename.groups['group_name']
-    # second we search the current level for variables: filename.groups['group_name'].variables.keys()
-    # at this step we check for multidimensional structure arrays and filter them out
-    # third we get nested group keys by: filename.groups['group_name'].groups.keys()
-    # if a variables exists, copy the data to the model framework by calling copy function
-    # if a nested groups exist, repeat all
-
-    for variable in eval(group_location_in_file + '.variables.keys()'):
-        if variable == 'this_is_a_nested' and 'results' in group_location_in_file:
-            # have to do some string deconstruction to get the name of the class instance/last group from 'NetCDF.groups['group1'].groups['group1.1']'
-            pattern = r"\['(.*?)'\]"
-            matches = re.findall(pattern, group_location_in_file)
-            name_of_struct = matches[-1] #eval(group_location_in_file + ".variables['solution']") 
-            copy_multidimensional_results_struct(group_location_in_file, name_of_struct)
-            istruct = True
-
-        elif variable == 'this_is_a_nested' and 'qmu' in group_location_in_file:
-            print('encountered qmu structure that is not yet supported.')
-            # have to do some string deconstruction to get the name of the class instance/last group from 'NetCDF.groups['group1'].groups['group1.1']'
-            #pattern = r"\['(.*?)'\]"
-            #matches = re.findall(pattern, group_location_in_file)
-            #name_of_struct = matches[-1] #eval(group_location_in_file + ".variables['solution']") 
-            #name_of_struct = eval(group_location_in_file + ".variables['']")
-            #copy_multidimensional_qmu_struct(group_location_in_file, name_of_struct)
-            isstruct = True
-    
-        else:
-            location_of_variable_in_file = group_location_in_file + ".variables['" + str(variable) + "']"
-            # group_location_in_file is like filename.groups['group1'].groups['group1.1'].groups['group1.1.1']
-            # Define the regex pattern to match the groups within brackets
-            pattern = r"\['(.*?)'\]"
-            # Use regex to find all matches and return something like 'group1.group1.1.group1.1.1 ...' where the last value is the name of the variable
-            matches = re.findall(pattern, location_of_variable_in_file)
-            variable_name = matches[-1]
-            location_of_variable_in_model = '.'.join(matches[:-1])
-            copy_variable_data_to_new_model(location_of_variable_in_file, location_of_variable_in_model, variable_name)
-            
-    if 'istruct' in locals():
-        pass
-    else:
-        for nested_group in eval(group_location_in_file + '.groups.keys()'):
-            new_nested_group = group_location_in_file + ".groups['" + str(nested_group) + "']"
-            walk_nested_groups(new_nested_group)
-
-
-
-'''
-    MATLAB has Multidimensional Structure Arrays in 2 known classes: results and qmu.
-    The python classes results.py and qmu.py emulate this MATLAB object in their own
-    unique ways. The functions in this script will assign data to either of these 
-    classes such that the final structure is compatible with its parent class.
-'''
-
-def copy_multidimensional_results_struct(group_location_in_file, name_of_struct):
-    '''
-    A common multidimensional array is the 1xn md.results.TransientSolution object.
-
-    The way that this object emulates the MATLAB mutli-dim. struct. array is with 
-    the solution().steps attr. which is a list of solutionstep() instances
-        The process to recreate is as follows:
-            1. Get instance of solution() with solution variable (the instance is made in make_results_subclasses)
-            2. For each subgroup, create a solutionstep() class instance
-             2a. Populate the instance with the key:value pairs
-             2b. Append the instance to the solution().steps list
-    '''
-    # step 1
-    class_instance_name = name_of_struct
-    
-    # for some reason steps is not already a list
-    setattr(model_copy.results.__dict__[class_instance_name], 'steps', list())
-
-    steps = model_copy.results.__dict__[class_instance_name].steps
-    
-    # step 2
-    layer = 1
-    for subgroup in eval(group_location_in_file + ".groups.keys()"):
-        solutionstep_instance = solutionstep()
-        # step 2a
-        subgroup_location_in_file = group_location_in_file + ".groups['" + subgroup + "']"
-        for key in eval(subgroup_location_in_file + ".variables.keys()"):
-            value = eval(subgroup_location_in_file + ".variables['" + str(key) + "'][:]")
-            setattr(solutionstep_instance, key, value)
-        # step 2b
-        steps.append(solutionstep_instance)
-        print('Succesfully saved layer ' + str(layer) + ' to results.' + str(class_instance_name) + ' struct.')
-        layer += 1
-
-    print('Successfully recreated results structure ' + str(class_instance_name))
-
-
-
-def copy_variable_data_to_new_model(location_of_variable_in_file, location_of_variable_in_model, variable_name):
-    '''
-    Copy one netCDF variable into the module-global model_copy at the dotted
-    attribute path location_of_variable_in_model (e.g. 'mesh' for mesh.x).
-
-    NOTE(review): both location strings are interpolated into eval(); they are
-    assumed to come only from the trusted netCDF walk in this module -- never
-    feed untrusted input through this function.
-    '''
-    # as simple as navigating to the location_of_variable_in_model and setting it equal to the location_of_variable_in_file
-    # NetCDF4 has a property called "_FillValue" that sometimes saves empty lists, so we have to catch those
-    FillValue = -9223372036854775806
-    try:
-        # results band-aid...
-        #print(str(location_of_variable_in_model + '.' + variable_name))
-        if str(location_of_variable_in_model + '.' + variable_name) in ['results.solutionstep', 'results.solution', 'results.resultsdakota']:
-            pass
-        # qmu band-aid
-        elif 'qmu.statistics.method' in str(location_of_variable_in_model + '.' + variable_name):
-            pass
-        # handle any strings: the writer stores strings as char arrays on a 'char<N>' dimension
-        elif 'char' in eval(location_of_variable_in_file + '.dimensions[0]'):
-            setattr(eval('model_copy.' + location_of_variable_in_model), variable_name, eval(location_of_variable_in_file + '[:][...].tobytes().decode()'))
-        # handle ndarrays + lists
-        elif len(eval(location_of_variable_in_file + '[:]'))>1:
-            # check for bool
-            try: # there is only one datatype assigned the attribute 'units' and that is bool, so anything else will go right to except
-                if eval(location_of_variable_in_file + '.units') == 'bool':
-                    setattr(eval('model_copy.' + location_of_variable_in_model), variable_name, np.array(eval(location_of_variable_in_file + '[:]'), dtype = bool))
-                else:
-                    setattr(eval('model_copy.' + location_of_variable_in_model), variable_name, eval(location_of_variable_in_file + '[:]'))
-            except:
-                setattr(eval('model_copy.' + location_of_variable_in_model), variable_name, eval(location_of_variable_in_file + '[:]'))
-        # catch everything else
-        else:
-            # check for FillValue. use try/except because try block will only work on datatypes like int64, float, single element lists/arrays ect and not nd-arrays/n-lists etc
-            try:
-                # this try block will only work on single ints/floats/doubles and will skip to the except block for all other cases
-                var_to_save = eval(location_of_variable_in_file + '[:][0]')  # note the [0] on the end
-                if FillValue == var_to_save:
-                    setattr(eval('model_copy.' + location_of_variable_in_model), variable_name, [])
-                else:
-                    if var_to_save.is_integer():
-                        setattr(eval('model_copy.' + location_of_variable_in_model), variable_name, int(var_to_save))
-                    else:
-                        # we have to convert numpy datatypes to native python types with .item()
-                        setattr(eval('model_copy.' + location_of_variable_in_model), variable_name, var_to_save.item())                        
-            except:
-                    setattr(eval('model_copy.' + location_of_variable_in_model), variable_name, eval(location_of_variable_in_file + '[:].item()'))
-    # an AttributeError here presumably means the target path points inside a
-    # dict rather than a class attribute -- delegate to the dict variant
-    except AttributeError:
-        copy_variable_data_to_new_model_dict(location_of_variable_in_file, location_of_variable_in_model)
-
-    print('Successfully saved ' + location_of_variable_in_model + '.' + variable_name + ' to model.')
-
-
-
-
-
-
-def copy_variable_data_to_new_model_dict(location_of_variable_in_file, location_of_variable_in_model):
-    '''
-    Variant of copy_variable_data_to_new_model for values that live inside an
-    OrderedDict on the model: the last dotted component of
-    location_of_variable_in_model is treated as the dict key, the rest as the
-    path to the dict itself.
-
-    NOTE(review): like its sibling, this evaluates the location strings with
-    eval() and must only ever see paths produced by this module's own walk.
-    '''
-    # as simple as navigating to the location_of_variable_in_model and setting it equal to the location_of_variable_in_file
-
-    # NetCDF4 has a property called "_FillValue" that sometimes saves empty lists, so we have to catch those
-    FillValue = -9223372036854775806
-
-    # the key will be the last item in the location
-    key = ''.join(location_of_variable_in_model.split('.')[-1])
-
-    # update the location to point to the dict instead of the dict key
-    location_of_variable_in_model = '.'.join(location_of_variable_in_model.split('.')[:-1])
-
-    # verify we're working with a dict:
-    if isinstance(eval('model_copy.' + location_of_variable_in_model), OrderedDict):
-        dict_object = eval('model_copy.' + location_of_variable_in_model)
-        
-        # handle any strings:
-        if 'char' in eval(location_of_variable_in_file + '.dimensions[0]'):
-            data = eval(location_of_variable_in_file + '[:][...].tobytes().decode()')
-            dict_object.update({key: data})
-            
-        # handle ndarrays + lists
-        elif len(eval(location_of_variable_in_file + '[:]'))>1:
-            # check for bool
-            try: # there is only one datatype assigned the attribute 'units' and that is bool, so anything else will go right to except
-                if eval(location_of_variable_in_file + '.units') == 'bool':
-                    data = np.array(eval(location_of_variable_in_file + '[:]'), dtype = bool)
-                    dict_object.update({key: data})
-                else:
-                    data = eval(location_of_variable_in_file + '[:]')
-                    dict_object.update({key: data})
-            except:
-                data = eval(location_of_variable_in_file + '[:]')
-                dict_object.update({key: data})
-        # catch everything else
-        else:
-            # check for FillValue. use try/except because try block will only work on datatypes like int64, float, single element lists/arrays ect and not nd-arrays/n-lists etc
-            try:
-                # this try block will only work on single ints/floats/doubles and will skip to the except block for all other cases
-                if FillValue == eval(location_of_variable_in_file + '[:][0]'):
-                    dict_object.update({key: []})
-                else:
-                    # we have to convert numpy datatypes to native python types with .item()
-                    var_to_save = eval(location_of_variable_in_file + '[:][0]')  # note the [0] on the end
-                    dict_object.update({key:  var_to_save.item()})
-            except:
-                data = eval(location_of_variable_in_file + '[:]')
-                dict_object.update({key: data})
-    else:
-        print(f"Unrecognized object was saved and cannot be reconstructed: {location_of_variable_in_model}")
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
Index: /issm/trunk/src/m/contrib/musselman/write_netCDF.py
===================================================================
--- /issm/trunk/src/m/contrib/musselman/write_netCDF.py	(revision 27882)
+++ /issm/trunk/src/m/contrib/musselman/write_netCDF.py	(revision 27882)
@@ -0,0 +1,483 @@
+# imports
+import netCDF4
+from netCDF4 import Dataset
+import numpy as np
+import numpy.ma as ma
+import time
+import os
+from model import *
+from results import *
+from m1qn3inversion import m1qn3inversion
+from taoinversion import taoinversion
+#import OrderedStruct
+
+
+'''
+Given a md, this set of functions will perform the following:
+    1. Enter each nested class of the md.
+    2. View each attribute of each nested class.
+    3. Compare state of attribute in the model to an empty model class.
+    4. If states are identical, pass.
+    5. Otherwise, create nested groups named after class structure.
+    6. Create variable named after class attribute and assign value to it.
+'''
+
+
+def write_netCDF(md, filename: str):
+    print('Python C2NetCDF4 v1.1.14')
+    '''
+    md = model() class instance to be saved
+    filename = path and name to save file under
+    '''
+    
+    # Create a NetCDF file to write to
+    make_NetCDF(filename)
+    
+    # Create an instance of an empty md class to compare md_var against
+    global empty_model
+    empty_model = model()
+
+    # Walk through the md class and compare subclass states to empty_model
+    walk_through_model(md)
+
+    # in order to handle some subclasses in the results class, we have to utilize this band-aid
+    # there will likely be more band-aids added unless a class name library is created with all class names that might be added to a md
+    try:
+        # if results has meaningful data, save the name of the subclass and class instance
+        NetCDF.groups['results']
+        results_subclasses_bandaid(md)
+        # otherwise, ignore
+    except KeyError:
+        pass
+        
+    NetCDF.close()
+    print('Model successfully saved as NetCDF4')
+    
+
+def results_subclasses_bandaid(md):
+    '''
+    Record, in the 'results' group, which class each attribute of md.results
+    is an instance of (solutionstep / solution / resultsdakota), so the reader
+    can rebuild the same structure. Writes one string list per class kind.
+    '''
+    # since the results class may have nested classes within it, we need to record the name of the 
+    # nested class instance variable as it appears in the md that we're trying to save
+    # quality_control collects one marker per recognized instance; its length
+    # is compared against the number of attributes at the end
+    quality_control = []
+
+    # we save lists of instances to the netcdf
+    solutions = []
+    solutionsteps = []
+    resultsdakotas = []
+    
+    for class_instance_name in md.results.__dict__.keys():
+        print(class_instance_name)
+        # for each class instance in results, see which class its from and record that info in the netcdf to recreate structure later
+        # check to see if there is a solutionstep class instance
+        if isinstance(md.results.__dict__[class_instance_name],solutionstep):
+            quality_control.append(1)
+            solutionsteps.append(class_instance_name)
+
+        # check to see if there is a solution class instance
+        if isinstance(md.results.__dict__[class_instance_name],solution):
+            quality_control.append(1)
+            solutions.append(class_instance_name)
+
+        # check to see if there is a resultsdakota class instance
+        if isinstance(md.results.__dict__[class_instance_name],resultsdakota):
+            quality_control.append(1)
+            resultsdakotas.append(class_instance_name)
+
+    if solutionsteps != []:
+        write_string_to_netcdf(variable_name=str('solutionstep'), address_of_child=solutionsteps, group=NetCDF.groups['results'], list=True)
+
+    if solutions != []:
+        write_string_to_netcdf(variable_name=str('solution'), address_of_child=solutions, group=NetCDF.groups['results'], list=True)
+
+    if resultsdakotas != []:
+        write_string_to_netcdf(variable_name=str('resultsdakota'), address_of_child=resultsdakotas, group=NetCDF.groups['results'], list=True)
+
+    
+    # if any attribute matched none of the three known classes, report it.
+    # NOTE(review): the type printed below is of the LAST instance inspected,
+    # which is not necessarily the unsupported one.
+    if len(quality_control) != len(md.results.__dict__.keys()):
+        print('Error: The class instance within your md.results class is not currently supported by this application')
+        print(type(md.results.__dict__[class_instance_name]))
+    else:
+        print('The results class was successfully stored on disk')
+
+
+def make_NetCDF(filename: str):
+    # If file already exists delete / rename it
+    if os.path.exists(filename):
+        print('File {} allready exist'.format(filename))
+    
+        # If so, inqure for a new name or to do delete the existing file
+        newname = input('Give a new name or "delete" to replace: ')
+
+        if newname == 'delete':
+            os.remove(filename)
+        else:
+            print(('New file name is {}'.format(newname)))
+            filename = newname
+    else:
+        # Otherwise create the file and define it globally so other functions can call it
+        global NetCDF
+        NetCDF = Dataset(filename, 'w', format='NETCDF4')
+        NetCDF.history = 'Created ' + time.ctime(time.time())
+        NetCDF.createDimension('Unlim', None)  # unlimited dimension
+        NetCDF.createDimension('float', 1)     # single integer dimension
+        NetCDF.createDimension('int', 1)       # single float dimension
+    
+    print('Successfully created ' + filename)
+
+
+def walk_through_model(md):
+    # Iterate over first layer of md_var attributes and assume this first layer is only classes
+    for group in md.__dict__.keys():
+        address = md.__dict__[group]
+        empty_address = empty_model.__dict__[group]
+        # we need to record the layers of the md so we can save them to the netcdf file
+        layers = [group]
+
+        # Recursively walk through subclasses
+        walk_through_subclasses(address, empty_address, layers)       
+
+
+def walk_through_subclasses(address, empty_address, layers: list):
+    '''
+    Recursively walk *address* (a model subclass instance), comparing each
+    attribute against the same attribute on the pristine model
+    (*empty_address*). Attributes that differ are written to the NetCDF via
+    create_group(); *layers* is the dotted path of attribute names so far.
+    '''
+    # See if we have an object with a __dict__ of attributes or not
+    try:
+        address.__dict__.keys()
+        is_object = True
+    except: is_object = False # this is not an object with keys
+
+    if is_object:
+        # enter the subclass, see if it has nested classes and/or attributes
+        # then compare attributes between mds and write to netCDF if they differ
+        # if subclass found, walk through it and repeat
+        for child in address.__dict__.keys():
+            # record the current location
+            current_layer = layers.copy()
+            current_layer.append(child)
+            
+            # navigate to child in each md
+            address_of_child = address.__dict__[child]
+            
+            # if the current object is a results.<solution> object and has the steps attr it needs special treatment
+            if isinstance(address_of_child, solution) and len(address_of_child.steps) != 0:
+                create_group(address_of_child, current_layer, is_struct = True)
+
+            # if the variable is an array, assume it has relevant data (this is because the next line cannot evaluate "==" with an array)
+            elif isinstance(address_of_child, np.ndarray):
+                create_group(address_of_child, current_layer)
+            
+            # see if the child exists in the empty md. If not, record it in the netcdf
+            else:
+                try: 
+                    address_of_child_in_empty_class = empty_address.__dict__[child]
+                    # if that line worked, we can see how the mds' attributes at this layer compare:
+    
+                    # if the attributes are identical we don't need to save anything
+                    # NOTE(review): '==' is assumed to return a plain bool here;
+                    # types with array-like equality would raise -- confirm
+                    if address_of_child == address_of_child_in_empty_class:
+                        walk_through_subclasses(address_of_child, address_of_child_in_empty_class, current_layer)
+    
+                    # If it has been modified, record it in the NetCDF file
+                    else:
+                        create_group(address_of_child, current_layer)
+                        walk_through_subclasses(address_of_child, address_of_child_in_empty_class, current_layer)
+    
+                except KeyError: # record in netcdf and continue to walk thru md
+                    walk_through_subclasses(address_of_child, empty_address, current_layer)
+                    create_group(address_of_child, current_layer)
+    else: pass
+
+
+def create_group(address_of_child, layers, is_struct = False):
+
+    # Handle the first layer of the group(s)
+    group_name = layers[0]
+    try:
+        group = NetCDF.createGroup(str(group_name))
+    except:
+        group = NetCDF.groups[str(group_name)]
+
+    # need to check if inversion or m1qn3inversion class
+    if group_name == 'inversion':
+        check_inversion_class(address_of_child)
+    else: pass
+
+    # if the data is nested, create nested groups to match class structure
+    if len(layers) > 2:
+        for name in layers[1:-1]:
+            try:
+                group = group.createGroup(str(name))
+            except:
+                group = NetCDF.groups[str(name)]
+    else: pass
+
+    # Lastly, handle the variable(s)
+    if is_struct:
+        parent_struct_name = layers[-1]
+        copy_nested_results_struct(parent_struct_name, address_of_child, group)
+    
+    else:
+        variable_name = layers[-1]
+        create_var(variable_name, address_of_child, group)
+            
+
+
+def singleton(func):
+    """
+    A decorator to ensure a function is only executed once.
+
+    The first call's return value is cached on the wrapper and returned,
+    without re-running *func*, on every subsequent call.
+
+    NOTE(review): the has_run flag lives on the wrapper itself and persists
+    for the life of the process, so calling write_netCDF() twice in one
+    session skips check_inversion_class() the second time -- confirm this is
+    intended. functools.wraps is not applied, so the wrapped function's
+    name/docstring are masked by the wrapper's.
+    """
+    def wrapper(*args, **kwargs):
+        if not wrapper.has_run:
+            wrapper.result = func(*args, **kwargs)
+            wrapper.has_run = True
+        return wrapper.result
+    wrapper.has_run = False
+    wrapper.result = None
+    return wrapper
+    
+
+@singleton
+def check_inversion_class(address_of_child):
+    print('inversion ... ')
+    # need to make sure that we have the right inversion class: inversion, m1qn3inversion, taoinversion
+    if isinstance(address_of_child, m1qn3inversion):
+        write_string_to_netcdf(variable_name=str('inversion_class_name'), address_of_child=str('m1qn3inversion'), group=NetCDF.groups['inversion'])
+        print('Successfully saved inversion class instance ' + 'm1qn3inversion')
+    elif isinstance(address_of_child, taoinversion):
+        write_string_to_netcdf(variable_name=str('inversion_class_name'), address_of_child=str('taoinversion'), group=NetCDF.groups['inversion'])
+        print('Successfully saved inversion class instance ' + 'taoinversion')
+    else:
+        write_string_to_netcdf(variable_name=str('inversion_class_name'), address_of_child=str('inversion'), group=NetCDF.groups['inversion'])
+        print('Successfully saved inversion class instance ' + 'inversion')
+
+
+
+def copy_nested_results_struct(parent_struct_name, address_of_struct, group):
+    '''
+        This function takes a solution class instance and saves the solutionstep instances from <solution>.steps to the netcdf. 
+
+        To do this, we get the number of dimensions (substructs) of the parent struct.
+        Next, we iterate through each substruct and record the data. 
+        For each substruct, we create a subgroup of the main struct.
+        For each variable, we create dimensions that are assigned to each subgroup uniquely.
+    '''
+    print("Beginning transfer of nested MATLAB struct to the NetCDF")
+    
+    # make a new subgroup to contain all the others:
+    group = group.createGroup(str(parent_struct_name))
+
+    # make sure other systems can flag the nested struct type
+    write_string_to_netcdf('this_is_a_nested', 'struct', group, list=False)
+
+    # other systems know the name of the parent struct because it's covered by the results/qmu functions above
+    # (despite the name, no_of_dims is the number of steps/substructs)
+    no_of_dims = len(address_of_struct)
+    for substruct in range(0, no_of_dims):
+        # subgroup names are zero-based: '1x0', '1x1', ..., '1x(n-1)'
+        name_of_subgroup = '1x' + str(substruct)
+        subgroup = group.createGroup(str(name_of_subgroup))
+
+        # do some housekeeping to keep track of the current layer
+        current_substruct = address_of_struct[substruct]
+        substruct_fields = current_substruct.__dict__.keys()
+
+        # now we need to iterate over each variable of the nested struct and save it to this new subgroup
+        for variable in substruct_fields:
+            address_of_child = current_substruct.__dict__[variable]
+            create_var(variable, address_of_child, subgroup)
+
+    print(f'Successfully transferred struct {parent_struct_name} to the NetCDF\n')
+    
+        
+def create_var(variable_name, address_of_child, group):
+    # There are lots of different variable types that we need to handle from the md class
+    
+    # This first conditional statement will catch numpy arrays of any dimension and save them
+    if isinstance(address_of_child, np.ndarray):
+        write_numpy_array_to_netcdf(variable_name, address_of_child, group)
+    
+    # check if it's an int
+    elif isinstance(address_of_child, int) or isinstance(address_of_child, np.integer):
+        variable = group.createVariable(variable_name, int, ('int',))
+        variable[:] = address_of_child
+    
+    # or a float
+    elif isinstance(address_of_child, float) or isinstance(address_of_child, np.floating):
+        variable = group.createVariable(variable_name, float, ('float',))
+        variable[:] = address_of_child
+
+    # or a string
+    elif isinstance(address_of_child, str):
+        write_string_to_netcdf(variable_name, address_of_child, group)
+
+    #or a bool
+    elif isinstance(address_of_child, bool) or isinstance(address_of_child, np.bool_):
+        # netcdf4 can't handle bool types like True/False so we convert all to int 1/0 and add an attribute named units with value 'bool'
+        variable = group.createVariable(variable_name, int, ('int',))
+        variable[:] = int(address_of_child)
+        variable.units = "bool"
+        
+    # or an empty list
+    elif isinstance(address_of_child, list) and len(address_of_child)==0:
+        variable = group.createVariable(variable_name, int, ('int',))
+
+    # or a list of strings -- this needs work as it can only handle a list of 1 string
+    elif isinstance(address_of_child,list) and isinstance(address_of_child[0],str):
+        for string in address_of_child:
+            write_string_to_netcdf(variable_name, string, group, list=True)
+
+    # or a regular list
+    elif isinstance(address_of_child, list):
+        print('made list w/ unlim dim')
+        variable = group.createVariable(variable_name, type(address_of_child[0]), ('Unlim',))
+        variable[:] = address_of_child
+
+    # anything else... (will likely need to add more cases; ie helpers.OrderedStruct)
+    else:
+        try:
+            variable = group.createVariable(variable_name, type(address_of_child), ('Unlim',))
+            variable[:] = address_of_child
+            print('Used Unlim Dim')
+        except TypeError: pass # this would mean that we have an object, so we just let this continue to feed thru the recursive function above
+        except Exception as e:
+            print(f'There was error with {variable_name} in {group}')
+            print("The error message is:")
+            print(e)
+            print('Datatype given: ' + str(type(address_of_child)))
+
+    print(f'Successfully transferred data from {variable_name} to the NetCDF')
+    
+
+def write_string_to_netcdf(variable_name, address_of_child, group, list=False):
+    '''
+    Write a string (or, with list=True, a list of strings) to *group* as a
+    char array under *variable_name*.
+
+    NOTE(review): the keyword argument 'list' shadows the builtin list();
+    renaming it would break existing keyword callers, so it is kept as-is.
+    '''
+    # netcdf and strings dont get along.. we have to do it 'custom':
+    # if we hand it an address we need to do it this way:
+    if list == True:
+        """
+        Save a list of strings to a NetCDF file.
+    
+        Convert a list of strings to a numpy.char_array with utf-8 encoded elements
+        and size rows x cols with each row the same # of cols and save to NetCDF
+        as char array.
+        """
+        try:
+            strings = address_of_child
+            # get dims of array to save
+            rows = len(strings)
+            cols = len(max(strings, key = len))
+    
+            # Define dimensions for the strings
+            rows_name = 'rows' + str(rows)
+            cols_name = 'cols' + str(cols)
+            # a failure here means the dimension already exists in this group
+            try:
+                group.createDimension(rows_name, rows)
+            except: pass
+
+            try:
+                group.createDimension(cols_name, cols)
+            except: pass
+                
+            # Create a variable to store the strings
+            string_var = group.createVariable(str(variable_name), 'S1', (rows_name, cols_name))
+    
+            # break the list into a list of lists of words with the same length as the longest word:
+            # make words same sizes by adding spaces 
+            modded_strings = [word + ' ' * (len(max(strings, key=len)) - len(word)) for word in strings]
+            # encoded words into list of encoded lists
+            new_list = [[s.encode('utf-8') for s in word] for word in modded_strings]
+    
+            # make numpy char array with dims rows x cols
+            # (uninitialized, but every row is overwritten by the loop below)
+            arr = np.chararray((rows, cols))
+    
+            # fill array with list of encoded lists
+            for i in range(len(new_list)):
+                arr[i] = new_list[i]
+    
+            # save array to netcdf file
+            string_var[:] = arr
+    
+            print(f'Saved {len(modded_strings)} strings to {variable_name}')
+    
+        except Exception as e:
+            print(f'Error: {e}')
+        
+    else:
+        the_string_to_save = address_of_child
+        length_of_the_string = len(the_string_to_save)
+        numpy_datatype = 'S' + str(length_of_the_string)
+        str_out = netCDF4.stringtochar(np.array([the_string_to_save], dtype=numpy_datatype))        
+    
+        # we'll need to make a new dimension for the string if it doesn't already exist
+        name_of_dimension = 'char' + str(length_of_the_string)
+        try: 
+            group.createDimension(name_of_dimension, length_of_the_string)
+        except: pass
+        # this is another band-aid to the results sub classes...
+        try:
+            # now we can make a variable in this dimension:
+            string = group.createVariable(variable_name, 'S1', (name_of_dimension))
+            #finally we can write the variable:
+            string[:] = str_out
+        #except RuntimeError: pass
+        except Exception as e:
+            print(f'There was an error saving a string from {variable_name}')
+            print(e)
+
+
+
+def write_numpy_array_to_netcdf(variable_name, address_of_child, group):
+    # to make a nested array in netCDF, we have to get the dimensions of the array,
+    # create corresponding dimensions in the netCDF file, then we can make a variable
+    # in the netCDF with dimensions identical to those in the original array
+    
+    # start by getting the data type at the lowest level in the array:
+    typeis = address_of_child.dtype
+
+    # catch boolean arrays here
+    if typeis == bool:
+        # sometimes an array has just 1 element in it, we account for those cases here:
+        if len(address_of_child) == 1:
+            variable = group.createVariable(variable_name, int, ('int',))
+            variable[:] = int(address_of_child)
+            variable.units = "bool"
+        else:
+            # make the dimensions
+            dimensions = []
+            for dimension in np.shape(address_of_child):
+                dimensions.append(str('dim' + str(dimension)))
+                # if the dimension already exists we can't have a duplicate
+                try:
+                    group.createDimension(str('dim' + str(dimension)), dimension)
+                except: pass # this would mean that the dimension already exists
+    
+            # create the variable:
+            variable = group.createVariable(variable_name, int, tuple(dimensions))
+            # write the variable:
+            variable[:] = address_of_child.astype(int)
+            variable.units = "bool"
+
+    # handle all other datatypes here
+    else:
+        # sometimes an array has just 1 element in it, we account for those cases here:
+        if len(address_of_child) == 1:
+            if typeis is np.dtype('float64'):
+                variable = group.createVariable(variable_name, typeis, ('float',))
+                variable[:] = address_of_child[0]
+            elif typeis is np.dtype('int64'):
+                variable = group.createVariable(variable_name, typeis, ('int',))
+                variable[:] = address_of_child[0]
+            else:
+                print('Encountered single datatype that was not float64 or int64, saving under unlimited dimension, may cause errors.')
+                variable = group.createVariable(variable_name, typeis, ('Unlim',))
+                variable[:] = address_of_child[0]
+    
+        # This catches all arrays/lists:
+        else:
+            # make the dimensions
+            dimensions = []
+            for dimension in np.shape(address_of_child):
+                dimensions.append(str('dim' + str(dimension)))
+                # if the dimension already exists we can't have a duplicate
+                try:
+                    group.createDimension(str('dim' + str(dimension)), dimension)
+                except: pass # this would mean that the dimension already exists
+    
+            # create the variable:
+            variable = group.createVariable(variable_name, typeis, tuple(dimensions))
+    
+            # write the variable:
+            variable[:] = address_of_child
Index: /issm/trunk/src/m/contrib/musselman/write_netCDF_beta.py
===================================================================
--- /issm/trunk/src/m/contrib/musselman/write_netCDF_beta.py	(revision 27881)
+++ 	(revision )
@@ -1,220 +1,0 @@
-# imports
-from netCDF4 import Dataset
-import numpy as np
-import numpy.ma as ma
-import time
-from os import path, remove
-from model import *
-
-
-'''
-this is like my todo list
-
-Given a model, this set of functions will perform the following:
-    1. Enter each nested class of the model.
-    2. View each attribute of each nested class.
-    3. Compare state of attribute in the model to an empty model class.
-    4. If states are identical, pass.
-    5. Else, create group named after original class.
-    6. Create variable named after nested class attribute and assign value to it.
-    7. 
-'''
-
-'''
-need to add cases for nested arrays, dicts, etc...
-To do this I need to: 
-    know exactly the data types that are generating problems
-'''
-
-
-def write_netCDF(model_var, model_name: str, filename: str):
-    '''
-    model_var = class object to be saved
-    model_name = name of class instance variable but inside quotation marks: ie if md = model(), then model_name = 'md'
-    filename = path and name to save file under
-    '''
-      
-    globals()[model_name] = model_var
-
-    # a sanity check
-    print('sanity check to make sure names are defined: ' + str(model_var == eval(model_name)))
-    
-    # Create a NetCDF file to write to
-    make_NetCDF(filename)
-    
-    # Create an instance of an empty model class to compare model_var against
-    global empty_model
-    empty_model = model()
-
-    # Walk through the model_var class and compare subclass states to empty_model
-    walk_through_model(model_var, model_name)
-    
-    NetCDF.close()
-    
-
-    
-def make_NetCDF(filename: str):
-    # Check if file already exists
-    if path.exists(filename):
-        print('File {} allready exist'.format(filename))
-    
-        # If so, inqure for a new name or to do delete existing file
-        newname = input('Give a new name or "delete" to replace: ')
-
-        if newname == 'delete':
-            remove(filename)
-        else:
-            print(('New file name is {}'.format(newname)))
-            filename = newname
-
-    # Create file and define it globally (global variables are stored in memory/global namespace)
-    global NetCDF
-    NetCDF = Dataset(filename, 'w', format='NETCDF4')
-    NetCDF.history = 'Created ' + time.ctime(time.time())
-    NetCDF.createDimension('Unlim', None)  # unlimited dimension
-    NetCDF.createDimension('float', 1)     # single integer dimension
-    NetCDF.createDimension('int', 1)       # single float dimension
-    
-    print('Successfully created ' + filename)
-
-
-    
-def walk_through_model(model_var, model_name: str):
-    # Iterate over first layer of model_var attributes and assume this first layer is only classes
-    for group in model_var.__dict__.keys():
-        print(str(group))
-        adress = str(model_name + '.' + str(group))
-        print(adress)
-        # Recursively walk through subclasses
-        walk_through_subclasses(model_var, adress, model_name)       
-        
-
-        
-def walk_through_subclasses(model_var, adress: str, model_name: str):
-    # Iterate over each subclass' attributes
-    # Use try/except since it's either a class or it's not, no unknown exceptions
-    try:
-        # enter the subclass, see if it has nested classes and/or attributes
-        # then compare attributes between models and write to netCDF if they differ
-        # if subclass found, walk through it and repeat
-        for child in eval(adress + '.__dict__.keys()'):
-            # make a string variable so we can send thru this func again
-            adress_of_child = str(adress + '.' + str(child))
-            print('adress_of_child: ' + adress_of_child)
-            # If the attribute is unchanged, move onto the next layer
-            adress_of_child_in_empty_class = 'empty_model' + adress_of_child.removeprefix(str(model_name))
-            print('adress_of_child_in_empty_class: '+ adress_of_child_in_empty_class + '\n')
-            if type(child) == type(eval(adress_of_child_in_empty_class)):
-                print('passed a non-variable\n')
-                walk_through_subclasses(model_var, adress_of_child, model_name)
-            # If it has been modified, record it in the NetCDF file
-            else:
-                create_group(model_var, adress_of_child)
-                walk_through_subclasses(model_var, adress_of_child, model_name)
-    except Exception as e: print(e)
-
-
-    
def create_group(model_var, adress_of_child):
    """Create the (possibly nested) NetCDF groups mirroring the dotted class
    path in `adress_of_child`, then hand the final component to create_var().
    """
    print('entered create for: ' + adress_of_child + '\n')
    print('the type is: ' + str(type(eval(adress_of_child))) + '\n')
    # split 'md.group.sub.var' into its path components
    path_parts = adress_of_child.split('.')
    print(path_parts)

    # the component right after the model name is the top-level group
    group = NetCDF.createGroup(str(path_parts[1]))

    # deeper components (everything between the top group and the final
    # variable name) become nested groups
    if len(path_parts) > 3:
        for level in path_parts[2:-1]:
            group = group.createGroup(str(level))

    # the last component is the variable itself
    create_var(path_parts[-1], adress_of_child, group)
-
-
-
-
def create_var(variable_name, adress_of_child, group):
    """Write a single model attribute to `group`, dispatching on its type.

    variable_name   -- name of the NetCDF variable to create
    adress_of_child -- dotted-path string eval'd to obtain the value
    group           -- netCDF4 group the variable is created in
    """
    # numpy arrays of any dimension get their own handler
    if isinstance(eval(adress_of_child), np.ndarray):
        write_numpy_array_to_netcdf(variable_name, adress_of_child, group)

    # NOTE(review): bool is a subclass of int, so True/False land here and
    # are stored as plain ints -- confirm readers tolerate that
    elif isinstance(eval(adress_of_child), int):
        print('caught an int!')
        variable = group.createVariable(variable_name, int, ('int',))
        variable[:] = eval(adress_of_child)

    elif isinstance(eval(adress_of_child), float):
        print('caught a float!')
        variable = group.createVariable(variable_name, float, ('float',))
        variable[:] = eval(adress_of_child)

    else:
        try:
            variable = group.createVariable(variable_name, type(eval(adress_of_child)), ('Unlim',))
            # BUG FIX: the original rebound the local name
            # (`variable = eval(...)`) instead of writing into the NetCDF
            # variable, so the value was silently dropped.
            variable[:] = eval(adress_of_child)
        except Exception as e:
            print(e)

    print('successfully wrote ' + adress_of_child + ' to netcdf file')
-    
-    
-    
-
def write_numpy_array_to_netcdf(variable_name, adress_of_child, group):
    """Save a numpy array to `group`, creating a 'dim<size>' dimension per
    axis so the NetCDF variable's shape mirrors the array's shape.
    """
    print('entered write_numpy_array_to_netcdf for: ' + variable_name)

    # element dtype of the source array
    typeis = eval(adress_of_child + '.dtype')
    print('the type of elements are: ' + str(typeis))

    # single-element arrays go to the pre-made size-1 dimensions
    # NOTE(review): len() inspects only the first axis (a (1, n) array would
    # be mis-routed here) and raises TypeError on 0-d arrays -- confirm the
    # model never produces those shapes.
    if len(eval(adress_of_child)) == 1:
        # BUG FIX: dtype comparisons use '==' instead of 'is'; identity of
        # np.dtype instances is an implementation detail of numpy.
        if typeis == np.dtype('float64'):
            variable = group.createVariable(variable_name, typeis, ('float',))
            variable[:] = eval(adress_of_child)
        elif typeis == np.dtype('int64'):
            variable = group.createVariable(variable_name, typeis, ('int',))
            variable[:] = eval(adress_of_child)
        else:
            variable = group.createVariable(variable_name, typeis, ('Unlim',))
            variable[:] = eval(adress_of_child)

    # the general 1D case needs no extra dimensions
    elif len(np.shape(eval(adress_of_child))) == 1:
        variable = group.createVariable(variable_name, typeis, ('Unlim',))
        variable[:] = eval(adress_of_child)

    # >1D arrays: build one dimension per axis
    else:
        # make the dimensions
        dimensions = []
        for dimension in np.shape(eval(adress_of_child)):
            dimensions.append(str('dim' + str(dimension)))
            # if the dimension already exists we can't have a duplicate
            try:
                group.createDimension(str('dim' + str(dimension)), dimension)
            except: pass # this would mean that the dimension already exists

        print('the dimensions are: ' + str(dimensions))

        # create the variable:
        variable = group.createVariable(variable_name, typeis, tuple(dimensions))
        print('created variable OK')

        # write the variable:
        variable[:] = eval(adress_of_child)
-
-        
-        
-        
Index: /issm/trunk/src/m/contrib/musselman/write_netCDF_commit.py
===================================================================
--- /issm/trunk/src/m/contrib/musselman/write_netCDF_commit.py	(revision 27881)
+++ 	(revision )
@@ -1,489 +1,0 @@
-# imports
-import netCDF4
-from netCDF4 import Dataset
-import numpy as np
-import numpy.ma as ma
-import time
-import os
-from model import *
-from results import *
-from m1qn3inversion import m1qn3inversion
-from taoinversion import taoinversion
-
-
-'''
-Given a model, this set of functions will perform the following:
-    1. Enter each nested class of the model.
-    2. View each attribute of each nested class.
-    3. Compare state of attribute in the model to an empty model class.
-    4. If states are identical, pass.
-    5. Otherwise, create nested groups named after class structure.
-    6. Create variable named after class attribute and assign value to it.
-'''
-
-
def write_netCDF(model_var, model_name: str, filename: str):
    """Save an ISSM model instance to a NetCDF4 file.

    model_var  -- class object to be saved
    model_name -- name of the class instance variable, inside quotation
                  marks: ie if md = model(), then model_name = 'md'
    filename   -- path and name to save file under

    BUG FIX: this text used to sit *after* the first print statement, where
    it was a dead string expression rather than the function's docstring.
    """
    print('Python C2NetCDF4 v1.1.14')

    # bind model_var to the name model_name at module scope so the
    # eval()-based walkers can resolve dotted-path strings... very important
    globals()[model_name] = model_var

    # Create a NetCDF file to write to (sets the global NetCDF handle)
    make_NetCDF(filename)

    # Create an instance of an empty model class to compare model_var
    # against: only attributes that differ from it are written to disk
    global empty_model
    empty_model = model()

    # Walk through the model_var class and compare subclass states to empty_model
    walk_through_model(model_var, model_name)

    # in order to handle some subclasses in the results class, we have to utilize this band-aid
    # there will likely be more band-aids added unless a class name library is created with all class names that might be added to a model
    try:
        # if results has meaningful data, save the name of the subclass and class instance
        NetCDF.groups['results']
        results_subclasses_bandaid(model_var)
        # otherwise, ignore
    except KeyError:
        pass

    NetCDF.close()
    print('Model successfully saved as NetCDF4')
-    
-
-
def results_subclasses_bandaid(model_var):
    """Record, in the NetCDF 'results' group, which results subclass each
    nested instance belongs to so readers can rebuild the structure later.
    """
    # one entry per recognized instance; used for the sanity check at the end
    quality_control = []

    # instance names grouped by the class they belong to
    solutionsteps = []
    solutions = []
    resultsdakotas = []

    # (class, bucket) pairs, each tested independently for every instance
    recognized = [(solutionstep, solutionsteps),
                  (solution, solutions),
                  (resultsdakota, resultsdakotas)]

    for class_instance_name in model_var.results.__dict__.keys():
        print(class_instance_name)
        instance = model_var.results.__dict__[class_instance_name]
        # see which class the instance is from and file its name accordingly
        for results_class, bucket in recognized:
            if isinstance(instance, results_class):
                quality_control.append(1)
                bucket.append(class_instance_name)

    # save the collected name lists to the netcdf, one variable per class
    if solutionsteps != []:
        write_string_to_netcdf(variable_name=str('solutionstep'), address_of_child=solutionsteps, group=NetCDF.groups['results'], list=True)

    if solutions != []:
        write_string_to_netcdf(variable_name=str('solution'), address_of_child=solutions, group=NetCDF.groups['results'], list=True)

    if resultsdakotas != []:
        write_string_to_netcdf(variable_name=str('resultsdakota'), address_of_child=resultsdakotas, group=NetCDF.groups['results'], list=True)

    # every instance should have been recognized exactly once
    if len(quality_control) != len(model_var.results.__dict__.keys()):
        print('Error: The class instance within your model.results class is not currently supported by this application')
        print(type(model_var.results.__dict__[class_instance_name]))
        print(solutions)
        print(solutionsteps)
        print(resultsdakotas)
    else:
        print('The results class was successfully stored on disk')
-
-
-    
def make_NetCDF(filename: str):
    """Create the NetCDF4 output file and bind it to the global `NetCDF`.

    If `filename` already exists the user is asked either to delete it or
    to supply a replacement name; the dataset is then created either way.
    """
    # If file already exists, resolve the conflict before creating anything
    if os.path.exists(filename):
        print('File {} allready exist'.format(filename))

        # If so, inquire for a new name or whether to delete the existing file
        newname = input('Give a new name or "delete" to replace: ')

        if newname == 'delete':
            os.remove(filename)
        else:
            print(('New file name is {}'.format(newname)))
            filename = newname

    # BUG FIX: the dataset used to be created only in the file-did-not-exist
    # branch, so both the rename and the delete paths above left the global
    # `NetCDF` handle undefined and the save crashed later. Create it
    # unconditionally once the filename is settled.
    global NetCDF
    NetCDF = Dataset(filename, 'w', format='NETCDF4')
    NetCDF.history = 'Created ' + time.ctime(time.time())
    NetCDF.createDimension('Unlim', None)  # unlimited dimension
    NetCDF.createDimension('float', 1)     # single float dimension
    NetCDF.createDimension('int', 1)       # single int dimension

    print('Successfully created ' + filename)
-
-
-    
def walk_through_model(model_var, model_name: str):
    """Kick off the recursive walk over every top-level attribute of the
    model (the first layer is assumed to contain only subclasses)."""
    for attribute in model_var.__dict__.keys():
        # dotted-path string, e.g. 'md.mesh', for the eval()-based recursion
        walk_through_subclasses(model_var, str(model_name + '.' + str(attribute)), model_name)
-
-
-
def walk_through_subclasses(model_var, address: str, model_name: str):
    """Recursively walk the attributes reachable from `address` (a dotted
    path such as 'md.mesh.x') and write to the NetCDF any attribute whose
    value differs from the corresponding attribute of the pristine global
    `empty_model`.

    model_var  -- the model instance being saved (forwarded to create_group)
    address    -- dotted-path string rooted at model_name, eval'd below
    model_name -- name of the global variable bound to model_var
    """
    # Iterate over each subclass' attributes
    # Use try/except since it's either a class w/ attributes or it's not, no unknown exceptions
    try:
        # enter the subclass, see if it has nested classes and/or attributes
        # then compare attributes between models and write to netCDF if they differ
        # if subclass found, walk through it and repeat
        for child in eval(address + '.__dict__.keys()'):
            # make a string variable so we can send thru this func again
            address_of_child = str(address + '.' + str(child))
            # If the attribute is unchanged, move onto the next layer
            address_of_child_in_empty_class = 'empty_model' + address_of_child.removeprefix(str(model_name))
            # using try/except here because sometimes a model can have class instances/attributes that are not 
            # in the framework of an empty model. If this is the case, we move to the except statement
            try:
                # if the current object is a results.<solution> object and has the steps attr it needs special treatment
                if isinstance(eval(address_of_child), solution) and len(eval(address_of_child + '.steps')) != 0:
                    create_group(model_var, address_of_child, is_struct = True)
                # if the variable is an array, assume it has relevant data (this is because the next line cannot evaluate "==" with an array)
                elif isinstance(eval(address_of_child), np.ndarray):
                    create_group(model_var, address_of_child)
                # if the attributes are identical we don't need to save anything
                elif eval(address_of_child) == eval(address_of_child_in_empty_class):
                    walk_through_subclasses(model_var, address_of_child, model_name)
                # If it has been modified, record it in the NetCDF file
                else:
                    create_group(model_var, address_of_child)
                    walk_through_subclasses(model_var, address_of_child, model_name)
            # AttributeError since the empty_model wouldn't have the same attribute as our model
            except AttributeError:
                # THE ORDER OF THESE LINES IS CRITICAL
                # NOTE(review): recursing before create_group appears to be
                # deliberate (per the warning above) -- presumably so nested
                # content is saved before the leaf is recorded; confirm
                # before reordering.
                try:
                    walk_through_subclasses(model_var, address_of_child, model_name)
                    create_group(model_var, address_of_child)
                except:
                    pass
            except Exception as e: print(e)
    # leaf values have no __dict__ -- expected end of recursion
    except AttributeError: pass
    except Exception as e: print(e)
-
-
-        
def create_group(model_var, address_of_child, is_struct = False):
    """Create the (possibly nested) NetCDF groups mirroring the dotted class
    path in `address_of_child`, then write the final component either as a
    nested results struct (is_struct=True) or as a plain variable."""
    # split 'md.group.sub.var' into its path components
    path_parts = address_of_child.split('.')

    # the component right after the model name is the top-level group
    group_name = path_parts[1]
    group = NetCDF.createGroup(str(group_name))

    # the inversion group additionally records which inversion class is used
    if group_name == 'inversion':
        check_inversion_class(model_var)

    # intermediate components (between top group and variable) become nested groups
    if len(path_parts) > 2:
        for level in path_parts[2:-1]:
            group = group.createGroup(str(level))

    # finally, write the last component
    if is_struct:
        # a results <solution> object with steps: copy the whole struct
        copy_nested_results_struct(path_parts[-1], address_of_child, group)
    else:
        create_var(path_parts[-1], address_of_child, group)
-
-
def check_inversion_class(model_var):
    """Record in the NetCDF 'inversion' group which inversion class the
    model uses (inversion, m1qn3inversion, or taoinversion), so the reader
    can instantiate the right one."""
    inversion_instance = model_var.__dict__['inversion']
    # pick the class-name string the reader will use to rebuild the instance
    if isinstance(inversion_instance, m1qn3inversion):
        class_name = 'm1qn3inversion'
    elif isinstance(inversion_instance, taoinversion):
        class_name = 'taoinversion'
    else:
        class_name = 'inversion'
    write_string_to_netcdf(variable_name=str('inversion_class_name'), address_of_child=str(class_name), group=NetCDF.groups['inversion'])
    print('Successfully saved inversion class instance ' + class_name)
-
-
-
-
-
def copy_nested_results_struct(parent_struct_name, address_of_struct, group):
    '''
        Save the solutionstep instances held in a <solution> object to the netcdf.

        A subgroup named after the parent struct is created; inside it, one
        subgroup per step is created (named '1x<i>', mirroring MATLAB's 1xN
        struct indexing) and every field of that step is written as a
        variable via create_var().

        parent_struct_name -- name of the solution attribute (last path component)
        address_of_struct  -- dotted-path string eval'd to reach the solution object
        group              -- netCDF4 group to create the struct subgroup in
    '''
    print("Beginning transfer of nested MATLAB struct to the NetCDF")
    
    # make a new subgroup to contain all the others:
    group = group.createGroup(str(parent_struct_name))

    # flag variable so readers can recognize the nested-struct layout
    # ('struct' is not eval-able, so write_string_to_netcdf stores it literally)
    write_string_to_netcdf('this_is_a_nested', 'struct', group, list=False)

    # other systems know the name of the parent struct because it's covered by the results/qmu functions above
    address_of_struct_string = address_of_struct
    address_of_struct = eval(address_of_struct)
    
    # number of steps -- assumes len(<solution>) counts the steps, TODO confirm
    no_of_dims = len(address_of_struct)
    for substruct in range(0, no_of_dims):
        # one subgroup per step, named '1x0', '1x1', ... (MATLAB 1xN style)
        name_of_subgroup = '1x' + str(substruct)
        subgroup = group.createGroup(str(name_of_subgroup))

        # do some housekeeping to keep track of the current layer
        current_substruct = address_of_struct[substruct]
        current_substruct_string = address_of_struct_string + '[' + str(substruct) + ']'
        substruct_fields = current_substruct.__dict__.keys()

        # now we need to iterate over each variable of the nested struct and save it to this new subgroup
        for variable in substruct_fields:
            # NOTE(review): this value is never used -- create_var evals the
            # string path below instead
            address_of_child = current_substruct.__dict__[variable]
            address_of_child_string = current_substruct_string + '.' + str(variable)
            create_var(variable, address_of_child_string, subgroup)
    
    print(f'Successfully transferred struct {parent_struct_name} to the NetCDF\n')
-    
-        
-
-
-
def create_var(variable_name, address_of_child, group):
    """Write a single model attribute to `group`, dispatching on its type.

    variable_name    -- name of the NetCDF variable to create
    address_of_child -- dotted-path string eval'd to obtain the value
    group            -- netCDF4 group the variable is created in
    """
    # This first conditional statement will catch numpy arrays of any dimension and save them
    if isinstance(eval(address_of_child), np.ndarray):
        write_numpy_array_to_netcdf(variable_name, address_of_child, group)

    # BUG FIX: bool must be tested before int -- bool is a subclass of int,
    # so with the old order Python True/False fell into the int branch and
    # the units="bool" marker below was unreachable for them.
    elif isinstance(eval(address_of_child), bool) or isinstance(eval(address_of_child), np.bool_):
        # netcdf4 can't handle bool types like True/False so we convert all to int 1/0 and add an attribute named units with value 'bool'
        variable = group.createVariable(variable_name, int, ('int',))
        variable[:] = int(eval(address_of_child))
        variable.units = "bool"

    # check if it's an int (including numpy integer scalars)
    elif isinstance(eval(address_of_child), int) or isinstance(eval(address_of_child), np.integer):
        variable = group.createVariable(variable_name, int, ('int',))
        variable[:] = eval(address_of_child)

    # or a float (including numpy floating scalars)
    elif isinstance(eval(address_of_child), float) or isinstance(eval(address_of_child), np.floating):
        variable = group.createVariable(variable_name, float, ('float',))
        variable[:] = eval(address_of_child)

    # or a string
    elif isinstance(eval(address_of_child), str):
        write_string_to_netcdf(variable_name, address_of_child, group)

    # or an empty list: record only a placeholder variable
    elif isinstance(eval(address_of_child), list) and len(eval(address_of_child))==0:
        variable = group.createVariable(variable_name, int, ('int',))

    # or a list of strings -- this needs work as it can only handle a list of 1 string
    elif isinstance(eval(address_of_child),list) and isinstance(eval(address_of_child)[0],str):
        for string in eval(address_of_child):
            write_string_to_netcdf(variable_name, string, group, list=True)

    # or a regular list: element type is taken from the first entry
    elif isinstance(eval(address_of_child), list):
        print('made list w/ unlim dim')
        variable = group.createVariable(variable_name, type(eval(address_of_child)[0]), ('Unlim',))
        variable[:] = eval(address_of_child)

    # anything else... (will likely need to add more cases; ie dict)
    else:
        try:
            variable = group.createVariable(variable_name, type(eval(address_of_child)), ('Unlim',))
            variable[:] = eval(address_of_child)
            print('Used Unlim Dim')
        except Exception as e:
            print(f'There was error with {variable_name} in {address_of_child}')
            print("The error message is:")
            print(e)
            print('Datatype given: ' + str(type(eval(address_of_child))))

    print('Successfully transferred data from ' + address_of_child + ' to the NetCDF')
-    
-
-
def write_string_to_netcdf(variable_name, address_of_child, group, list=False):
    """Save a string, or a list of strings, as char array(s) in `group`.

    variable_name    -- name of the NetCDF variable to create
    address_of_child -- dotted-path string to eval, a literal string, or
                        (when list=True) a list of strings
    group            -- netCDF4 group the variable is created in
    list             -- True when address_of_child is a list of strings
                        (NOTE: shadows the builtin, but the name is part of
                        the public keyword interface, so it is kept)
    """
    # netcdf and strings dont get along.. we have to do it 'custom':
    if list == True:
        # Convert the list of strings to a rows x cols numpy char array of
        # utf-8 encoded single characters (every row padded with spaces to
        # the longest string) and save it as one 2-D char variable.
        try:
            strings = address_of_child
            # get dims of array to save
            rows = len(strings)
            cols = len(max(strings, key = len))

            # Define dimensions for the strings; duplicates are skipped
            rows_name = 'rows' + str(rows)
            cols_name = 'cols' + str(cols)
            try:
                group.createDimension(rows_name, rows)
            except: pass

            try:
                group.createDimension(cols_name, cols)
            except: pass

            # Create a variable to store the strings
            string_var = group.createVariable(str(variable_name), 'S1', (rows_name, cols_name))

            # make words same sizes by adding spaces
            modded_strings = [word + ' ' * (len(max(strings, key=len)) - len(word)) for word in strings]
            # encode each word into a list of single-byte elements
            new_list = [[s.encode('utf-8') for s in word] for word in modded_strings]

            # make numpy char array with dims rows x cols
            arr = np.chararray((rows, cols))

            # fill array with list of encoded lists
            for i in range(len(new_list)):
                arr[i] = new_list[i]

            # save array to netcdf file
            string_var[:] = arr

            print(f'Saved {len(modded_strings)} strings to {variable_name}')

        except Exception as e:
            print(f'Error: {e}')

    else:
        # address_of_child may be an eval-able dotted path...
        try:
            the_string_to_save = eval(address_of_child)
            length_of_the_string = len(the_string_to_save)
            numpy_datatype = 'S' + str(length_of_the_string)
            str_out = netCDF4.stringtochar(np.array([the_string_to_save], dtype=numpy_datatype))
        # ...otherwise we need to treat it like a literal string:
        except:
            the_string_to_save = address_of_child
            length_of_the_string = len(the_string_to_save)
            numpy_datatype = 'S' + str(length_of_the_string)
            str_out = netCDF4.stringtochar(np.array([the_string_to_save], dtype=numpy_datatype))

        # we'll need to make a new dimension for the string if it doesn't already exist
        name_of_dimension = 'char' + str(length_of_the_string)
        try:
            group.createDimension(name_of_dimension, length_of_the_string)
        except: pass
        # this is another band-aid to the results sub classes...
        try:
            # now we can make a variable in this dimension:
            string = group.createVariable(variable_name, 'S1', (name_of_dimension))
            #finally we can write the variable:
            string[:] = str_out
        except RuntimeError: pass
        # BUG FIX: the original printed the Exception *class* object
        # (`print(Exception)`); bind and print the caught instance instead.
        except Exception as e:
            print(e)
-
-
-
-
-
def write_numpy_array_to_netcdf(variable_name, address_of_child, group):
    """Save a numpy array to `group`, creating a 'dim<size>' dimension per
    axis so the NetCDF variable mirrors the array's shape.

    Boolean arrays are stored as 0/1 ints with a units='bool' attribute
    because netCDF4 has no native bool type.
    """
    # start by getting the data type at the lowest level in the array:
    typeis = eval(address_of_child + '.dtype')

    # catch boolean arrays here
    if typeis == bool:
        # single-element arrays go to the pre-made size-1 'int' dimension
        # NOTE(review): len() inspects only the first axis and raises on 0-d
        # arrays -- confirm the model never produces those shapes
        if len(eval(address_of_child)) == 1:
            variable = group.createVariable(variable_name, int, ('int',))
            variable[:] = int(eval(address_of_child))
            variable.units = "bool"
        else:
            # make one 'dim<size>' dimension per axis
            dimensions = []
            for dimension in np.shape(eval(address_of_child)):
                dimensions.append(str('dim' + str(dimension)))
                # if the dimension already exists we can't have a duplicate
                try:
                    group.createDimension(str('dim' + str(dimension)), dimension)
                except: pass # this would mean that the dimension already exists

            # create the variable and store the bools as 0/1 ints:
            variable = group.createVariable(variable_name, int, tuple(dimensions))
            variable[:] = eval(address_of_child + '.astype(int)')
            variable.units = "bool"

    # handle all other datatypes here
    else:
        # sometimes an array has just 1 element in it, we account for those cases here:
        if len(eval(address_of_child)) == 1:
            # BUG FIX: dtype comparisons use '==' instead of 'is'; identity
            # of np.dtype instances is an implementation detail of numpy.
            if typeis == np.dtype('float64'):
                variable = group.createVariable(variable_name, typeis, ('float',))
                variable[:] = eval(address_of_child + '[0]')
            elif typeis == np.dtype('int64'):
                variable = group.createVariable(variable_name, typeis, ('int',))
                variable[:] = eval(address_of_child + '[0]')
            else:
                print('Encountered single datatype that was not float64 or int64, saving under unlimited dimension, may cause errors.')
                variable = group.createVariable(variable_name, typeis, ('Unlim',))
                variable[:] = eval(address_of_child + '[0]')

        # This catches all arrays/lists:
        else:
            # make the dimensions
            dimensions = []
            for dimension in np.shape(eval(address_of_child)):
                dimensions.append(str('dim' + str(dimension)))
                # if the dimension already exists we can't have a duplicate
                try:
                    group.createDimension(str('dim' + str(dimension)), dimension)
                except: pass # this would mean that the dimension already exists

            # create the variable:
            variable = group.createVariable(variable_name, typeis, tuple(dimensions))

            # write the variable:
            variable[:] = eval(address_of_child)
-
-
-
-
-
-
-
-
-
-
-
