Index: /issm/trunk-jpl/test/MITgcm/code_remesh/CPP_EEOPTIONS.h
===================================================================
--- /issm/trunk-jpl/test/MITgcm/code_remesh/CPP_EEOPTIONS.h	(revision 27556)
+++ /issm/trunk-jpl/test/MITgcm/code_remesh/CPP_EEOPTIONS.h	(revision 27556)
@@ -0,0 +1,161 @@
+CBOP
+C     !ROUTINE: CPP_EEOPTIONS.h
+C     !INTERFACE:
+C     include "CPP_EEOPTIONS.h"
+C
+C     !DESCRIPTION:
+C     *==========================================================*
+C     | CPP\_EEOPTIONS.h                                         |
+C     *==========================================================*
+C     | C preprocessor "execution environment" supporting        |
+C     | flags. Use this file to set flags controlling the        |
+C     | execution environment in which a model runs - as opposed |
+C     | to the dynamical problem the model solves.               |
+C     | Note: Many options are implemented with both compile time|
+C     |       and run-time switches. This allows options to be   |
+C     |       removed altogether, made optional at run-time or   |
+C     |       to be permanently enabled. This convention helps   |
+C     |       with the data-dependence analysis performed by the |
+C     |       adjoint model compiler. This data dependency       |
+C     |       analysis can be upset by runtime switches that it  |
+C     |       is unable to recognise as being fixed for the      |
+C     |       duration of an integration.                        |
+C     |       A reasonable way to use these flags is to          |
+C     |       set all options as selectable at runtime but then  |
+C     |       once an experimental configuration has been        |
+C     |       identified, rebuild the code with the appropriate  |
+C     |       options set at compile time.                       |
+C     *==========================================================*
+CEOP
+
+#ifndef _CPP_EEOPTIONS_H_
+#define _CPP_EEOPTIONS_H_
+
+C     In general the following convention applies:
+C     ALLOW  - indicates a feature will be included but it may
+C     CAN      have a run-time flag to allow it to be switched
+C              on and off.
+C              If ALLOW or CAN directives are "undef'd" this generally
+C              means that the feature will not be available i.e. it
+C              will not be included in the compiled code and so no
+C              run-time option to use the feature will be available.
+C
+C     ALWAYS - indicates the choice will be fixed at compile time
+C              so no run-time option will be present
+
+C=== Macro related options ===
+C--   Control storage of floating point operands
+C     On many systems it improves performance only to use
+C     8-byte precision for time stepped variables.
+C     Constant in time terms ( geometric factors etc.. )
+C     can use 4-byte precision, reducing memory utilisation and
+C     boosting performance because of a smaller working set size.
+C     However, on vector CRAY systems this degrades performance.
+C     Enable to switch REAL4_IS_SLOW from genmake2 (with LET_RS_BE_REAL4):
+#ifdef LET_RS_BE_REAL4
+#undef REAL4_IS_SLOW
+#else /* LET_RS_BE_REAL4 */
+#define REAL4_IS_SLOW
+#endif /* LET_RS_BE_REAL4 */
+
+C--   Control use of "double" precision constants.
+C     Use D0 where it means REAL*8 but not where it means REAL*16
+#define D0 d0
+
+C=== IO related options ===
+C--   Flag used to indicate whether Fortran formatted write
+C     and read are threadsafe. On SGI the routines can be thread
+C     safe, on Sun it is not possible - if you are unsure then
+C     undef this option.
+#undef FMTFTN_IO_THREAD_SAFE
+
+C--   Flag used to indicate whether Binary write to Local file (i.e.,
+C     a different file for each tile) and read are thread-safe.
+#undef LOCBIN_IO_THREAD_SAFE
+
+C--   Flag to turn off the writing of error message to ioUnit zero
+#undef DISABLE_WRITE_TO_UNIT_ZERO
+
+C--   Alternative formulation of BYTESWAP, faster than
+C     compiler flag -byteswapio on the Altix.
+#undef FAST_BYTESWAP
+
+C--   Flag to turn on old default of opening scratch files with the
+C     STATUS='SCRATCH' option. This method, while perfectly FORTRAN-standard,
+C     caused filename conflicts on some multi-node/multi-processor platforms
+C     in the past and has been replaced by something (hopefully) more robust.
+#undef USE_FORTRAN_SCRATCH_FILES
+
+C--   Flag defined for eeboot_minimal.F, eeset_parms.F and open_copy_data_file.F
+C     to write STDOUT, STDERR and scratch files from process 0 only.
+C WARNING: to use only when absolutely confident that the setup is working
+C     since any message (error/warning/print) from any proc <> 0 will be lost.
+#undef SINGLE_DISK_IO
+
+C=== MPI, EXCH and GLOBAL_SUM related options ===
+C--   Flag turns off MPI_SEND ready_to_receive polling in the
+C     gather_* subroutines to speed up integrations.
+#undef DISABLE_MPI_READY_TO_RECEIVE
+
+C--   Control MPI based parallel processing
+CXXX We no longer select the use of MPI via this file (CPP_EEOPTIONS.h)
+CXXX To use MPI, use an appropriate genmake2 options file or use
+CXXX genmake2 -mpi .
+CXXX #undef  ALLOW_USE_MPI
+
+C--   Control use of communication that might overlap computation.
+C     Under MPI selects/deselects "non-blocking" sends and receives.
+#define ALLOW_ASYNC_COMMUNICATION
+#undef  ALLOW_ASYNC_COMMUNICATION
+#undef  ALWAYS_USE_ASYNC_COMMUNICATION
+C--   Control use of communication that is atomic to computation.
+C     Under MPI selects/deselects "blocking" sends and receives.
+#define ALLOW_SYNC_COMMUNICATION
+#undef  ALWAYS_USE_SYNC_COMMUNICATION
+
+C--   Control XY periodicity in processor to grid mappings
+C     Note: Model code does not need to know whether a domain is
+C           periodic because it has overlap regions for every box.
+C           The model assumes that these values have been
+C           filled in some way.
+#undef  ALWAYS_PREVENT_X_PERIODICITY
+#undef  ALWAYS_PREVENT_Y_PERIODICITY
+#define CAN_PREVENT_X_PERIODICITY
+#define CAN_PREVENT_Y_PERIODICITY
+
+C--   disconnect tiles (no exchange between tiles, just fill-in edges
+C     assuming locally periodic subdomain)
+#undef DISCONNECTED_TILES
+
+C--   Always cumulate tile local-sum in the same order by applying MPI allreduce
+C     to array of tiles ; can get slower with large number of tiles (big set-up)
+#define GLOBAL_SUM_ORDER_TILES
+
+C--   Alternative way of doing global sum without MPI allreduce call
+C     but instead, explicit MPI send & recv calls. Expected to be slower.
+#undef GLOBAL_SUM_SEND_RECV
+
+C--   Alternative way of doing global sum on a single CPU
+C     to eliminate tiling-dependent roundoff errors. Note: This is slow.
+#undef  CG2D_SINGLECPU_SUM
+
+C=== Other options (to add/remove pieces of code) ===
+C--   Flag to turn on checking for errors from all threads and procs
+C     (calling S/R STOP_IF_ERROR) before stopping.
+#define USE_ERROR_STOP
+
+C--   Control use of communication with other component:
+C     allow to import and export from/to Coupler interface.
+#undef COMPONENT_MODULE
+
+C--   Options used to couple MITgcm and ISSM
+C     Eventually this option can probably be merged with COMPONENT_MODULE
+#define ALLOW_CPL_ISSM
+
+C--   Activate some pieces of code for coupling to GEOS AGCM
+#undef HACK_FOR_GMAO_CPL
+
+#endif /* _CPP_EEOPTIONS_H_ */
+
+#include "CPP_EEMACROS.h"
+
Index: /issm/trunk-jpl/test/MITgcm/code_remesh/CPP_OPTIONS.h
===================================================================
--- /issm/trunk-jpl/test/MITgcm/code_remesh/CPP_OPTIONS.h	(revision 27556)
+++ /issm/trunk-jpl/test/MITgcm/code_remesh/CPP_OPTIONS.h	(revision 27556)
@@ -0,0 +1,141 @@
+#ifndef CPP_OPTIONS_H
+#define CPP_OPTIONS_H
+
+CBOP
+C !ROUTINE: CPP_OPTIONS.h
+C !INTERFACE:
+C #include "CPP_OPTIONS.h"
+
+C !DESCRIPTION:
+C *==================================================================*
+C | main CPP options file for the model:
+C | Control which optional features to compile in model/src code.
+C *==================================================================*
+CEOP
+
+C CPP flags controlling particular source code features
+
+C-- Forcing code options:
+
+C o Shortwave heating as extra term in external_forcing.F
+C Note: this should be a run-time option
+#define SHORTWAVE_HEATING
+
+C o Include/exclude Geothermal Heat Flux at the bottom of the ocean
+#undef ALLOW_GEOTHERMAL_FLUX
+
+C o Allow to account for heating due to friction (and momentum dissipation)
+#undef ALLOW_FRICTION_HEATING
+
+C o Allow mass source or sink of Fluid in the interior
+C   (3-D generalisation of oceanic real-fresh water flux)
+#undef ALLOW_ADDFLUID
+
+C o Include pressure loading code
+#define ATMOSPHERIC_LOADING
+
+C o Include/exclude balancing surface forcing fluxes code
+#undef ALLOW_BALANCE_FLUXES
+
+C o Include/exclude balancing surface forcing relaxation code
+#undef ALLOW_BALANCE_RELAX
+
+C o Include/exclude checking for negative salinity
+#undef CHECK_SALINITY_FOR_NEGATIVE_VALUES
+
+C-- Options to discard parts of the main code:
+
+C o Exclude/allow external forcing-fields load
+C   this allows to read & do simple linear time interpolation of oceanic
+C   forcing fields, if no specific pkg (e.g., EXF) is used to compute them.
+#undef EXCLUDE_FFIELDS_LOAD
+
+C o Include/exclude phi_hyd calculation code
+#define INCLUDE_PHIHYD_CALCULATION_CODE
+
+C-- Vertical mixing code options:
+
+C o Include/exclude call to S/R CONVECT
+#define INCLUDE_CONVECT_CALL
+
+C o Include/exclude call to S/R CALC_DIFFUSIVITY
+#define INCLUDE_CALC_DIFFUSIVITY_CALL
+
+C o Allow full 3D specification of vertical diffusivity
+#undef ALLOW_3D_DIFFKR
+
+C o Allow latitudinally varying BryanLewis79 vertical diffusivity
+#undef ALLOW_BL79_LAT_VARY
+
+C o Exclude/allow partial-cell effect (physical or enhanced) in vertical mixing
+C   this allows to account for partial-cell in vertical viscosity and diffusion,
+C   either from grid-spacing reduction effect or as artificially enhanced mixing
+C   near surface & bottom for too thin grid-cell
+#undef EXCLUDE_PCELL_MIX_CODE
+
+C-- Time-stepping code options:
+
+C o Include/exclude combined Surf.Pressure and Drag Implicit solver code
+#define ALLOW_SOLVE4_PS_AND_DRAG
+
+C o Include/exclude Implicit vertical advection code
+#define INCLUDE_IMPLVERTADV_CODE
+
+C o Include/exclude AdamsBashforth-3rd-Order code
+#undef ALLOW_ADAMSBASHFORTH_3
+
+C-- Model formulation options:
+
+C o Allow/exclude "Exact Conservation" of fluid in Free-Surface formulation
+C   that ensures that d/dt(eta) is exactly equal to - Div.Transport
+#define EXACT_CONSERV
+
+C o Allow the use of Non-Linear Free-Surface formulation
+C   this implies that grid-cell thickness (hFactors) varies with time
+#define NONLIN_FRSURF
+
+C o Include/exclude nonHydrostatic code
+#undef ALLOW_NONHYDROSTATIC
+
+C o Include/exclude GM-like eddy stress in momentum code
+#undef ALLOW_EDDYPSI
+
+C-- Algorithm options:
+
+C o Use Non Self-Adjoint (NSA) conjugate-gradient solver
+#undef ALLOW_CG2D_NSA
+
+C o Include/exclude code for single reduction Conjugate-Gradient solver
+#define ALLOW_SRCG
+
+C o Choices for implicit solver routines solve_*diagonal.F
+C   The following has low memory footprint, but not suitable for AD
+#define SOLVE_DIAGONAL_LOWMEMORY
+C   The following one suitable for AD but does not vectorize
+#undef SOLVE_DIAGONAL_KINNER
+
+C-- Retired code options:
+
+C o Use LONG.bin, LATG.bin, etc., initialization for ini_curvilinear_grid.F
+C   Default is to use "new" grid files (OLD_GRID_IO undef) but OLD_GRID_IO
+C   is still useful with, e.g., single-domain curvilinear configurations.
+#undef OLD_GRID_IO
+C     Note: a duplicate "#define ALLOW_SOLVE4_PS_AND_DRAG" was removed here;
+C     the flag is already set above under "Time-stepping code options".
+
+C-- Other option files:
+
+C o Execution environment support options
+#include "CPP_EEOPTIONS.h"
+
+C o Include/exclude single header file containing multiple packages options
+C   (AUTODIFF, COST, CTRL, ECCO, EXF ...) instead of the standard way where
+C   each of the above pkg get its own options from its specific option file.
+C   Although this method, inherited from ECCO setup, has been traditionally
+C   used for all adjoint builds, work is in progress to allow using the
+C   standard method also for adjoint builds.
+c#ifdef PACKAGES_CONFIG_H
+c# include "ECCO_CPPOPTIONS.h"
+c#endif
+
+#endif /* CPP_OPTIONS_H */
Index: /issm/trunk-jpl/test/MITgcm/code_remesh/DIAGNOSTICS_SIZE.h
===================================================================
--- /issm/trunk-jpl/test/MITgcm/code_remesh/DIAGNOSTICS_SIZE.h	(revision 27556)
+++ /issm/trunk-jpl/test/MITgcm/code_remesh/DIAGNOSTICS_SIZE.h	(revision 27556)
@@ -0,0 +1,28 @@
+C     Diagnostics Array Dimension
+C     ---------------------------
+C     ndiagMax   :: maximum total number of available diagnostics
+C     numlists   :: maximum number of diagnostics lists (in data.diagnostics)
+C     numperlist :: maximum number of active diagnostics per list (data.diagnostics)
+C     numLevels  :: maximum number of levels to write    (data.diagnostics)
+C     numDiags   :: maximum size of the storage array for active 2D/3D diagnostics
+C     nRegions   :: maximum number of regions (statistics-diagnostics)
+C     sizRegMsk  :: maximum size of the regional-mask (statistics-diagnostics)
+C     nStats     :: maximum number of statistics (e.g.: aver,min,max ...)
+C     diagSt_size:: maximum size of the storage array for statistics-diagnostics
+C Note: may need to increase "numDiags" when using several 2D/3D diagnostics,
+C  and "diagSt_size" (statistics-diags) since values here are deliberately small.
+      INTEGER    ndiagMax
+      INTEGER    numlists, numperlist, numLevels
+      INTEGER    numDiags
+      INTEGER    nRegions, sizRegMsk, nStats
+      INTEGER    diagSt_size
+      PARAMETER( ndiagMax = 500 )
+      PARAMETER( numlists = 10, numperlist = 50, numLevels=2*Nr )
+      PARAMETER( numDiags = 4*Nr )
+      PARAMETER( nRegions = 0 , sizRegMsk = 1 , nStats = 4 )
+      PARAMETER( diagSt_size = 10*Nr )
+
+
+CEH3 ;;; Local Variables: ***
+CEH3 ;;; mode:fortran ***
+CEH3 ;;; End: ***
Index: /issm/trunk-jpl/test/MITgcm/code_remesh/SHELFICE_OPTIONS.h
===================================================================
--- /issm/trunk-jpl/test/MITgcm/code_remesh/SHELFICE_OPTIONS.h	(revision 27556)
+++ /issm/trunk-jpl/test/MITgcm/code_remesh/SHELFICE_OPTIONS.h	(revision 27556)
@@ -0,0 +1,31 @@
+C     *==========================================================*
+C     | SHELFICE_OPTIONS.h
+C     | o CPP options file for SHELFICE package.
+C     *==========================================================*
+C     | Use this file for selecting options within the SHELFICE
+C     | package.
+C     *==========================================================*
+
+#ifndef SHELFICE_OPTIONS_H
+#define SHELFICE_OPTIONS_H
+#include "PACKAGES_CONFIG.h"
+#include "CPP_OPTIONS.h"
+
+#ifdef ALLOW_SHELFICE
+C     Package-specific Options & Macros go here
+
+C     allow code for simple ISOMIP thermodynamics
+#define ALLOW_ISOMIP_TD
+
+C     allow friction velocity-dependent transfer coefficient
+C     following Holland and Jenkins, JPO, 1999
+#define SHI_ALLOW_GAMMAFRICT
+
+C     allow (vertical) remeshing whenever ocean top thickness factor
+C     exceeds thresholds
+#define ALLOW_SHELFICE_REMESHING
+C     and allow printing a message to STDOUT when remeshing happens
+#define SHELFICE_REMESH_PRINT
+
+#endif /* ALLOW_SHELFICE */
+#endif /* SHELFICE_OPTIONS_H */
Index: /issm/trunk-jpl/test/MITgcm/code_remesh/SIZE.h
===================================================================
--- /issm/trunk-jpl/test/MITgcm/code_remesh/SIZE.h	(revision 27556)
+++ /issm/trunk-jpl/test/MITgcm/code_remesh/SIZE.h	(revision 27556)
@@ -0,0 +1,64 @@
+CBOP
+C    !ROUTINE: SIZE.h
+C    !INTERFACE:
+C    include "SIZE.h"
+C    !DESCRIPTION: \bv
+C     *==========================================================*
+C     | SIZE.h Declare size of underlying computational grid.
+C     *==========================================================*
+C     | The design here supports a three-dimensional model grid
+C     | with indices I,J and K. The three-dimensional domain
+C     | is comprised of nPx*nSx blocks (or tiles) of size sNx
+C     | along the first (left-most index) axis, nPy*nSy blocks
+C     | of size sNy along the second axis and one block of size
+C     | Nr along the vertical (third) axis.
+C     | Blocks/tiles have overlap regions of size OLx and OLy
+C     | along the dimensions that are subdivided.
+C     *==========================================================*
+C     \ev
+C
+C     Voodoo numbers controlling data layout:
+C     sNx :: Number of X points in tile.
+C     sNy :: Number of Y points in tile.
+C     OLx :: Tile overlap extent in X.
+C     OLy :: Tile overlap extent in Y.
+C     nSx :: Number of tiles per process in X.
+C     nSy :: Number of tiles per process in Y.
+C     nPx :: Number of processes to use in X.
+C     nPy :: Number of processes to use in Y.
+C     Nx  :: Number of points in X for the full domain.
+C     Ny  :: Number of points in Y for the full domain.
+C     Nr  :: Number of points in vertical direction.
+CEOP
+      INTEGER sNx
+      INTEGER sNy
+      INTEGER OLx
+      INTEGER OLy
+      INTEGER nSx
+      INTEGER nSy
+      INTEGER nPx
+      INTEGER nPy
+      INTEGER Nx
+      INTEGER Ny
+      INTEGER Nr
+      PARAMETER (
+     &           sNx =  10,
+     &           sNy =  10,
+     &           OLx =   3,
+     &           OLy =   3,
+     &           nSx =   1,
+     &           nSy =   1,
+     &           nPx = 2,
+     &           nPy = 4,
+     &           Nx  = sNx*nSx*nPx,
+     &           Ny  = sNy*nSy*nPy,
+     &           Nr  =  30)
+
+C     MAX_OLX :: Set to the maximum overlap region size of any array
+C     MAX_OLY    that will be exchanged. Controls the sizing of exch
+C                routine buffers.
+      INTEGER MAX_OLX
+      INTEGER MAX_OLY
+      PARAMETER ( MAX_OLX = OLx,
+     &            MAX_OLY = OLy )
+
Index: /issm/trunk-jpl/test/MITgcm/code_remesh/SIZE.h.bak
===================================================================
--- /issm/trunk-jpl/test/MITgcm/code_remesh/SIZE.h.bak	(revision 27556)
+++ /issm/trunk-jpl/test/MITgcm/code_remesh/SIZE.h.bak	(revision 27556)
@@ -0,0 +1,64 @@
+CBOP
+C    !ROUTINE: SIZE.h
+C    !INTERFACE:
+C    include SIZE.h
+C    !DESCRIPTION: \bv
+C     *==========================================================*
+C     | SIZE.h Declare size of underlying computational grid.
+C     *==========================================================*
+C     | The design here supports a three-dimensional model grid
+C     | with indices I,J and K. The three-dimensional domain
+C     | is comprised of nPx*nSx blocks (or tiles) of size sNx
+C     | along the first (left-most index) axis, nPy*nSy blocks
+C     | of size sNy along the second axis and one block of size
+C     | Nr along the vertical (third) axis.
+C     | Blocks/tiles have overlap regions of size OLx and OLy
+C     | along the dimensions that are subdivided.
+C     *==========================================================*
+C     \ev
+C
+C     Voodoo numbers controlling data layout:
+C     sNx :: Number of X points in tile.
+C     sNy :: Number of Y points in tile.
+C     OLx :: Tile overlap extent in X.
+C     OLy :: Tile overlap extent in Y.
+C     nSx :: Number of tiles per process in X.
+C     nSy :: Number of tiles per process in Y.
+C     nPx :: Number of processes to use in X.
+C     nPy :: Number of processes to use in Y.
+C     Nx  :: Number of points in X for the full domain.
+C     Ny  :: Number of points in Y for the full domain.
+C     Nr  :: Number of points in vertical direction.
+CEOP
+      INTEGER sNx
+      INTEGER sNy
+      INTEGER OLx
+      INTEGER OLy
+      INTEGER nSx
+      INTEGER nSy
+      INTEGER nPx
+      INTEGER nPy
+      INTEGER Nx
+      INTEGER Ny
+      INTEGER Nr
+      PARAMETER (
+     &           sNx =  20,
+     &           sNy =  20,
+     &           OLx =   3,
+     &           OLy =   3,
+     &           nSx =   1,
+     &           nSy =   1,
+     &           nPx =   1,
+     &           nPy =   2,
+     &           Nx  = sNx*nSx*nPx,
+     &           Ny  = sNy*nSy*nPy,
+     &           Nr  =  30)
+
+C     MAX_OLX :: Set to the maximum overlap region size of any array
+C     MAX_OLY    that will be exchanged. Controls the sizing of exch
+C                routine buffers.
+      INTEGER MAX_OLX
+      INTEGER MAX_OLY
+      PARAMETER ( MAX_OLX = OLx,
+     &            MAX_OLY = OLy )
+
Index: /issm/trunk-jpl/test/MITgcm/code_remesh/cpl_issm.F
===================================================================
--- /issm/trunk-jpl/test/MITgcm/code_remesh/cpl_issm.F	(revision 27556)
+++ /issm/trunk-jpl/test/MITgcm/code_remesh/cpl_issm.F	(revision 27556)
@@ -0,0 +1,220 @@
+#include "PACKAGES_CONFIG.h"
+#include "CPP_OPTIONS.h"
+
+CBOP
+C     !ROUTINE: CPL_ISSM
+C     !INTERFACE:
+      SUBROUTINE CPL_ISSM( myTime, myIter, myThid )
+
+C     !DESCRIPTION: \bv
+C     *==================================================================
+C     | SUBROUTINE cpl_issm
+C     | o Couple MITgcm ocean model with ISSM ice sheet model
+C     *==================================================================
+C     \ev
+
+C     !USES:
+      IMPLICIT NONE
+C     == Global variables ==
+#include "SIZE.h"
+#include "EEPARAMS.h"
+#include "PARAMS.h"
+#include "DYNVARS.h"
+#include "GRID.h"
+#include "FFIELDS.h"
+#include "SHELFICE_OPTIONS.h"
+#include "SHELFICE.h"
+#ifdef ALLOW_EXF
+# include "EXF_OPTIONS.h"
+# include "EXF_FIELDS.h"
+#endif
+
+      LOGICAL  DIFFERENT_MULTIPLE
+      EXTERNAL DIFFERENT_MULTIPLE
+
+C     !LOCAL VARIABLES:
+C     mytime - time counter for this thread (seconds)
+C     myiter - iteration counter for this thread
+C     mythid - thread number for this instance of the routine.
+      _RL     mytime
+      INTEGER myiter, mythid 
+CEOP
+
+#ifdef ALLOW_CPL_ISSM
+#include "EESUPPORT.h"
+      COMMON /CPL_MPI_ID/ mpiMyWid, toissmcomm
+      INTEGER mpiMyWid, toissmcomm, mpiRC
+      INTEGER mpistatus(MPI_STATUS_SIZE)
+      INTEGER i, j, bi, bj, buffsize
+      COMMON /CPL_ISSM_TIME/ CouplingTime
+      _R8 CouplingTime, IceModelTime
+      _R8 xfer_array(Nx,Ny)
+      _R8 local(1:sNx,1:sNy,nSx,nSy)
+      CHARACTER*(MAX_LEN_MBUF) suff
+
+C Initialization steps I1, I2, and I3:
+      IF( myTime .EQ. startTime ) THEN
+
+C   I1. ISSM sends CouplingTime, the interval at which we couple
+C       (use Fortran datatype MPI_DOUBLE_PRECISION, consistent with
+C        the array transfers below; pass mpiRC as the ierror argument)
+         IF( myProcId .EQ. 0 ) THEN
+            _BEGIN_MASTER( myThid )
+            call MPI_Recv(CouplingTime,1,MPI_DOUBLE_PRECISION,
+     &           0,10001000,toissmcomm,mpistatus,mpiRC)
+            _END_MASTER( myThid )
+         ENDIF
+         _BEGIN_MASTER( myThid )
+         CALL MPI_BCAST(CouplingTime,1,MPI_DOUBLE_PRECISION,0,
+     &        MPI_COMM_MODEL,mpiRC)
+         _END_MASTER( myThid )
+C        print*, 'Ocean received CouplingTime: ', CouplingTime
+
+C   I2. MITgcm sends grid size (NX and NY)
+         IF( myProcId .EQ. 0 ) THEN
+            _BEGIN_MASTER( myThid )
+            call MPI_Send(Nx,1,MPI_INTEGER,0,10001003,toissmcomm,mpiRC)
+            call MPI_Send(Ny,1,MPI_INTEGER,0,10001004,toissmcomm,mpiRC)
+            _END_MASTER( myThid )
+         ENDIF
+
+C   I3. MITgcm sends grid coordinates of center of cells
+C       (longitude -180 <= XC < 180 and latitude YC)
+C     Send longitude East of center of cell
+         DO bj=1,nSy
+            DO bi=1,nSx
+               DO j=1,sNy
+                  DO i=1,sNx
+                     local(i,j,bi,bj) = xC(i,j,bi,bj)
+                  ENDDO
+               ENDDO
+            ENDDO
+         ENDDO
+         CALL BAR2( myThid ) 
+         CALL GATHER_2D_R8( xfer_array, local, Nx, Ny,
+     &        .FALSE., .FALSE., myThid )
+         IF( myProcId .EQ. 0 ) THEN
+            _BEGIN_MASTER( myThid )
+            buffsize = Nx*Ny
+            CALL MPI_SEND(xfer_array,buffsize,MPI_DOUBLE_PRECISION,
+     &           0,10001005,toissmcomm,mpistatus)
+            _END_MASTER( myThid )
+         ENDIF
+         CALL BAR2( myThid )
+C     Send latitude North of center of cell
+         DO bj=1,nSy
+            DO bi=1,nSx
+               DO j=1,sNy
+                  DO i=1,sNx
+                     local(i,j,bi,bj) = yC(i,j,bi,bj)
+                  ENDDO
+               ENDDO
+            ENDDO
+         ENDDO
+         CALL BAR2( myThid ) 
+         CALL GATHER_2D_R8( xfer_array, local, Nx, Ny,
+     &        .FALSE., .FALSE., myThid )
+         IF( myProcId .EQ. 0 ) THEN
+            _BEGIN_MASTER( myThid )
+            buffsize = Nx*Ny
+            CALL MPI_SEND(xfer_array,buffsize,MPI_DOUBLE_PRECISION,
+     &           0,10001006,toissmcomm,mpistatus)
+            _END_MASTER( myThid )
+         ENDIF
+         CALL BAR2( myThid )
+
+      ENDIF
+C End initialization steps I1, I2, and I3.
+
+C Recurring steps C1 and C2:
+      IF( MOD(myTime,CouplingTime) .LT. deltaT/2. ) THEN
+
+C   C1. ISSM sends ice model time IceTimeTag
+         IF( myProcId .EQ. 0 ) THEN
+            _BEGIN_MASTER( myThid )
+            call MPI_Recv(IceModelTime,1,MPI_DOUBLE_PRECISION,
+     &           0,10001001,toissmcomm,mpistatus,mpiRC)
+C           print*, 'Ocean received IceModelTime: ', IceModelTime
+            _END_MASTER( myThid )
+         ENDIF
+
+C   C2. MITgcm sends ocean model time OceanTimeTag
+C       (MPI_SEND takes ierror, not a status, as its final argument)
+         IF( myProcId .EQ. 0 ) THEN
+            _BEGIN_MASTER( myThid )
+            call MPI_Send(myTime,1,MPI_DOUBLE_PRECISION,0,10001002,
+     &           toissmcomm,mpiRC)
+            _END_MASTER( myThid )
+         ENDIF
+
+      ENDIF
+C End recurring steps C1 and C2.
+
+C Recurring step C3 except during Initialization:
+C  C3. MITgcm sends
+C      (N-1)*CouplingTime <= OceanModelTime < N*CouplingTime
+C      time-mean melt rate to ISSM
+      IF( myTime .NE. startTime .AND.
+     &     MOD(myTime,CouplingTime) .LT. deltaT/2. ) THEN
+         DO bj=1,nSy
+            DO bi=1,nSx
+               DO j=1,sNy
+                  DO i=1,sNx
+                     local(i,j,bi,bj)=shelficeFreshWaterFlux(i,j,bi,bj)
+                  ENDDO
+               ENDDO
+            ENDDO
+         ENDDO
+         CALL BAR2( myThid ) 
+         CALL GATHER_2D_R8( xfer_array, local, Nx, Ny,
+     &        .FALSE., .FALSE., myThid )
+         IF( myProcId .EQ. 0 ) THEN
+            _BEGIN_MASTER( myThid )
+            buffsize = Nx*Ny
+            CALL MPI_SEND(xfer_array,buffsize,MPI_DOUBLE_PRECISION,
+     &           0,10001007,toissmcomm,mpistatus)
+            _END_MASTER( myThid )
+         ENDIF
+         CALL BAR2( myThid )
+C        print*,'Done Sending shelficeFreshWaterFlux array.'
+         
+      ENDIF
+C End recurring step C3.
+
+C Recurring step C4 except during Termination:
+C  C4. ISSM sends IceModelTime=(N-1)*CouplingTime base to MITgcm
+      IF( myTime .NE. endtime .AND.
+     &     MOD(myTime,CouplingTime) .LT. deltaT/2. ) THEN
+         WRITE(suff,'(I10.10)') myIter
+         CALL WRITE_FLD_XY_RS( 'R_shelfIce1_',suff,R_shelfIce,-1,myThid)
+         IF( myProcId .EQ. 0 ) THEN
+            _BEGIN_MASTER( myThid )         
+            call MPI_Recv(xfer_array,buffsize,MPI_DOUBLE_PRECISION,
+     &           0,10001008,toissmcomm,mpistatus,mpiRC)
+            _END_MASTER( myThid )
+         ENDIF
+         CALL BAR2( myThid ) 
+         CALL SCATTER_2D_R8( xfer_array, local, Nx, Ny,
+     &        .FALSE., .FALSE., myThid )
+         DO bj = myByLo(myThid), myByHi(myThid)
+            DO bi = myBxLo(myThid), myBxHi(myThid)
+               DO j=1,sNy
+                  DO i=1,sNx
+                     IF( local(i,j,bi,bj).LT.9998 ) THEN
+                        R_shelfIce(i,j,bi,bj) = local(i,j,bi,bj)
+                     ELSE
+                        R_shelfIce(i,j,bi,bj) = 0. _d 0
+                     ENDIF
+                  ENDDO
+               ENDDO
+            ENDDO
+         ENDDO
+C- fill in the overlap (+ BARRIER):
+         _EXCH_XY_RS( R_shelfIce, myThid )
+         CALL WRITE_FLD_XY_RS( 'R_shelfIce2_',suff,R_shelfIce,-1,myThid)
+      ENDIF
+C End recurring step C4.
+
+#endif /* ALLOW_CPL_ISSM */
+
+      RETURN
+      END
Index: /issm/trunk-jpl/test/MITgcm/code_remesh/do_oceanic_phys.F
===================================================================
--- /issm/trunk-jpl/test/MITgcm/code_remesh/do_oceanic_phys.F	(revision 27556)
+++ /issm/trunk-jpl/test/MITgcm/code_remesh/do_oceanic_phys.F	(revision 27556)
@@ -0,0 +1,1111 @@
+#include "PACKAGES_CONFIG.h"
+#include "CPP_OPTIONS.h"
+#ifdef ALLOW_MOM_COMMON
+# include "MOM_COMMON_OPTIONS.h"
+#endif
+#ifdef ALLOW_AUTODIFF
+# include "AUTODIFF_OPTIONS.h"
+#endif
+#ifdef ALLOW_CTRL
+# include "CTRL_OPTIONS.h"
+#endif
+#ifdef ALLOW_SALT_PLUME
+# include "SALT_PLUME_OPTIONS.h"
+#endif
+#ifdef ALLOW_ECCO
+# include "ECCO_OPTIONS.h"
+#endif
+
+#ifdef ALLOW_AUTODIFF
+# ifdef ALLOW_GGL90
+#  include "GGL90_OPTIONS.h"
+# endif
+# ifdef ALLOW_GMREDI
+#  include "GMREDI_OPTIONS.h"
+# endif
+# ifdef ALLOW_KPP
+#  include "KPP_OPTIONS.h"
+# endif
+# ifdef ALLOW_SEAICE
+#  include "SEAICE_OPTIONS.h"
+# endif
+# ifdef ALLOW_EXF
+#  include "EXF_OPTIONS.h"
+# endif
+#endif /* ALLOW_AUTODIFF */
+
+CBOP
+C     !ROUTINE: DO_OCEANIC_PHYS
+C     !INTERFACE:
+      SUBROUTINE DO_OCEANIC_PHYS(myTime, myIter, myThid)
+C     !DESCRIPTION: \bv
+C     *==========================================================*
+C     | SUBROUTINE DO_OCEANIC_PHYS
+C     | o Controlling routine for oceanic physics and
+C     |   parameterization
+C     *==========================================================*
+C     | o originally, part of S/R thermodynamics
+C     *==========================================================*
+C     \ev
+
+C     !CALLING SEQUENCE:
+C     DO_OCEANIC_PHYS
+C       |
+C       |-- OBCS_CALC
+C       |
+C       |-- OCN_APPLY_IMPORT
+C       |
+C       |-- FRAZIL_CALC_RHS
+C       |
+C       |-- THSICE_MAIN
+C       |
+C       |-- SEAICE_FAKE
+C       |-- SEAICE_MODEL
+C       |-- SEAICE_COST_SENSI
+C       |
+C       |-- OCN_EXPORT_DATA
+C       |
+C       |-- SHELFICE_THERMODYNAMICS
+C       |
+C       |-- ICEFRONT_THERMODYNAMICS
+C       |
+C       |-- SALT_PLUME_DO_EXCH
+C       |
+C       |-- FREEZE_SURFACE
+C       |
+C       |-- EXTERNAL_FORCING_SURF
+C       |
+C       |-- OBCS_ADJUST
+C       |
+C       |- k loop (Nr:1):
+C       | - DWNSLP_CALC_RHO
+C       | - BBL_CALC_RHO
+C       | - FIND_RHO_2D @ p(k)
+C       | - FIND_RHO_2D @ p(k-1)
+C       | - GRAD_SIGMA
+C       | - CALC_IVDC
+C       | - DIAGS_RHO_L
+C       |- end k loop.
+C       |
+C       |-- CALC_OCE_MXLAYER
+C       |
+C       |-- SALT_PLUME_CALC_DEPTH
+C       |-- SALT_PLUME_VOLFRAC
+C       |-- SALT_PLUME_APPLY
+C       |-- SALT_PLUME_APPLY
+C       |-- SALT_PLUME_FORCING_SURF
+C       |
+C       |-- KPP_CALC
+C       |-- KPP_CALC_DUMMY
+C       |
+C       |-- PP81_CALC
+C       |
+C       |-- KL10_CALC
+C       |
+C       |-- MY82_CALC
+C       |
+C       |-- GGL90_CALC
+C       |
+C       |-- TIMEAVE_SURF_FLUX
+C       |
+C       |-- GMREDI_CALC_TENSOR
+C       |-- GMREDI_CALC_TENSOR_DUMMY
+C       |
+C       |-- DWNSLP_CALC_FLOW
+C       |-- DWNSLP_CALC_FLOW
+C       |
+C       |-- OFFLINE_GET_DIFFUS
+C       |
+C       |-- BBL_CALC_RHS
+C       |
+C       |-- MYPACKAGE_CALC_RHS
+C       |
+C       |-- GMREDI_DO_EXCH
+C       |
+C       |-- KPP_DO_EXCH
+C       |
+C       |-- GGL90_EXCHANGES
+C       |
+C       |-- DIAGS_RHO_G
+C       |-- DIAGS_OCEANIC_SURF_FLUX
+C       |-- SALT_PLUME_DIAGNOSTICS_FILL
+C       |
+C       |-- ECCO_PHYS
+
+C     !USES:
+      IMPLICIT NONE
+C     == Global variables ===
+#include "SIZE.h"
+#include "EEPARAMS.h"
+#include "PARAMS.h"
+#include "GRID.h"
+#include "DYNVARS.h"
+#ifdef ALLOW_TIMEAVE
+# include "TIMEAVE_STATV.h"
+#endif
+#ifdef ALLOW_OFFLINE
+# include "OFFLINE_SWITCH.h"
+#endif
+
+#ifdef ALLOW_AUTODIFF
+# include "AUTODIFF_MYFIELDS.h"
+# ifdef ALLOW_AUTODIFF_TAMC
+#  include "tamc.h"
+# endif
+# include "FFIELDS.h"
+# include "SURFACE.h"
+# include "EOS.h"
+# ifdef ALLOW_GMREDI
+#  include "GMREDI.h"
+# endif
+# ifdef ALLOW_KPP
+#  include "KPP.h"
+# endif
+# ifdef ALLOW_GGL90
+#  include "GGL90.h"
+# endif
+# ifdef ALLOW_EBM
+#  include "EBM.h"
+# endif
+# ifdef ALLOW_EXF
+#  include "ctrl.h"
+#  include "EXF_FIELDS.h"
+#  ifdef ALLOW_BULKFORMULAE
+#   include "EXF_CONSTANTS.h"
+#  endif
+# endif
+# ifdef ALLOW_SEAICE
+#  include "SEAICE_SIZE.h"
+#  include "SEAICE.h"
+#  include "SEAICE_PARAMS.h"
+# endif
+# ifdef ALLOW_THSICE
+#  include "THSICE_VARS.h"
+# endif
+# ifdef ALLOW_SALT_PLUME
+#  include "SALT_PLUME.h"
+# endif
+# ifdef ALLOW_ECCO
+#  ifdef ALLOW_SIGMAR_COST_CONTRIBUTION
+#   include "ecco_cost.h"
+#  endif
+# endif
+#endif /* ALLOW_AUTODIFF */
+
+C     !INPUT/OUTPUT PARAMETERS:
+C     == Routine arguments ==
+C     myTime :: Current time in simulation
+C     myIter :: Current iteration number in simulation
+C     myThid :: Thread number for this instance of the routine.
+      _RL myTime
+      INTEGER myIter
+      INTEGER myThid
+
+C     !LOCAL VARIABLES:
+C     == Local variables
+C     rhoKp1,rhoKm1 :: Density at current level, and @ level minus one
+C     iMin, iMax    :: Ranges and sub-block indices on which calculations
+C     jMin, jMax       are applied.
+C     bi, bj        :: tile indices
+C     msgBuf        :: Temp. for building output string
+C     i,j,k         :: loop indices
+C     kSrf          :: surface index
+      _RL rhoKp1  (1-OLx:sNx+OLx,1-OLy:sNy+OLy)
+      _RL rhoKm1  (1-OLx:sNx+OLx,1-OLy:sNy+OLy)
+      _RL sigmaX  (1-OLx:sNx+OLx,1-OLy:sNy+OLy,Nr)
+      _RL sigmaY  (1-OLx:sNx+OLx,1-OLy:sNy+OLy,Nr)
+      _RL sigmaR  (1-OLx:sNx+OLx,1-OLy:sNy+OLy,Nr)
+      INTEGER iMin, iMax
+      INTEGER jMin, jMax
+      INTEGER bi, bj
+      INTEGER i, j, k, kSrf
+      CHARACTER*(MAX_LEN_MBUF) msgBuf
+      INTEGER doDiagsRho
+      LOGICAL calcGMRedi, calcKPP, calcConvect
+#ifdef ALLOW_DIAGNOSTICS
+      LOGICAL  DIAGNOSTICS_IS_ON
+      EXTERNAL DIAGNOSTICS_IS_ON
+#endif /* ALLOW_DIAGNOSTICS */
+#ifdef ALLOW_AUTODIFF
+      _RL thetaRef
+#endif /* ALLOW_AUTODIFF */
+#ifdef ALLOW_AUTODIFF_TAMC
+      INTEGER act1, act2, act3, act4
+      INTEGER max1, max2, max3
+      INTEGER kkey, itdkey
+#endif
+CEOP
+
+#ifdef ALLOW_AUTODIFF_TAMC
+C--   dummy statement to end declaration part
+      itdkey = 1
+#endif /* ALLOW_AUTODIFF_TAMC */
+
+      kSrf = 1
+      IF ( usingPCoords ) kSrf = Nr
+
+#ifdef ALLOW_DEBUG
+      IF (debugMode) CALL DEBUG_ENTER('DO_OCEANIC_PHYS',myThid)
+#endif
+
+      doDiagsRho = 0
+#ifdef ALLOW_DIAGNOSTICS
+      IF ( useDiagnostics .AND. fluidIsWater ) THEN
+        IF ( DIAGNOSTICS_IS_ON('MXLDEPTH',myThid) )
+     &       doDiagsRho = doDiagsRho + 1
+        IF ( DIAGNOSTICS_IS_ON('DRHODR  ',myThid) )
+     &       doDiagsRho = doDiagsRho + 2
+        IF ( DIAGNOSTICS_IS_ON('WdRHO_P ',myThid) )
+     &       doDiagsRho = doDiagsRho + 4
+        IF ( DIAGNOSTICS_IS_ON('WdRHOdP ',myThid) )
+     &       doDiagsRho = doDiagsRho + 8
+      ENDIF
+#endif /* ALLOW_DIAGNOSTICS */
+
+      calcGMRedi  = useGMRedi
+      calcKPP     = useKPP
+      calcConvect = ivdc_kappa.NE.0.
+#ifdef ALLOW_OFFLINE
+      IF ( useOffLine ) THEN
+        calcGMRedi = useGMRedi .AND. .NOT.offlineLoadGMRedi
+        calcKPP    = useKPP    .AND. .NOT.offlineLoadKPP
+        calcConvect=calcConvect.AND. .NOT.offlineLoadConvec
+      ENDIF
+#endif /* ALLOW_OFFLINE */
+
+#ifdef  ALLOW_OBCS
+      IF (useOBCS) THEN
+C--   Calculate future values on open boundaries
+C--   moved before SEAICE_MODEL call since SEAICE_MODEL needs seaice-obcs fields
+# ifdef ALLOW_AUTODIFF_TAMC
+CADJ STORE theta = comlev1, key=ikey_dynamics, kind=isbyte
+CADJ STORE salt  = comlev1, key=ikey_dynamics, kind=isbyte
+# endif
+# ifdef ALLOW_DEBUG
+       IF (debugMode) CALL DEBUG_CALL('OBCS_CALC',myThid)
+# endif
+       CALL OBCS_CALC( myTime+deltaTClock, myIter+1,
+     I                 uVel, vVel, wVel, theta, salt, myThid )
+      ENDIF
+#endif  /* ALLOW_OBCS */
+
+#ifdef ALLOW_OCN_COMPON_INTERF
+C--    Apply imported data (from coupled interface) to forcing fields
+C jmc: moved here before any freezing/seaice pkg adjustment of surf-fluxes
+      IF ( useCoupler ) THEN
+         CALL OCN_APPLY_IMPORT( .TRUE., myTime, myIter, myThid )
+      ENDIF
+#endif /* ALLOW_OCN_COMPON_INTERF */
+
+#ifdef ALLOW_AUTODIFF
+      DO bj=myByLo(myThid),myByHi(myThid)
+       DO bi=myBxLo(myThid),myBxHi(myThid)
+        DO j=1-OLy,sNy+OLy
+         DO i=1-OLx,sNx+OLx
+          adjustColdSST_diag(i,j,bi,bj) = 0. _d 0
+# ifdef ALLOW_SALT_PLUME
+          saltPlumeDepth(i,j,bi,bj) = 0. _d 0
+          saltPlumeFlux(i,j,bi,bj)  = 0. _d 0
+# endif
+         ENDDO
+        ENDDO
+       ENDDO
+      ENDDO
+#endif /* ALLOW_AUTODIFF */
+
+#ifdef ALLOW_FRAZIL
+      IF ( useFRAZIL ) THEN
+C--   Freeze water in the ocean interior and let it rise to the surface
+CADJ STORE theta = comlev1, key=ikey_dynamics, kind=isbyte
+CADJ STORE salt  = comlev1, key=ikey_dynamics, kind=isbyte
+       CALL FRAZIL_CALC_RHS( myTime, myIter, myThid )
+      ENDIF
+#endif /* ALLOW_FRAZIL */
+
+#if (defined ALLOW_THSICE) && !(defined ALLOW_ATM2D)
+      IF ( useThSIce .AND. fluidIsWater ) THEN
+# ifdef ALLOW_AUTODIFF_TAMC
+#  ifdef ALLOW_SEAICE
+CADJ STORE uice,vice         = comlev1, key=ikey_dynamics, kind=isbyte
+#  endif
+CADJ STORE iceMask,iceHeight = comlev1, key=ikey_dynamics, kind=isbyte
+CADJ STORE snowHeight, Tsrf  = comlev1, key=ikey_dynamics, kind=isbyte
+CADJ STORE Qice1, Qice2      = comlev1, key=ikey_dynamics, kind=isbyte
+CADJ STORE sHeating,snowAge  = comlev1, key=ikey_dynamics, kind=isbyte
+CADJ STORE hocemxl, icflxsw  = comlev1, key=ikey_dynamics, kind=isbyte
+CADJ STORE salt,theta        = comlev1, key=ikey_dynamics, kind=isbyte
+CADJ STORE uvel,vvel         = comlev1, key=ikey_dynamics, kind=isbyte
+CADJ STORE qnet,qsw, empmr   = comlev1, key=ikey_dynamics, kind=isbyte
+CADJ STORE atemp,aqh,precip  = comlev1, key=ikey_dynamics, kind=isbyte
+CADJ STORE swdown,lwdown     = comlev1, key=ikey_dynamics, kind=isbyte
+#  ifdef NONLIN_FRSURF
+CADJ STORE hFac_surfC        = comlev1, key=ikey_dynamics, kind=isbyte
+#  endif
+# endif /* ALLOW_AUTODIFF_TAMC */
+# ifdef ALLOW_DEBUG
+        IF (debugMode) CALL DEBUG_CALL('THSICE_MAIN',myThid)
+# endif
+C--     Step forward Therm.Sea-Ice variables
+C       and modify forcing terms including effects from ice
+        CALL TIMER_START('THSICE_MAIN     [DO_OCEANIC_PHYS]', myThid)
+        CALL THSICE_MAIN( myTime, myIter, myThid )
+        CALL TIMER_STOP( 'THSICE_MAIN     [DO_OCEANIC_PHYS]', myThid)
+      ENDIF
+#endif /* ALLOW_THSICE */
+
+#ifdef ALLOW_SEAICE
+# ifdef ALLOW_AUTODIFF_TAMC
+CADJ STORE qnet  = comlev1, key=ikey_dynamics, kind=isbyte
+CADJ STORE qsw   = comlev1, key=ikey_dynamics, kind=isbyte
+CADJ STORE theta = comlev1, key=ikey_dynamics, kind=isbyte
+CADJ STORE salt  = comlev1, key=ikey_dynamics, kind=isbyte
+CADJ STORE fu,fv = comlev1, key=ikey_dynamics, kind=isbyte
+#if (defined ALLOW_EXF) && (defined ALLOW_ATM_TEMP)
+CADJ STORE evap  = comlev1, key=ikey_dynamics, kind=isbyte
+#endif
+# endif /* ALLOW_AUTODIFF_TAMC */
+#ifdef ALLOW_AUTODIFF_TAMC
+CADJ STORE phiHydLow= comlev1, key=ikey_dynamics, byte=isbyte
+#endif
+      IF ( useSEAICE ) THEN
+# ifdef ALLOW_AUTODIFF_TAMC
+CADJ STORE uvel,vvel         = comlev1, key=ikey_dynamics, kind=isbyte
+CADJ STORE uice,vice         = comlev1, key=ikey_dynamics, kind=isbyte
+#  ifdef ALLOW_EXF
+CADJ STORE atemp,aqh,precip  = comlev1, key=ikey_dynamics, kind=isbyte
+CADJ STORE swdown,lwdown     = comlev1, key=ikey_dynamics, kind=isbyte
+CADJ STORE uwind,vwind       = comlev1, key=ikey_dynamics, kind=isbyte
+#  endif
+#  ifdef SEAICE_VARIABLE_SALINITY
+CADJ STORE hsalt             = comlev1, key=ikey_dynamics, kind=isbyte
+#  endif
+#  ifdef ATMOSPHERIC_LOADING
+CADJ STORE pload, siceload   = comlev1, key=ikey_dynamics, kind=isbyte
+#  endif
+#  ifdef NONLIN_FRSURF
+CADJ STORE recip_hfacc       = comlev1, key=ikey_dynamics, kind=isbyte
+#  endif
+#  ifdef ANNUAL_BALANCE
+CADJ STORE balance_itcount   = comlev1, key=ikey_dynamics, kind=isbyte
+#  endif /* ANNUAL_BALANCE */
+#  ifdef ALLOW_THSICE
+C-- store thSIce vars before advection (called from SEAICE_MODEL) updates them:
+CADJ STORE iceMask,iceHeight = comlev1, key=ikey_dynamics, kind=isbyte
+CADJ STORE snowHeight,hOceMxL= comlev1, key=ikey_dynamics, kind=isbyte
+CADJ STORE Qice1, Qice2      = comlev1, key=ikey_dynamics, kind=isbyte
+#  endif /* ALLOW_THSICE */
+# endif /* ALLOW_AUTODIFF_TAMC */
+# ifdef ALLOW_DEBUG
+        IF (debugMode) CALL DEBUG_CALL('SEAICE_MODEL',myThid)
+# endif
+        CALL TIMER_START('SEAICE_MODEL    [DO_OCEANIC_PHYS]', myThid)
+        CALL SEAICE_MODEL( myTime, myIter, myThid )
+        CALL TIMER_STOP ('SEAICE_MODEL    [DO_OCEANIC_PHYS]', myThid)
+# ifdef ALLOW_AUTODIFF_TAMC
+CADJ STORE tices = comlev1, key=ikey_dynamics, kind=isbyte
+CADJ STORE heff  = comlev1, key=ikey_dynamics, kind=isbyte
+CADJ STORE hsnow = comlev1, key=ikey_dynamics, kind=isbyte
+CADJ STORE area  = comlev1, key=ikey_dynamics, kind=isbyte
+CADJ STORE uIce  = comlev1, key=ikey_dynamics, kind=isbyte
+CADJ STORE vIce  = comlev1, key=ikey_dynamics, kind=isbyte
+# endif
+# ifdef ALLOW_COST
+        CALL SEAICE_COST_SENSI ( myTime, myIter, myThid )
+# endif
+# ifdef ALLOW_AUTODIFF
+      ELSEIF ( SEAICEadjMODE .EQ. -1 ) THEN
+CADJ STORE area = comlev1, key=ikey_dynamics, kind=isbyte
+        CALL SEAICE_FAKE( myTime, myIter, myThid )
+# endif /* ALLOW_AUTODIFF */
+      ENDIF
+#endif /* ALLOW_SEAICE */
+
+#if (defined ALLOW_OCN_COMPON_INTERF) && (defined ALLOW_THSICE)
+C--   After seaice-dyn and advection of pkg/thsice fields,
+C     Export ocean coupling fields to coupled interface (only with pkg/thsice)
+      IF ( useCoupler ) THEN
+# ifdef ALLOW_DEBUG
+        IF (debugMode) CALL DEBUG_CALL('OCN_EXPORT_DATA',myThid)
+# endif
+         CALL TIMER_START('OCN_EXPORT_DATA [DO_OCEANIC_PHYS]', myThid)
+         CALL OCN_EXPORT_DATA( myTime, myIter, myThid )
+         CALL TIMER_STOP ('OCN_EXPORT_DATA [DO_OCEANIC_PHYS]', myThid)
+      ENDIF
+#endif /* ALLOW_OCN_COMPON_INTERF & ALLOW_THSICE */
+
+#ifdef ALLOW_AUTODIFF_TAMC
+CADJ STORE sst, sss          = comlev1, key=ikey_dynamics, kind=isbyte
+CADJ STORE qsw               = comlev1, key=ikey_dynamics, kind=isbyte
+# ifdef ALLOW_SEAICE
+CADJ STORE area              = comlev1, key=ikey_dynamics, kind=isbyte
+# endif
+#endif
+
+#ifdef ALLOW_CPL_ISSM
+      IF ( useCoupler) CALL CPL_ISSM( myTime, myIter, myThid )
+#endif
+
+#ifdef ALLOW_SHELFICE
+      IF ( useShelfIce .AND. fluidIsWater ) THEN
+#ifdef ALLOW_DEBUG
+       IF (debugMode) CALL DEBUG_CALL('SHELFICE_THERMODYNAMICS',myThid)
+#endif
+#ifdef ALLOW_AUTODIFF_TAMC
+CADJ STORE salt, theta       = comlev1, key=ikey_dynamics, kind=isbyte
+CADJ STORE uvel, vvel        = comlev1, key=ikey_dynamics, kind=isbyte
+#endif
+C     compute temperature and (virtual) salt flux at the
+C     shelf-ice ocean interface
+       CALL TIMER_START('SHELFICE_THERMODYNAMICS [DO_OCEANIC_PHYS]',
+     &       myThid)
+       CALL SHELFICE_THERMODYNAMICS( myTime, myIter, myThid )
+       CALL TIMER_STOP( 'SHELFICE_THERMODYNAMICS [DO_OCEANIC_PHYS]',
+     &      myThid)
+      ENDIF
+#endif /* ALLOW_SHELFICE */
+
+#ifdef ALLOW_ICEFRONT
+      IF ( useICEFRONT .AND. fluidIsWater ) THEN
+#ifdef ALLOW_DEBUG
+       IF (debugMode) CALL DEBUG_CALL('ICEFRONT_THERMODYNAMICS',myThid)
+#endif
+C     compute temperature and (virtual) salt flux at the
+C     ice-front ocean interface
+       CALL TIMER_START('ICEFRONT_THERMODYNAMICS [DO_OCEANIC_PHYS]',
+     &       myThid)
+       CALL ICEFRONT_THERMODYNAMICS( myTime, myIter, myThid )
+       CALL TIMER_STOP( 'ICEFRONT_THERMODYNAMICS [DO_OCEANIC_PHYS]',
+     &      myThid)
+      ENDIF
+#endif /* ALLOW_ICEFRONT */
+
+#ifdef ALLOW_SALT_PLUME
+      IF ( useSALT_PLUME ) THEN
+Catn: exchanging saltPlumeFlux:
+        CALL SALT_PLUME_DO_EXCH( myTime, myIter, myThid )
+      ENDIF
+#endif /* ALLOW_SALT_PLUME */
+
+C--   Freeze water at the surface
+      IF ( allowFreezing ) THEN
+#ifdef ALLOW_AUTODIFF_TAMC
+CADJ STORE theta             = comlev1, key=ikey_dynamics, kind=isbyte
+#endif
+        CALL FREEZE_SURFACE( myTime, myIter, myThid )
+      ENDIF
+
+      iMin = 1-OLx
+      iMax = sNx+OLx
+      jMin = 1-OLy
+      jMax = sNy+OLy
+
+C---  Determines forcing terms based on external fields
+C     relaxation terms, etc.
+#ifdef ALLOW_AUTODIFF
+CADJ STORE salt, theta       = comlev1, key=ikey_dynamics, kind=isbyte
+#else  /* ALLOW_AUTODIFF */
+C--   if fluid is not water, by-pass surfaceForcing, find_rho, gmredi
+C     and all vertical mixing schemes, but keep OBCS_CALC
+      IF ( fluidIsWater ) THEN
+#endif /* ALLOW_AUTODIFF */
+#ifdef ALLOW_DEBUG
+      IF (debugMode) CALL DEBUG_CALL('EXTERNAL_FORCING_SURF',myThid)
+#endif
+        CALL EXTERNAL_FORCING_SURF(
+     I             iMin, iMax, jMin, jMax,
+     I             myTime, myIter, myThid )
+
+#ifdef  ALLOW_OBCS
+      IF (useOBCS) THEN
+C--   After all surface fluxes are known apply balancing fluxes and
+C--   apply tidal forcing to open boundaries
+# ifdef ALLOW_DEBUG
+       IF (debugMode) CALL DEBUG_CALL('OBCS_ADJUST',myThid)
+# endif
+       CALL OBCS_ADJUST(
+     I      myTime+deltaTClock, myIter+1, myThid )
+      ENDIF
+#endif  /* ALLOW_OBCS */
+
+#ifdef ALLOW_AUTODIFF_TAMC
+C--   HPF directive to help TAMC
+CHPF$ INDEPENDENT
+#endif /* ALLOW_AUTODIFF_TAMC */
+      DO bj=myByLo(myThid),myByHi(myThid)
+#ifdef ALLOW_AUTODIFF_TAMC
+C--   HPF directive to help TAMC
+CHPF$ INDEPENDENT
+#endif /* ALLOW_AUTODIFF_TAMC */
+       DO bi=myBxLo(myThid),myBxHi(myThid)
+
+#ifdef ALLOW_AUTODIFF_TAMC
+          act1 = bi - myBxLo(myThid)
+          max1 = myBxHi(myThid) - myBxLo(myThid) + 1
+          act2 = bj - myByLo(myThid)
+          max2 = myByHi(myThid) - myByLo(myThid) + 1
+          act3 = myThid - 1
+          max3 = nTx*nTy
+          act4 = ikey_dynamics - 1
+          itdkey = (act1 + 1) + act2*max1
+     &                      + act3*max1*max2
+     &                      + act4*max1*max2*max3
+#endif /* ALLOW_AUTODIFF_TAMC */
+
+C--   Set up work arrays with valid (i.e. not NaN) values
+C     These initial values do not alter the numerical results. They
+C     just ensure that all memory references are to valid floating
+C     point numbers. This prevents spurious hardware signals due to
+C     uninitialised but inert locations.
+        DO k=1,Nr
+         DO j=1-OLy,sNy+OLy
+          DO i=1-OLx,sNx+OLx
+C This is currently used by GMRedi, IVDC, MXL-depth  and Diagnostics
+           sigmaX(i,j,k) = 0. _d 0
+           sigmaY(i,j,k) = 0. _d 0
+           sigmaR(i,j,k) = 0. _d 0
+#if (defined (ALLOW_SIGMAR_COST_CONTRIBUTION) || defined (ALLOW_LEITH_QG))
+           sigmaRfield(i,j,k,bi,bj) = 0. _d 0
+#endif
+          ENDDO
+         ENDDO
+        ENDDO
+
+        DO j=1-OLy,sNy+OLy
+         DO i=1-OLx,sNx+OLx
+          rhoKm1 (i,j)   = 0. _d 0
+          rhoKp1 (i,j)   = 0. _d 0
+         ENDDO
+        ENDDO
+#ifdef ALLOW_AUTODIFF
+cph all the following init. are necessary for TAF
+cph although some of these are re-initialised later.
+        DO k=1,Nr
+         DO j=1-OLy,sNy+OLy
+          DO i=1-OLx,sNx+OLx
+           rhoInSitu(i,j,k,bi,bj) = 0.
+# ifdef ALLOW_GGL90
+           GGL90viscArU(i,j,k,bi,bj)  = 0. _d 0
+           GGL90viscArV(i,j,k,bi,bj)  = 0. _d 0
+           GGL90diffKr(i,j,k,bi,bj)  = 0. _d 0
+# endif /* ALLOW_GGL90 */
+# ifdef ALLOW_SALT_PLUME
+#  ifdef SALT_PLUME_VOLUME
+           SPforcingS(i,j,k,bi,bj) = 0. _d 0
+           SPforcingT(i,j,k,bi,bj) = 0. _d 0
+#  endif
+# endif /* ALLOW_SALT_PLUME */
+          ENDDO
+         ENDDO
+        ENDDO
+#ifdef ALLOW_OFFLINE
+       IF ( calcConvect ) THEN
+#endif
+        DO k=1,Nr
+         DO j=1-OLy,sNy+OLy
+          DO i=1-OLx,sNx+OLx
+           IVDConvCount(i,j,k,bi,bj) = 0.
+          ENDDO
+         ENDDO
+        ENDDO
+#ifdef ALLOW_OFFLINE
+       ENDIF
+       IF ( calcGMRedi ) THEN
+#endif
+# ifdef ALLOW_GMREDI
+        DO k=1,Nr
+         DO j=1-OLy,sNy+OLy
+          DO i=1-OLx,sNx+OLx
+           Kwx(i,j,k,bi,bj)  = 0. _d 0
+           Kwy(i,j,k,bi,bj)  = 0. _d 0
+           Kwz(i,j,k,bi,bj)  = 0. _d 0
+           Kux(i,j,k,bi,bj)  = 0. _d 0
+           Kvy(i,j,k,bi,bj)  = 0. _d 0
+#  ifdef GM_EXTRA_DIAGONAL
+           Kuz(i,j,k,bi,bj)  = 0. _d 0
+           Kvz(i,j,k,bi,bj)  = 0. _d 0
+#  endif
+#  ifdef GM_BOLUS_ADVEC
+           GM_PsiX(i,j,k,bi,bj)  = 0. _d 0
+           GM_PsiY(i,j,k,bi,bj)  = 0. _d 0
+#  endif
+#  ifdef GM_VISBECK_VARIABLE_K
+           VisbeckK(i,j,bi,bj)   = 0. _d 0
+#  endif
+          ENDDO
+         ENDDO
+        ENDDO
+# endif /* ALLOW_GMREDI */
+#ifdef ALLOW_OFFLINE
+       ENDIF
+       IF ( calcKPP ) THEN
+#endif
+# ifdef ALLOW_KPP
+        DO k=1,Nr
+         DO j=1-OLy,sNy+OLy
+          DO i=1-OLx,sNx+OLx
+           KPPdiffKzS(i,j,k,bi,bj)  = 0. _d 0
+           KPPdiffKzT(i,j,k,bi,bj)  = 0. _d 0
+          ENDDO
+         ENDDO
+        ENDDO
+# endif /* ALLOW_KPP */
+#ifdef ALLOW_OFFLINE
+       ENDIF
+#endif
+#endif /* ALLOW_AUTODIFF */
+
+#ifdef ALLOW_AUTODIFF_TAMC
+CADJ STORE theta(:,:,:,bi,bj)  = comlev1_bibj, key=itdkey, kind=isbyte
+CADJ STORE salt (:,:,:,bi,bj)  = comlev1_bibj, key=itdkey, kind=isbyte
+CADJ STORE totphihyd(:,:,:,bi,bj)
+CADJ &                         = comlev1_bibj, key=itdkey, kind=isbyte
+# ifdef ALLOW_KPP
+CADJ STORE uvel (:,:,:,bi,bj)  = comlev1_bibj, key=itdkey, kind=isbyte
+CADJ STORE vvel (:,:,:,bi,bj)  = comlev1_bibj, key=itdkey, kind=isbyte
+# endif
+# ifdef ALLOW_SALT_PLUME
+CADJ STORE saltplumedepth(:,:,bi,bj)
+CADJ &                         = comlev1_bibj, key=itdkey, kind=isbyte
+CADJ STORE saltplumeflux(:,:,bi,bj)
+CADJ &                         = comlev1_bibj, key=itdkey, kind=isbyte
+# endif
+#endif /* ALLOW_AUTODIFF_TAMC */
+
+C--   Always compute density (stored in common block) here; even when it is not
+C     needed here, will be used anyway in calc_phi_hyd (data flow easier this way)
+#ifdef ALLOW_DEBUG
+        IF (debugMode) CALL DEBUG_CALL('FIND_RHO_2D (xNr)',myThid)
+#endif
+#ifdef ALLOW_AUTODIFF
+        IF ( fluidIsWater ) THEN
+#endif /* ALLOW_AUTODIFF */
+#ifdef ALLOW_DOWN_SLOPE
+         IF ( useDOWN_SLOPE ) THEN
+           DO k=1,Nr
+            CALL DWNSLP_CALC_RHO(
+     I                  theta, salt,
+     O                  rhoInSitu(1-OLx,1-OLy,k,bi,bj),
+     I                  k, bi, bj, myTime, myIter, myThid )
+           ENDDO
+         ENDIF
+#endif /* ALLOW_DOWN_SLOPE */
+#ifdef ALLOW_BBL
+         IF ( useBBL ) THEN
+C     pkg/bbl requires in-situ bbl density for depths equal to and deeper than the bbl.
+C     To reduce computation and storage requirement, these densities are stored in the
+C     dry grid boxes of rhoInSitu.  See BBL_CALC_RHO for details.
+           DO k=Nr,1,-1
+            CALL BBL_CALC_RHO(
+     I                  theta, salt,
+     O                  rhoInSitu,
+     I                  k, bi, bj, myTime, myIter, myThid )
+
+           ENDDO
+         ENDIF
+#endif /* ALLOW_BBL */
+         IF ( .NOT. ( useDOWN_SLOPE .OR. useBBL ) ) THEN
+           DO k=1,Nr
+            CALL FIND_RHO_2D(
+     I                iMin, iMax, jMin, jMax, k,
+     I                theta(1-OLx,1-OLy,k,bi,bj),
+     I                salt (1-OLx,1-OLy,k,bi,bj),
+     O                rhoInSitu(1-OLx,1-OLy,k,bi,bj),
+     I                k, bi, bj, myThid )
+           ENDDO
+         ENDIF
+#ifdef ALLOW_AUTODIFF
+        ELSE
+C-        fluid is not water:
+          DO k=1,Nr
+           IF ( select_rStar.GE.1 .OR. selectSigmaCoord.GE.1 ) THEN
+C-    isothermal (theta=const) reference state
+             thetaRef = thetaConst
+           ELSE
+C-    horizontally uniform (tRef) reference state
+             thetaRef = tRef(k)
+           ENDIF
+           DO j=1-OLy,sNy+OLy
+            DO i=1-OLx,sNx+OLx
+             rhoInSitu(i,j,k,bi,bj) =
+     &         ( theta(i,j,k,bi,bj)
+     &              *( salt(i,j,k,bi,bj)*atm_Rq + oneRL )
+     &         - thetaRef )*maskC(i,j,k,bi,bj)
+            ENDDO
+           ENDDO
+          ENDDO
+        ENDIF
+#endif /* ALLOW_AUTODIFF */
+
+#ifdef ALLOW_DEBUG
+        IF (debugMode) THEN
+          WRITE(msgBuf,'(A,2(I4,A))')
+     &         'ENTERING UPWARD K LOOP (bi=', bi, ', bj=', bj,')'
+          CALL DEBUG_MSG(msgBuf(1:43),myThid)
+        ENDIF
+#endif
+
+C--     Start of diagnostic loop
+        DO k=Nr,1,-1
+
+#ifdef ALLOW_AUTODIFF_TAMC
+C? Patrick, is this formula correct now that we change the loop range?
+C? Do we still need this?
+cph kkey formula corrected.
+cph Needed for rhoK, rhoKm1, in the case useGMREDI.
+          kkey = (itdkey-1)*Nr + k
+#endif /* ALLOW_AUTODIFF_TAMC */
+
+c#ifdef ALLOW_AUTODIFF_TAMC
+cCADJ STORE theta(:,:,k,bi,bj) = comlev1_bibj_k, key=kkey,
+cCADJ &     kind = isbyte
+cCADJ STORE salt(:,:,k,bi,bj)  = comlev1_bibj_k, key=kkey,
+cCADJ &     kind = isbyte
+c#endif /* ALLOW_AUTODIFF_TAMC */
+
+C--       Calculate gradients of potential density for isoneutral
+C         slope terms (e.g. GM/Redi tensor or IVDC diffusivity)
+          IF ( calcGMRedi .OR. (k.GT.1 .AND. calcConvect)
+     &         .OR. usePP81 .OR. useKL10
+     &         .OR. useMY82 .OR. useGGL90
+     &         .OR. useSALT_PLUME .OR. doDiagsRho.GE.1 ) THEN
+            IF (k.GT.1) THEN
+#ifdef ALLOW_AUTODIFF_TAMC
+CADJ STORE theta(:,:,k-1,bi,bj) = comlev1_bibj_k, key=kkey,kind=isbyte
+CADJ STORE salt (:,:,k-1,bi,bj) = comlev1_bibj_k, key=kkey,kind=isbyte
+CADJ STORE rhokm1 (bi,bj)       = comlev1_bibj_k, key=kkey,kind=isbyte
+#endif /* ALLOW_AUTODIFF_TAMC */
+             IF ( usingZCoords ) THEN
+              DO j=jMin,jMax
+               DO i=iMin,iMax
+                rhoKp1(i,j) = rhoInSitu(i,j,k,bi,bj)
+               ENDDO
+              ENDDO
+              CALL FIND_RHO_2D(
+     I                 iMin, iMax, jMin, jMax, k,
+     I                 theta(1-OLx,1-OLy,k-1,bi,bj),
+     I                 salt (1-OLx,1-OLy,k-1,bi,bj),
+     O                 rhoKm1,
+     I                 k-1, bi, bj, myThid )
+             ELSE
+              CALL FIND_RHO_2D(
+     I                 iMin, iMax, jMin, jMax, k-1,
+     I                 theta(1-OLx,1-OLy,k,bi,bj),
+     I                 salt (1-OLx,1-OLy,k,bi,bj),
+     O                 rhoKp1,
+     I                 k, bi, bj, myThid )
+              DO j=jMin,jMax
+               DO i=iMin,iMax
+                rhoKm1(i,j) = rhoInSitu(i,j,k-1,bi,bj)
+               ENDDO
+              ENDDO
+             ENDIF
+            ENDIF
+#ifdef ALLOW_DEBUG
+            IF (debugMode) CALL DEBUG_CALL('GRAD_SIGMA',myThid)
+#endif
+            CALL GRAD_SIGMA(
+     I             bi, bj, iMin, iMax, jMin, jMax, k,
+     I             rhoInSitu(1-OLx,1-OLy,k,bi,bj), rhoKm1, rhoKp1,
+     O             sigmaX, sigmaY, sigmaR,
+     I             myThid )
+
+#if (defined (ALLOW_SIGMAR_COST_CONTRIBUTION) || defined (ALLOW_LEITH_QG))
+            DO j=jMin,jMax
+             DO i=iMin,iMax
+              sigmaRfield(i,j,k,bi,bj)=sigmaR(i,j,k)
+             ENDDO
+            ENDDO
+#endif /* ALLOW_SIGMAR_COST_CONTRIBUTION or ALLOW_LEITH_QG */
+
+#ifdef ALLOW_AUTODIFF
+#ifdef GMREDI_WITH_STABLE_ADJOINT
+cgf zero out adjoint fields to stabilize pkg/gmredi adjoint
+cgf -> cuts adjoint dependency from slope to state
+            CALL ZERO_ADJ_LOC( Nr, sigmaX, myThid)
+            CALL ZERO_ADJ_LOC( Nr, sigmaY, myThid)
+            CALL ZERO_ADJ_LOC( Nr, sigmaR, myThid)
+#endif
+#endif /* ALLOW_AUTODIFF */
+          ENDIF
+
+C--       Implicit Vertical Diffusion for Convection
+          IF (k.GT.1 .AND. calcConvect) THEN
+#ifdef ALLOW_DEBUG
+            IF (debugMode) CALL DEBUG_CALL('CALC_IVDC',myThid)
+#endif
+            CALL CALC_IVDC(
+     I        bi, bj, iMin, iMax, jMin, jMax, k,
+     I        sigmaR,
+     I        myTime, myIter, myThid)
+          ENDIF
+
+#ifdef ALLOW_DIAGNOSTICS
+          IF ( doDiagsRho.GE.4 ) THEN
+            CALL DIAGS_RHO_L( doDiagsRho, k, bi, bj,
+     I                        rhoInSitu(1-OLx,1-OLy,1,bi,bj),
+     I                        rhoKm1, wVel,
+     I                        myTime, myIter, myThid )
+          ENDIF
+#endif
+
+C--     end of diagnostic k loop (Nr:1)
+        ENDDO
+
+#ifdef ALLOW_AUTODIFF_TAMC
+CADJ STORE IVDConvCount(:,:,:,bi,bj)
+CADJ &     = comlev1_bibj, key=itdkey, kind=isbyte
+#endif
+
+C--     Diagnose Mixed Layer Depth:
+        IF ( calcGMRedi .OR. MOD(doDiagsRho,2).EQ.1 ) THEN
+          CALL CALC_OCE_MXLAYER(
+     I              rhoInSitu(1-OLx,1-OLy,kSrf,bi,bj), sigmaR,
+     I              bi, bj, myTime, myIter, myThid )
+        ENDIF
+
+#ifdef ALLOW_SALT_PLUME
+        IF ( useSALT_PLUME ) THEN
+          CALL SALT_PLUME_CALC_DEPTH(
+     I              rhoInSitu(1-OLx,1-OLy,kSrf,bi,bj), sigmaR,
+     I              bi, bj, myTime, myIter, myThid )
+#ifdef SALT_PLUME_VOLUME
+          CALL SALT_PLUME_VOLFRAC(
+     I              bi, bj, myTime, myIter, myThid )
+C-- get forcings for kpp
+          CALL SALT_PLUME_APPLY(
+     I              1, bi, bj, recip_hFacC(1-OLx,1-OLy,kSrf,bi,bj),
+     I              theta, 0,
+     I              myTime, myIter, myThid )
+          CALL SALT_PLUME_APPLY(
+     I              2, bi, bj, recip_hFacC(1-OLx,1-OLy,kSrf,bi,bj),
+     I              salt, 0,
+     I              myTime, myIter, myThid )
+C-- need to call this S/R from here to apply just before kpp
+          CALL SALT_PLUME_FORCING_SURF(
+     I              bi, bj, iMin, iMax, jMin, jMax,
+     I              myTime, myIter, myThid )
+#endif /* SALT_PLUME_VOLUME */
+        ENDIF
+#endif /* ALLOW_SALT_PLUME */
+
+#ifdef ALLOW_DIAGNOSTICS
+        IF ( MOD(doDiagsRho,4).GE.2 ) THEN
+          CALL DIAGNOSTICS_FILL (sigmaR, 'DRHODR  ', 0, Nr,
+     &         2, bi, bj, myThid)
+        ENDIF
+#endif /* ALLOW_DIAGNOSTICS */
+
+C--    This is where EXTERNAL_FORCING_SURF(bi,bj) used to be called;
+C      now called earlier, before bi,bj loop.
+
+#ifdef ALLOW_AUTODIFF_TAMC
+cph needed for KPP
+CADJ STORE surfaceForcingU(:,:,bi,bj)
+CADJ &     = comlev1_bibj, key=itdkey, kind=isbyte
+CADJ STORE surfaceForcingV(:,:,bi,bj)
+CADJ &     = comlev1_bibj, key=itdkey, kind=isbyte
+CADJ STORE surfaceForcingS(:,:,bi,bj)
+CADJ &     = comlev1_bibj, key=itdkey, kind=isbyte
+CADJ STORE surfaceForcingT(:,:,bi,bj)
+CADJ &     = comlev1_bibj, key=itdkey, kind=isbyte
+#endif /* ALLOW_AUTODIFF_TAMC */
+
+#ifdef  ALLOW_KPP
+C--     Compute KPP mixing coefficients
+        IF ( calcKPP ) THEN
+#ifdef ALLOW_DEBUG
+          IF (debugMode) CALL DEBUG_CALL('KPP_CALC',myThid)
+#endif
+          CALL TIMER_START('KPP_CALC [DO_OCEANIC_PHYS]', myThid)
+          CALL KPP_CALC(
+     I                  bi, bj, myTime, myIter, myThid )
+          CALL TIMER_STOP ('KPP_CALC [DO_OCEANIC_PHYS]', myThid)
+#if (defined ALLOW_AUTODIFF) && !(defined ALLOW_OFFLINE)
+        ELSE
+          CALL KPP_CALC_DUMMY(
+     I                  bi, bj, myTime, myIter, myThid )
+#endif /* ALLOW_AUTODIFF and not ALLOW_OFFLINE */
+        ENDIF
+#endif  /* ALLOW_KPP */
+
+#ifdef  ALLOW_PP81
+C--     Compute PP81 mixing coefficients
+        IF (usePP81) THEN
+#ifdef ALLOW_DEBUG
+          IF (debugMode) CALL DEBUG_CALL('PP81_CALC',myThid)
+#endif
+          CALL PP81_CALC(
+     I                     bi, bj, sigmaR, myTime, myIter, myThid )
+        ENDIF
+#endif /* ALLOW_PP81 */
+
+#ifdef  ALLOW_KL10
+C--     Compute KL10 mixing coefficients
+        IF (useKL10) THEN
+#ifdef ALLOW_DEBUG
+          IF (debugMode) CALL DEBUG_CALL('KL10_CALC',myThid)
+#endif
+          CALL KL10_CALC(
+     I                     bi, bj, sigmaR, myTime, myIter, myThid )
+        ENDIF
+#endif /* ALLOW_KL10 */
+
+#ifdef  ALLOW_MY82
+C--     Compute MY82 mixing coefficients
+        IF (useMY82) THEN
+#ifdef ALLOW_DEBUG
+          IF (debugMode) CALL DEBUG_CALL('MY82_CALC',myThid)
+#endif
+          CALL MY82_CALC(
+     I                     bi, bj, sigmaR, myTime, myIter, myThid )
+        ENDIF
+#endif /* ALLOW_MY82 */
+
+#ifdef  ALLOW_GGL90
+#ifdef ALLOW_AUTODIFF_TAMC
+CADJ STORE GGL90TKE(:,:,:,bi,bj)
+CADJ &     = comlev1_bibj, key=itdkey, kind=isbyte
+#endif /* ALLOW_AUTODIFF_TAMC */
+C--     Compute GGL90 mixing coefficients
+        IF ( useGGL90 .AND. Nr.GT.1 ) THEN
+#ifdef ALLOW_DEBUG
+          IF (debugMode) CALL DEBUG_CALL('GGL90_CALC',myThid)
+#endif
+          CALL TIMER_START('GGL90_CALC [DO_OCEANIC_PHYS]', myThid)
+          CALL GGL90_CALC(
+     I                     bi, bj, sigmaR, myTime, myIter, myThid )
+          CALL TIMER_STOP ('GGL90_CALC [DO_OCEANIC_PHYS]', myThid)
+        ENDIF
+#endif /* ALLOW_GGL90 */
+
+#ifdef ALLOW_TIMEAVE
+        IF ( taveFreq.GT. 0. _d 0 ) THEN
+          CALL TIMEAVE_SURF_FLUX( bi, bj, myTime, myIter, myThid)
+        ENDIF
+        IF ( taveFreq.GT.0. .AND. calcConvect ) THEN
+          CALL TIMEAVE_CUMULATE(ConvectCountTave, IVDConvCount,
+     I                           Nr, deltaTClock, bi, bj, myThid)
+        ENDIF
+#endif /* ALLOW_TIMEAVE */
+
+#ifdef ALLOW_GMREDI
+#ifdef ALLOW_AUTODIFF_TAMC
+# ifndef GM_EXCLUDE_CLIPPING
+cph storing here is needed only for one GMREDI_OPTIONS:
+cph define GM_BOLUS_ADVEC
+cph keep it although TAF says you dont need to.
+cph but I have avoided the #ifdef for now, in case more things change
+CADJ STORE sigmaX(:,:,:)       = comlev1_bibj, key=itdkey, kind=isbyte
+CADJ STORE sigmaY(:,:,:)       = comlev1_bibj, key=itdkey, kind=isbyte
+CADJ STORE sigmaR(:,:,:)       = comlev1_bibj, key=itdkey, kind=isbyte
+# endif
+#endif /* ALLOW_AUTODIFF_TAMC */
+
+C--     Calculate iso-neutral slopes for the GM/Redi parameterisation
+        IF ( calcGMRedi ) THEN
+#ifdef ALLOW_DEBUG
+          IF (debugMode) CALL DEBUG_CALL('GMREDI_CALC_TENSOR',myThid)
+#endif
+          CALL GMREDI_CALC_TENSOR(
+     I             iMin, iMax, jMin, jMax,
+     I             sigmaX, sigmaY, sigmaR,
+     I             bi, bj, myTime, myIter, myThid )
+#if (defined ALLOW_AUTODIFF) && !(defined ALLOW_OFFLINE)
+        ELSE
+          CALL GMREDI_CALC_TENSOR_DUMMY(
+     I             iMin, iMax, jMin, jMax,
+     I             sigmaX, sigmaY, sigmaR,
+     I             bi, bj, myTime, myIter, myThid )
+#endif /* ALLOW_AUTODIFF and not ALLOW_OFFLINE */
+        ENDIF
+#endif /* ALLOW_GMREDI */
+
+#ifdef ALLOW_DOWN_SLOPE
+        IF ( useDOWN_SLOPE ) THEN
+C--     Calculate Downsloping Flow for Down_Slope parameterization
+         IF ( usingPCoords ) THEN
+          CALL DWNSLP_CALC_FLOW(
+     I                bi, bj, kSurfC, rhoInSitu,
+     I                myTime, myIter, myThid )
+         ELSE
+          CALL DWNSLP_CALC_FLOW(
+     I                bi, bj, kLowC, rhoInSitu,
+     I                myTime, myIter, myThid )
+         ENDIF
+        ENDIF
+#endif /* ALLOW_DOWN_SLOPE */
+
+C--   end bi,bj loops.
+       ENDDO
+      ENDDO
+
+#ifndef ALLOW_AUTODIFF
+C---  if fluid Is Water: end
+      ENDIF
+#endif
+
+#ifdef ALLOW_OFFLINE
+      IF ( useOffLine ) THEN
+#ifdef ALLOW_DEBUG
+        IF (debugMode) CALL DEBUG_CALL('OFFLINE_GET_DIFFUS',myThid)
+#endif /* ALLOW_DEBUG */
+        CALL OFFLINE_GET_DIFFUS( myTime, myIter, myThid )
+      ENDIF
+#endif /* ALLOW_OFFLINE */
+
+#ifdef ALLOW_BBL
+      IF ( useBBL ) THEN
+       CALL BBL_CALC_RHS(
+     I                          myTime, myIter, myThid )
+      ENDIF
+#endif /* ALLOW_BBL */
+
+#ifdef ALLOW_MYPACKAGE
+      IF ( useMYPACKAGE ) THEN
+       CALL MYPACKAGE_CALC_RHS(
+     I                          myTime, myIter, myThid )
+      ENDIF
+#endif /* ALLOW_MYPACKAGE */
+
+#ifdef ALLOW_GMREDI
+      IF ( calcGMRedi ) THEN
+        CALL GMREDI_DO_EXCH( myTime, myIter, myThid )
+      ENDIF
+#endif /* ALLOW_GMREDI */
+
+#ifdef ALLOW_KPP
+      IF ( calcKPP ) THEN
+        CALL KPP_DO_EXCH( myThid )
+      ENDIF
+#endif /* ALLOW_KPP */
+
+#ifdef ALLOW_GGL90
+      IF ( useGGL90 )
+     &  CALL GGL90_EXCHANGES( myThid )
+#endif /* ALLOW_GGL90 */
+
+#ifdef ALLOW_DIAGNOSTICS
+      IF ( fluidIsWater .AND. useDiagnostics ) THEN
+        CALL DIAGS_RHO_G(
+     I                    rhoInSitu, uVel, vVel, wVel,
+     I                    myTime, myIter, myThid )
+      ENDIF
+      IF ( useDiagnostics ) THEN
+        CALL DIAGS_OCEANIC_SURF_FLUX( myTime, myIter, myThid )
+      ENDIF
+      IF ( calcConvect .AND. useDiagnostics ) THEN
+        CALL DIAGNOSTICS_FILL( IVDConvCount, 'CONVADJ ',
+     &                               0, Nr, 0, 1, 1, myThid )
+      ENDIF
+#ifdef ALLOW_SALT_PLUME
+      IF ( useDiagnostics )
+     &      CALL SALT_PLUME_DIAGNOSTICS_FILL(bi,bj,myThid)
+#endif
+#endif
+
+#ifdef ALLOW_DEBUG
+      IF (debugMode) CALL DEBUG_LEAVE('DO_OCEANIC_PHYS',myThid)
+#endif
+
+      RETURN
+      END
Index: /issm/trunk-jpl/test/MITgcm/code_remesh/eeboot_minimal.F
===================================================================
--- /issm/trunk-jpl/test/MITgcm/code_remesh/eeboot_minimal.F	(revision 27556)
+++ /issm/trunk-jpl/test/MITgcm/code_remesh/eeboot_minimal.F	(revision 27556)
@@ -0,0 +1,328 @@
+#include "PACKAGES_CONFIG.h"
+#include "CPP_EEOPTIONS.h"
+
+CBOP
+C     !ROUTINE: EEBOOT_MINIMAL
+
+C     !INTERFACE:
+      SUBROUTINE EEBOOT_MINIMAL( myComm )
+
+C     !DESCRIPTION:
+C     *==========================================================*
+C     | SUBROUTINE EEBOOT\_MINIMAL
+C     | o Set an initial environment that is predictable i.e.
+C     | behaves in a similar way on all machines and stable.
+C     *==========================================================*
+C     | Under MPI this routine calls MPI\_INIT to setup the
+C     | mpi environment ( on some systems the code is running as
+C     | a single process prior to MPI\_INIT, on others the mpirun
+C     | script has already created multiple processes). Until
+C     | MPI\_Init is called it is unclear what state the
+C     | application is in. Once this routine has been run it is
+C     | "safe" to do things like I/O to report errors and to get
+C     | run parameters.
+C     | Note: This routine can also be compiled with CPP
+C     | directives set so that no multi-processing is initialised.
+C     | This is OK and will work fine.
+C     *==========================================================*
+
+C     !USES:
+      IMPLICIT NONE
+C     == Global data ==
+#include "SIZE.h"
+#include "EEPARAMS.h"
+#include "EESUPPORT.h"
+
+C     !ROUTINE ARGUMENTS
+C     == Routine arguments ==
+C     myComm     :: Communicator that is passed down from
+C                   upper level driver (if there is one).
+      INTEGER myComm
+
+C     !FUNCTIONS:
+c     INTEGER  IFNBLNK
+c     EXTERNAL IFNBLNK
+      INTEGER  ILNBLNK
+      EXTERNAL ILNBLNK
+
+C     !LOCAL VARIABLES:
+C     == Local variables ==
+C     myThid     :: Temp. dummy thread number.
+C     fNam       :: Used to build file name for standard and error output.
+C     msgBuf     :: Used to build messages for printing.
+      INTEGER myThid
+#ifdef USE_PDAF
+      CHARACTER*18 fNam
+#else
+      CHARACTER*13 fNam
+#endif /* USE_PDAF */
+      CHARACTER*(MAX_LEN_MBUF) msgBuf
+#ifdef ALLOW_USE_MPI
+C     mpiRC      :: Error code reporting variable used with MPI.
+      INTEGER mpiRC
+      INTEGER mpiIsInitialized
+      LOGICAL doReport
+#if defined(ALLOW_OASIS) || defined(COMPONENT_MODULE)
+      INTEGER mpiMyWId
+#elif defined(ALLOW_NEST2W_COMMON)
+      INTEGER mpiMyWId
+#endif
+#ifdef ALLOW_CPL_ISSM
+      COMMON /CPL_MPI_ID/ mpiMyWid, toissmcomm
+      INTEGER             mpiMyWid, toissmcomm
+      INTEGER my_local_rank,my_local_size, numprocsworld
+      INTEGER status(MPI_STATUS_SIZE)
+#endif /* ALLOW_CPL_ISSM */
+#if defined(ALLOW_NEST_PARENT) || defined(ALLOW_NEST_CHILD)
+      INTEGER mpiMyWId, color
+#endif
+#ifdef USE_PDAF
+      INTEGER mpi_task_id
+      CHARACTER*(14) fmtStr
+#else
+      CHARACTER*(6) fmtStr
+#endif /* USE_PDAF */
+      INTEGER iTmp
+#endif /* ALLOW_USE_MPI */
+CEOP
+
+C--   Default values set to single processor case
+      numberOfProcs = 1
+      myProcId      = 0
+      pidIO         = myProcId
+      myProcessStr  = '------'
+C     Set a dummy value for myThid because we are not multi-threading yet.
+      myThid        = 1
+
+C     Annoyingly there is no universal way to have the usingMPI
+C     parameter work as one might expect. This is because, on some
+C     systems I/O does not work until MPI_Init has been called.
+C     The solution for now is that the parameter below may need to
+C     be changed manually!
+#ifdef ALLOW_USE_MPI
+      usingMPI = .TRUE.
+#else
+      usingMPI = .FALSE.
+#endif
+
+      IF ( .NOT.usingMPI ) THEN
+
+        WRITE(myProcessStr,'(I4.4)') myProcId
+        WRITE(fNam,'(A,A)') 'STDERR.', myProcessStr(1:4)
+        OPEN(errorMessageUnit,FILE=fNam,STATUS='unknown')
+c       WRITE(fNam,'(A,A)') 'STDOUT.', myProcessStr(1:4)
+c       OPEN(standardMessageUnit,FILE=fNam,STATUS='unknown')
+
+#ifdef ALLOW_USE_MPI
+      ELSE
+C--   MPI style multiple-process initialisation
+C--   =========================================
+
+       CALL MPI_Initialized( mpiIsInitialized, mpiRC )
+
+       IF ( mpiIsInitialized .EQ. 0 ) THEN
+C--     Initialise MPI multi-process parallel environment.
+C       On some systems program forks at this point. Others have already
+C       forked within mpirun - now that's an open standard!
+        CALL MPI_INIT( mpiRC )
+        IF ( mpiRC .NE. MPI_SUCCESS ) THEN
+         eeBootError = .TRUE.
+         WRITE(msgBuf,'(A,I5)')
+     &        'EEBOOT_MINIMAL: MPI_INIT return code', mpiRC
+         CALL PRINT_ERROR( msgBuf, myThid )
+         GOTO 999
+        ENDIF
+
+C--     MPI has now been initialized ; now we need to either
+C       ask for a communicator or pretend that we have:
+C       Pretend that we have asked for a communicator
+        MPI_COMM_MODEL = MPI_COMM_WORLD
+
+       ELSE
+C--     MPI was already initialized and communicator has been passed
+C       down from upper level driver
+        MPI_COMM_MODEL = myComm
+
+       ENDIF
+
+       doReport = .FALSE.
+#ifdef USE_PDAF
+C     initialize PDAF
+C     for more output increase second parameter from 1 to 2
+       CALL INIT_PARALLEL_PDAF(0, 1, MPI_COMM_MODEL, MPI_COMM_MODEL,
+     &      mpi_task_id)
+#endif /* USE_PDAF */
+
+#ifdef ALLOW_OASIS
+C      add a first preliminary call EESET_PARMS to set useOASIS
+C      (needed to decide either to call OASIS_INIT or not)
+       CALL MPI_COMM_RANK( MPI_COMM_WORLD, mpiMyWId, mpiRC )
+       CALL EESET_PARMS ( mpiMyWId, doReport )
+       IF ( useOASIS ) CALL OASIS_INIT(MPI_COMM_MODEL)
+#endif /* ALLOW_OASIS */
+
+#ifdef COMPONENT_MODULE
+C--    Set the running directory
+       CALL MPI_COMM_RANK( MPI_COMM_WORLD, mpiMyWId, mpiRC )
+       CALL SETDIR( mpiMyWId )
+
+C- jmc: test:
+C      add a first preliminary call EESET_PARMS to set useCoupler
+C      (needed to decide either to call CPL_INIT or not)
+       CALL EESET_PARMS ( mpiMyWId, doReport )
+C- jmc: test end ; otherwise, uncomment next line:
+c      useCoupler = .TRUE.
+
+C--    Ask coupler interface for a communicator
+       IF ( useCoupler) CALL CPL_INIT
+#endif /* COMPONENT_MODULE */
+
+C--    Case with Nest(ing)
+#if defined(ALLOW_NEST_PARENT) || defined(ALLOW_NEST_CHILD)
+C--    Set the running directory
+       CALL MPI_COMM_RANK( MPI_COMM_WORLD, mpiMyWId, mpiRC )
+       CALL SETDIR( mpiMyWId )
+
+C--    Setup Nesting Execution Environment
+       CALL NEST_EEINIT( mpiMyWId, color )
+#endif /* ALLOW_NEST_PARENT | ALLOW_NEST_CHILD */
+
+#ifdef ALLOW_NEST2W_COMMON
+C--    Case with 2-Ways Nest(ing)
+C-     Set the running directory
+       CALL MPI_COMM_RANK( MPI_COMM_WORLD, mpiMyWId, mpiRC )
+       CALL SETDIR( mpiMyWId )
+
+C-     Setup Nesting Execution Environment
+       CALL NEST2W_EEINIT( mpiMyWId )
+       IF ( eeBootError ) GOTO 999
+#endif /* ALLOW_NEST2W_COMMON */
+
+#ifdef ALLOW_CPL_ISSM
+C     add a first preliminary call EESET_PARMS to set useCoupler
+       CALL MPI_COMM_RANK(MPI_COMM_WORLD, mpiMyWid, mpiRC)
+       CALL EESET_PARMS ( mpiMyWId, doReport )
+
+       IF ( useCoupler ) THEN
+          CALL MPI_COMM_SIZE(MPI_COMM_WORLD, numprocsworld, mpiRC)
+
+c     Split world into sub-communicators for each and every model
+          CALL MPI_COMM_SPLIT(MPI_COMM_WORLD,1,MPIMYWID,
+     &         MPI_COMM_MODEL,mpiRC)
+
+          print*,'Oc My global rank',mpiMyWid
+          print*,'Oc My world size:',numprocsworld
+
+          CALL MPI_INTERCOMM_CREATE(MPI_COMM_MODEL,0,MPI_COMM_WORLD,
+     &         0,0,toissmcomm,mpiRC)
+
+          CALL MPI_COMM_RANK(MPI_COMM_MODEL, my_local_rank, mpiRC)
+          CALL MPI_COMM_SIZE(MPI_COMM_MODEL, my_local_size, mpiRC)
+
+          print*,'Oc My global rank',mpiMyWid,'MyLocal rank: ',
+     &         my_local_rank
+          print*,'Oc My world size:',numprocsworld,'My local size: ',
+     &         my_local_size
+       ENDIF
+#endif /* ALLOW_CPL_ISSM */
+
+C---+----1----+----2----+----3----+----4----+----5----+----6----+----7-|--+----|
+
+C--    Get my process number
+       CALL MPI_COMM_RANK( MPI_COMM_MODEL, mpiMyId, mpiRC )
+       IF ( mpiRC .NE. MPI_SUCCESS ) THEN
+        eeBootError = .TRUE.
+        WRITE(msgBuf,'(A,I5)')
+     &        'EEBOOT_MINIMAL: MPI_COMM_RANK return code', mpiRC
+        CALL PRINT_ERROR( msgBuf, myThid )
+        GOTO 999
+       ENDIF
+       myProcId = mpiMyId
+       iTmp = MAX(4,1 + INT(LOG10(DFLOAT(nPx*nPy))))
+#ifdef USE_PDAF
+       WRITE(fmtStr,'(4(A,I1),A)')
+     &      '(I',iTmp,'.',iTmp,',A1,I',iTmp,'.',iTmp,')'
+       WRITE(myProcessStr,fmtStr) mpi_task_id,'.',myProcId
+#else
+       WRITE(fmtStr,'(2(A,I1),A)') '(I',iTmp,'.',iTmp,')'
+       WRITE(myProcessStr,fmtStr) myProcId
+#endif /* USE_PDAF */
+       iTmp = ILNBLNK( myProcessStr )
+       mpiPidIo = myProcId
+       pidIO    = mpiPidIo
+       IF ( mpiPidIo .EQ. myProcId ) THEN
+#ifdef SINGLE_DISK_IO
+        IF( myProcId .EQ. 0 ) THEN
+#endif
+         WRITE(fNam,'(A,A)') 'STDERR.', myProcessStr(1:iTmp)
+         OPEN(errorMessageUnit,FILE=fNam,STATUS='unknown')
+         WRITE(fNam,'(A,A)') 'STDOUT.', myProcessStr(1:iTmp)
+         OPEN(standardMessageUnit,FILE=fNam,STATUS='unknown')
+#ifdef SINGLE_DISK_IO
+        ELSE
+         OPEN(errorMessageUnit,FILE='/dev/null',STATUS='unknown')
+         standardMessageUnit=errorMessageUnit
+        ENDIF
+        IF( myProcId .EQ. 0 ) THEN
+          WRITE(msgBuf,'(2A)') '** WARNING ** EEBOOT_MINIMAL: ',
+     &     'defined SINGLE_DISK_IO will result in losing'
+          CALL PRINT_MESSAGE( msgBuf, errorMessageUnit,
+     &                        SQUEEZE_RIGHT, myThid )
+          WRITE(msgBuf,'(2A)') '** WARNING ** EEBOOT_MINIMAL: ',
+     &     'any message (error/warning) from any proc <> 0'
+          CALL PRINT_MESSAGE( msgBuf, errorMessageUnit,
+     &                        SQUEEZE_RIGHT, myThid )
+        ENDIF
+#endif
+       ENDIF
+
+#if defined(ALLOW_NEST_PARENT) || defined(ALLOW_NEST_CHILD)
+       WRITE(standardMessageUnit,'(2(A,I6))')
+     &           ' mpiMyWId =', mpiMyWId, ' , color =',color
+#endif /* ALLOW_NEST_PARENT | ALLOW_NEST_CHILD */
+
+C--    Synchronise all processes
+C      Strictly this is superfluous, but by using it we can guarantee to
+C      find out about processes that did not start up.
+       CALL MPI_BARRIER( MPI_COMM_MODEL, mpiRC )
+       IF ( mpiRC .NE. MPI_SUCCESS ) THEN
+        eeBootError = .TRUE.
+        WRITE(msgBuf,'(A,I6)')
+     &        'EEBOOT_MINIMAL: MPI_BARRIER return code', mpiRC
+        CALL PRINT_ERROR( msgBuf, myThid )
+        GOTO 999
+       ENDIF
+
+C--    Get number of MPI processes
+       CALL MPI_COMM_SIZE ( MPI_COMM_MODEL, mpiNProcs, mpiRC )
+       IF ( mpiRC .NE. MPI_SUCCESS ) THEN
+        eeBootError = .TRUE.
+        WRITE(msgBuf,'(A,I6)')
+     &        'EEBOOT_MINIMAL: MPI_COMM_SIZE return code', mpiRC
+        CALL PRINT_ERROR( msgBuf, myThid )
+        GOTO 999
+       ENDIF
+       numberOfProcs = mpiNProcs
+
+#endif /* ALLOW_USE_MPI */
+      ENDIF
+
+C--    Under MPI only allow same number of processes as proc grid size.
+C      Strictly we are allowed more procs but knowing there
+C      is an exact match makes things easier.
+       IF ( numberOfProcs .NE. nPx*nPy ) THEN
+        eeBootError = .TRUE.
+        WRITE(msgBuf,'(2(A,I6))')
+     &  'EEBOOT_MINIMAL: No. of procs=', numberOfProcs,
+     &  ' not equal to nPx*nPy=', nPx*nPy
+        CALL PRINT_ERROR( msgBuf, myThid )
+        GOTO 999
+       ENDIF
+
+#ifdef USE_LIBHPM
+       CALL F_HPMINIT(myProcId, "mitgcmuv")
+#endif
+
+ 999  CONTINUE
+      RETURN
+      END
Index: /issm/trunk-jpl/test/MITgcm/code_remesh/eedie.F
===================================================================
--- /issm/trunk-jpl/test/MITgcm/code_remesh/eedie.F	(revision 27556)
+++ /issm/trunk-jpl/test/MITgcm/code_remesh/eedie.F	(revision 27556)
@@ -0,0 +1,107 @@
+#include "CPP_EEOPTIONS.h"
+#ifdef USE_LIBHPM
+# include "f_hpm.h"
+#endif
+
+CBOP
+      SUBROUTINE EEDIE
+C     *==========================================================*
+C     | SUBROUTINE EEDIE                                         |
+C     | o Close execution "environment", particularly perform    |
+C     |   steps to terminate parallel processing.                |
+C     *==========================================================*
+C     | Note: This routine can also be compiled with CPP         |
+C     | directives set so that no multi-processing is initialised|
+C     | This is OK and should work fine.                         |
+C     *==========================================================*
+      IMPLICIT NONE
+
+C     == Global variables ==
+#include "SIZE.h"
+#include "EEPARAMS.h"
+#include "EESUPPORT.h"
+CEOP
+
+C     == Local variables ==
+C     msgBuf       :: I/O Buffer
+C     nThreadsDone :: Used to count number of completed threads.
+C     I            :: Loop counter.
+      CHARACTER*(MAX_LEN_MBUF) msgBuf
+      INTEGER nThreadsDone
+      INTEGER I
+#ifdef ALLOW_USE_MPI
+C     mpiRC        :: Error code reporting variable used with MPI.
+      INTEGER mpiRC
+#endif /* ALLOW_USE_MPI */
+
+      IF ( eeBootError ) THEN
+C--   Skip ended threads counting if earlier error was found
+        WRITE(msgBuf,'(2A)')
+     &   'EEDIE: earlier error in multi-proc/thread setting'
+        CALL PRINT_ERROR( msgBuf, 1 )
+        fatalError = .TRUE.
+
+      ELSE
+C--   Check that all the threads have ended
+C     No thread should reach this loop before all threads have set
+C     threadIsComplete to TRUE. If they do then either there is a bug
+C     in the code or the behaviour of the parallel compiler directives
+C     are not right for this code. In the latter case different
+C     directives may be available or the compiler itself may have a
+C     bug or you may need a different parallel compiler for main.F
+        nThreadsDone = 0
+        DO I = 1, nThreads
+         IF ( threadIsComplete(I) ) nThreadsDone = nThreadsDone+1
+        ENDDO
+        IF ( nThreadsDone .LT. nThreads ) THEN
+         WRITE(msgBuf,'(A,I5,A)')
+     &    'S/R EEDIE: Only',nThreadsDone,' threads have completed,'
+         CALL PRINT_ERROR( msgBuf, 1 )
+         WRITE(msgBuf,'(A,I5,A)')
+     &    'S/R EEDIE:',nThreads,' are expected for this config !'
+         CALL PRINT_ERROR( msgBuf, 1 )
+         eeEndError = .TRUE.
+         fatalError = .TRUE.
+        ENDIF
+
+C--   end if/else eeBootError
+      ENDIF
+
+#ifdef USE_LIBHPM
+      CALL F_HPMTERMINATE(myProcId)
+#endif
+
+C--   Flush IO-unit before MPI termination
+      CALL MDS_FLUSH( errorMessageUnit, 1 )
+c#ifdef ALLOW_USE_MPI
+      CALL MDS_FLUSH( standardMessageUnit, 1 )
+c#endif /* ALLOW_USE_MPI */
+
+#ifdef ALLOW_USE_MPI
+C- Note: since MPI_INIT is always called, better to also always terminate MPI
+C        (even if usingMPI=F) --> comment out test on usingMPI
+c     IF ( usingMPI ) THEN
+
+C--   MPI style multiple-process termination
+C--   ======================================
+#if (defined COMPONENT_MODULE) || (defined ALLOW_CPL_ISSM)
+       IF ( useCoupler) CALL MPI_BARRIER( MPI_COMM_WORLD, mpiRC )
+#endif
+#ifdef ALLOW_OASIS
+       IF ( useOASIS ) CALL OASIS_FINALIZE
+#endif
+       CALL MPI_FINALIZE  ( mpiRC )
+       IF ( mpiRC .NE. MPI_SUCCESS ) THEN
+        eeEndError = .TRUE.
+        fatalError = .TRUE.
+        WRITE(msgBuf,'(A,I5)')
+     &       'S/R FIN_PROCS: MPI_FINALIZE return code',
+     &       mpiRC
+        CALL PRINT_ERROR( msgBuf, 1 )
+       ENDIF
+
+c     ENDIF
+#endif /* ALLOW_USE_MPI */
+
+      RETURN
+      END
Index: /issm/trunk-jpl/test/MITgcm/code_remesh/packages.conf
===================================================================
--- /issm/trunk-jpl/test/MITgcm/code_remesh/packages.conf	(revision 27556)
+++ /issm/trunk-jpl/test/MITgcm/code_remesh/packages.conf	(revision 27556)
@@ -0,0 +1,6 @@
+#-- list of packages (or group of packages) to compile for this experiment:
+gfd
+obcs
+shelfice
+diagnostics
+timeave
Index: /issm/trunk-jpl/test/MITgcm/input_remesh/data
===================================================================
--- /issm/trunk-jpl/test/MITgcm/input_remesh/data	(revision 27556)
+++ /issm/trunk-jpl/test/MITgcm/input_remesh/data	(revision 27556)
@@ -0,0 +1,77 @@
+# ====================
+# | Model parameters |
+# ====================
+#
+# Continuous equation parameters
+ &PARM01
+ Tref = 30*-1.9,
+ Sref = 30*34.4,
+ viscAz=1.E-3,
+ viscAh=600.0,
+ no_slip_sides=.FALSE.,
+ no_slip_bottom=.FALSE.,
+ diffKhT=100.0,
+ diffKzT=5.E-5,
+ diffKhS=100.0,
+ diffKzS=5.E-5,
+ bottomDragQuadratic=2.5E-3,
+ eosType='JMD95Z', 
+ HeatCapacity_cp = 3974.0,
+ rhoConst=1030.,
+ rhoNil=1030.,
+ gravity=9.81,
+ convertFW2Salt = 33.4,
+ rigidLid=.FALSE.,
+ implicitFreeSurface=.TRUE.,
+ exactConserv=.TRUE.,
+ hFacMin=0.05,
+ nonHydrostatic=.FALSE.,
+ globalfiles = .TRUE.,
+ useSingleCpuIO = .TRUE.,
+ vectorInvariantMomentum = .TRUE.,
+ selectImplicitDrag = 2,  
+ nonlinfreesurf = 4, 
+ &
+
+# Elliptic solver parameters
+ &PARM02
+ cg2dMaxIters=1000,
+ cg2dTargetResidual=1.E-13,
+ cg3dMaxIters=400,
+ cg3dTargetResidual=1.E-13,
+ &
+
+# Time stepping parameters
+ &PARM03
+ niter0=0,
+ endTime=2592000.,
+ deltaT=1200.0,
+ abEps=0.1,
+ cAdjFreq = 1.,
+ pChkptFreq=2592000.,
+ chkptFreq=0.,
+ dumpFreq=0.,
+ taveFreq=2592000.,
+ monitorFreq=1200.,
+ monitorSelect=2,
+ &
+
+# Gridding parameters
+ &PARM04
+ usingSphericalPolarGrid=.TRUE.,
+ xgOrigin = 0.0,
+ ygOrigin = -80.0,
+ delX = 20*0.25,
+ delY = 40*0.05,
+ delZ = 30*30.0,
+ &
+
+# Input datasets
+ &PARM05
+ bathyFile       = 'bathymetry.bin',
+ hydrogSaltFile  = 'Salt.bin',
+ hydrogThetaFile = 'Theta.bin',
+ uVelInitFile    = 'Uvel.bin',
+ vVelInitFile    = 'Vvel.bin',
+ pSurfInitFile   = 'Etan.bin',
+ &
Index: /issm/trunk-jpl/test/MITgcm/input_remesh/data.diagnostics
===================================================================
--- /issm/trunk-jpl/test/MITgcm/input_remesh/data.diagnostics	(revision 27556)
+++ /issm/trunk-jpl/test/MITgcm/input_remesh/data.diagnostics	(revision 27556)
@@ -0,0 +1,49 @@
+# Diagnostic Package Choices
+#-----------------
+# for each output-stream:
+#  filename(n) : prefix of the output file name (only 8.c long) for outp.stream n
+#  frequency(n):< 0 : write snap-shot output every |frequency| seconds
+#               > 0 : write time-average output every frequency seconds
+#  timePhase(n)     : write at time = timePhase + multiple of |frequency|
+#  averagingFreq(n) : frequency (in s) for periodic averaging interval
+#  averagingPhase(n): phase     (in s) for periodic averaging interval
+#  repeatCycle(n)   : number of averaging intervals in 1 cycle
+#  levels(:,n) : list of levels to write to file (Notes: declared as REAL)
+#                 when this entry is missing, select all common levels of this list
+#  fields(:,n) : list of diagnostics fields (8.c) (see "available_diagnostics.log"
+#                 file for the list of all available diag. in this particular config)
+#-----------------
+ &DIAGNOSTICS_LIST
+# diag_mnc     = .FALSE.,
+  dumpAtLast   = .FALSE.,
+  fields(1:4,1) = 'ETAN    ','SHIfwFlx',
+                   'SHI_mass','SHIRshel'
+#                  'surForcT','surForcS','TFLUX   ','SFLUX   ','oceFreez',
+#                  'TRELAX  ','SRELAX  ',
+#  fields(1,1)='ETAN'
+   filename(1) = 'surfDiag',
+# frequency(1) =  2592000.,
+# frequency(1) =  86400.,
+  frequency(1) =  86400.,
+ &
+
+#--------------------
+# Parameter for Diagnostics of per level statistics:
+#--------------------
+#  diagSt_mnc (logical): write stat-diags to NetCDF files (default=diag_mnc)
+#  diagSt_regMaskFile : file containing the region-mask to read-in
+#  nSetRegMskFile   : number of region-mask sets within the region-mask file
+#  set_regMask(i)   : region-mask set-index that identifies the region "i"
+#  val_regMask(i)   : region "i" identifier value in the region mask
+#--for each output-stream:
+#  stat_fName(n) : prefix of the output file name (max 80c long) for outp.stream n
+#  stat_freq(n):< 0 : write snap-shot output every |stat_freq| seconds
+#               > 0 : write time-average output every stat_freq seconds
+#  stat_phase(n)    : write at time = stat_phase + multiple of |stat_freq|
+#  stat_region(:,n) : list of "regions" (default: 1 region only=global)
+#  stat_fields(:,n) : list of selected diagnostics fields (8.c) in outp.stream n
+#                (see "available_diagnostics.log" file for the full list of diags)
+#--------------------
+ &DIAG_STATIS_PARMS
+ &
+
Index: /issm/trunk-jpl/test/MITgcm/input_remesh/data.obcs
===================================================================
--- /issm/trunk-jpl/test/MITgcm/input_remesh/data.obcs	(revision 27556)
+++ /issm/trunk-jpl/test/MITgcm/input_remesh/data.obcs	(revision 27556)
@@ -0,0 +1,18 @@
+# ***************
+# Open boundaries
+# ***************
+ &OBCS_PARM01
+ OB_Iwest = 40*1,
+ OB_Ieast = 40*-1,
+#
+ useOBCSprescribe = .TRUE.,
+#
+ OBWsFile = 'OBs.bin',
+ OBWtFile = 'OBt.bin',
+ OBWuFile = 'OBu.bin',
+ OBWvFile = 'zeros.bin',
+ OBEsFile = 'OBs.bin',
+ OBEtFile = 'OBt.bin',
+ OBEuFile = 'OBu.bin',
+ OBEvFile = 'zeros.bin',
+ &
Index: /issm/trunk-jpl/test/MITgcm/input_remesh/data.pkg
===================================================================
--- /issm/trunk-jpl/test/MITgcm/input_remesh/data.pkg	(revision 27556)
+++ /issm/trunk-jpl/test/MITgcm/input_remesh/data.pkg	(revision 27556)
@@ -0,0 +1,5 @@
+# Packages
+ &PACKAGES
+ useShelfIce = .TRUE.,
+ useOBCS     = .TRUE.,
+ &
Index: /issm/trunk-jpl/test/MITgcm/input_remesh/data.shelfice
===================================================================
--- /issm/trunk-jpl/test/MITgcm/input_remesh/data.shelfice	(revision 27556)
+++ /issm/trunk-jpl/test/MITgcm/input_remesh/data.shelfice	(revision 27556)
@@ -0,0 +1,19 @@
+# ===================================
+# | Parameters for SHELFICE package |
+# ===================================
+ &SHELFICE_PARM01
+ SHELFICEconserve = .TRUE.,
+ SHELFICEboundaryLayer = .TRUE.,
+ SHELFICEtopoFile='icetopo.bin',
+ SHELFICEwriteState = .TRUE.,
+ SHELFICEMassStepping = .TRUE.,
+#--
+ SHELFICEremeshFrequency = 600.0,
+#- need to satisfy: splitThrs > 1 + mergeThrs / Sdz
+#       with: Sdz = min{ delR(k+1)/delR(k) }_[k=1:Nr-1]
+ SHELFICEsplitThreshold = 1.12,
+ SHELFICEmergeThreshold = 0.10,
+#--
+ SHELFICEmassFile='shelficemass.bin',
+ SHELFICEMassDynTendFile='shelfice_dMdt.r02.bin',
+ &
Index: /issm/trunk-jpl/test/MITgcm/input_remesh/eedata
===================================================================
--- /issm/trunk-jpl/test/MITgcm/input_remesh/eedata	(revision 27556)
+++ /issm/trunk-jpl/test/MITgcm/input_remesh/eedata	(revision 27556)
@@ -0,0 +1,10 @@
+# Example "eedata" file
+# Lines beginning "#" are comments
+# nTx - No. threads per process in X
+# nTy - No. threads per process in Y
+ &EEPARMS
+ useCoupler=.TRUE.,
+ &
+# Note: Some systems use & as the
+# namelist terminator. Other systems
+# use a / character (as shown here).
Index: /issm/trunk-jpl/test/MITgcm/input_remesh/eedata_uncoupled
===================================================================
--- /issm/trunk-jpl/test/MITgcm/input_remesh/eedata_uncoupled	(revision 27556)
+++ /issm/trunk-jpl/test/MITgcm/input_remesh/eedata_uncoupled	(revision 27556)
@@ -0,0 +1,10 @@
+# Example "eedata" file
+# Lines beginning "#" are comments
+# nTx - No. threads per process in X
+# nTy - No. threads per process in Y
+ &EEPARMS
+ useCoupler=.FALSE.,
+ &
+# Note: Some systems use & as the
+# namelist terminator. Other systems
+# use a / character (as shown here).
