Changeset 22381


Timestamp: 01/29/18 17:58:28 (7 years ago)
Author: dmenemen
Message:

This allows MITgcm and ISSM to run in parallel, but does not yet do any
communication between the two.
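
The diff below achieves this by having the ocean code call MPI_COMM_SPLIT on
MPI_COMM_WORLD with a fixed color, so that MITgcm's ranks end up in their own
MPI_COMM_MODEL (ISSM is presumed to do the analogous split on its side). The
stand-alone Fortran sketch that follows only illustrates that pattern; the
program name, variable names, and the color value assumed for the ISSM side
are illustrative, not part of this changeset.

C     Sketch: split MPI_COMM_WORLD into per-model sub-communicators.
C     Each executable passes its own color; ranks sharing a color end
C     up in the same new communicator, and the key argument (here the
C     world rank) preserves the original rank ordering within it.
      PROGRAM SPLIT_SKETCH
      IMPLICIT NONE
      INCLUDE 'mpif.h'
      INTEGER myColor, myWorldRank, myLocalRank, myLocalSize
      INTEGER localComm, ierr
      CALL MPI_INIT( ierr )
      CALL MPI_COMM_RANK( MPI_COMM_WORLD, myWorldRank, ierr )
C     Color 1 mirrors the value hard-wired on the ocean side in the
C     diff below; the ice executable would pass a different value.
      myColor = 1
      CALL MPI_COMM_SPLIT( MPI_COMM_WORLD, myColor, myWorldRank,
     &                     localComm, ierr )
      CALL MPI_COMM_RANK( localComm, myLocalRank, ierr )
      CALL MPI_COMM_SIZE( localComm, myLocalSize, ierr )
      PRINT *, 'world rank', myWorldRank,
     &         'local rank', myLocalRank, 'of', myLocalSize
      CALL MPI_FINALIZE( ierr )
      END

For this to work both executables have to be started inside a single MPI job
(for example with an MPMD-style mpirun/mpiexec command line), so that all
ranks share MPI_COMM_WORLD before the split.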

File: 1 edited
  • issm/trunk-jpl/test/MITgcm/code/eeboot_minimal.F

Legend:
  ' '  Unmodified (context)
  '+'  Added in r22381
  '-'  Removed from r22377

--- eeboot_minimal.F (r22377)
+++ eeboot_minimal.F (r22381)

@@ -67 +67 @@
 #ifdef ALLOW_CPL_ISSM
       COMMON /CPL_MPI_ID/
-     &     mpiMyWid, local_ocean_leader, local_ice_leader
-      integer :: n, myid, numprocs, i, mpiMyWid, numprocsworld
-      integer :: mycomponent
-      integer :: icesize, oceansize
-      integer :: local_ocean_leader, local_ice_leader
-      integer, dimension(:), allocatable :: components
-      integer, dimension(:), allocatable :: icegroup, oceangroup
-
-      integer oceancommsize,icecommsize,rankzeros(2)
-      integer my_local_rank,my_size,my_local_size
-      integer modelcomm,toissmcomm
-      integer dummy1(1),dummy2(1)
-
+     &     mpiMyWid
+      integer :: mpiMyWid, numprocsworld
+      integer icecommsize,rankzeros(2)
+      integer my_local_rank,my_local_size
+      integer toissmcomm
 #endif /* ALLOW_CPL_ISSM */
 #if defined(ALLOW_NEST_PARENT) || defined(ALLOW_NEST_CHILD)
     
@@ -192 +184 @@
 
 #ifdef ALLOW_CPL_ISSM
-cdm    CALL SETDIR_OCEAN( )
       call MPI_COMM_RANK(MPI_COMM_WORLD, mpiMyWid, mpiRC)
       call MPI_COMM_SIZE(MPI_COMM_WORLD, numprocsworld, mpiRC)
-      print*,'choubi 2'
-
 
 c     ocean  comm size: 4 (normally should be recovered from list of
 c     arguments after mpirun:) */
       icecommsize=2
-      oceancommsize=8
 
 c     Build array of who is rank 0 of their own group:*/
     
@@ -208 +196 @@
 
 c     Split world into sub-communicators for each and every model:*/
-      call MPI_COMM_SPLIT(MPI_COMM_WORLD,1, MPIMYWID, modelcomm,mpiRC)
-      print*,'choubi 3'
-
-      call MPI_INTERCOMM_CREATE(modelcomm,0,MPI_COMM_WORLD,rankzeros(1),
-     &                          0,toissmcomm,mpiRC)
-      print*,'choubi 4a'
+      call MPI_COMM_SPLIT(MPI_COMM_WORLD,1,MPIMYWID,
+     &                    MPI_COMM_MODEL,mpiRC)
+
+      print*,'Oc My global rank',mpiMyWid
+      print*,'Oc My world size:',numprocsworld
+
+cdm      call MPI_INTERCOMM_CREATE(MPI_COMM_MODEL,0,MPI_COMM_WORLD,
+cdm     &                          rankzeros(1),0,toissmcomm,mpiRC)
 
-      call MPI_COMM_RANK(modelcomm, my_local_rank, mpiRC)
-      print*,'choubi 4b'
-      call MPI_COMM_SIZE(modelcomm, my_local_size, mpiRC)
+      call MPI_COMM_RANK(MPI_COMM_MODEL, my_local_rank, mpiRC)
+      call MPI_COMM_SIZE(MPI_COMM_MODEL, my_local_size, mpiRC)
 
       print*,'Oc My global rank',mpiMyWid,'MyLocal rank: ',my_local_rank
-      print*,'Oc My world size:',my_size,'My local size: ',my_local_size
-
-
-cdmC     allocate array components based on the number of processors
-cdm       allocate(components(numprocsworld))
-cdm
-cdmC     assign a component to the ocean code to organize processors into a group
-cdm       mycomponent=0
-cdm
-cdmC     gather components to all processors,
-cdmC     so each knows who is ice and who is ocean
-cdm       call MPI_allgather(mycomponent,1,MPI_INTEGER,components,1,
-cdm     &      MPI_INTEGER,MPI_COMM_WORLD,mpiRC)
-cdm       print*,'choubi 3'
-cdmC     form ice and ocean groups
-cdmC     count the processors in each groups
-cdm       icesize=0
-cdm       oceansize=0
-cdm       do i=1,numprocsworld
-cdm          if(components(i).eq.0) then
-cdm             oceansize=oceansize+1
-cdm          elseif(components(i).eq.1) then
-cdm             icesize=icesize+1
-cdm          else
-cdm             write(6,*) 'error: processor', i,
-cdm     &            'not associated with ice or ocean'
-cdm             stop
-cdm          endif
-cdm       enddo
-cdm
-cdmC     allocate group arrays
-cdm       allocate(icegroup(icesize))
-cdm       allocate(oceangroup(oceansize))
-cdmC     form the groups
-cdm       icesize=0
-cdm       oceansize=0
-cdm       do i=1,numprocsworld
-cdm          if(components(i).eq.0) then
-cdm             oceansize=oceansize+1
-cdm             oceangroup(oceansize)=i-1 ! ranks are from 0 to numprocsworld-1
-cdm          elseif(components(i).eq.1) then
-cdm             icesize=icesize+1
-cdm             icegroup(icesize)=i-1 ! ranks are from 0 to numprocsworld-1
-cdm          else
-cdm             write(6,*) 'error: processor', i,
-cdm     &            'not associated with ice or ocean'
-cdm          endif
-cdm       enddo
-cdm
-cdmC     pick the lowest rank in the group as the local group leader
-cdm       local_ocean_leader=oceangroup(1)
-cdm       local_ice_leader=icegroup(1)
-cdm       print*,'choubi 3'
-cdmC     form ocean communicator
-cdm       call MPI_comm_split(MPI_COMM_WORLD,mycomponent,mpiMyWid,
-cdm     &      MPI_COMM_MODEL,mpiRC)
-cdm       call MPI_comm_rank(MPI_COMM_MODEL,myid,mpiRC)
-cdm       call MPI_comm_size(MPI_COMM_MODEL,numprocs,mpiRC)
-      print*,'choubi 4'
+      print*,'Oc My world size:',numprocsworld,'My local size: ',
+     & my_local_size
+
 #endif /* ALLOW_CPL_ISSM */
 
 C---+----1----+----2----+----3----+----4----+----5----+----6----+----7-|--+----|
+
 C--    Get my process number
        CALL MPI_COMM_RANK( MPI_COMM_MODEL, mpiMyId, mpiRC )
     
@@ -290 +224 @@
         CALL PRINT_ERROR( msgBuf, myThid )
         GOTO 999
-      ENDIF
+       ENDIF
        myProcId = mpiMyId
 #ifdef USE_PDAF
     
@@ -312 +246 @@
          WRITE(fNam,'(A,A)') 'STDOUT.', myProcessStr(1:9)
 #endif
-      print*,'choubi 5'
         OPEN(standardMessageUnit,FILE=fNam,STATUS='unknown')
-      print*,'choubi 5a'
 #ifdef SINGLE_DISK_IO
        ELSE
     
@@ -320 +252 @@
         standardMessageUnit=errorMessageUnit
        ENDIF
-      print*,'choubi 6'
        IF( myProcId .EQ. 0 ) THEN
          WRITE(msgBuf,'(2A)') '** WARNING ** EEBOOT_MINIMAL: ',
     
@@ -331 +262 @@
      &                        SQUEEZE_RIGHT, myThid )
        ENDIF
-      print*,'choubi 7'
-#endif
-      print*,'choubi 8'
-       ENDIF
-      print*,'choubi 10'
+#endif
+       ENDIF
+
 #if defined(ALLOW_NEST_PARENT) || defined(ALLOW_NEST_CHILD)
       WRITE(standardMessageUnit,'(2(A,I6))')
     
@@ -366 +295 @@
 #endif /* ALLOW_USE_MPI */
       ENDIF
-      print*,'choubi 11'
+
 C--    Under MPI only allow same number of processes as proc grid size.
 C      Strictly we are allowed more procs but knowing there
     
@@ -382 +311 @@
        CALL F_HPMINIT(myProcId, "mitgcmuv")
 #endif
-      print*,'choubi 12'
+
  999  CONTINUE
       RETURN
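
The MPI_INTERCOMM_CREATE call that would actually connect the two models is
kept commented out (the cdm lines above), consistent with the changeset
message that no communication takes place yet. The stand-alone sketch below
shows, purely as an illustration, how such an inter-communicator can be built
once each side knows the other group's leader rank in MPI_COMM_WORLD; the
even/odd grouping, variable names, and tag value are assumptions made for the
sketch, not taken from this changeset.

C     Sketch: build an inter-communicator between two groups that were
C     split out of MPI_COMM_WORLD.  The world is divided by even/odd
C     rank purely for illustration; in the coupled setup the remote
C     leader would be the other model's rank-0 process in
C     MPI_COMM_WORLD (the rankzeros(:) idea in the diff above).
      PROGRAM INTERCOMM_SKETCH
      IMPLICIT NONE
      INCLUDE 'mpif.h'
      INTEGER myWorldRank, myColor, remoteLeader, remoteSize
      INTEGER localComm, interComm, ierr
      CALL MPI_INIT( ierr )
      CALL MPI_COMM_RANK( MPI_COMM_WORLD, myWorldRank, ierr )
C     Two illustrative groups: even world ranks and odd world ranks
      myColor = MOD( myWorldRank, 2 )
      CALL MPI_COMM_SPLIT( MPI_COMM_WORLD, myColor, myWorldRank,
     &                     localComm, ierr )
C     Remote leader: lowest world rank of the other group (1 or 0)
      remoteLeader = 1 - myColor
C     Local leader is local rank 0; the tag must match on both sides
      CALL MPI_INTERCOMM_CREATE( localComm, 0, MPI_COMM_WORLD,
     &                           remoteLeader, 0, interComm, ierr )
      CALL MPI_COMM_REMOTE_SIZE( interComm, remoteSize, ierr )
      PRINT *, 'rank', myWorldRank, 'sees', remoteSize,
     &         'ranks in the other group'
      CALL MPI_FINALIZE( ierr )
      END

Run with at least two ranks (e.g. mpirun -np 4) so that both illustrative
groups are non-empty; once an inter-communicator like toissmcomm exists,
ordinary MPI send/receive calls on it would provide the ocean-ice exchange
that this changeset deliberately leaves out.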