Changeset 22381
- Timestamp: 01/29/18 17:58:28
- File: 1 edited
issm/trunk-jpl/test/MITgcm/code/eeboot_minimal.F
--- issm/trunk-jpl/test/MITgcm/code/eeboot_minimal.F (r22377)
+++ issm/trunk-jpl/test/MITgcm/code/eeboot_minimal.F (r22381)
@@ -67,17 +67,9 @@
 #ifdef ALLOW_CPL_ISSM
       COMMON /CPL_MPI_ID/
-     &        mpiMyWid, local_ocean_leader, local_ice_leader
-      integer :: n, myid, numprocs, i, mpiMyWid, numprocsworld
-      integer :: mycomponent
-      integer :: icesize, oceansize
-      integer :: local_ocean_leader, local_ice_leader
-      integer, dimension(:), allocatable :: components
-      integer, dimension(:), allocatable :: icegroup, oceangroup
-
-      integer oceancommsize,icecommsize,rankzeros(2)
-      integer my_local_rank,my_size,my_local_size
-      integer modelcomm,toissmcomm
-      integer dummy1(1),dummy2(1)
-
+     &        mpiMyWid
+      integer :: mpiMyWid, numprocsworld
+      integer icecommsize,rankzeros(2)
+      integer my_local_rank,my_local_size
+      integer toissmcomm
 #endif /* ALLOW_CPL_ISSM */
 #if defined(ALLOW_NEST_PARENT) || defined(ALLOW_NEST_CHILD)
@@ -192,14 +184,10 @@
 
 #ifdef ALLOW_CPL_ISSM
-cdm   CALL SETDIR_OCEAN( )
       call MPI_COMM_RANK(MPI_COMM_WORLD, mpiMyWid, mpiRC)
       call MPI_COMM_SIZE(MPI_COMM_WORLD, numprocsworld, mpiRC)
-      print*,'choubi 2'
-
 
 c ocean comm size: 4 (normally should be recovered from list of
 c arguments after mpirun:) */
       icecommsize=2
-      oceancommsize=8
 
 c Build array of who is rank 0 of their own group:*/
@@ -208,78 +196,24 @@
 
 c Split world into sub-communicators for each and every model:*/
-      call MPI_COMM_SPLIT(MPI_COMM_WORLD,1, MPIMYWID, modelcomm,mpiRC)
-      print*,'choubi 3'
-
-      call MPI_INTERCOMM_CREATE(modelcomm,0,MPI_COMM_WORLD,rankzeros(1),
-     &     0,toissmcomm,mpiRC)
-      print*,'choubi 4a'
+      call MPI_COMM_SPLIT(MPI_COMM_WORLD,1,MPIMYWID,
+     &     MPI_COMM_MODEL,mpiRC)
+
+      print*,'Oc My global rank',mpiMyWid
+      print*,'Oc My world size:',numprocsworld
+
+cdm   call MPI_INTERCOMM_CREATE(MPI_COMM_MODEL,0,MPI_COMM_WORLD,
+cdm  &     rankzeros(1),0,toissmcomm,mpiRC)
 
-      call MPI_COMM_RANK(modelcomm, my_local_rank, mpiRC)
-      print*,'choubi 4b'
-      call MPI_COMM_SIZE(modelcomm, my_local_size, mpiRC)
+      call MPI_COMM_RANK(MPI_COMM_MODEL, my_local_rank, mpiRC)
+      call MPI_COMM_SIZE(MPI_COMM_MODEL, my_local_size, mpiRC)
 
       print*,'Oc My global rank',mpiMyWid,'MyLocal rank: ',my_local_rank
-      print*,'Oc My world size:',my_size,'My local size: ',my_local_size
-
-
-cdmC allocate array components based on the number of processors
-cdm   allocate(components(numprocsworld))
-cdm
-cdmC assign a component to the ocean code to organize processors into a group
-cdm   mycomponent=0
-cdm
-cdmC gather components to all processors,
-cdmC so each knows who is ice and who is ocean
-cdm   call MPI_allgather(mycomponent,1,MPI_INTEGER,components,1,
-cdm  &     MPI_INTEGER,MPI_COMM_WORLD,mpiRC)
-cdm   print*,'choubi 3'
-cdmC form ice and ocean groups
-cdmC count the processors in each groups
-cdm   icesize=0
-cdm   oceansize=0
-cdm   do i=1,numprocsworld
-cdm      if(components(i).eq.0) then
-cdm         oceansize=oceansize+1
-cdm      elseif(components(i).eq.1) then
-cdm         icesize=icesize+1
-cdm      else
-cdm         write(6,*) 'error: processor', i,
-cdm  &        'not associated with ice or ocean'
-cdm         stop
-cdm      endif
-cdm   enddo
-cdm
-cdmC allocate group arrays
-cdm   allocate(icegroup(icesize))
-cdm   allocate(oceangroup(oceansize))
-cdmC form the groups
-cdm   icesize=0
-cdm   oceansize=0
-cdm   do i=1,numprocsworld
-cdm      if(components(i).eq.0) then
-cdm         oceansize=oceansize+1
-cdm         oceangroup(oceansize)=i-1 ! ranks are from 0 to numprocsworld-1
-cdm      elseif(components(i).eq.1) then
-cdm         icesize=icesize+1
-cdm         icegroup(icesize)=i-1 ! ranks are from 0 to numprocsworld-1
-cdm      else
-cdm         write(6,*) 'error: processor', i,
-cdm  &        'not associated with ice or ocean'
-cdm      endif
-cdm   enddo
-cdm
-cdmC pick the lowest rank in the group as the local group leader
-cdm   local_ocean_leader=oceangroup(1)
-cdm   local_ice_leader=icegroup(1)
-cdm   print*,'choubi 3'
-cdmC form ocean communicator
-cdm   call MPI_comm_split(MPI_COMM_WORLD,mycomponent,mpiMyWid,
-cdm  &     MPI_COMM_MODEL,mpiRC)
-cdm   call MPI_comm_rank(MPI_COMM_MODEL,myid,mpiRC)
-cdm   call MPI_comm_size(MPI_COMM_MODEL,numprocs,mpiRC)
-      print*,'choubi 4'
+      print*,'Oc My world size:',numprocsworld,'My local size: ',
+     &  my_local_size
+
 #endif /* ALLOW_CPL_ISSM */
 
 C---+----1----+----2----+----3----+----4----+----5----+----6----+----7-|--+----|
+
 C-- Get my process number
       CALL MPI_COMM_RANK( MPI_COMM_MODEL, mpiMyId, mpiRC )
@@ -290,5 +224,5 @@
        CALL PRINT_ERROR( msgBuf, myThid )
        GOTO 999
-       ENDIF
+      ENDIF
       myProcId = mpiMyId
 #ifdef USE_PDAF
@@ -312,7 +246,5 @@
        WRITE(fNam,'(A,A)') 'STDOUT.', myProcessStr(1:9)
 #endif
-      print*,'choubi 5'
       OPEN(standardMessageUnit,FILE=fNam,STATUS='unknown')
-      print*,'choubi 5a'
 #ifdef SINGLE_DISK_IO
       ELSE
@@ -320,5 +252,4 @@
       standardMessageUnit=errorMessageUnit
       ENDIF
-      print*,'choubi 6'
       IF( myProcId .EQ. 0 ) THEN
       WRITE(msgBuf,'(2A)') '** WARNING ** EEBOOT_MINIMAL: ',
@@ -331,9 +262,7 @@
      &       SQUEEZE_RIGHT, myThid )
       ENDIF
-      print*,'choubi 7'
-#endif
-      print*,'choubi 8'
-      ENDIF
-      print*,'choubi 10'
+#endif
+      ENDIF
+
 #if defined(ALLOW_NEST_PARENT) || defined(ALLOW_NEST_CHILD)
       WRITE(standardMessageUnit,'(2(A,I6))')
@@ -366,5 +295,5 @@
 #endif /* ALLOW_USE_MPI */
       ENDIF
-      print*,'choubi 11'
+
 C--   Under MPI only allow same number of processes as proc grid size.
 C     Strictly we are allowed more procs but knowing there
@@ -382,5 +311,5 @@
       CALL F_HPMINIT(myProcId, "mitgcmuv")
 #endif
-      print*,'choubi 12'
+
  999  CONTINUE
       RETURN
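For context (the sketch below is not part of the changeset): the revised code drops the locally declared modelcomm in favour of MITgcm's MPI_COMM_MODEL, obtained by splitting MPI_COMM_WORLD with MPI_COMM_SPLIT, and leaves the MPI_INTERCOMM_CREATE link to the ISSM side commented out. The standalone program that follows illustrates the same split-by-color and inter-communicator pattern under assumed conventions: names such as mycolor, mycomm, intercomm, and the "first half of the ranks are ocean, the rest are ice" rule are illustrative only and do not appear in the changeset.

c     Standalone illustration (hypothetical names, not MITgcm code):
c     split MPI_COMM_WORLD by a color so each coupled component gets
c     its own communicator, then optionally connect the two groups
c     with an inter-communicator.
      program split_sketch
      implicit none
      include 'mpif.h'
      integer ierr, wrank, wsize, lrank, lsize
      integer mycolor, mycomm, intercomm, remote_leader

      call MPI_INIT(ierr)
      call MPI_COMM_RANK(MPI_COMM_WORLD, wrank, ierr)
      call MPI_COMM_SIZE(MPI_COMM_WORLD, wsize, ierr)

c     Assumed policy: first half of the world ranks act as "ocean"
c     (color 1), the rest as "ice" (color 2).
      mycolor = 1
      if (wrank .ge. wsize/2) mycolor = 2

c     Ranks sharing a color land in the same sub-communicator; the
c     key (wrank) keeps the world ordering inside it.
      call MPI_COMM_SPLIT(MPI_COMM_WORLD, mycolor, wrank, mycomm, ierr)
      call MPI_COMM_RANK(mycomm, lrank, ierr)
      call MPI_COMM_SIZE(mycomm, lsize, ierr)
      print *, 'world rank', wrank, 'color', mycolor,
     &         'local rank', lrank, 'of', lsize

c     Inter-communicator between the two groups: the local leader is
c     rank 0 of mycomm, the remote leader is the other group's rank 0
c     expressed in MPI_COMM_WORLD (needs at least 2 world ranks).
      if (wsize .ge. 2) then
         remote_leader = wsize/2
         if (mycolor .eq. 2) remote_leader = 0
         call MPI_INTERCOMM_CREATE(mycomm, 0, MPI_COMM_WORLD,
     &        remote_leader, 0, intercomm, ierr)
         call MPI_COMM_FREE(intercomm, ierr)
      endif

      call MPI_COMM_FREE(mycomm, ierr)
      call MPI_FINALIZE(ierr)
      end

With an MPI stack installed this builds and runs as, for example, mpifort split_sketch.F followed by mpirun -np 4 ./a.out. In the actual coupled run the color and the peer leader ranks come from how mpirun lays out the ISSM and MITgcm executables, not from a hard-coded rule like the one above.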