source: issm/trunk-jpl/src/m/classes/clusters/fram.py@ 27262

Last change on this file since 27262 was 27262, checked in by jdquinn, 3 years ago

CHG: Custom cluster class for AWS ISSM solution server instance; typos; clean up

File size: 7.0 KB
RevLine 
[22203]1import subprocess
[26344]2
[22203]3import numpy as np
[26344]4
[22203]5from fielddisplay import fielddisplay
[26344]6try:
7 from fram_settings import fram_settings
8except ImportError:
9 print('You need fram_settings.py to proceed, check presence and sys.path')
10from helpers import *
[22203]11from pairoptions import pairoptions
[26344]12from IssmConfig import IssmConfig
[22203]13from issmscpin import issmscpin
14from issmscpout import issmscpout
[26344]15from issmssh import issmssh
[22203]16from QueueRequirements import QueueRequirements
[24213]17
18
class fram(object):
    """FRAM cluster class definition.

    This is a SLURM queue.
    The priorities are based on a point system, reservation when reaching
    20000 and earning 1 point per min.
    - Devel queue starts at 19990
    - Normal starts at 19940
    - Normal unpri starts at 19400

    Jobs can be:
    - normal (4 to 30 nodes, more if asked, 48h max walltime, 60Gb per nodes)
    - bigmem for big memory nodes (8 512Gb nodes and 2 6Tb nodes, shared nodes, 14days max walltime

    Usage:
        cluster = fram()
    """

    def __init__(self, *args):  # {{{
        # Default field values; may be overridden by fram_settings and/or
        # user-supplied pairoptions below
        self.name = 'fram'
        self.login = ''
        self.numnodes = 2
        self.cpuspernode = 20
        self.mem = 1.6  # memory per CPU, in GB
        self.queue = 'normal'
        self.time = 2 * 60  # walltime, in minutes
        self.codepath = ''
        self.executionpath = ''
        self.interactive = 0
        self.port = []
        self.accountname = ''
        self.profiling = 0
        # Use provided options to change fields
        options = pairoptions(*args)

        # Initialize cluster using user settings if provided
        self = fram_settings(self)

        # OK get other fields
        self = options.AssignObjectFields(self)
        # Total number of MPI tasks
        self.np = self.numnodes * self.cpuspernode
    # }}}

    def __repr__(self):  # {{{
        # Display the object
        s = "class fram object:"
        s = "%s\n%s" % (s, fielddisplay(self, 'name', 'name of the cluster'))
        s = "%s\n%s" % (s, fielddisplay(self, 'login', 'login'))
        s = "%s\n%s" % (s, fielddisplay(self, 'numnodes', 'number of nodes'))
        s = "%s\n%s" % (s, fielddisplay(self, 'cpuspernode', 'number of nodes per CPUs'))
        s = "%s\n%s" % (s, fielddisplay(self, 'mem', 'memory per CPU'))
        s = "%s\n%s" % (s, fielddisplay(self, 'queue', 'name of the queue (normal (D), short, singlenode, multinode, devel)'))
        s = "%s\n%s" % (s, fielddisplay(self, 'time', 'walltime requested in minutes'))
        s = "%s\n%s" % (s, fielddisplay(self, 'codepath', 'code path on the cluster'))
        s = "%s\n%s" % (s, fielddisplay(self, 'executionpath', 'execution path on the cluster'))
        s = "%s\n%s" % (s, fielddisplay(self, 'interactive', ''))
        s = "%s\n%s" % (s, fielddisplay(self, 'accountname', 'your cluster account'))
        s = "%s\n%s" % (s, fielddisplay(self, 'profiling', 'enable profiling if 1 default is 0'))
        return s
    # }}}

    def checkconsistency(self, md, solution, analyses):  # {{{
        """Check that cluster settings are consistent before launching a job.

        Appends messages to md via md.checkmessage for every problem found.
        """
        # Queue dictionary gives queue name as key and max walltime (min) and CPUs as var
        queuedict = {'normal': [2 * 24 * 60, 2048],
                     'devel': [4 * 60, 2048]}
        QueueRequirements(queuedict, self.queue, self.np, self.time)

        # Miscellaneous
        if not self.login:
            md = md.checkmessage('login empty')
        if not self.codepath:
            md = md.checkmessage('codepath empty')
        if not self.executionpath:
            md = md.checkmessage('executionpath empty')
        if self.interactive == 1:
            md = md.checkmessage('interactive mode not implemented')
        return self
    # }}}

    def BuildQueueScript(self, dirname, modelname, solution, io_gather, isvalgrind, isgprof, isdakota, isoceancoupling):  # {{{
        """Write the SLURM queuing script <modelname>.queue for this job.

        Selects the executable (issm.exe / issm_dakota.exe / issm_ocean.exe),
        then emits the #SBATCH directives and the module/mpirun commands.
        """
        executable = 'issm.exe'
        if isdakota:
            # NOTE(review): IssmConfig likely returns a tuple; [0:2] then
            # float() looks fragile — confirm against IssmConfig's return type
            version = IssmConfig('_DAKOTA_VERSION_')[0:2]
            version = float(version)
            if version >= 6:
                executable = 'issm_dakota.exe'
        if isoceancoupling:
            executable = 'issm_ocean.exe'
        # Write queuing script; SLURM truncates long job names, so keep 12 chars
        shortname = modelname[0:min(12, len(modelname))]
        fid = open(modelname + '.queue', 'w')

        fid.write('#!/bin/bash -l\n')
        fid.write('#SBATCH --job-name=%s \n' % shortname)
        fid.write('#SBATCH --partition %s \n' % self.queue)
        # BUGFIX: missing '\n' merged the next directive onto this line
        fid.write('#SBATCH --nodes=%i\n' % self.numnodes)
        # BUGFIX: was '--ntasks-per-nodes==' (bad option name, doubled '=')
        fid.write('#SBATCH --ntasks-per-node=%i \n' % self.cpuspernode)
        fid.write('#SBATCH --time=%s\n' % self.time)  #walltime is minutes
        fid.write('#SBATCH --mem-per-cpu=%iGB\n' % self.mem)  # mem is in GB
        # Only pin the total task count when np fills whole 16- and 20-core nodes
        if (np.mod(self.np, 16) + np.mod(self.np, 20)) == 0:
            # BUGFIX: was '--ntask' (invalid sbatch option)
            fid.write('#SBATCH --ntasks=%i\n' % self.np)
        fid.write('#SBATCH --account=%s\n' % self.accountname)
        # BUGFIX: stray space inside the log paths ('%s/%s /%s') broke them
        fid.write('#SBATCH --output %s/%s/%s.outlog \n' % (self.executionpath, dirname, modelname))
        fid.write('#SBATCH --error %s/%s/%s.errlog \n\n' % (self.executionpath, dirname, modelname))

        # BUGFIX: trailing space inside the quoted path removed
        fid.write('export ISSM_DIR="%s/../"\n' % self.codepath)
        fid.write('module restore system\n')
        # BUGFIX: was 'module load load Automake...' (doubled 'load')
        fid.write('module load Automake/1.15.1-GCCcore-6.3.0\n')
        fid.write('module load libtool/2.4.6-GCCcore-6.3.0\n')
        fid.write('module load CMake/3.9.1\n')
        fid.write('module load PETSc/3.8.0-intel-2017a-Python-2.7.13\n')
        fid.write('module load ParMETIS/4.0.3-intel-2017a\n')
        fid.write('cd %s/%s/ \n\n' % (self.executionpath, dirname))
        if self.profiling:
            fid.write('module load perf-report\n')
            fid.write('perf-report mpirun -np %i %s/%s %s %s/%s %s\n' % (self.np, self.codepath, executable, str(solution), self.executionpath, dirname, modelname))
        else:
            fid.write('mpirun -np %i %s/%s %s %s/%s %s\n' % (self.np, self.codepath, executable, str(solution), self.executionpath, dirname, modelname))
        fid.close()
    # }}}

    def UploadQueueJob(self, modelname, dirname, filelist):  # {{{
        """Tar the input files and copy the archive to the cluster."""
        # Compress the files into one zip
        compressstring = 'tar -zcf %s.tar.gz ' % dirname
        for file in filelist:
            compressstring += ' {}'.format(file)
        subprocess.call(compressstring, shell=True)

        print('uploading input file and queuing script')
        issmscpout(self.name, self.executionpath, self.login, self.port, [dirname + '.tar.gz'])

    # }}}
    def LaunchQueueJob(self, modelname, dirname, filelist, restart, batch):  # {{{
        """Submit the job on the remote cluster via sbatch.

        On restart the existing execution directory is reused; otherwise it is
        recreated and the uploaded tarball is unpacked first.
        """
        print('launching solution sequence on remote cluster')
        if not isempty(restart):
            launchcommand = 'cd %s && cd %s && sbatch %s.queue' % (self.executionpath, dirname, modelname)
        else:
            launchcommand = 'cd %s && rm -rf ./%s && mkdir %s && cd %s && mv ../%s.tar.gz ./ && tar -zxf %s.tar.gz && sbatch %s.queue' % (self.executionpath, dirname, dirname, dirname, dirname, dirname, modelname)
        issmssh(self.name, self.login, self.port, launchcommand)
    # }}}
    def Download(self, dirname, filelist):  # {{{
        """Copy result files from the cluster to the current directory."""
        directory = '%s/%s/' % (self.executionpath, dirname)
        issmscpin(self.name, self.login, self.port, directory, filelist)
    # }}}
Note: See TracBrowser for help on using the repository browser.