source: issm/oecreview/Archive/23390-24306/ISSM-24159-24160.diff

Last change on this file was 24307, checked in by Mathieu Morlighem, 5 years ago

NEW: adding Archive/23390-24306

File size: 6.9 KB
RevLine 
[24307]1Index: ../trunk-jpl/src/m/classes/clusters/sherlock.m
2===================================================================
3--- ../trunk-jpl/src/m/classes/clusters/sherlock.m (nonexistent)
4+++ ../trunk-jpl/src/m/classes/clusters/sherlock.m (revision 24160)
5@@ -0,0 +1,160 @@
6+%PFE class definition
7+%
8+% Usage:
9+% cluster=sherlock();
10+% cluster=sherlock('np',3);
11+% cluster=sherlock('np',3,'login','username');
12+
%SHERLOCK class definition (Stanford Sherlock cluster, SLURM scheduler)
%
%   Usage:
%      cluster=sherlock();
%      cluster=sherlock('np',3);
%      cluster=sherlock('np',3,'login','username');

classdef sherlock
	properties (SetAccess=public)
	% {{{
		name          = 'sherlock'
		login         = '';
		numnodes      = 1;
		cpuspernode   = 24;
		port          = 0;
		queue         = '';   %SLURM partition passed to '#SBATCH -p'; was referenced but never declared (fix)
		codepath      = '';
		executionpath = '';
		interactive   = 0;
		time          = 30;   %walltime, in minutes (converted to seconds in the queue scripts)
		memory        = 2;    %memory per cpu, in GB
	end
	%}}}
	methods
		function cluster=sherlock(varargin) % {{{
		%SHERLOCK constructor: apply user defaults then option pairs

			%initialize cluster using default settings if provided
			if (exist('sherlock_settings')==2), sherlock_settings; end

			%use provided options to change fields
			cluster=AssignObjectFields(pairoptions(varargin{:}),cluster);
		end
		%}}}
		function disp(cluster) % {{{
			% display the object
			disp(sprintf('class ''%s'' object ''%s'' = ',class(cluster),inputname(1)));
			disp(sprintf('    name: %s',cluster.name));
			disp(sprintf('    login: %s',cluster.login));
			disp(sprintf('    port: %i',cluster.port));
			disp(sprintf('    numnodes: %i',cluster.numnodes));
			disp(sprintf('    cpuspernode: %i',cluster.cpuspernode));
			disp(sprintf('    np: %i',cluster.cpuspernode*cluster.numnodes));
			disp(sprintf('    queue: %s',cluster.queue));
			disp(sprintf('    codepath: %s',cluster.codepath));
			disp(sprintf('    executionpath: %s',cluster.executionpath));
			disp(sprintf('    interactive: %i',cluster.interactive));
			disp(sprintf('    time: %i',cluster.time));
			disp(sprintf('    memory: %i',cluster.memory));
		end
		%}}}
		function numprocs=np(cluster) % {{{
			%compute total number of processors (nodes x cpus per node)
			numprocs=cluster.numnodes*cluster.cpuspernode;
		end
		%}}}
		function md = checkconsistency(cluster,md,solution,analyses) % {{{
		%CHECKCONSISTENCY: flag missing required fields before a run

			%Miscellaneous
			if isempty(cluster.login), md = checkmessage(md,'login empty'); end
			if isempty(cluster.codepath), md = checkmessage(md,'codepath empty'); end
			if isempty(cluster.executionpath), md = checkmessage(md,'executionpath empty'); end

		end
		%}}}
		function BuildKrigingQueueScript(cluster,dirname,modelname,solution,io_gather,isvalgrind,isgprof,isdakota,isoceancoupling) % {{{
		%Write a SLURM queue script ([modelname '.queue']) launching kriging.exe

			if(isvalgrind), disp('valgrind not supported by cluster, ignoring...'); end
			if(isgprof), disp('gprof not supported by cluster, ignoring...'); end

			%write queuing script
			fid=fopen([modelname '.queue'],'w');
			fprintf(fid,'#!/bin/bash\n');
			fprintf(fid,'#SBATCH --job-name=%s\n',modelname); %BUG FIX: was undefined variable 'mdelname'
			if ~isempty(cluster.queue), %only request a partition if one was provided
				fprintf(fid,'#SBATCH -p %s \n',cluster.queue);
			end
			fprintf(fid,'#SBATCH -N %i -n %i\n',cluster.numnodes,cluster.cpuspernode);
			fprintf(fid,'#SBATCH --time=%i\n',cluster.time*60); %walltime is in seconds.
			fprintf(fid,'#SBATCH --mem-per-cpu=%igb\n',cluster.memory);
			fprintf(fid,'#SBATCH -o %s.outlog \n',modelname);
			fprintf(fid,'#SBATCH -e %s.errlog \n\n',modelname);
			fprintf(fid,'export ISSM_DIR="%s/../"\n',cluster.codepath); %FIXME
			fprintf(fid,'source $ISSM_DIR/etc/environment.sh\n'); %FIXME
			fprintf(fid,'cd %s/%s\n\n',cluster.executionpath,dirname);
			fprintf(fid,'mpiexec -np %i %s/kriging.exe %s %s\n',cluster.np,cluster.codepath,[cluster.executionpath '/' modelname],modelname);
			if ~io_gather, %concatenate the output files:
				fprintf(fid,'cat %s.outbin.* > %s.outbin',modelname,modelname);
			end
			fclose(fid);
		end
		%}}}
		function BuildQueueScript(cluster,dirname,modelname,solution,io_gather,isvalgrind,isgprof,isdakota,isoceancoupling) % {{{
		%Write a SLURM queue script ([modelname '.queue']) launching issm.exe;
		%in interactive mode also create .run/.errlog/.outlog files

			if(isvalgrind), disp('valgrind not supported by cluster, ignoring...'); end
			if(isgprof), disp('gprof not supported by cluster, ignoring...'); end

			%write queuing script
			fid=fopen([modelname '.queue'],'w');
			fprintf(fid,'#!/bin/bash\n');
			fprintf(fid,'#SBATCH --job-name=%s\n',modelname);
			fprintf(fid,'#SBATCH -N %i -n %i\n',cluster.numnodes,cluster.cpuspernode);
			fprintf(fid,'#SBATCH --time=%i\n',cluster.time*60); %walltime is in seconds.
			fprintf(fid,'#SBATCH --mem-per-cpu=%igb\n',cluster.memory);
			fprintf(fid,'#SBATCH -o %s.outlog \n',modelname);
			fprintf(fid,'#SBATCH -e %s.errlog \n\n',modelname);
			fprintf(fid,'export ISSM_DIR="%s/../"\n',cluster.codepath); %FIXME
			fprintf(fid,'source $ISSM_DIR/etc/environment.sh\n'); %FIXME
			fprintf(fid,'cd %s/%s\n\n',cluster.executionpath,dirname);
			fprintf(fid,'mpiexec -n %i %s/issm.exe %s %s %s\n',cluster.np,cluster.codepath,solution,[cluster.executionpath '/' dirname],modelname);
			if ~io_gather, %concatenate the output files:
				fprintf(fid,'cat %s.outbin.* > %s.outbin',modelname,modelname);
			end
			fclose(fid);

			%in interactive mode, create a run file, and errlog and outlog file
			if cluster.interactive,
				fid=fopen([modelname '.run'],'w');
				fprintf(fid,'mpiexec -n %i %s/issm.exe %s %s %s\n',cluster.np,cluster.codepath,solution,[cluster.executionpath '/' dirname],modelname);
				if ~io_gather, %concatenate the output files:
					fprintf(fid,'cat %s.outbin.* > %s.outbin',modelname,modelname);
				end
				fclose(fid);
				fid=fopen([modelname '.errlog'],'w');
				fclose(fid);
				fid=fopen([modelname '.outlog'],'w');
				fclose(fid);
			end
		end %}}}
		function UploadQueueJob(cluster,modelname,dirname,filelist)% {{{
		%Tar the input files and scp them to the cluster execution path

			%compress the files into one zip.
			compressstring=['tar -zcf ' dirname '.tar.gz '];
			for i=1:numel(filelist),
				compressstring = [compressstring ' ' filelist{i}];
			end
			if cluster.interactive,
				compressstring = [compressstring ' ' modelname '.errlog ' modelname '.outlog '];
			end
			system(compressstring);

			disp('uploading input file and queueing script');
			issmscpout(cluster.name,cluster.executionpath,cluster.login,cluster.port,{[dirname '.tar.gz']});

		end %}}}
		function LaunchQueueJob(cluster,modelname,dirname,filelist,restart,batch)% {{{
		%Submit the queue script through ssh; on restart, reuse the existing directory

			disp('launching solution sequence on remote cluster');
			if ~isempty(restart)
				launchcommand=['cd ' cluster.executionpath ' && cd ' dirname ' && hostname && sbatch ' modelname '.queue '];
			else
				launchcommand=['cd ' cluster.executionpath ' && rm -rf ./' dirname ' && mkdir ' dirname ...
					' && cd ' dirname ' && mv ../' dirname '.tar.gz ./ && tar -zxf ' dirname '.tar.gz  && hostname && sbatch ' modelname '.queue '];
			end
			issmssh(cluster.name,cluster.login,cluster.port,launchcommand);
		end %}}}
		function Download(cluster,dirname,filelist)% {{{

			%copy files from cluster to current directory
			directory=[cluster.executionpath '/' dirname '/'];
			issmscpin(cluster.name,cluster.login,cluster.port,directory,filelist);

		end %}}}
	end
end
Note: See TracBrowser for help on using the repository browser.