source: issm/trunk/src/m/classes/clusters/localpfe.m@ 26744

Last change on this file since 26744 was 26744, checked in by Mathieu Morlighem, 3 years ago

merged trunk-jpl and trunk for revision 26742

File size: 9.6 KB
Line 
1%LOCALPFE cluster class definition
2%
3% Usage:
4% cluster=localpfe('name','astrid','np',3);
5% cluster=localpfe('name',oshostname(),'np',3,'login','username');
6
classdef localpfe
	properties (SetAccess=public)
		% {{{
		name          = '';
		login         = '';
		np            = 1;
		npocean       = 0;
		port          = 0;
		interactive   = 1;
		codepath      = [IssmConfig('ISSM_PREFIX') '/bin'];
		etcpath       = [issmdir() '/etc'];
		executionpath = [issmdir() '/execution'];
		valgrind      = [issmdir() '/externalpackages/valgrind/install/bin/valgrind'];
		valgrindlib   = [issmdir() '/externalpackages/valgrind/install/lib/libmpidebug.so'];
		valgrindsup   = [issmdir() '/externalpackages/valgrind/issm.supp'];
		verbose       = 1;
		shell         = '/bin/sh';
		%}}}
	end
	methods
		function cluster=localpfe(varargin) % {{{
		%LOCALPFE constructor: apply platform defaults, then user settings, then option pairs

			%Change the defaults if ispc and not ismingw (use Windows path separators)
			if ispc && ~ismingw,
				cluster.codepath      = [issmdir() '\bin'];
				cluster.etcpath       = [issmdir() '\etc'];
				cluster.executionpath = [issmdir() '\execution'];
			end

			%use provided options to change fields
			options=pairoptions(varargin{:});

			%get name
			cluster.name=getfieldvalue(options,'name',oshostname());

			%initialize cluster using user settings if provided (a script named
			%<name>_settings.m on the path, evaluated in this workspace)
			if (exist([cluster.name '_settings'])==2), eval([cluster.name '_settings']); end

			%OK get other fields
			cluster=AssignObjectFields(pairoptions(varargin{:}),cluster);
		end
		%}}}
		function disp(cluster) % {{{
		% display the object
			disp(sprintf('class ''%s'' object ''%s'' = ',class(cluster),inputname(1)));
			disp(sprintf('    name: %s',cluster.name));
			disp(sprintf('    login: %s',cluster.login));
			disp(sprintf('    np: %i',cluster.np));
			disp(sprintf('    npocean: %i',cluster.npocean));
			disp(sprintf('    port: %i',cluster.port));
			disp(sprintf('    interactive: %i',cluster.interactive));
			disp(sprintf('    codepath: %s',cluster.codepath));
			disp(sprintf('    etcpath: %s',cluster.etcpath));
			disp(sprintf('    executionpath: %s',cluster.executionpath));
			disp(sprintf('    valgrind: %s',cluster.valgrind));
			disp(sprintf('    valgrindlib: %s',cluster.valgrindlib));
			disp(sprintf('    valgrindsup: %s',cluster.valgrindsup));
			%BUGFIX: verbose is numeric (default 1); was printed with %s which
			%renders the number as a character code
			disp(sprintf('    verbose: %i',cluster.verbose));
			disp(sprintf('    shell: %s',cluster.shell));
		end
		%}}}
		function md = checkconsistency(cluster,md,solution,analyses) % {{{
		%CHECKCONSISTENCY validate cluster fields before a solve; appends messages to md
			if cluster.np<1
				md = checkmessage(md,['number of processors should be at least 1']);
			end
			if isnan(cluster.np),
				md = checkmessage(md,'number of processors should not be NaN!');
			end
		end
		%}}}
		function BuildQueueScript(cluster,dirname,modelname,solution,io_gather,isvalgrind,isgprof,isdakota,isoceancoupling) % {{{
		%BUILDQUEUESCRIPT write <modelname>.queue launching issm.exe through mpiexec

			%write queuing script
			%what is the executable being called?
			executable='issm.exe';
			if isdakota,
				version=IssmConfig('_DAKOTA_VERSION_'); version=str2num(version(1:3));
				if (version>=6),
					executable='issm_dakota.exe';
				end
			end

			fid=fopen([modelname '.queue'],'w');
			fprintf(fid,'#!%s\n',cluster.shell);
			fprintf(fid,'mpiexec -np %i %s/%s %s %s %s \n',cluster.np,cluster.codepath,executable,solution,cluster.executionpath,modelname);
			fclose(fid);

			%in interactive mode, create a run file, and errlog and outlog file
			if cluster.interactive,
				fid=fopen([modelname '.errlog'],'w'); fclose(fid);
				fid=fopen([modelname '.outlog'],'w'); fclose(fid);
			end
		end
		%}}}
		function BuildQueueScriptMultipleModels(cluster,dirname,modelname,solution,dirnames,modelnames,nps) % {{{
		%BUILDQUEUESCRIPTMULTIPLEMODELS write <modelname>.queue launching issm_slc.exe
		%for a coupled run over several icecap/glacier/earth models

			%some checks:
			if isempty(modelname), error('BuildQueueScriptMultipleModels error message: need a non empty model name!');end

			%what is the executable being called?
			executable='issm_slc.exe';

			if ispc && ~ismingw, error('BuildQueueScriptMultipleModels not supported yet on windows machines');end;

			%write queuing script
			fid=fopen([modelname '.queue'],'w');

			fprintf(fid,'#!%s\n',cluster.shell);

			%number of cpus:
			mpistring=sprintf('mpiexec -np %i ',cluster.np);

			%executable:
			mpistring=[mpistring sprintf('%s/%s ',cluster.codepath,executable)];

			%solution name:
			mpistring=[mpistring sprintf('%s ',solution)];

			%execution directory and model name:
			mpistring=[mpistring sprintf('%s/%s %s',cluster.executionpath,dirname,modelname)];

			%inform main executable of how many icecaps, glaciers and earth models are being run:
			mpistring=[mpistring sprintf(' %i ',length(dirnames))];

			%icecaps, glaciers and earth location, names and number of processors associated:
			for i=1:length(dirnames),
				mpistring=[mpistring sprintf(' %s/%s %s %i ',cluster.executionpath,dirnames{i},modelnames{i},nps{i})];
			end

			%log files:
			if ~cluster.interactive,
				mpistring=[mpistring sprintf('2> %s.errlog> %s.outlog',modelname,modelname)];
			end

			%write this long string to disk:
			%BUGFIX: use an explicit %s conversion so any literal '%' in paths or
			%model names is not misinterpreted as a format specifier
			fprintf(fid,'%s',mpistring);
			fclose(fid);

			%in interactive mode, create a run file, and errlog and outlog file
			if cluster.interactive,
				fid=fopen([modelname '.errlog'],'w'); fclose(fid);
				fid=fopen([modelname '.outlog'],'w'); fclose(fid);
			end
		end
		%}}}
		function BuildQueueScriptIceOcean(cluster,dirname,modelname,solution,io_gather,isvalgrind,isgprof,isdakota) % {{{
		%BUILDQUEUESCRIPTICEOCEAN write <modelname>.queue launching issm_ocean.exe
		%coupled with mitgcmuv through an MPMD mpiexec line

			%write queuing script
			%what is the executable being called?
			executable='issm_ocean.exe';

			fid=fopen([modelname '.queue'],'w');
			fprintf(fid,'#!%s\n',cluster.shell);
			fprintf(fid,'mpiexec -np %i %s/%s %s %s %s : -np %i ./mitgcmuv\n',cluster.np,cluster.codepath,executable,solution,cluster.executionpath,modelname,cluster.npocean);
			fclose(fid);

			%in interactive mode, create a run file, and errlog and outlog file
			if cluster.interactive,
				fid=fopen([modelname '.errlog'],'w'); fclose(fid);
				fid=fopen([modelname '.outlog'],'w'); fclose(fid);
			end
		end
		%}}}
		function BuildKrigingQueueScript(cluster,modelname,solution,io_gather,isvalgrind,isgprof,isdakota) % {{{
		%BUILDKRIGINGQUEUESCRIPT write a queue script (.queue on unix, .bat on
		%windows) launching kriging.exe, optionally under valgrind or gprof
		%NOTE(review): signature kept as in original (no isdakota) — see below

			%write queuing script
			if ~ispc,

				fid=fopen([modelname '.queue'],'w');
				fprintf(fid,'#!/bin/sh\n');
				if ~isvalgrind,
					if cluster.interactive
						fprintf(fid,'mpiexec -np %i %s/kriging.exe %s %s ',cluster.np,cluster.codepath,[cluster.executionpath '/' modelname],modelname);
					else
						fprintf(fid,'mpiexec -np %i %s/kriging.exe %s %s 2> %s.errlog >%s.outlog ',cluster.np,cluster.codepath,[cluster.executionpath '/' modelname],modelname,modelname,modelname);
					end
				elseif isgprof,
					fprintf(fid,'\n gprof %s/kriging.exe gmon.out > %s.performance',cluster.codepath,modelname);
				else
					%Add --gen-suppressions=all to get suppression lines
					fprintf(fid,'LD_PRELOAD=%s \\\n',cluster.valgrindlib);
					fprintf(fid,'mpiexec -np %i %s --leak-check=full --suppressions=%s %s/kriging.exe %s %s 2> %s.errlog >%s.outlog ',...
						cluster.np,cluster.valgrind,cluster.valgrindsup,cluster.codepath,[cluster.executionpath '/' modelname],modelname,modelname,modelname);
				end
				if ~io_gather, %concatenate the output files:
					fprintf(fid,'\ncat %s.outbin.* > %s.outbin',modelname,modelname);
				end
				fclose(fid);

			else % Windows

				fid=fopen([modelname '.bat'],'w');
				fprintf(fid,'@echo off\n');
				if cluster.interactive
					fprintf(fid,'"%s/issm.exe" %s "%s" %s ',cluster.codepath,solution,[cluster.executionpath '/' modelname],modelname);
				else
					%BUGFIX: the format string has 6 conversions but 7 arguments were
					%passed; MATLAB fprintf recycles the format for leftover arguments,
					%which appended garbage to the .bat file. Drop the extra modelname.
					fprintf(fid,'"%s/issm.exe" %s "%s" %s 2> %s.errlog >%s.outlog',...
						cluster.codepath,solution,[cluster.executionpath '/' modelname],modelname,modelname,modelname);
				end
				fclose(fid);
			end

			%in interactive mode, create a run file, and errlog and outlog file
			if cluster.interactive,
				fid=fopen([modelname '.errlog'],'w'); fclose(fid);
				fid=fopen([modelname '.outlog'],'w'); fclose(fid);
			end
		end
		%}}}
		function UploadQueueJob(cluster,modelname,dirname,filelist)% {{{
		%UPLOADQUEUEJOB tar the input files and scp the archive to the execution path
			if ~ispc || ismingw,

				%compress the files into one zip.
				compressstring=['tar -zcf ' dirname '.tar.gz '];
				for i=1:numel(filelist),
					compressstring = [compressstring ' ' filelist{i}];
				end
				if cluster.interactive,
					compressstring = [compressstring ' ' modelname '.errlog ' modelname '.outlog '];
				end
				system(compressstring);

				if cluster.verbose, disp('uploading input file and queueing script'); end
				issmscpout(cluster.name,cluster.executionpath,cluster.login,cluster.port,{[dirname '.tar.gz']});
			end
		end %}}}
		function LaunchQueueJob(cluster,modelname,dirname,filelist,restart,batch)% {{{
		%LAUNCHQUEUEJOB clean the execution directory and untar the uploaded archive

			%figure out what shell extension we will use:
			if isempty(strfind(cluster.shell,'csh')),
				shellext='sh';
			else
				shellext='csh';
			end

			if cluster.verbose, disp('launching solution sequence on remote cluster'); end

			launchcommand=['cd ' cluster.executionpath ' && rm -rf *.lock && rm -rf ADOLC* && tar -zxf ' dirname '.tar.gz && rm -rf *.tar.gz'];
			issmssh(cluster.name,cluster.login,cluster.port,launchcommand);

		end %}}}
		function LaunchQueueJobIceOcean(cluster,modelname,dirname,filelist,restart,batch)% {{{
		%LAUNCHQUEUEJOBICEOCEAN same as LaunchQueueJob but keeps ADOLC tapes

			%figure out what shell extension we will use:
			if isempty(strfind(cluster.shell,'csh')),
				shellext='sh';
			else
				shellext='csh';
			end

			if cluster.verbose, disp('launching solution sequence on remote cluster'); end

			launchcommand=['cd ' cluster.executionpath ' && rm -rf *.lock && tar -zxf ' dirname '.tar.gz && rm -rf *.tar.gz'];
			issmssh(cluster.name,cluster.login,cluster.port,launchcommand);

		end %}}}
		function Download(cluster,dirname,filelist)% {{{
		%DOWNLOAD copy result files from the execution path to the current directory
		%(no-op on native windows)

			if ispc && ~ismingw,
				%do nothing
				return;
			end

			%copy files from cluster to current directory
			issmscpin(cluster.name,cluster.login,cluster.port,cluster.executionpath,filelist);
		end %}}}
	end
end
Note: See TracBrowser for help on using the repository browser.