Changeset 25300 for issm/trunk-jpl/src/m/classes/clusters/pfe.m

Timestamp: 07/24/20 14:31:10 (5 years ago)
File: 1 edited
issm/trunk-jpl/src/m/classes/clusters/pfe.m
Diff r25270 → r25300 (file shown at r25300):

classdef pfe
    properties (SetAccess=public)
    % {{{
        name           = 'pfe'
        login          = '';
        modules        = {'comp-intel/2016.2.181' 'mpi-sgi/mpt'};
        numnodes       = 20;
        cpuspernode    = 8;
        port           = 1025;
        queue          = 'long';
        time           = 12*60;
        processor      = 'ivy';
        srcpath        = '';
        codepath       = '';
        executionpath  = '';
        grouplist      = 's1690';
        interactive    = 0;
        bbftp          = 0;
        numstreams     = 8;
        hyperthreading = 0;
    end
    %}}}
    methods
        function cluster=pfe(varargin) % {{{

            %initialize cluster using default settings if provided
            if (exist('pfe_settings')==2), pfe_settings; end

            %use provided options to change fields
            cluster=AssignObjectFields(pairoptions(varargin{:}),cluster);
        end
        %}}}
        function disp(cluster) % {{{
            %display the object
            disp(sprintf('class ''%s'' object ''%s'' = ',class(cluster),inputname(1)));
            disp(sprintf('    name: %s',cluster.name));
            disp(sprintf('    login: %s',cluster.login));
            modules=''; for i=1:length(cluster.modules), modules=[modules cluster.modules{i} ',']; end; modules=modules(1:end-1);
            disp(sprintf('    modules: %s',modules));
            disp(sprintf('    port: %i',cluster.port));
            disp(sprintf('    numnodes: %i',cluster.numnodes));
            disp(sprintf('    cpuspernode: %i',cluster.cpuspernode));
            disp(sprintf('    np: %i',cluster.cpuspernode*cluster.numnodes));
            disp(sprintf('    queue: %s',cluster.queue));
            disp(sprintf('    time: %i',cluster.time));
            disp(sprintf('    processor: %s',cluster.processor));
            disp(sprintf('    codepath: %s ($ISSM_DIR on pfe)',cluster.codepath));
            disp(sprintf('    executionpath: %s (directory containing issm.exe on pfe)',cluster.executionpath));
            disp(sprintf('    grouplist: %s',cluster.grouplist));
            disp(sprintf('    interactive: %i',cluster.interactive));
            disp(sprintf('    hyperthreading: %i',cluster.hyperthreading));
        end
        %}}}
        function numprocs=np(cluster) % {{{
            %compute number of processors
            numprocs=cluster.numnodes*cluster.cpuspernode;
        end
        %}}}
        function md = checkconsistency(cluster,md,solution,analyses) % {{{

            available_queues={'long','normal','debug','devel','alphatst@pbspl233'};
            queue_requirements_time=[5*24*60 8*60 2*60 2*60 24*60];
            queue_requirements_np=[2048 2048 150 150 2048];

            QueueRequirements(available_queues,queue_requirements_time,queue_requirements_np,cluster.queue,cluster.np,cluster.time)

            %now, check cluster.cpuspernode according to processor type
            if strcmpi(cluster.processor,'wes'),
                if cluster.hyperthreading,
                    if ((cluster.cpuspernode>24) | (cluster.cpuspernode<1)),
                        md = checkmessage(md,'cpuspernode should be between 1 and 24 for ''wes'' processors in hyperthreading mode');
                    end
                else
                    if ((cluster.cpuspernode>12) | (cluster.cpuspernode<1)),
                        md = checkmessage(md,'cpuspernode should be between 1 and 12 for ''wes'' processors');
                    end
                end
            elseif strcmpi(cluster.processor,'ivy'),
                if cluster.hyperthreading,
                    if ((cluster.cpuspernode>40) | (cluster.cpuspernode<1)),
                        md = checkmessage(md,'cpuspernode should be between 1 and 40 for ''ivy'' processors in hyperthreading mode');
                    end
                else
                    if ((cluster.cpuspernode>20) | (cluster.cpuspernode<1)),
                        md = checkmessage(md,'cpuspernode should be between 1 and 20 for ''ivy'' processors');
                    end
                end
            elseif strcmpi(cluster.processor,'bro'),
                if cluster.hyperthreading,
                    if ((cluster.cpuspernode>56) | (cluster.cpuspernode<1)),
                        md = checkmessage(md,'cpuspernode should be between 1 and 56 for ''bro'' processors in hyperthreading mode');
                    end
                else
                    if ((cluster.cpuspernode>28) | (cluster.cpuspernode<1)),
                        md = checkmessage(md,'cpuspernode should be between 1 and 28 for ''bro'' processors');
                    end
                end
            elseif strcmpi(cluster.processor,'has'),
                if cluster.hyperthreading,
                    if ((cluster.cpuspernode>48) | (cluster.cpuspernode<1)),
                        md = checkmessage(md,'cpuspernode should be between 1 and 48 for ''has'' processors in hyperthreading mode');
                    end
                else
                    if ((cluster.cpuspernode>24) | (cluster.cpuspernode<1)),
                        md = checkmessage(md,'cpuspernode should be between 1 and 24 for ''has'' processors');
                    end
                end
            elseif strcmpi(cluster.processor,'san'),
                if cluster.hyperthreading,
                    if ((cluster.cpuspernode>32) | (cluster.cpuspernode<1)),
                        md = checkmessage(md,'cpuspernode should be between 1 and 32 for ''san'' processors in hyperthreading mode');
                    end
                else
                    if ((cluster.cpuspernode>16) | (cluster.cpuspernode<1)),
                        md = checkmessage(md,'cpuspernode should be between 1 and 16 for ''san'' processors');
                    end
                end
            else
                md = checkmessage(md,'unknown processor type, should be ''wes'', ''ivy'', ''bro'', ''has'' or ''san''');
            end

            %Miscellaneous
            if isempty(cluster.login), md = checkmessage(md,'login empty'); end
            if isempty(cluster.srcpath), md = checkmessage(md,'srcpath empty'); end
            if isempty(cluster.codepath), md = checkmessage(md,'codepath empty'); end
            if isempty(cluster.executionpath), md = checkmessage(md,'executionpath empty'); end
            if isempty(cluster.grouplist), md = checkmessage(md,'grouplist empty'); end

        end
        %}}}
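        % For reference, the limits that QueueRequirements enforces above work
        % out to the following (times converted to minutes; np is the maximum
        % processor count). This table is derived directly from the three
        % arrays in checkconsistency, not from PBS documentation:
        %
        %   queue               max time (min)   max np
        %   long                7200 (5 days)    2048
        %   normal              480              2048
        %   debug               120              150
        %   devel               120              150
        %   alphatst@pbspl233   1440             2048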
        function BuildQueueScript(cluster,dirname,modelname,solution,io_gather,isvalgrind,isgprof,isdakota,isoceancoupling) % {{{

            if(isgprof), disp('gprof not supported by cluster, ignoring...'); end

            executable='issm.exe';
            if isdakota,
                version=IssmConfig('_DAKOTA_VERSION_'); version=str2num(version(1:3));
                if (version>=6),
                    executable='issm_dakota.exe';
                end
            end
            if isoceancoupling,
                executable='issm_ocean.exe';
            end

            %write queuing script
            fid=fopen([modelname '.queue'],'w');
            fprintf(fid,'#PBS -S /bin/bash\n');
            %fprintf(fid,'#PBS -N %s\n',modelname);
            fprintf(fid,'#PBS -l select=%i:ncpus=%i:model=%s\n',cluster.numnodes,cluster.cpuspernode,cluster.processor);
            fprintf(fid,'#PBS -l walltime=%i\n',cluster.time*60); %walltime is in seconds.
            fprintf(fid,'#PBS -q %s \n',cluster.queue);
            fprintf(fid,'#PBS -W group_list=%s\n',cluster.grouplist);
            fprintf(fid,'#PBS -m e\n');
            fprintf(fid,'#PBS -o %s.outlog \n',[cluster.executionpath '/' dirname '/' modelname]);
            fprintf(fid,'#PBS -e %s.errlog \n\n',[cluster.executionpath '/' dirname '/' modelname]);
            fprintf(fid,'. /usr/share/modules/init/bash\n\n');
            for i=1:numel(cluster.modules), fprintf(fid,['module load ' cluster.modules{i} '\n']); end
            fprintf(fid,'export PATH="$PATH:."\n\n');
            fprintf(fid,'export MPI_LAUNCH_TIMEOUT=520\n');
            fprintf(fid,'export MPI_GROUP_MAX=64\n\n');
            fprintf(fid,'export ISSM_DIR="%s"\n',cluster.srcpath); %FIXME
            fprintf(fid,'source $ISSM_DIR/etc/environment.sh\n'); %FIXME
            fprintf(fid,'export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$ISSM_DIR/externalpackages/petsc/install/lib"\n');
            fprintf(fid,'cd %s/%s/\n\n',cluster.executionpath,dirname);
            if ~isvalgrind,
                fprintf(fid,'/u/scicon/tools/bin/several_tries mpiexec -np %i /u/scicon/tools/bin/mbind.x -cs -n%i %s/%s %s %s %s\n',cluster.np,cluster.cpuspernode,cluster.codepath,executable,solution,[cluster.executionpath '/' dirname],modelname);
            else
                fprintf(fid,'mpiexec -np %i valgrind --leak-check=full %s/%s %s %s %s\n',cluster.np,cluster.codepath,executable,solution,[cluster.executionpath '/' dirname],modelname);
            end
            if ~io_gather, %concatenate the output files:
                fprintf(fid,'cat %s.outbin.* > %s.outbin',modelname,modelname);
            end
            fclose(fid);

            %in interactive mode, create a run file, and errlog and outlog file
            if cluster.interactive,
                fid=fopen([modelname '.run'],'w');
                if cluster.interactive==10,
                    fprintf(fid,'module unload mpi-mvapich2/1.4.1/gcc\n');
                    fprintf(fid,'mpiexec -np %i %s/%s %s %s %s\n',cluster.np,cluster.codepath,executable,solution,[pwd() '/run'],modelname);
                else
                    if ~isvalgrind,
                        fprintf(fid,'mpiexec -np %i %s/%s %s %s %s\n',cluster.np,cluster.codepath,executable,solution,[cluster.executionpath '/Interactive' num2str(cluster.interactive)],modelname);
                    else
                        fprintf(fid,'mpiexec -np %i valgrind --leak-check=full %s/%s %s %s %s\n',cluster.np,cluster.codepath,executable,solution,[cluster.executionpath '/Interactive' num2str(cluster.interactive)],modelname);
                    end
                end
                if ~io_gather, %concatenate the output files:
                    fprintf(fid,'cat %s.outbin.* > %s.outbin',modelname,modelname);
                end
                fclose(fid);
                fid=fopen([modelname '.errlog'],'w');
                fclose(fid);
                fid=fopen([modelname '.outlog'],'w');
                fclose(fid);
            end
        end %}}}
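        % For illustration only: with the class defaults above (numnodes=20,
        % cpuspernode=8, processor='ivy', queue='long', time=12*60), the PBS
        % header that BuildQueueScript writes would come out roughly as
        % (walltime = 12*60 minutes * 60 = 43200 seconds):
        %
        %   #PBS -S /bin/bash
        %   #PBS -l select=20:ncpus=8:model=ivy
        %   #PBS -l walltime=43200
        %   #PBS -q long
        %   #PBS -W group_list=s1690
        %   #PBS -m e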
        function BuildQueueScriptMultipleModels(cluster,dirname,modelname,solution,dirnames,modelnames,nps) % {{{

            %some checks:
            if isempty(modelname), error('BuildQueueScriptMultipleModels error message: need a non empty model name!'); end

            %what is the executable being called?
            executable='issm_slr.exe';

            if ispc(), error('BuildQueueScriptMultipleModels not supported yet on Windows machines'); end;

            %write queuing script
            fid=fopen([modelname '.queue'],'w');

            fprintf(fid,'#PBS -S /bin/bash\n');
            fprintf(fid,'#PBS -N %s\n',modelname);
            fprintf(fid,'#PBS -l select=%i:ncpus=%i:model=%s\n',cluster.numnodes,cluster.cpuspernode,cluster.processor);
            fprintf(fid,'#PBS -l walltime=%i\n',cluster.time*60); %walltime is in seconds.
            fprintf(fid,'#PBS -q %s \n',cluster.queue);
            fprintf(fid,'#PBS -W group_list=%s\n',cluster.grouplist);
            fprintf(fid,'#PBS -m e\n');
            fprintf(fid,'#PBS -o %s.outlog \n',[cluster.executionpath '/' dirname '/' modelname]);
            fprintf(fid,'#PBS -e %s.errlog \n\n',[cluster.executionpath '/' dirname '/' modelname]);
            fprintf(fid,'. /usr/share/modules/init/bash\n\n');
            for i=1:numel(cluster.modules), fprintf(fid,['module load ' cluster.modules{i} '\n']); end
            fprintf(fid,'export PATH="$PATH:."\n\n');
            fprintf(fid,'export MPI_GROUP_MAX=64\n\n');
            fprintf(fid,'export ISSM_DIR="%s/../"\n',cluster.codepath); %FIXME
            fprintf(fid,'source $ISSM_DIR/etc/environment.sh\n'); %FIXME
            fprintf(fid,'cd %s/%s/\n\n',cluster.executionpath,dirname);

            %number of cpus:
            mpistring=sprintf('mpiexec -np %i ',cluster.numnodes*cluster.cpuspernode);

            %executable:
            mpistring=[mpistring sprintf('%s/%s ',cluster.codepath,executable)];

            %solution name:
            mpistring=[mpistring sprintf('%s ',solution)];

            %execution directory and model name:
            mpistring=[mpistring sprintf('%s/%s %s',cluster.executionpath,dirname,modelname)];

            %inform main executable of how many icecaps, glaciers and earth models are being run:
            mpistring=[mpistring sprintf(' %i ',length(dirnames))];

            %icecaps, glaciers and earth location, names and number of processors associated:
            for i=1:length(dirnames),
                mpistring=[mpistring sprintf(' %s/%s %s %i ',cluster.executionpath,dirnames{i},modelnames{i},nps{i})];
            end

            %write this long string to disk:
            fprintf(fid,mpistring);
            fclose(fid);

            if cluster.interactive,
                fid=fopen([modelname '.run'],'w');

                %number of cpus:
                mpistring=sprintf('mpiexec -np %i ',cluster.numnodes*cluster.cpuspernode);

                %executable:
                mpistring=[mpistring sprintf('%s/%s ',cluster.codepath,executable)];

                %solution name:
                mpistring=[mpistring sprintf('%s ',solution)];

                %execution directory and model name:
                mpistring=[mpistring sprintf('%s/%s %s',cluster.executionpath,dirname,modelname)];

                %inform main executable of how many icecaps, glaciers and earth models are being run:
                mpistring=[mpistring sprintf(' %i ',length(dirnames))];

                %icecaps, glaciers and earth location, names and number of processors associated:
                for i=1:length(dirnames),
                    mpistring=[mpistring sprintf(' %s/Interactive%i %s %i ',cluster.executionpath,cluster.interactive,modelnames{i},nps{i})];
                end

                %write this long string to disk:
                fprintf(fid,mpistring);
                fclose(fid);

                fid=fopen([modelname '.errlog'],'w');
                fclose(fid);
                fid=fopen([modelname '.outlog'],'w');
                fclose(fid);
            end
        end
        %}}}
        function BuildKrigingQueueScript(cluster,modelname,solution,io_gather,isvalgrind,isgprof) % {{{

            if(isgprof), disp('gprof not supported by cluster, ignoring...'); end

            %write queuing script
            fid=fopen([modelname '.queue'],'w');
            fprintf(fid,'#PBS -S /bin/bash\n');
            %fprintf(fid,'#PBS -N %s\n',modelname);
            fprintf(fid,'#PBS -l select=%i:ncpus=%i:model=%s\n',cluster.numnodes,cluster.cpuspernode,cluster.processor);
            fprintf(fid,'#PBS -l walltime=%i\n',cluster.time*60); %walltime is in seconds.
            fprintf(fid,'#PBS -q %s \n',cluster.queue);
            fprintf(fid,'#PBS -W group_list=%s\n',cluster.grouplist);
            fprintf(fid,'#PBS -m e\n');
            fprintf(fid,'#PBS -o %s.outlog \n',modelname);
            fprintf(fid,'#PBS -e %s.errlog \n\n',modelname);
            fprintf(fid,'. /usr/share/modules/init/bash\n\n');
            for i=1:numel(cluster.modules), fprintf(fid,['module load ' cluster.modules{i} '\n']); end
            fprintf(fid,'export PATH="$PATH:."\n');
            fprintf(fid,'export ISSM_DIR="%s/../"\n',cluster.codepath); %FIXME
            fprintf(fid,'source $ISSM_DIR/etc/environment.sh\n'); %FIXME
            fprintf(fid,'export MPI_GROUP_MAX=64\n\n');
            fprintf(fid,'cd %s/%s/\n\n',cluster.executionpath,modelname);
            fprintf(fid,'mpiexec -np %i %s/kriging.exe %s %s\n',cluster.np,cluster.codepath,[cluster.executionpath '/' modelname],modelname); %FIXME
            if ~io_gather, %concatenate the output files:
                fprintf(fid,'cat %s.outbin.* > %s.outbin',modelname,modelname);
            end
            fclose(fid);

            %in interactive mode, create a run file, and errlog and outlog file
            if cluster.interactive,
                fid=fopen([modelname '.run'],'w');
                if ~isvalgrind,
                    fprintf(fid,'mpiexec -np %i %s/kriging.exe %s %s\n',cluster.np,cluster.codepath,[cluster.executionpath '/' modelname],modelname);
                else
                    fprintf(fid,'mpiexec -np %i valgrind --leak-check=full %s/kriging.exe %s %s\n',cluster.np,cluster.codepath,[cluster.executionpath '/' modelname],modelname);
                end
                if ~io_gather, %concatenate the output files:
                    fprintf(fid,'cat %s.outbin.* > %s.outbin',modelname,modelname);
                end
                fclose(fid);
                fid=fopen([modelname '.errlog'],'w');
                fclose(fid);
                fid=fopen([modelname '.outlog'],'w');
                fclose(fid);
            end
        end %}}}
        function BuildOceanQueueScript(np,cluster,modelname) % {{{

            %write queuing script
            fid=fopen([modelname '.queue'],'w');
            fprintf(fid,'#PBS -S /bin/bash\n');
            fprintf(fid,'#PBS -l select=1:ncpus=%i:model=%s\n',np,cluster.processor);
            fprintf(fid,'#PBS -l walltime=%i\n',cluster.time*60); %walltime is in seconds.
            fprintf(fid,'#PBS -q %s \n',cluster.queue);
            fprintf(fid,'#PBS -W group_list=%s\n',cluster.grouplist);
            fprintf(fid,'#PBS -m e\n');
            fprintf(fid,'#PBS -o %s.outlog \n',modelname);
            fprintf(fid,'#PBS -e %s.errlog \n\n',modelname);
            fprintf(fid,'. /usr/share/modules/init/bash\n\n');
            %for i=1:numel(cluster.modules), fprintf(fid,['module load ' cluster.modules{i} '\n']); end %FIXME: should use this!
            fprintf(fid,'module load comp-intel/2016.2.181\n');
            fprintf(fid,'module load netcdf/4.4.1.1_mpt\n');
            fprintf(fid,'module load mpi-sgi/mpt.2.15r20\n');
            fprintf(fid,'export PATH="$PATH:."\n');
            fprintf(fid,'export MPI_GROUP_MAX=64\n\n');
            fprintf(fid,['cd ' pwd() ' \n\n']);
            fprintf(fid,'mpiexec -np %i ./mitgcmuv\n',np);
            %if ~io_gather, %concatenate the output files:
            %	fprintf(fid,'cat %s.outbin.* > %s.outbin',modelname,modelname);
            %end
            fclose(fid);

            %in interactive mode, create a run file, and errlog and outlog file
            if cluster.interactive,
                fid=fopen([modelname '.run'],'w');
                fprintf(fid,'module load mpi-sgi/mpt.2.15r20\n');
                fprintf(fid,['mpiexec -np %i ./mitgcmuv \n'],np);
                fprintf(fid,['touch ' modelname '.lock\n']);
                fclose(fid);
                fid=fopen([modelname '.errlog'],'w');
                fclose(fid);
                fid=fopen([modelname '.outlog'],'w');
                fclose(fid);
            end

        end %}}}
        function UploadQueueJob(cluster,modelname,dirname,filelist)% {{{

            %compress the files into one zip.
            compressstring=['tar -zcf ' dirname '.tar.gz '];
            for i=1:numel(filelist),
                compressstring = [compressstring ' ' filelist{i}];
            end
            if cluster.interactive,
                compressstring = [compressstring ' ' modelname '.run ' modelname '.errlog ' modelname '.outlog '];
            end
            system(compressstring);

            disp('uploading input file and queueing script');
            if cluster.interactive==10,
                directory=[pwd() '/run/'];
            elseif cluster.interactive,
                directory=[cluster.executionpath '/Interactive' num2str(cluster.interactive)];
            else
                directory=cluster.executionpath;
            end

            if ~cluster.bbftp,
                issmscpout(cluster.name,directory,cluster.login,cluster.port,{[dirname '.tar.gz']});
            else
                issmbbftpout(cluster.name,directory,cluster.login,cluster.port,cluster.numstreams,{[dirname '.tar.gz']});
            end

        end
        %}}}
        function LaunchQueueJob(cluster,modelname,dirname,filelist,restart,batch)% {{{

            %launch command, to be executed via ssh
            if ~cluster.interactive,
                if ~isempty(restart)
                    launchcommand=['cd ' cluster.executionpath ' && cd ' dirname ' && qsub ' modelname '.queue '];
                else
                    launchcommand=['cd ' cluster.executionpath ' && rm -rf ./' dirname ' && mkdir ' dirname ...
                        ' && cd ' dirname ' && mv ../' dirname '.tar.gz ./ && tar -zxf ' dirname '.tar.gz && qsub ' modelname '.queue '];
                end
            else
                if ~isempty(restart)
                    launchcommand=['cd ' cluster.executionpath '/Interactive' num2str(cluster.interactive)];
                else
                    if cluster.interactive==10,
                        launchcommand=['cd ' pwd() '/run && tar -zxf ' dirname '.tar.gz'];
                    else
                        launchcommand=['cd ' cluster.executionpath '/Interactive' num2str(cluster.interactive) ' && tar -zxf ' dirname '.tar.gz'];
                    end
                end
            end

            disp('launching solution sequence on remote cluster');
            issmssh(cluster.name,cluster.login,cluster.port,launchcommand);
        end
        %}}}
        function Download(cluster,dirname,filelist)% {{{

            %copy files from cluster to current directory
            if cluster.interactive==10,
                directory=[pwd() '/run/'];
            elseif ~cluster.interactive,
                directory=[cluster.executionpath '/' dirname '/'];
            else
                directory=[cluster.executionpath '/Interactive' num2str(cluster.interactive) '/'];
            end

            if ~cluster.bbftp,
                issmscpin(cluster.name,cluster.login,cluster.port,directory,filelist);
            else
                issmbbftpin(cluster.name,cluster.login,cluster.port,cluster.numstreams,directory,filelist);
            end

        end %}}}
    end
end
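Example usage (not part of the changeset): the constructor accepts name/value pairs, which it applies via pairoptions and AssignObjectFields. A minimal sketch, where the login, paths, and node counts are placeholders chosen to satisfy checkconsistency (the 'devel' queue allows up to 120 minutes and 150 processors):

    cluster=pfe('login','jdoe','numnodes',2,'cpuspernode',20,'processor','ivy',...
        'queue','devel','time',2*60,'grouplist','s1690',...
        'srcpath','/home6/jdoe/issm/trunk-jpl',...
        'codepath','/home6/jdoe/issm/trunk-jpl/bin',...
        'executionpath','/nobackup/jdoe/execution');
    disp(cluster);        %review the configuration
    md.cluster=cluster;   %attach to an existing ISSM model md before solving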