Ice Sheet System Model  4.18
Code documentation
IssmMpiSparseMat.h
Go to the documentation of this file.
1 
8 #ifndef _ISSM_MPI_SPARSE_MAT_H_
9 #define _ISSM_MPI_SPARSE_MAT_H_
10 
11 /*Headers:*/
12 /*{{{*/
13 #ifdef HAVE_CONFIG_H
14  #include <config.h>
15 #else
16 #error "Cannot compile with HAVE_CONFIG_H symbol! run configure first!"
17 #endif
18 
19 #include "../../datastructures/datastructures.h"
20 #include "../../shared/shared.h"
21 #include "../mumps/mumpsincludes.h"
22 #include "./Bucket.h"
23 #include "./IssmMpiVec.h"
24 #include "./SparseRow.h"
25 #include <math.h>
26 /*}}}*/
27 
28 /*We need to template this class, in case we want to create Matrices that hold
29  IssmDouble* matrix or IssmPDouble* matrix.
30  Such matrices would be useful for use without or with the matlab or python
31  interface (which do not care for IssmDouble types, but only rely on
32  IssmPDouble types)*/
33 
template <class doubletype> class IssmAbsMat;

/*IssmMpiSparseMat: an MPI-parallel sparse matrix distributed by rows. Each cpu
 *owns m of the M global rows, each stored as a SparseRow. Values set through
 *SetValues are buffered in `buckets` and only routed to the owning cpu during
 *Assemble, where the sparsity pattern is resolved.*/
template <class doubletype>
class IssmMpiSparseMat:public IssmAbsMat<doubletype>{

	public:

		int M,N;                        //global size (number of rows, columns)
		int m;                          //local number of rows owned by this cpu
		SparseRow<doubletype>** matrix; /*array of m local rows; here, doubletype is either IssmDouble or IssmPDouble*/
		DataSet* buckets;               /*here, we store buckets of values that we will Assemble into a global matrix.*/
45  /*IssmMpiSparseMat constructors, destructors*/
46  IssmMpiSparseMat(){/*{{{*/
47  this->M=0;
48  this->N=0;
49  this->m=0;
50  this->matrix=NULL;
51  this->buckets=new DataSet();
52  }
53  /*}}}*/
/*IssmMpiSparseMat constructor from global dimensions Min x Nin; local row
 *count and row storage are set up by Init.*/
IssmMpiSparseMat(int Min,int Nin){/*{{{*/
	this->Init(Min,Nin);
}
/*}}}*/
/*IssmMpiSparseMat constructor with a sparsity hint. The hint is ignored:*/
IssmMpiSparseMat(int pM,int pN, doubletype sparsity){/*{{{*/
	/*no sparsity involved here, the sparsity pattern is resolve during the assemble phase: */
	this->Init(pM,pN);
}
/*}}}*/
63  IssmMpiSparseMat(int min,int nin,int Min,int Nin,int* d_nnz,int* o_nnz){/*{{{*/
64 
65  int i;
66 
67  /*no sparsity involved here, the sparsity pattern is resolved at the assemble phase: */
68  this->buckets=new DataSet();
69 
70  this->M=Min;
71  this->N=Nin;
72  this->m=min;
73 
74  /*Initialize pointer: */
75  this->matrix=NULL;
76 
77  /*Allocate: */
78  if (m*N){
79  this->matrix=xNew<SparseRow<doubletype>*>(m);
80  for(i=0;i<m;i++){
81  this->matrix[i]=new SparseRow<doubletype>(N);
82  }
83  }
84  }
85  /*}}}*/
/*IssmMpiSparseMat constructor with connectivity hints, accepted for interface
 *compatibility only:*/
IssmMpiSparseMat(int pM,int pN, int connectivity,int numberofdofspernode){/*{{{*/
	/*this is not needed, sparsity pattern is resolved at assemble phase: */
	this->Init(pM,pN);
}
/*}}}*/
91  void Init(int Min,int Nin){/*{{{*/
92 
93  this->buckets=new DataSet();
94 
95  this->M=Min;
96  this->N=Nin;
97 
98  /*Figure out local number of rows: */
99  this->m=DetermineLocalSize(this->M,IssmComm::GetComm());
100 
101  /*Initialize pointer: */
102  this->matrix=NULL;
103 
104  /*Allocate: */
105  if (m*N){
106  this->matrix=xNew<SparseRow<doubletype>*>(m);
107  for(int i=0;i<m;i++){
108  this->matrix[i]=new SparseRow<doubletype>(N);
109  }
110  }
111  }
112  /*}}}*/
	/*NOTE(review): the destructor signature line (original line 113,
	 *~IssmMpiSparseMat() per the cross-reference index) was elided by the
	 *documentation extractor; what follows is the destructor body. It frees
	 *every local SparseRow, the row-pointer array, and the bucket set.*/
	if(m*N){
		for(int i=0;i<m;i++){
			delete this->matrix[i];
		}
		xDelete<SparseRow<doubletype>*>(this->matrix);
	}
	this->M=0;
	this->N=0;
	this->m=0;
	delete this->buckets;
}
/*}}}*/
126 
127  /*IssmMpiSparseMat specific routines */
/*Echo: dump the local rows of every cpu, in rank order. Each cpu prints only
 *when the outer loop index matches its own rank.
 *NOTE(review): original line 141 was elided by the documentation extractor —
 *presumably an ISSM_MPI_Barrier synchronizing ranks between iterations so the
 *output is not interleaved; confirm against the real source.*/
void Echo(void){/*{{{*/

	/*Do a synchronized dump across all the rows: */
	int my_rank=IssmComm::GetRank();
	for(int i=0;i<IssmComm::GetSize();i++){
		if(my_rank==i){
			_printf_("cpu " << i << " #rows: " << this->m << "\n");
			for(int j=0;j<this->m;j++){
				_printf_("row " << j << ":");
				this->matrix[j]->Echo();
				_printf_("\n");
			}
		}
	}

}
/*}}}*/
/*Assemble: turn the locally buffered buckets into the distributed matrix.
 *Every cpu ships each (row,col,value,mode) tuple it has collected to the cpu
 *that owns that row (rows are block-partitioned), then each cpu plugs the
 *tuples it received into its local SparseRow objects. This is a collective
 *call: every cpu in the communicator must enter it.*/
void Assemble(){/*{{{*/

	int *RowRank = NULL;
	int *row_indices_forcpu = NULL;
	int *col_indices_forcpu = NULL;
	int *modes_forcpu = NULL;
	doubletype *values_forcpu = NULL;
	int *numvalues_forcpu = NULL;
	DataSet **bucketsforcpu = NULL;
	int **row_indices_fromcpu = NULL;
	int **col_indices_fromcpu = NULL;
	int **modes_fromcpu = NULL;
	doubletype **values_fromcpu = NULL;
	int *numvalues_fromcpu = NULL;
	int lower_row;
	int upper_row;
	int *sendcnts = NULL;
	int *displs = NULL;
	int this_row_numvalues;           /*NOTE(review): not used in the code shown*/
	int *this_row_cols = NULL;        /*NOTE(review): not used in the code shown*/
	int *this_row_mods = NULL;        /*NOTE(review): not used in the code shown*/
	int *numvalues_perrow = NULL;

	doubletype **values_perrow = NULL;
	int **cols_perrow = NULL;
	int **mods_perrow = NULL;
	int *counters_perrow = NULL;

	/*Early exit: */
	if(this->M*this->N==0) return;

	/*some communicator info: */
	int num_procs=IssmComm::GetSize();
	/*NOTE(review): original line 179 was elided by the documentation extractor —
	 *presumably `ISSM_MPI_Comm comm=IssmComm::GetComm();` since `comm` is used
	 *throughout below; confirm against the real source.*/

	/*First, make a vector of size M, which for each row between 0 and M-1, tells which cpu this row belongs to: */
	RowRank=DetermineRowRankFromLocalSize(M,m,comm);

	/*Now, sort out our dataset of buckets according to cpu ownership of rows*/
	bucketsforcpu=xNew<DataSet*>(num_procs);
	for(int i=0;i<num_procs;i++){
		DataSet* bucketsofcpu_i=new DataSet();
		for(int j=0;j<buckets->Size();j++){
			/*NOTE(review): original line 189 was elided — presumably fetching
			 *`bucket` via buckets->GetObjectByOffset(j) with a downcast to
			 *Bucket<doubletype>*; confirm against the real source.*/
			bucket->SpawnBucketsPerCpu(bucketsofcpu_i,i,RowRank);
		}
		bucketsforcpu[i]=bucketsofcpu_i;
	}

	/*Recap, each cpu has num_procs datasets of buckets. For a certain cpu j, for a given dataset i, the buckets this
	 * dataset owns correspond to rows that are owned by cpu i, not j!. Out of all the buckets we own, make row,col,value,insert_mode
	 * vectors that will be shipped around the cluster: */
	this->BucketsBuildScatterBuffers(&numvalues_forcpu,&row_indices_forcpu,&col_indices_forcpu,&values_forcpu,&modes_forcpu,bucketsforcpu,num_procs);

	/*Now, we need to allocate on each cpu arrays to receive data from all the other cpus. To know what we need to allocate, we need
	 *some scatter calls: */
	numvalues_fromcpu = xNew<int>(num_procs);
	for(int i=0;i<num_procs;i++) ISSM_MPI_Scatter(numvalues_forcpu,1,ISSM_MPI_INT,numvalues_fromcpu+i,1,ISSM_MPI_INT,i,comm);

	row_indices_fromcpu=xNew<int*>(num_procs);
	col_indices_fromcpu=xNew<int*>(num_procs);
	values_fromcpu=xNew<doubletype*>(num_procs);
	modes_fromcpu=xNew<int*>(num_procs);
	for(int i=0;i<num_procs;i++){
		int size=numvalues_fromcpu[i];
		if(size){
			row_indices_fromcpu[i]=xNew<int>(size);
			col_indices_fromcpu[i]=xNew<int>(size);
// AD performance is sensitive to calls to ensurecontiguous.
// Providing "t" will cause ensurecontiguous to be called.
#ifdef _HAVE_AD_
			values_fromcpu[i]=xNew<doubletype>(size,"t");
#else
			values_fromcpu[i]=xNew<doubletype>(size);
#endif
			modes_fromcpu[i]=xNew<int>(size);
		}
		else{
			row_indices_fromcpu[i] = NULL;
			col_indices_fromcpu[i] = NULL;
			values_fromcpu[i] = NULL;
			modes_fromcpu[i] = NULL;
		}
	}

	/*Scatter values around*/
	/*Now, to scatter values across the cluster, we need sendcnts and displs. Our sendbufs have been built by BucketsBuildScatterBuffers, with a stride given
	 * by numvalues_forcpu. Get this ready to go before starting the scatter itslef. For reference, here is the ISSM_MPI_Scatterv prototype:
	 * int ISSM_MPI_Scatterv( void *sendbuf, int *sendcnts, int *displs, ISSM_MPI_Datatype sendtype, void *recvbuf, int recvcnt, ISSM_MPI_Datatype recvtype, int root, ISSM_MPI_Comm comm) :*/
	sendcnts=xNew<int>(num_procs);
	displs=xNew<int>(num_procs);
	int count=0;
	for(int i=0;i<num_procs;i++){
		sendcnts[i]=numvalues_forcpu[i];
		displs[i]=count;
		count+=numvalues_forcpu[i];
	}

	/*Each iteration scatters with cpu i as root; every cpu ends up with the
	 *tuples destined to it from every other cpu*/
	for(int i=0;i<num_procs;i++){
		ISSM_MPI_Scatterv( row_indices_forcpu, sendcnts, displs, ISSM_MPI_INT, row_indices_fromcpu[i], numvalues_fromcpu[i], ISSM_MPI_INT, i, comm);
		ISSM_MPI_Scatterv( col_indices_forcpu, sendcnts, displs, ISSM_MPI_INT, col_indices_fromcpu[i], numvalues_fromcpu[i], ISSM_MPI_INT, i, comm);
		ISSM_MPI_Scatterv( values_forcpu, sendcnts, displs, ISSM_MPI_DOUBLE, values_fromcpu[i], numvalues_fromcpu[i], ISSM_MPI_DOUBLE, i, comm);
		ISSM_MPI_Scatterv( modes_forcpu, sendcnts, displs, ISSM_MPI_INT, modes_fromcpu[i], numvalues_fromcpu[i], ISSM_MPI_INT, i, comm);
	}

	/*Plug values into global matrix. To do so, we are going to first figure out how many overall values each sparse row is going to get, then we fill up these values, and give it to each sparse row*/
	GetOwnershipBoundariesFromRange(&lower_row,&upper_row,m,comm);

	/*Figure out how many values each row is going to get: */
	numvalues_perrow=xNewZeroInit<int>(this->m);
	for(int i=0;i<num_procs;i++){
		int numvalues=numvalues_fromcpu[i];
		int* rows=row_indices_fromcpu[i];
		for(int j=0;j<numvalues;j++)numvalues_perrow[rows[j]-lower_row]++;
	}

	/*Allocate all the values, cols and mods from each cpu: */
	values_perrow=xNew<doubletype*>(this->m);
	cols_perrow=xNew<int*>(this->m);
	mods_perrow=xNew<int*>(this->m);
	counters_perrow=xNewZeroInit<int>(this->m);

	for(int i=0;i<this->m;i++){
		values_perrow[i]=xNewZeroInit<doubletype>(numvalues_perrow[i]);
		cols_perrow[i]=xNewZeroInit<int>(numvalues_perrow[i]);
		mods_perrow[i]=xNewZeroInit<int>(numvalues_perrow[i]);
	}

	/*collect:*/
	for(int i=0;i<num_procs;i++){
		int numvalues=numvalues_fromcpu[i];
		int* rows=row_indices_fromcpu[i];
		int* cols=col_indices_fromcpu[i];
		doubletype* values=values_fromcpu[i];
		int* mods=modes_fromcpu[i];
		for(int j=0;j<numvalues;j++){
			int row=rows[j]-lower_row;
			int counter=counters_perrow[row];
			values_perrow[row][counter]=values[j];
			cols_perrow[row][counter]=cols[j];
			mods_perrow[row][counter]=mods[j];
			/*NOTE(review): the assignment to `counter` below is dead — the
			 *post-increment of counters_perrow[row] is the intended effect*/
			counter=counters_perrow[row]++;
		}
	}

	/*Plug into matrix: */
	for(int i=0;i<this->m;i++) this->matrix[i]->SetValues(numvalues_perrow[i],cols_perrow[i],values_perrow[i],mods_perrow[i]);

	/*Free ressources*/
	xDelete<int>(numvalues_perrow);
	xDelete<int>(RowRank);
	xDelete<int>(row_indices_forcpu);
	xDelete<int>(col_indices_forcpu);
	xDelete<int>(modes_forcpu);
	xDelete<doubletype>(values_forcpu);
	xDelete<int>(numvalues_forcpu);
	for(int i=0;i<num_procs;i++){
		DataSet* buckets=bucketsforcpu[i];
		delete buckets;
	}
	xDelete<DataSet*>(bucketsforcpu);
	for(int i=0;i<num_procs;i++){
		int* rows=row_indices_fromcpu[i];
		int* cols=col_indices_fromcpu[i];
		int* modes=modes_fromcpu[i];
		doubletype* values=values_fromcpu[i];
		xDelete<int>(rows);
		xDelete<int>(cols);
		xDelete<int>(modes);
		xDelete<doubletype>(values);
	}
	xDelete<int*>(row_indices_fromcpu);
	xDelete<int*>(col_indices_fromcpu);
	xDelete<int*>(modes_fromcpu);
	xDelete<doubletype*>(values_fromcpu);
	xDelete<int>(numvalues_fromcpu);
	xDelete<int>(sendcnts);
	xDelete<int>(displs);
	for(int i=0;i<this->m;i++){
		doubletype* values=values_perrow[i]; xDelete<doubletype>(values);
		int* cols=cols_perrow[i]; xDelete<int>(cols);
		int* mods=mods_perrow[i]; xDelete<int>(mods);
	}
	xDelete<int>(counters_perrow);
	xDelete<doubletype*>(values_perrow);
	xDelete<int*>(cols_perrow);
	xDelete<int*>(mods_perrow);
}/*}}}*/
/*Norm: compute a matrix norm across all cpus. Per-row norms are computed by
 *SparseRow::Norm and combined with an MPI reduction onto rank 0.
 *NOTE(review): original lines 347 and 356 were elided by the documentation
 *extractor — presumably ISSM_MPI_Bcast of `norm` so that every rank returns
 *the same value (the index lists ISSM_MPI_Bcast as used); as shown, non-root
 *ranks would return an uninitialized `norm`. Confirm against the real source.*/
doubletype Norm(NormMode mode){/*{{{*/

	doubletype norm,local_norm;
	doubletype absolute;  /*NOTE(review): not used in the code shown*/
	int i;

	switch(mode){
		case NORM_INF:
			/*Infinity norm: max over all local rows, reduced with MPI_MAX*/
			local_norm=0;
			for(i=0;i<this->m;i++){
				local_norm=max(local_norm,this->matrix[i]->Norm(mode));
			}
			ISSM_MPI_Reduce(&local_norm, &norm, 1, ISSM_MPI_DOUBLE, ISSM_MPI_MAX, 0, IssmComm::GetComm());
			return norm;
			break;
		case NORM_FROB:
			/*Frobenius norm: sum of per-row contributions, reduced with
			 *MPI_SUM, square root taken at the end*/
			local_norm=0;
			for(i=0;i<this->m;i++){
				local_norm+=this->matrix[i]->Norm(mode);
			}
			ISSM_MPI_Reduce(&local_norm, &norm, 1, ISSM_MPI_DOUBLE, ISSM_MPI_SUM, 0, IssmComm::GetComm());
			return sqrt(norm);
			break;

		default:
			_error_("unknown norm !");
			break;
	}
}
/*}}}*/
366  void GetSize(int* pM,int* pN){/*{{{*/
367  *pM=M;
368  *pN=N;
369  }
370  /*}}}*/
371  void GetLocalSize(int* pM,int* pN){/*{{{*/
372  *pM=m;
373  *pN=N;
374  }
375  /*}}}*/
	/*NOTE(review): the MatMult signature line (original line 376,
	 *void MatMult(IssmAbsVec<doubletype>* Xin,IssmAbsVec<doubletype>* AXin)
	 *per the cross-reference index) was elided by the documentation extractor.
	 *MatMult computes AXin = this * Xin: the input vector is serialized on
	 *every cpu, then each cpu multiplies its local rows.*/

	/*A check on the types: */
	if(IssmVecTypeFromToolkitOptions()!=MpiEnum)_error_("MatMult operation only possible with 'mpi' vectors");

	/*Now that we are sure, cast vectors: */
	/*NOTE(review): original lines 382-383 were elided — presumably the
	 *downcasts of Xin/AXin to IssmMpiVec<doubletype>* X and AX used below;
	 *confirm against the real source.*/

	/*Serialize input Xin: */
	doubletype* X_serial=X->ToMPISerial();

	/*Every cpu has a serial version of the input vector. Use it to do the Matrix-Vector multiply
	 *locally and plug it into AXin: */
	for(int i=0;i<this->m;i++){
		AX->vector[i]=this->matrix[i]->Mult(X_serial);
	}

	/*Free ressources: */
	xDelete<doubletype>(X_serial);
}
/*}}}*/
	/*NOTE(review): the Duplicate signature line (original line 398,
	 *IssmMpiSparseMat<doubletype>* Duplicate(void) per the cross-reference
	 *index) was elided by the documentation extractor. Not implemented:*/

	_error_("not supported yet!");

}
/*}}}*/
/*ToSerial: would return a serialized (dense, single-cpu) copy of the matrix.
 *Not implemented for this matrix type.*/
doubletype* ToSerial(void){/*{{{*/
	_error_("not supported yet!");
}
/*}}}*/
/*SetValues: queue a min x nin block of values for a later Assemble.
 * min/idxm: number of rows in the block / their global row indices
 * nin/idxn: number of columns / their global column indices
 * values:   the block of values (layout defined by the Bucket class —
 *           presumably row-major; confirm against Bucket's constructor)
 * mode:     insertion mode (add vs insert), applied at Assemble time
 *Nothing is written into the local rows here: the targeted rows usually
 *belong to another cpu, so the values are buffered in a Bucket instead.*/
void SetValues(int min,int* idxm,int nin,int* idxn,doubletype* values,InsMode mode){/*{{{*/

	/*we need to store all the values we collect here in order to Assemble later.
	 * Indeed, the values we are collecting here most of the time will not belong
	 * to us, but to another part of the matrix on another cpu: */
	_assert_(buckets);

	buckets->AddObject(new Bucket<doubletype>(min,idxm,nin,idxn,values,mode));

}
/*}}}*/
419  void SetZero(void){/*{{{*/
420 
421  /*Reset buckets*/
422  delete this->buckets;
423  this->buckets=new DataSet();
424 
425  /*reset matrix*/
426  if(m*N){
427  for(int i=0;i<m;i++) delete this->matrix[i];
428  xDelete<SparseRow<doubletype>*>(this->matrix);
429 
430  this->matrix=xNew<SparseRow<doubletype>*>(m);
431  for(int i=0;i<m;i++) this->matrix[i]=new SparseRow<doubletype>(N);
432  }
433 
434  /*Reallocate matrix*/
435  }/*}}}*/
/*Convert: would convert this matrix to another storage type. Not implemented
 *for this matrix type.*/
void Convert(MatrixType type){/*{{{*/
	_error_("not supported yet!");
}
/*}}}*/
/*BucketsBuildScatterBuffers: flatten the per-cpu bucket datasets into four
 *contiguous send buffers (row indices, column indices, values, insert modes)
 *laid out cpu after cpu, ready for ISSM_MPI_Scatterv in Assemble.
 *Outputs (all newly allocated, ownership passes to the caller):
 * pnumvalues_forcpu: per-cpu tuple counts (length num_procs)
 * prow/pcol/pvalues/pmodes_forcpu: concatenated buffers (total tuple count)*/
void BucketsBuildScatterBuffers(int** pnumvalues_forcpu,int** prow_indices_forcpu,int** pcol_indices_forcpu,doubletype** pvalues_forcpu,int** pmodes_forcpu,DataSet** bucketsforcpu,int num_procs){/*{{{*/

	/*intermediary: */
	int count;
	int *temp_row_indices_forcpu = NULL;
	int *temp_col_indices_forcpu = NULL;
	doubletype *temp_values_forcpu = NULL;
	int *temp_modes_forcpu = NULL;

	/*output: */
	int *numvalues_forcpu = NULL;
	int *row_indices_forcpu = NULL;
	int *col_indices_forcpu = NULL;
	doubletype *values_forcpu = NULL;
	int *modes_forcpu = NULL;

	/*figure out size of buffers per cpu: */

	numvalues_forcpu=xNew<int>(num_procs);
	for(int i=0;i<num_procs;i++){
		DataSet *buckets = bucketsforcpu[i];
		count=0;
		for(int j=0;j<buckets->Size();j++){
			/*NOTE(review): original line 463 was elided by the documentation
			 *extractor — presumably fetching `bucket` via
			 *buckets->GetObjectByOffset(j); confirm against the real source.*/
			count+=bucket->MarshallSize();
		}
		numvalues_forcpu[i]=count;
	}

	/*now, figure out size of total buffers (for all cpus!): */
	count=0;
	for(int i=0;i<num_procs;i++) count+=numvalues_forcpu[i];
	int total_size=count;

	/*Allocate buffers: */
	row_indices_forcpu = xNew<int>(total_size);
	col_indices_forcpu = xNew<int>(total_size);
// AD performance is sensitive to calls to ensurecontiguous.
// Providing "t" will cause ensurecontiguous to be called.
#ifdef _HAVE_AD_
	values_forcpu = xNew<doubletype>(total_size,"t");
#else
	values_forcpu = xNew<doubletype>(total_size);
#endif
	modes_forcpu = xNew<int>(total_size);

	/*we are going to march through the buffers, and marshall data onto them, so in order to not
	 *lose track of where these buffers are located in memory, we are going to work using copies
	 of them: */
	temp_row_indices_forcpu = row_indices_forcpu;
	temp_col_indices_forcpu = col_indices_forcpu;
	temp_values_forcpu = values_forcpu;
	temp_modes_forcpu = modes_forcpu;

	/*Fill buffers: */
	for(int i=0;i<num_procs;i++){
		DataSet *buckets = bucketsforcpu[i];
		for(int j=0;j<buckets->Size();j++){
			/*NOTE(review): original line 498 was elided — presumably the same
			 *GetObjectByOffset(j) fetch of `bucket` as above.*/
			bucket->Marshall(&temp_row_indices_forcpu,&temp_col_indices_forcpu,&temp_values_forcpu,&temp_modes_forcpu); //pass in the address of the buffers, so as to have the Marshall routine increment them.
		}
	}

	/*sanity check: */
	if(temp_row_indices_forcpu!=row_indices_forcpu+total_size)_error_("problem with marshalling of buckets");
	if(temp_col_indices_forcpu!=col_indices_forcpu+total_size)_error_("problem with marshalling of buckets");
	if(temp_values_forcpu!=values_forcpu+total_size)_error_("problem with marshalling of buckets");
	if(temp_modes_forcpu!=modes_forcpu+total_size)_error_("problem with marshalling of buckets");

	/*output buffers: */
	*pnumvalues_forcpu = numvalues_forcpu;
	*prow_indices_forcpu = row_indices_forcpu;
	*pcol_indices_forcpu = col_indices_forcpu;
	*pvalues_forcpu = values_forcpu;
	*pmodes_forcpu = modes_forcpu;
}
/*}}}*/
#ifndef _HAVE_WRAPPERS_
	/*Solve{{{*/
	/*NOTE(review): the Solve signature line (original line 519,
	 *IssmAbsVec<IssmDouble>* Solve(IssmAbsVec<IssmDouble>* pfin,Parameters* parameters)
	 *per the cross-reference index) was elided by the documentation extractor.
	 *Solve returns the solution uf of this*uf = pf, dispatching on the
	 *configured solver type.*/

	/*output: */
	IssmMpiVec<IssmDouble>* uf=NULL;
	IssmMpiVec<IssmDouble>* pf=NULL;

	/*Assume we are getting an IssmMpiVec in input, downcast: */
	pf=(IssmMpiVec<IssmDouble>*)pfin;

	/*NOTE(review): original line 528 was elided — presumably
	 *switch(IssmSolverTypeFromToolkitOptions()){ , since that function appears
	 *in the cross-reference index; confirm against the real source.*/
		case MumpsEnum: {
			/*Initialize output: */
			uf=pf->Duplicate();
			#ifdef _HAVE_MUMPS_
			MpiSparseMumpsSolve(/*output*/ uf->vector,uf->M,uf->m, /*stiffness matrix:*/ this->matrix,this->M,this->N,this->m, /*right hand side load vector: */ pf->vector,pf->M,pf->m,parameters);
			#else
			_error_("IssmMpiSparseMat solver requires MUMPS solver");
			#endif
			return (IssmAbsVec<IssmDouble>*)uf;
		}
		case GslEnum: {

			IssmDouble* x=NULL;
			#ifdef _HAVE_GSL_

			/*GSL path is a stub: errors out before the commented-out solve below*/
			_error_("not implemented yet!");
			//SparseGslSolve(/*output*/ &x,/*stiffness matrix:*/ this->matrix,this->M,this->N, /*right hand side load vector: */ pf->vector,pf->M,parameters);

			uf=new IssmMpiVec<IssmDouble>(x,this->N); xDelete(x);

			return (IssmAbsVec<IssmDouble>*)uf;
			#else
			_error_("GSL support not compiled in!");
			#endif
		}
		default:
			_error_("solver type not supported yet!");
	}

}/*}}}*/
#endif
560 };
561 
562 #endif //#ifndef _ISSM_MPI_SPARSE_MAT_H_
DataSet::Size
int Size()
Definition: DataSet.cpp:399
_assert_
#define _assert_(ignore)
Definition: exceptions.h:37
IssmMpiVec::vector
doubletype * vector
Definition: IssmMpiVec.h:40
IssmDouble
double IssmDouble
Definition: types.h:37
DataSet::AddObject
int AddObject(Object *object)
Definition: DataSet.cpp:252
_printf_
#define _printf_(StreamArgs)
Definition: Print.h:22
IssmMpiSparseMat::N
int N
Definition: IssmMpiSparseMat.h:41
Parameters
Declaration of Parameters class.
Definition: Parameters.h:18
ISSM_MPI_SUM
#define ISSM_MPI_SUM
Definition: issmmpi.h:134
IssmComm::GetComm
static ISSM_MPI_Comm GetComm(void)
Definition: IssmComm.cpp:30
GetOwnershipBoundariesFromRange
void GetOwnershipBoundariesFromRange(int *plower_row, int *pupper_row, int range, ISSM_MPI_Comm comm)
Definition: GetOwnershipBoundariesFromRange.cpp:16
NORM_INF
@ NORM_INF
Definition: toolkitsenums.h:15
ISSM_MPI_MAX
#define ISSM_MPI_MAX
Definition: issmmpi.h:131
IssmMpiVec.h
implementation of parallel dense ISSM vector. Internally, the parallel dense vector is split in rows ...
IssmMpiSparseMat::Convert
void Convert(MatrixType type)
Definition: IssmMpiSparseMat.h:436
IssmMpiSparseMat::GetSize
void GetSize(int *pM, int *pN)
Definition: IssmMpiSparseMat.h:366
Bucket
Definition: Bucket.h:19
IssmMpiSparseMat::Solve
IssmAbsVec< IssmDouble > * Solve(IssmAbsVec< IssmDouble > *pfin, Parameters *parameters)
Definition: IssmMpiSparseMat.h:519
IssmMpiSparseMat::GetLocalSize
void GetLocalSize(int *pM, int *pN)
Definition: IssmMpiSparseMat.h:371
IssmAbsMat
Definition: IssmAbsMat.h:27
MpiEnum
@ MpiEnum
Definition: EnumDefinitions.h:1193
DetermineLocalSize
int DetermineLocalSize(int global_size, ISSM_MPI_Comm comm)
Definition: DetermineLocalSize.cpp:9
IssmSolverTypeFromToolkitOptions
int IssmSolverTypeFromToolkitOptions(void)
Definition: IssmToolkitUtils.cpp:86
Bucket::MarshallSize
int MarshallSize(void)
Definition: Bucket.h:233
IssmMpiVec::ToMPISerial
doubletype * ToMPISerial(void)
Definition: IssmMpiVec.h:413
ISSM_MPI_DOUBLE
#define ISSM_MPI_DOUBLE
Definition: issmmpi.h:125
IssmMpiSparseMat::IssmMpiSparseMat
IssmMpiSparseMat(int Min, int Nin)
Definition: IssmMpiSparseMat.h:54
IssmMpiSparseMat::SetValues
void SetValues(int min, int *idxm, int nin, int *idxn, doubletype *values, InsMode mode)
Definition: IssmMpiSparseMat.h:408
MatrixType
MatrixType
Definition: toolkitsenums.h:16
ISSM_MPI_INT
#define ISSM_MPI_INT
Definition: issmmpi.h:127
IssmMpiSparseMat::MatMult
void MatMult(IssmAbsVec< doubletype > *Xin, IssmAbsVec< doubletype > *AXin)
Definition: IssmMpiSparseMat.h:376
MumpsEnum
@ MumpsEnum
Definition: EnumDefinitions.h:1195
SparseRow::Norm
doubletype Norm(NormMode mode)
Definition: SparseRow.h:101
IssmMpiSparseMat::~IssmMpiSparseMat
~IssmMpiSparseMat()
Definition: IssmMpiSparseMat.h:113
IssmMpiSparseMat::m
int m
Definition: IssmMpiSparseMat.h:42
xDelete
void xDelete(T **&aT_pp)
Definition: MemOps.h:97
SparseRow::Mult
doubletype Mult(doubletype *X)
Definition: SparseRow.h:126
SparseRow
Definition: SparseRow.h:13
IssmMpiSparseMat::matrix
SparseRow< doubletype > ** matrix
Definition: IssmMpiSparseMat.h:43
Bucket.h
: header file for Bucket object
NORM_FROB
@ NORM_FROB
Definition: toolkitsenums.h:15
IssmComm::GetSize
static int GetSize(void)
Definition: IssmComm.cpp:46
SparseRow::Echo
void Echo()
Definition: SparseRow.h:45
IssmMpiSparseMat::Assemble
void Assemble()
Definition: IssmMpiSparseMat.h:146
IssmMpiSparseMat::Echo
void Echo(void)
Definition: IssmMpiSparseMat.h:128
IssmMpiSparseMat::IssmMpiSparseMat
IssmMpiSparseMat(int pM, int pN, int connectivity, int numberofdofspernode)
Definition: IssmMpiSparseMat.h:86
IssmMpiSparseMat::M
int M
Definition: IssmMpiSparseMat.h:41
ISSM_MPI_Bcast
int ISSM_MPI_Bcast(void *buffer, int count, ISSM_MPI_Datatype datatype, int root, ISSM_MPI_Comm comm)
Definition: issmmpi.cpp:162
IssmAbsVec
Definition: IssmAbsVec.h:24
Bucket::SpawnBucketsPerCpu
void SpawnBucketsPerCpu(DataSet *bucketsofcpu_i, int rank_i, int *rowranks)
Definition: Bucket.h:137
IssmMpiSparseMat::Init
void Init(int Min, int Nin)
Definition: IssmMpiSparseMat.h:91
NormMode
NormMode
Definition: toolkitsenums.h:15
IssmMpiSparseMat::IssmMpiSparseMat
IssmMpiSparseMat(int pM, int pN, doubletype sparsity)
Definition: IssmMpiSparseMat.h:58
IssmMpiSparseMat::Duplicate
IssmMpiSparseMat< doubletype > * Duplicate(void)
Definition: IssmMpiSparseMat.h:398
IssmComm::GetRank
static int GetRank(void)
Definition: IssmComm.cpp:34
Bucket::Marshall
void Marshall(char **pmarshalled_data, int *pmarshalled_data_size, int marshall_direction)
Definition: Bucket.h:131
_error_
#define _error_(StreamArgs)
Definition: exceptions.h:49
DataSet::GetObjectByOffset
Object * GetObjectByOffset(int offset)
Definition: DataSet.cpp:334
min
IssmDouble min(IssmDouble a, IssmDouble b)
Definition: extrema.cpp:14
IssmMpiSparseMat
Definition: IssmMpiSparseMat.h:37
IssmMpiSparseMat::SetZero
void SetZero(void)
Definition: IssmMpiSparseMat.h:419
ISSM_MPI_Comm
int ISSM_MPI_Comm
Definition: issmmpi.h:118
IssmMpiSparseMat::IssmMpiSparseMat
IssmMpiSparseMat()
Definition: IssmMpiSparseMat.h:46
IssmMpiVec::m
int m
Definition: IssmMpiVec.h:39
IssmMpiSparseMat::buckets
DataSet * buckets
Definition: IssmMpiSparseMat.h:44
ISSM_MPI_Barrier
int ISSM_MPI_Barrier(ISSM_MPI_Comm comm)
Definition: issmmpi.cpp:148
InsMode
InsMode
Definition: toolkitsenums.h:14
IssmVecTypeFromToolkitOptions
int IssmVecTypeFromToolkitOptions(void)
Definition: IssmToolkitUtils.cpp:56
IssmMpiVec
Definition: IssmMpiVec.h:34
IssmMpiSparseMat::ToSerial
doubletype * ToSerial(void)
Definition: IssmMpiSparseMat.h:404
ISSM_MPI_Reduce
int ISSM_MPI_Reduce(void *sendbuf, void *recvbuf, int count, ISSM_MPI_Datatype datatype, ISSM_MPI_Op op, int root, ISSM_MPI_Comm comm)
Definition: issmmpi.cpp:373
max
IssmDouble max(IssmDouble a, IssmDouble b)
Definition: extrema.cpp:24
DataSet
Declaration of DataSet class.
Definition: DataSet.h:14
IssmMpiSparseMat::IssmMpiSparseMat
IssmMpiSparseMat(int min, int nin, int Min, int Nin, int *d_nnz, int *o_nnz)
Definition: IssmMpiSparseMat.h:63
bamg::Min
T Min(const T &a, const T &b)
Definition: extrema.h:6
IssmMpiVec::M
int M
Definition: IssmMpiVec.h:38
IssmMpiSparseMat::Norm
doubletype Norm(NormMode mode)
Definition: IssmMpiSparseMat.h:334
DetermineRowRankFromLocalSize
int * DetermineRowRankFromLocalSize(int global_size, int localsize, ISSM_MPI_Comm comm)
Definition: DetermineRowRankFromLocalSize.cpp:12
IssmMpiSparseMat::BucketsBuildScatterBuffers
void BucketsBuildScatterBuffers(int **pnumvalues_forcpu, int **prow_indices_forcpu, int **pcol_indices_forcpu, doubletype **pvalues_forcpu, int **pmodes_forcpu, DataSet **bucketsforcpu, int num_procs)
Definition: IssmMpiSparseMat.h:440
GslEnum
@ GslEnum
Definition: EnumDefinitions.h:1094
IssmMpiVec::Duplicate
IssmMpiVec< doubletype > * Duplicate(void)
Definition: IssmMpiVec.h:374
ISSM_MPI_Scatter
int ISSM_MPI_Scatter(void *sendbuf, int sendcnt, ISSM_MPI_Datatype sendtype, void *recvbuf, int recvcnt, ISSM_MPI_Datatype recvtype, int root, ISSM_MPI_Comm comm)
Definition: issmmpi.cpp:407
ISSM_MPI_Scatterv
int ISSM_MPI_Scatterv(void *sendbuf, int *sendcnts, int *displs, ISSM_MPI_Datatype sendtype, void *recvbuf, int recvcnt, ISSM_MPI_Datatype recvtype, int root, ISSM_MPI_Comm comm)
Definition: issmmpi.cpp:444
SparseRow.h