petscsparsemtrx.C
/*
 *
 *                 #####    #####   ######  ######  ###   ###
 *               ##   ##  ##   ##  ##      ##      ## ### ##
 *              ##   ##  ##   ##  ####    ####    ##  #  ##
 *             ##   ##  ##   ##  ##      ##      ##     ##
 *            ##   ##  ##   ##  ##      ##      ##     ##
 *             #####    #####   ##      ######  ##     ##
 *
 *
 *             OOFEM : Object Oriented Finite Element Code
 *
 *               Copyright (C) 1993 - 2013   Borek Patzak
 *
 *
 *
 *       Czech Technical University, Faculty of Civil Engineering,
 *   Department of Structural Mechanics, 166 29 Prague, Czech Republic
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2.1 of the License, or (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA
 */

#include "petscsparsemtrx.h"
#include "engngm.h"
#include "activebc.h"
#include "element.h"
#include "dofmanager.h"
#include "sparsemtrxtype.h"
#include "classfactory.h"

#include <vector>
#include <petscksp.h>
#include <petscvec.h>
#include <petscmat.h>

namespace oofem {
REGISTER_SparseMtrx(PetscSparseMtrx, SMT_PetscMtrx);


PetscSparseMtrx :: PetscSparseMtrx(int n, int m) : SparseMtrx(n, m),
    mtrx(NULL), symmFlag(false), leqs(0), geqs(0), blocksize(1), di(0), kspInit(false), newValues(true), localIS(NULL), globalIS(NULL) { }

PetscSparseMtrx :: PetscSparseMtrx() : SparseMtrx(),
    mtrx(NULL), symmFlag(false), leqs(0), geqs(0), blocksize(1), di(0), kspInit(false), newValues(true), localIS(NULL), globalIS(NULL) { }

PetscSparseMtrx :: ~PetscSparseMtrx()
{
    MatDestroy(& this->mtrx);
    if ( this->kspInit ) {
        KSPDestroy(& this->ksp);
    }
    if ( localIS ) {
        ISDestroy(& localIS);
        ISDestroy(& globalIS);
    }
}


SparseMtrx *
PetscSparseMtrx :: GiveCopy() const
{
    PetscSparseMtrx *answer = new PetscSparseMtrx(nRows, nColumns);
    MatDuplicate( this->mtrx, MAT_COPY_VALUES, & ( answer->mtrx ) );
    answer->symmFlag = this->symmFlag;
    answer->mType = this->mType;
    answer->leqs = this->leqs;
    answer->geqs = this->geqs;
    answer->di = this->di;
    answer->emodel = this->emodel;
    answer->kspInit = false;
    answer->newValues = this->newValues;

    return answer;
}

void
PetscSparseMtrx :: times(const FloatArray &x, FloatArray &answer) const
{
    if ( emodel->isParallel() ) {
        answer.resize(x.giveSize());

        Vec globX;
        Vec globY;

        /*
         * scatter and gather x to global representation
         */
        this->createVecGlobal(& globX);
        this->scatterL2G(x, globX);

        VecDuplicate(globX, & globY);

        MatMult(this->mtrx, globX, globY);

        this->scatterG2L(globY, answer);

        VecDestroy(& globX);
        VecDestroy(& globY);
    } else {
        if ( this->giveNumberOfColumns() != x.giveSize() ) {
            OOFEM_ERROR("Dimension mismatch");
        }

        Vec globX, globY;
        VecCreateSeqWithArray(PETSC_COMM_SELF, 1, x.giveSize(), x.givePointer(), & globX);
        VecCreate(PETSC_COMM_SELF, & globY);
        VecSetType(globY, VECSEQ);
        VecSetSizes(globY, PETSC_DECIDE, this->nRows);

        MatMult(this->mtrx, globX, globY);
        double *ptr;
        VecGetArray(globY, & ptr);
        answer.resize(this->nRows);
        for ( int i = 0; i < this->nRows; i++ ) {
            answer(i) = ptr [ i ];
        }

        VecRestoreArray(globY, & ptr);
        VecDestroy(& globX);
        VecDestroy(& globY);
    }
}
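
// Exposition: a minimal sketch of the VecCreateSeqWithArray idiom used in the
// sequential branch above. It wraps caller-owned storage in PETSc vectors
// without copying, multiplies, and destroys only the wrappers. This helper is
// illustrative (it assumes a square n-by-n sequential Mat A) and is not part
// of the PetscSparseMtrx interface.
static void examplePetscMatVec(Mat A, int n, const double *xvals, double *yvals)
{
    Vec x, y;
    VecCreateSeqWithArray(PETSC_COMM_SELF, 1, n, xvals, & x); // no copy; caller keeps ownership
    VecCreateSeqWithArray(PETSC_COMM_SELF, 1, n, yvals, & y);
    MatMult(A, x, y);   // on return, yvals holds A*x
    VecDestroy(& x);    // frees the wrappers only, not xvals/yvals
    VecDestroy(& y);
}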

void
PetscSparseMtrx :: timesT(const FloatArray &x, FloatArray &answer) const
{
    if ( this->giveNumberOfRows() != x.giveSize() ) {
        OOFEM_ERROR("Dimension mismatch");
    }

    if ( emodel->isParallel() ) {
        OOFEM_ERROR("Not implemented");
    }

    Vec globX, globY;
    VecCreateSeqWithArray(PETSC_COMM_SELF, 1, x.giveSize(), x.givePointer(), & globX);
    VecCreate(PETSC_COMM_SELF, & globY);
    VecSetType(globY, VECSEQ);
    VecSetSizes(globY, PETSC_DECIDE, this->nColumns);

    MatMultTranspose(this->mtrx, globX, globY);
    double *ptr;
    VecGetArray(globY, & ptr);
    answer.resize(this->nColumns);
    for ( int i = 0; i < this->nColumns; i++ ) {
        answer(i) = ptr [ i ];
    }

    VecRestoreArray(globY, & ptr);
    VecDestroy(& globX);
    VecDestroy(& globY);
}


void
PetscSparseMtrx :: times(const FloatMatrix &B, FloatMatrix &answer) const
{
    if ( this->giveNumberOfColumns() != B.giveNumberOfRows() ) {
        OOFEM_ERROR("Dimension mismatch");
    }

    if ( emodel->isParallel() ) {
        OOFEM_ERROR("Not implemented");
    }

    // I'm opting to work with a set of vectors, as I think it might be faster and more robust. / Mikael

    int nr = this->giveNumberOfRows();
    int nc = B.giveNumberOfColumns();
    answer.resize(nr, nc);

#if 1
    // Using several vectors is necessary because PETSc is missing most of its operations on symmetric matrices.
    // If PETSc ever adds the missing "MatMatMult_***_***" implementations, we can use the second approach below.
    Vec globX, globY;
    VecCreate(PETSC_COMM_SELF, & globY);
    VecSetType(globY, VECSEQ);
    VecSetSizes(globY, PETSC_DECIDE, nr);
    int nrB = B.giveNumberOfRows();
    FloatArray colVals(nrB);
    double *resultsPtr;
    for ( int k = 0; k < nc; ++k ) {
        B.copyColumn(colVals, k + 1);
        VecCreateSeqWithArray(PETSC_COMM_SELF, 1, nrB, colVals.givePointer(), & globX);
        MatMult(this->mtrx, globX, globY);
        VecGetArray(globY, & resultsPtr);
        for ( int i = 0; i < nr; ++i ) {
            answer(i, k) = resultsPtr [ i ];
        }
        VecRestoreArray(globY, & resultsPtr);
        VecDestroy(& globX);
    }
    VecDestroy(& globY);
#else
    double *aptr = answer.givePointer();
    Mat globB, globC;
    MatCreateSeqDense(PETSC_COMM_SELF, B.giveNumberOfRows(), B.giveNumberOfColumns(), B.givePointer(), & globB);
    MatMatMult(this->mtrx, globB, MAT_INITIAL_MATRIX, PETSC_DEFAULT, & globC);
    const double *vals;
    for ( int r = 0; r < nr; r++ ) {
        MatGetRow(globC, r, NULL, NULL, & vals);
        for ( int i = 0, i2 = r; i < nc; i++, i2 += nr ) {
            aptr [ i2 ] = vals [ i ];
        }
        MatRestoreRow(globC, r, NULL, NULL, & vals);
    }

    MatDestroy(& globB);
    MatDestroy(& globC);
#endif
}

void
PetscSparseMtrx :: timesT(const FloatMatrix &B, FloatMatrix &answer) const
{
    if ( this->giveNumberOfRows() != B.giveNumberOfRows() ) {
        OOFEM_ERROR("Dimension mismatch");
    }

    if ( emodel->isParallel() ) {
        OOFEM_ERROR("Not implemented");
    }

    int nr = this->giveNumberOfColumns();
    int nc = B.giveNumberOfColumns();
    answer.resize(nr, nc);
    double *aptr = answer.givePointer();

    // For some reason SEQAIJ and SEQDENSE are incompatible with each other for MatMatMultTranspose (MatMatMult is OK). I don't know why.
    // Either way, this is not too bad, except for an extra transposition.

    Mat globB, globC;
    FloatMatrix BT;
    BT.beTranspositionOf(B);
    MatCreateSeqDense(PETSC_COMM_SELF, BT.giveNumberOfRows(), BT.giveNumberOfColumns(), BT.givePointer(), & globB);
    MatMatMult(globB, this->mtrx, MAT_INITIAL_MATRIX, PETSC_DEFAULT, & globC);
    const double *vals;
    for ( int r = 0; r < nc; r++ ) {
        MatGetRow(globC, r, NULL, NULL, & vals);
        for ( int i = 0; i < nr; i++ ) {
            * aptr++ = vals [ i ];
        }
        MatRestoreRow(globC, r, NULL, NULL, & vals);
    }

    MatDestroy(& globB);
    MatDestroy(& globC);
}

void
PetscSparseMtrx :: times(double x)
{
    MatScale(this->mtrx, x);
}


void
PetscSparseMtrx :: add(double x, SparseMtrx &m)
{
    PetscSparseMtrx *M = dynamic_cast< PetscSparseMtrx * >( &m );
    MatAXPY(*this->giveMtrx(), x, *M->giveMtrx(), SAME_NONZERO_PATTERN);
}


void
PetscSparseMtrx :: addDiagonal(double x, FloatArray &m)
{
    OOFEM_WARNING("Calling function that has not been tested (remove this message when it is verified)");
    Vec globM;
    if ( emodel->isParallel() ) {
        this->createVecGlobal(& globM);
        this->scatterL2G(m, globM);
    } else {
        VecCreateSeqWithArray(PETSC_COMM_SELF, 1, m.giveSize(), m.givePointer(), & globM);
    }
    MatDiagonalSet(this->mtrx, globM, ADD_VALUES);
    VecDestroy(& globM);
}

int
PetscSparseMtrx :: buildInternalStructure(EngngModel *eModel, int n, int m, const IntArray &I, const IntArray &J)
{
    if ( mtrx ) {
        MatDestroy(& mtrx);
    }

    if ( this->kspInit ) {
        KSPDestroy(& ksp);
        this->kspInit = false; // force ksp to be initialized
    }

    this->emodel = eModel;
    this->di = 0;

    if ( emodel->isParallel() ) {
        OOFEM_ERROR("Parallel is not supported in manual matrix creation");
    }

    nRows = geqs = leqs = n;
    nColumns = m;
    blocksize = 1;

    int total_nnz;
    IntArray d_nnz(leqs), d_nnz_sym(leqs);
    {
        // determine nonzero structure of matrix
        std :: vector< IntArray > rows_upper(nRows), rows_lower(nRows);

        for ( int ii : I ) {
            if ( ii > 0 ) {
                for ( int jj : J ) {
                    if ( jj > 0 ) {
                        if ( jj >= ii ) {
                            rows_upper [ ii - 1 ].insertSortedOnce(jj - 1);
                        } else {
                            rows_lower [ ii - 1 ].insertSortedOnce(jj - 1);
                        }
                    }
                }
            }
        }

        total_nnz = 0;
        for ( int i = 0; i < leqs; i++ ) {
            d_nnz(i) = rows_upper [ i ].giveSize() + rows_lower [ i ].giveSize();
            total_nnz += d_nnz(i); // accumulate; total_nnz is used below to pick a dense format
        }
    }

    // create PETSc mat
    MatCreate(PETSC_COMM_SELF, & mtrx);
    MatSetSizes(mtrx, nRows, nColumns, nRows, nColumns);
    MatSetFromOptions(mtrx);

    if ( total_nnz / nColumns > nRows / 10 ) { // More than 10% nnz, then we just force the dense matrix.
        MatSetType(mtrx, MATDENSE);
    } else {
        MatSetType(mtrx, MATSEQAIJ);
    }

    // The incompatible preallocations are ignored automatically.
    MatSetUp(mtrx);
    MatSeqAIJSetPreallocation( mtrx, 0, d_nnz.givePointer() );

    MatSetOption(mtrx, MAT_ROW_ORIENTED, PETSC_FALSE); // To allow the insertion of values using MatSetValues in column major order
    MatSetOption(mtrx, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);

    this->newValues = true;
    return true;
}
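
// Exposition: a minimal sketch of manual preallocation with the routine above,
// assuming an existing sequential EngngModel (the 'model' parameter here is
// hypothetical). The I and J arrays are 1-based and are combined as a full
// cross product: every listed row may receive a value in every listed column.
static void exampleManualPattern(EngngModel *model)
{
    IntArray I, J;
    I.enumerate(3); // I = {1, 2, 3}
    J.enumerate(3); // J = {1, 2, 3}
    PetscSparseMtrx K;
    K.buildInternalStructure(model, 3, 3, I, J); // 3 x 3 pattern, up to 9 nonzeros
}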

int
PetscSparseMtrx :: buildInternalStructure(EngngModel *eModel, int di, const UnknownNumberingScheme &r_s, const UnknownNumberingScheme &c_s)
{
    IntArray loc;
    Domain *domain = eModel->giveDomain(di);
    blocksize = 1;

    if ( mtrx ) {
        MatDestroy(& mtrx);
    }

    if ( this->kspInit ) {
        KSPDestroy(& ksp);
        this->kspInit = false; // force ksp to be initialized
    }

    this->emodel = eModel;
    this->di = di;

    if ( emodel->isParallel() ) {
        OOFEM_ERROR("Not implemented");
    }

    nRows = eModel->giveNumberOfDomainEquations(di, r_s);
    nColumns = eModel->giveNumberOfDomainEquations(di, c_s);

    geqs = leqs = nRows;

    int total_nnz;
    IntArray d_nnz(leqs), d_nnz_sym(leqs);

    {
        // determine nonzero structure of matrix
        IntArray r_loc, c_loc;
        std :: vector< IntArray > rows_upper(nRows), rows_lower(nRows);

        for ( auto &elem : domain->giveElements() ) {
            elem->giveLocationArray(r_loc, r_s);
            elem->giveLocationArray(c_loc, c_s);
            for ( int ii : r_loc ) {
                if ( ii > 0 ) {
                    for ( int jj : c_loc ) {
                        if ( jj > 0 ) {
                            if ( jj >= ii ) {
                                rows_upper [ ii - 1 ].insertSortedOnce(jj - 1, c_loc.giveSize() / 2);
                            } else {
                                rows_lower [ ii - 1 ].insertSortedOnce(jj - 1, c_loc.giveSize() / 2);
                            }
                        }
                    }
                }
            }
        }
        // Structure from active boundary conditions.
        std :: vector< IntArray > r_locs, c_locs;
        for ( auto &gbc : domain->giveBcs() ) {
            ActiveBoundaryCondition *activebc = dynamic_cast< ActiveBoundaryCondition * >( gbc.get() );
            if ( activebc ) {
                activebc->giveLocationArrays(r_locs, c_locs, TangentStiffnessMatrix, r_s, c_s);
                for ( std :: size_t k = 0; k < r_locs.size(); k++ ) {
                    IntArray &krloc = r_locs [ k ];
                    IntArray &kcloc = c_locs [ k ];
                    for ( int ii : krloc ) {
                        if ( ii > 0 ) {
                            for ( int jj : kcloc ) {
                                if ( jj > 0 ) {
                                    if ( jj >= ii ) {
                                        rows_upper [ ii - 1 ].insertSortedOnce(jj - 1, kcloc.giveSize() / 2);
                                    } else {
                                        rows_lower [ ii - 1 ].insertSortedOnce(jj - 1, kcloc.giveSize() / 2);
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }

        total_nnz = 0;
        for ( int i = 0; i < leqs; i++ ) {
            d_nnz(i) = rows_upper [ i ].giveSize() + rows_lower [ i ].giveSize();
            total_nnz += d_nnz(i);
        }
    }

    // create PETSc mat
    MatCreate(PETSC_COMM_SELF, & mtrx);
    MatSetSizes(mtrx, nRows, nColumns, nRows, nColumns);
    MatSetFromOptions(mtrx);

    if ( total_nnz > (double)nRows * (double)nColumns / 10.0 ) { // More than 10% nnz, then we just force the dense matrix.
        MatSetType(mtrx, MATDENSE);
    } else {
        MatSetType(mtrx, MATSEQAIJ);
    }

    // The incompatible preallocations are ignored automatically.
    MatSetUp(mtrx);
    MatSeqAIJSetPreallocation( mtrx, 0, d_nnz.givePointer() );

    MatSetOption(mtrx, MAT_ROW_ORIENTED, PETSC_FALSE); // To allow the insertion of values using MatSetValues in column major order
    MatSetOption(mtrx, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);

    this->newValues = true;
    return true;
}
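
// Exposition: a worked trace of the dry assembly above, with hypothetical
// numbers. An element whose row and column location arrays are both {1, 3}
// touches entries (1,1), (1,3), (3,1) and (3,3). In 0-based terms the loops
// record
//     rows_upper[0] = {0, 2}   // (1,1) and (1,3)
//     rows_upper[2] = {2}      // (3,3)
//     rows_lower[2] = {0}      // (3,1)
// so d_nnz becomes {2, 0, 2}, which is exactly the per-row count that
// MatSeqAIJSetPreallocation expects.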

int
PetscSparseMtrx :: buildInternalStructure(EngngModel *eModel, int di, const UnknownNumberingScheme &s)
{
    IntArray loc;
    Domain *domain = eModel->giveDomain(di);
    blocksize = 1;

    // Delete old stuff first;
    if ( mtrx ) {
        MatDestroy(& mtrx);
    }

    if ( this->kspInit ) {
        KSPDestroy(& ksp);
        this->kspInit = false; // force ksp to be initialized
    }

    if ( localIS ) {
        ISDestroy(& localIS);
        ISDestroy(& globalIS);
        localIS = NULL;
        globalIS = NULL;
    }

    this->emodel = eModel;
    this->di = di;


#ifdef __PARALLEL_MODE
    if ( eModel->isParallel() ) {
        Natural2GlobalOrdering *n2g;
        Natural2LocalOrdering *n2l;
        ParallelContext *context = eModel->giveParallelContext(di);
        n2g = context->giveN2Gmap();
        n2l = context->giveN2Lmap();

        n2l->init(eModel, di, s);
        n2g->init(eModel, di, s);

 #ifdef __VERBOSE_PARALLEL
        VERBOSEPARALLEL_PRINT( "PetscSparseMtrx:: buildInternalStructure", "", eModel->giveRank() );
 #endif

        leqs = n2g->giveNumberOfLocalEqs();
        geqs = n2g->giveNumberOfGlobalEqs();

        //printf("%d, leqs = %d, geqs = %d\n", this->emodel->giveRank(), leqs, geqs);

 #ifdef __VERBOSE_PARALLEL
        OOFEM_LOG_INFO( "[%d]PetscSparseMtrx:: buildInternalStructure: l_eqs = %d, g_eqs = %d, n_eqs = %d\n", eModel->giveRank(), leqs, geqs, eModel->giveNumberOfDomainEquations(di, s) );
 #endif

        IntArray d_nnz(leqs), o_nnz(leqs), d_nnz_sym(leqs), o_nnz_sym(leqs);

        {
            // determine nonzero structure of a "local (maintained)" part of matrix, and the off-diagonal part
            // allocation map
            std :: vector< IntArray > d_rows_upper(leqs), d_rows_lower(leqs); // diagonal sub-matrix allocation
            std :: vector< IntArray > o_rows_upper(leqs), o_rows_lower(leqs); // off-diagonal allocation

            IntArray lloc, gloc;

            //fprintf (stderr,"[%d] n2l map: ",rank);
            //for (n=1; n<=n2l.giveN2Lmap()->giveSize(); n++) fprintf (stderr, "%d ", n2l.giveN2Lmap()->at(n));

            for ( auto &elem : domain->giveElements() ) {
                //fprintf (stderr, "(elem %d) ", n);
                elem->giveLocationArray(loc, s);
                n2l->map2New(lloc, loc, 0); // translate natural->local numbering (remark, 1-based indexing)
                n2g->map2New(gloc, loc, 0); // translate natural->global numbering (remark, 0-based indexing)
                // See the petsc manual for details on how this allocation is constructed.
                int ii, jj;
                for ( int i = 1; i <= lloc.giveSize(); i++ ) {
                    if ( ( ii = lloc.at(i) ) ) {
                        for ( int j = 1; j <= lloc.giveSize(); j++ ) {
                            if ( ( jj = gloc.at(j) ) >= 0 ) { // if negative, then it is prescribed
                                if ( lloc.at(j) ) { // if true, then it is the local part (the diagonal block matrix)
                                    if ( jj >= ( ii - 1 ) ) { // NOTE: ii (the rows) is in 1-based indexing, jj is in 0-based (!)
                                        d_rows_upper [ ii - 1 ].insertSortedOnce(jj, loc.giveSize() / 2);
                                    } else {
                                        d_rows_lower [ ii - 1 ].insertSortedOnce(jj, loc.giveSize() / 2);
                                    }
                                } else { // Otherwise it must be off-diagonal
                                    if ( jj >= ( ii - 1 ) ) {
                                        o_rows_upper [ ii - 1 ].insertSortedOnce(jj, loc.giveSize() / 2);
                                    } else {
                                        o_rows_lower [ ii - 1 ].insertSortedOnce(jj, loc.giveSize() / 2);
                                    }
                                }
                            }
                        }
                    }
                }
            }

            // Diagonal must always be allocated; this code ensures that for every local line, it adds the global column number
            IntArray *n2gmap = n2g->giveN2Gmap();
            IntArray *n2lmap = n2l->giveN2Lmap();
            for ( int n = 1; n <= n2lmap->giveSize(); ++n ) {
                if ( n2lmap->at(n) ) {
                    d_rows_upper [ n2lmap->at(n) - 1 ].insertSortedOnce( n2gmap->at(n) );
                }
            }

            for ( int i = 0; i < leqs; i++ ) {
                d_nnz(i) = d_rows_upper [ i ].giveSize() + d_rows_lower [ i ].giveSize();
                o_nnz(i) = o_rows_upper [ i ].giveSize() + o_rows_lower [ i ].giveSize();

                d_nnz_sym(i) = d_rows_upper [ i ].giveSize();
                o_nnz_sym(i) = o_rows_upper [ i ].giveSize();
            }
        }

        //fprintf (stderr,"\n[%d]PetscSparseMtrx: Profile ...",rank);
        //for (i=0; i<leqs; i++) fprintf(stderr, "%d ", d_nnz(i));
        //fprintf (stderr,"\n[%d]PetscSparseMtrx: Creating MPIAIJ Matrix ...\n",rank);

        // create PETSc mat
        MatCreate(this->emodel->giveParallelComm(), & mtrx);
        MatSetSizes(mtrx, leqs, leqs, geqs, geqs);
        MatSetType(mtrx, MATMPIAIJ);
        MatSetFromOptions(mtrx);
        MatMPIAIJSetPreallocation( mtrx, 0, d_nnz.givePointer(), 0, o_nnz.givePointer() );
        //MatMPIBAIJSetPreallocation( mtrx, 1, 0, d_nnz.givePointer(), 0, o_nnz.givePointer() );
        //MatMPISBAIJSetPreallocation( mtrx, 1, 0, d_nnz_sym.givePointer(), 0, o_nnz_sym.givePointer() );
        //MatXAIJSetPreallocation( mtrx, 1, d_nnz.givePointer(), o_nnz.givePointer(), d_nnz_sym.givePointer(), o_nnz_sym.givePointer());

        // Creates scatter context for PETSc.
        ISCreateGeneral(this->emodel->giveParallelComm(), context->giveNumberOfNaturalEqs(), n2g->giveN2Gmap()->givePointer(), PETSC_USE_POINTER, & globalIS);
        ISCreateStride(this->emodel->giveParallelComm(), context->giveNumberOfNaturalEqs(), 0, 1, & localIS);

 #ifdef __VERBOSE_PARALLEL
        VERBOSEPARALLEL_PRINT("PetscSparseMtrx:: buildInternalStructure", "done", eModel->giveRank());
 #endif
    } else
#endif
    {
        leqs = geqs = eModel->giveNumberOfDomainEquations(di, s);
        IntArray d_nnz, d_nnz_sym, indices, rowstart;

        MatType type = MATSEQBAIJ;

        MatCreate(PETSC_COMM_SELF, & mtrx);
        MatSetSizes(mtrx, leqs, leqs, geqs, geqs);
        MatSetType(mtrx, type);
        MatSetFromOptions(mtrx);
        MatGetType( mtrx, &type );

        if ( strcmp(type, MATDENSE) != 0 ) {
            // Dry assembly (computes the nonzero structure) (and the blocks)
            std :: vector< IntArray > rows_upper(leqs), blocks;

            for ( auto &elem : domain->giveElements() ) {
                elem->giveLocationArray(loc, s);
                for ( int ii : loc ) {
                    if ( ii > 0 ) {
                        for ( int jj : loc ) {
                            if ( jj >= ii ) {
                                rows_upper [ ii - 1 ].insertSortedOnce( jj - 1, loc.giveSize() );
                            }
                        }
                    }
                }
            }

            // Structure from active boundary conditions.
            std :: vector< IntArray > r_locs, c_locs;
            for ( auto &gbc : domain->giveBcs() ) {
                ActiveBoundaryCondition *activebc = dynamic_cast< ActiveBoundaryCondition * >( gbc.get() );
                if ( activebc ) {
                    activebc->giveLocationArrays(r_locs, c_locs, TangentStiffnessMatrix, s, s);
                    for ( std :: size_t k = 0; k < r_locs.size(); k++ ) {
                        for ( int ii : r_locs [ k ] ) {
                            if ( ii > 0 ) {
                                for ( int jj : c_locs [ k ] ) {
                                    if ( jj >= ii ) {
                                        rows_upper [ ii - 1 ].insertSortedOnce( jj - 1, loc.giveSize() );
                                    }
                                }
                            }
                        }
                    }
                }
            }


            int nnz = 0;
            for ( auto &row_upper : rows_upper ) {
                nnz += row_upper.giveSize();
            }

            if ( strcmp(type, MATSEQAIJ) != 0 ) {
                // Compute optimal block size (test various sizes and compute the "wasted" zeros).
                int maxblock = domain->giveNumberOfDofManagers() > 0 ? domain->giveDofManager(1)->giveNumberOfDofs() : 0;
                for ( int bsize = maxblock; bsize > 1; --bsize ) {
                    int nblocks = ceil(rows_upper.size() / (double)bsize);
                    blocks.clear();
                    blocks.resize(nblocks);
                    for ( int i = 0; i < leqs; i++ ) {
                        int blockrow = i / bsize;
                        for ( int j : rows_upper [ i ] ) {
                            int blockcol = j / bsize;
                            blocks [ blockrow ].insertSortedOnce( blockcol );
                        }
                    }
                    // See how much space (and operations) we risk wasting with this block size:
                    int bnnz = 0;
                    for ( auto &block : blocks ) {
                        bnnz += block.giveSize() * bsize * bsize;
                    }
                    OOFEM_LOG_DEBUG("Block size %d resulted in nnz = %d -> %d using %d^2 blocks\n", bsize, nnz, bnnz, nblocks);
                    if ( bnnz <= nnz * 1.2 ) {
                        blocksize = bsize;
                        break;
                    }
                }
            }

            if ( blocksize == 1 ) {
                blocks = rows_upper;
            }

            int nblocks = ceil(rows_upper.size() / (double)blocksize);
            d_nnz_sym.resize(nblocks);
            d_nnz.resize(nblocks);
            for ( int i = 0; i < nblocks; i++ ) {
                d_nnz_sym[i] = blocks [ i ].giveSize();
                // We can optimize to use only the upper half by using the fact that the problem has a symmetric nonzero structure.
                d_nnz[i] += d_nnz_sym[i];
                for ( int jj : blocks [ i ] ) {
                    if ( jj != i ) {
                        d_nnz[ jj ]++;
                    }
                }
            }
        }

        // Based on the user selected type, determine the block size;
        if ( strcmp(type, MATSEQAIJ) == 0 ) {
            MatSeqAIJSetPreallocation( mtrx, 0, d_nnz.givePointer() );
        } else if ( strcmp(type, MATSEQBAIJ) == 0 ) {
            MatSeqBAIJSetPreallocation( mtrx, blocksize, 0, d_nnz.givePointer() );
        } else if ( strcmp(type, MATSEQSBAIJ) == 0 ) {
            MatSeqSBAIJSetPreallocation( mtrx, blocksize, 0, d_nnz_sym.givePointer() );
        }
    }

    MatSetOption(mtrx, MAT_ROW_ORIENTED, PETSC_FALSE); // To allow the insertion of values using MatSetValues in column major order
    MatSetOption(mtrx, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
    MatSetOption(mtrx, MAT_STRUCTURALLY_SYMMETRIC, PETSC_TRUE);

    nRows = nColumns = geqs;
    this->newValues = true;
    return true;
}
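
// Exposition: a worked trace of the block-size heuristic above, with
// hypothetical numbers. Suppose the pointwise upper pattern stores nnz = 100
// indices and every dof manager carries 3 dofs (maxblock = 3). If grouping
// rows and columns into 3x3 blocks yields 13 distinct blocks, then
// bnnz = 13 * 3 * 3 = 117. Since 117 <= 1.2 * 100 = 120, at most 20% of the
// block storage is wasted zeros and blocksize 3 is accepted; otherwise the
// loop retries with smaller blocks and finally falls back to blocksize 1
// (plain pointwise allocation).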

int
PetscSparseMtrx :: assemble(const IntArray &loc, const FloatMatrix &mat)
{
    int ndofe = mat.giveNumberOfRows();
    IntArray gloc(ndofe);

#ifdef __PARALLEL_MODE
    if ( emodel->isParallel() ) {
        // translate local code numbers to global ones
        emodel->giveParallelContext(this->di)->giveN2Gmap()->map2New(gloc, loc, 0);

        //fprintf (stderr, "[?] gloc=");
        //for (int i=1; i<=ndofe; i++) fprintf (stderr, "%d ", gloc.at(i));
    } else
#endif
    {
        for ( int i = 1; i <= ndofe; i++ ) {
            gloc.at(i) = loc.at(i) - 1;
        }
    }

    this->version++;
    this->newValues = true;

    // Block'ify the matrix in order to use MatSetValuesBlocked (doing this work really is faster)
    if ( this->blocksize > 1 && ndofe % this->blocksize == 0 ) {
        int nblocke = ndofe / this->blocksize;
        IntArray bloc(nblocke);
        for ( int b = 0; b < nblocke; ++b ) {
            int i = b * blocksize;
            // Have to check alignment inside the block:
            if ( gloc[i] != -1 && gloc[i] % this->blocksize != 0 ) {
                MatSetValues(this->mtrx, ndofe, gloc.givePointer(), ndofe, gloc.givePointer(), mat.givePointer(), ADD_VALUES);
                return 1;
            }
            // Have to verify that this is indeed a series of increments in blocks:
            for ( int k = 1; k < this->blocksize; ++k ) {
                bool ok_seq = gloc[i] + k == gloc[i+k];
                bool ok_bc = gloc[i] == -1 && gloc[i+k] == -1;
                if ( !(ok_bc || ok_seq) ) {
                    // Then we can't use block assembling for this element.
                    MatSetValues(this->mtrx, ndofe, gloc.givePointer(), ndofe, gloc.givePointer(), mat.givePointer(), ADD_VALUES);
                    return 1;
                }
            }
            bloc[b] = gloc[i] == -1 ? -1 : gloc[i] / blocksize;
        }
        MatSetValuesBlocked(this->mtrx, nblocke, bloc.givePointer(), nblocke, bloc.givePointer(), mat.givePointer(), ADD_VALUES);
    } else {
        MatSetValues(this->mtrx, ndofe, gloc.givePointer(), ndofe, gloc.givePointer(), mat.givePointer(), ADD_VALUES);
    }

    //MatView(this->mtrx,PETSC_VIEWER_STDOUT_SELF);
    return 1;
}
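
// Exposition: a worked trace of the block'ification above, with hypothetical
// numbers and blocksize = 2. For gloc = {4, 5, -1, -1} both pairs pass the
// alignment and contiguity checks (4 % 2 == 0 and 4 + 1 == 5; the second pair
// is entirely prescribed), giving bloc = {2, -1} and a single
// MatSetValuesBlocked call. For gloc = {5, 6, ...} the first block starts at
// 5 % 2 != 0, so assembly falls back to plain MatSetValues.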

int
PetscSparseMtrx :: assemble(const IntArray &rloc, const IntArray &cloc, const FloatMatrix &mat)
{
#ifdef __PARALLEL_MODE
    if ( emodel->isParallel() ) {
        // translate eq numbers
        IntArray grloc( rloc.giveSize() ), gcloc( cloc.giveSize() );
        emodel->giveParallelContext(this->di)->giveN2Gmap()->map2New(grloc, rloc, 0);
        emodel->giveParallelContext(this->di)->giveN2Gmap()->map2New(gcloc, cloc, 0);

        MatSetValues(this->mtrx, grloc.giveSize(), grloc.givePointer(),
                     gcloc.giveSize(), gcloc.givePointer(), mat.givePointer(), ADD_VALUES);
    } else {
#endif
        int rsize = rloc.giveSize(), csize = cloc.giveSize();
        IntArray grloc(rsize), gcloc(csize);
        for ( int i = 1; i <= rsize; i++ ) {
            grloc.at(i) = rloc.at(i) - 1;
        }

        for ( int i = 1; i <= csize; i++ ) {
            gcloc.at(i) = cloc.at(i) - 1;
        }

        MatSetValues(this->mtrx, rsize, grloc.givePointer(),
                     csize, gcloc.givePointer(), mat.givePointer(), ADD_VALUES);

#ifdef __PARALLEL_MODE
    }
#endif
    // increment version
    this->version++;
    this->newValues = true;
    return 1;
}

int
PetscSparseMtrx :: assembleBegin()
{
    return MatAssemblyBegin(this->mtrx, MAT_FINAL_ASSEMBLY);
}

int
PetscSparseMtrx :: assembleEnd()
{
    this->newValues = true;
    int val = MatAssemblyEnd(this->mtrx, MAT_FINAL_ASSEMBLY);
    //MatShift(mtrx, 0);
    return val;
}


void
PetscSparseMtrx :: zero()
{
    // test if receiver is already assembled
    PetscBool assembled;
    MatAssembled(this->mtrx, & assembled);
    if ( assembled ) {
        MatZeroEntries(this->mtrx);
    }
    this->newValues = true;
}

double
PetscSparseMtrx :: computeNorm() const
{
    double norm;
    MatNorm(this->mtrx, NORM_1, & norm);
    return norm;
}

double &
PetscSparseMtrx :: at(int i, int j)
{
    static double a;
    OOFEM_ERROR("unsupported");
    return a;
}

double
PetscSparseMtrx :: at(int i, int j) const
{
    OOFEM_ERROR("unsupported");
    return 0;
    //double value;
    //int row = i-1, col = j-1;
    //MatGetValues(this->mtrx, 1, &row, 1, &col, &value);
    //return value;
}

SparseMtrx *
PetscSparseMtrx :: giveSubMatrix(const IntArray &rows, const IntArray &cols)
{
#ifdef __PARALLEL_MODE
    auto comm = this->emodel->giveParallelComm();
#else
    auto comm = PETSC_COMM_SELF;
#endif
    IntArray prows = rows, pcols = cols;
    prows.add(-1);
    pcols.add(-1);

    PetscSparseMtrx *answer = new PetscSparseMtrx(prows.giveSize(), pcols.giveSize());
    answer->emodel = this->emodel;
    IS is_rows;
    IS is_cols;

    ISCreateGeneral(comm, prows.giveSize(), prows.givePointer(), PETSC_USE_POINTER, & is_rows);
    ISCreateGeneral(comm, pcols.giveSize(), pcols.givePointer(), PETSC_USE_POINTER, & is_cols);
    MatGetSubMatrix(this->mtrx, is_rows, is_cols, MAT_INITIAL_MATRIX, & answer->mtrx);
    ISDestroy(& is_rows);
    ISDestroy(& is_cols);

    return answer;
}

void
PetscSparseMtrx :: toFloatMatrix(FloatMatrix &answer) const
{
    IntArray rows, cols;
    rows.enumerate(this->giveNumberOfRows());
    rows.add(-1);
    cols.enumerate(this->giveNumberOfColumns());
    cols.add(-1);

    FloatMatrix ansT(this->giveNumberOfColumns(), this->giveNumberOfRows());
    MatGetValues(this->mtrx, rows.giveSize(), rows.givePointer(), cols.giveSize(), cols.givePointer(), ansT.givePointer());
    answer.beTranspositionOf(ansT);
}

void
PetscSparseMtrx :: printStatistics() const
{
    PetscViewerSetFormat(PETSC_VIEWER_STDOUT_SELF, PETSC_VIEWER_ASCII_INFO);
    MatView(this->mtrx, PETSC_VIEWER_STDOUT_SELF);
}

void
PetscSparseMtrx :: printYourself() const
{
    PetscViewerSetFormat(PETSC_VIEWER_STDOUT_SELF, PETSC_VIEWER_ASCII_DENSE);
    MatView(this->mtrx, PETSC_VIEWER_STDOUT_SELF);
}

void
PetscSparseMtrx :: printMatlab() const
{
    PetscViewerSetFormat(PETSC_VIEWER_STDOUT_SELF, PETSC_VIEWER_ASCII_MATLAB);
    MatView(this->mtrx, PETSC_VIEWER_STDOUT_SELF);
}

void
PetscSparseMtrx :: writeToFile(const char *fname) const
{
    PetscViewer viewer;
#ifdef __PARALLEL_MODE
    PetscViewerASCIIOpen(this->emodel->giveParallelComm(), fname, & viewer);
#else
    PetscViewerASCIIOpen(PETSC_COMM_SELF, fname, & viewer);
#endif
    MatView(this->mtrx, viewer);
    PetscViewerDestroy(& viewer);
}


void
PetscSparseMtrx :: createVecGlobal(Vec *answer) const
{
#ifdef __PARALLEL_MODE
    if ( emodel->isParallel() ) {
        VecCreate(this->emodel->giveParallelComm(), answer);
        VecSetSizes(* answer, this->leqs, this->geqs);
        VecSetFromOptions(* answer);
    } else {
#endif
        VecCreateSeq(PETSC_COMM_SELF, this->giveNumberOfRows(), answer);
#ifdef __PARALLEL_MODE
    }
#endif
}


int
PetscSparseMtrx :: scatterG2L(Vec src, FloatArray &dest) const
{
    PetscScalar *ptr;

#ifdef __PARALLEL_MODE
    if ( emodel->isParallel() ) {
        ParallelContext *context = this->emodel->giveParallelContext(di);
        int neqs = context->giveNumberOfNaturalEqs();
        Vec locVec;
        VecCreateSeq(PETSC_COMM_SELF, neqs, & locVec);

        VecScatter n2gvecscat;
        VecScatterCreate(locVec, localIS, src, globalIS, & n2gvecscat);
        VecScatterBegin(n2gvecscat, src, locVec, INSERT_VALUES, SCATTER_REVERSE);
        VecScatterEnd(n2gvecscat, src, locVec, INSERT_VALUES, SCATTER_REVERSE);
        VecScatterDestroy(& n2gvecscat);

        dest.resize(neqs);
        VecGetArray(locVec, & ptr);
        for ( int i = 0; i < neqs; i++ ) {
            dest.at(i + 1) = ptr [ i ];
        }

        VecRestoreArray(locVec, & ptr);
        VecDestroy(& locVec);
    } else {
#endif
        int neqs = this->giveNumberOfRows();
        dest.resize(neqs);
        VecGetArray(src, & ptr);
        for ( int i = 0; i < neqs; i++ ) {
            dest.at(i + 1) = ptr [ i ];
        }
        VecRestoreArray(src, & ptr);
#ifdef __PARALLEL_MODE
    }
#endif
    return 1;
}


int
PetscSparseMtrx :: scatterL2G(const FloatArray &src, Vec dest) const
{
    const PetscScalar *ptr = src.givePointer();

#ifdef __PARALLEL_MODE
    if ( emodel->isParallel() ) {
        ParallelContext *context = this->emodel->giveParallelContext(di);
        int size = src.giveSize();

        Natural2LocalOrdering *n2l = context->giveN2Lmap();
        Natural2GlobalOrdering *n2g = context->giveN2Gmap();
        for ( int i = 0; i < size; i++ ) {
            if ( n2l->giveNewEq(i + 1) ) {
                int eqg = n2g->giveNewEq(i + 1);
                VecSetValue(dest, eqg, ptr [ i ], INSERT_VALUES);
            }
        }

        VecAssemblyBegin(dest);
        VecAssemblyEnd(dest);
    } else {
#endif

        int size = src.giveSize();
        for ( int i = 0; i < size; i++ ) {
            //VecSetValues(dest, 1, & i, ptr + i, mode);
            VecSetValue(dest, i, ptr [ i ], INSERT_VALUES);
        }

        VecAssemblyBegin(dest);
        VecAssemblyEnd(dest);
#ifdef __PARALLEL_MODE
    }
#endif
    return 1;
}
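
// Exposition: how the scatter pair above is meant to be used together. A
// minimal sketch; 'K' is a hypothetical parallel PetscSparseMtrx and
// 'localArray' a FloatArray in OOFEM's natural ordering:
//     Vec g;
//     K.createVecGlobal(& g);       // distributed vector matching K's layout
//     K.scatterL2G(localArray, g);  // natural -> global numbering
//     ... parallel operations on g, e.g. MatMult ...
//     K.scatterG2L(g, localArray);  // gather back to natural ordering
//     VecDestroy(& g);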

SparseMtrxType
PetscSparseMtrx :: giveType() const
{
    return SMT_PetscMtrx;
}

bool
PetscSparseMtrx :: isAsymmetric() const
{
    return !symmFlag;
}

Mat *
PetscSparseMtrx :: giveMtrx()
{
    return & this->mtrx;
}

bool
PetscSparseMtrx :: giveSymmetryFlag() const
{
    return symmFlag;
}

int PetscSparseMtrx :: setOption(MatOption op, PetscBool flag)
{
    return MatSetOption(this->mtrx, op, flag);
}

int
PetscSparseMtrx :: giveLeqs()
{
    return leqs;
}

int
PetscSparseMtrx :: giveDomainIndex() const
{
    return di;
}
} // end namespace oofem
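
// Exposition: a minimal end-to-end sketch of this class (hypothetical names;
// 'model' stands for an existing sequential EngngModel with one domain, and
// EModelDefaultEquationNumbering is OOFEM's default numbering scheme):
//     PetscSparseMtrx K;
//     EModelDefaultEquationNumbering num;
//     K.buildInternalStructure(model, 1, num);   // dry assembly + preallocation
//     // for each element: K.assemble(loc, ke);  // add element contributions
//     K.assembleBegin();
//     K.assembleEnd();                           // finalize PETSc assembly
//     FloatArray x, y;
//     x.resize( K.giveNumberOfColumns() );
//     K.times(x, y);                             // y = K * x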