OOFEM  2.4
OOFEM.org - Object Oriented Finite Element Solver
fetisolver.C
Go to the documentation of this file.
1 /*
2  *
3  * ##### ##### ###### ###### ### ###
4  * ## ## ## ## ## ## ## ### ##
5  * ## ## ## ## #### #### ## # ##
6  * ## ## ## ## ## ## ## ##
7  * ## ## ## ## ## ## ## ##
8  * ##### ##### ## ###### ## ##
9  *
10  *
11  * OOFEM : Object Oriented Finite Element Code
12  *
13  * Copyright (C) 1993 - 2013 Borek Patzak
14  *
15  *
16  *
17  * Czech Technical University, Faculty of Civil Engineering,
18  * Department of Structural Mechanics, 166 29 Prague, Czech Republic
19  *
20  * This library is free software; you can redistribute it and/or
21  * modify it under the terms of the GNU Lesser General Public
22  * License as published by the Free Software Foundation; either
23  * version 2.1 of the License, or (at your option) any later version.
24  *
25  * This program is distributed in the hope that it will be useful,
26  * but WITHOUT ANY WARRANTY; without even the implied warranty of
27  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
28  * Lesser General Public License for more details.
29  *
30  * You should have received a copy of the GNU Lesser General Public
31  * License along with this library; if not, write to the Free Software
32  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
33  */
34 
35 #include "../sm/FETISolver/feticommunicator.h"
36 #include "mathfem.h"
37 #include "fetisolver.h"
38 #include "skyline.h"
39 #include "verbose.h"
40 #include "dofmanager.h"
41 #include "engngm.h"
42 #include "domain.h"
43 #include "unknownnumberingscheme.h"
44 #include "classfactory.h"
45 
46 namespace oofem {
48 
// Constructor: creates a FETI solver bound to domain d within engineering model m.
// The initializer list builds the process-communicator buffer (static sizing mode)
// and the process communicator whose peer is rank 0 (the master partition).
49 FETISolver :: FETISolver(Domain *d, EngngModel *m) : SparseLinearSystemNM(d, m), pcbuff(CBT_static), processCommunicator(& pcbuff, 0)
50 {
51  err = 1.e-6; // default error tolerance; clamped to [1.e-20, 0.1] in initializeFrom
52  ni = 20; // default iteration limit -- presumably max CG iterations; confirm against header
// NOTE(review): original source line 53 was elided by this dump (likely the 'limit'
// member initialization, given the fabs(limit) clamp in initializeFrom) -- confirm.
54 }
55 
56 
58 {
59  if ( engngModel->giveRank() == 0 ) {
60  delete commBuff;
61  delete masterCommunicator;
62  }
63 }
64 
65 
// Estimate an upper bound on the communication-buffer size needed to exchange
// per-DOF data for the dof managers listed in 'map'. On slave partitions
// (rank != 0) it counts every DOF of the mapped dof managers that carries a
// nonzero equation number; the returned bound is that count times the packed
// size of one double times FETISOLVER_MAX_RBM (the most rigid body modes that
// can be sent per DOF). packUnpackType is not used in the visible body.
66 int
67 FETISolver :: estimateMaxPackSize(IntArray &map, DataStream &buff, int &packUnpackType)
68 {
69  int rank = domain->giveEngngModel()->giveRank();
70  int count = 0;
72 
73  if ( rank == 0 ) {
74  // master comm maps contain boundary dof managers
75  for ( int m: map ) {
// NOTE(review): the master-side loop body (original source line 76) was elided by
// this dump -- presumably it accumulates the DOF count of each boundary dof
// manager into 'count'; confirm against the original fetisolver.C.
77  }
78  } else {
79  for ( int m: map ) {
80  for ( Dof *dof: *domain->giveDofManager( m ) ) {
81  if ( dof->giveEquationNumber(dn) != 0 ) {
82  count++; // only unconstrained (numbered) DOFs contribute to the message
83  }
84  }
85  }
86  }
87 
88  return ( buff.givePackSizeOfDouble(1) * count * FETISOLVER_MAX_RBM );
89 }
90 
91 
94 {
95  IRResultType result; // Required by IR_GIVE_FIELD macro
96 
102 
103  if ( fabs(limit) < 1.e-20 ) {
104  limit = 1.e-20;
105  }
106 
107  if ( err < 1.e-20 ) {
108  err = 1.e-20;
109  }
110 
111  if ( err > 0.1 ) {
112  err = 0.1;
113  }
114 
115  return IRRT_OK;
116 }
117 
119 {
120  int nnodes = domain->giveNumberOfDofManagers();
121  int boundaryDofManNum = 0;
122  int indx = 1, neq;
123  StaticCommunicationBuffer commBuff(MPI_COMM_WORLD);
124  IntArray commMap;
126 
127  // determine the total number of boundary dof managers
128  for ( int i = 1; i <= nnodes; i++ ) {
130  boundaryDofManNum++;
131  }
132  }
133 
134  if ( domain->giveEngngModel()->giveRank() != 0 ) {
135  commBuff.resize( commBuff.givePackSizeOfInt(1) );
136  commBuff.write(boundaryDofManNum);
137 
138 #ifdef __VERBOSE_PARALLEL
139  OOFEM_LOG_DEBUG("[process rank %3d]: %-30s: Sending data to partition 0 (send %d)\n",
140  this->giveEngngModel()->giveRank(),
141  "FETISolver :: setUpCommunicationMaps : send number of boundary dofMans", boundaryDofManNum);
142 #endif
144 
145  MPI_Barrier(MPI_COMM_WORLD);
146  }
147 
148 #ifdef __VERBOSE_PARALLEL
149  OOFEM_LOG_DEBUG("[process rank %3d]: %-30s: Barrier reached\n",
150  this->giveEngngModel()->giveRank(), "FETISolver :: setUpCommunicationMaps");
151 #endif
152 
153 
154 
155 
156  commMap.resize(boundaryDofManNum);
157 
158  commBuff.resize(commBuff.givePackSizeOfInt(1) * 2 * boundaryDofManNum);
159  commBuff.init();
160  // determine total number of DOFs per each boundary node
161  // and build communication map simultaneously
162  indx = 1;
163  IntArray locNum;
164  if ( domain->giveEngngModel()->giveRank() != 0 ) {
165  for ( int i = 1; i <= nnodes; i++ ) {
167  // remember comm map entry
168  commMap.at(indx++) = i;
169  // determine number of DOFs
171  neq = 0;
172  for ( int j = 1; j <= locNum.giveSize(); j++ ) {
173  if ( locNum.at(j) ) {
174  neq++;
175  }
176  }
177 
178  commBuff.write( domain->giveDofManager(i)->giveGlobalNumber() );
179  commBuff.write(neq);
180  }
181  }
182  } else {
183  for ( int i = 1; i <= nnodes; i++ ) {
185  // remember comm map entry
186  commMap.at(indx++) = i;
187  }
188  }
189  }
190 
191  if ( domain->giveEngngModel()->giveRank() != 0 ) {
192  processCommunicator.setToSendArry(this, commMap, 0);
193  processCommunicator.setToRecvArry(this, commMap, 0);
195 #ifdef __VERBOSE_PARALLEL
196  OOFEM_LOG_DEBUG("[process rank %3d]: %-30s: Barrier reached\n",
197  this->giveEngngModel()->giveRank(), "FETISolver :: setUpCommunicationMaps");
198 #endif
199  MPI_Barrier(MPI_COMM_WORLD);
200 #ifdef __VERBOSE_PARALLEL
201  OOFEM_LOG_DEBUG("[process rank %3d]: %-30s: Barrier reached\n",
202  this->giveEngngModel()->giveRank(), "FETISolver :: setUpCommunicationMaps");
203 #endif
204  MPI_Barrier(MPI_COMM_WORLD);
205  } else {
206  // remember comm map for direct mapping at master
207  this->masterCommMap = commMap;
208  }
209 }
210 
211 
212 
213 
214 
215 /*******************************************************************/
216 /*******************************************************************/
217 /*******************************************************************/
218 /*******************************************************************/
219 
220 void
222 /*
223  * performs the projection step of the modified conjugate gradient method
224  *
225  * inputs
226  * v - vector before projection
227  * l - matrix L(m,n)
228  * l1 - inverse of the matrix L^T L, dimensions (n,n)
229  *
230  * output
231  * v - vector after projection
232  *
233  * 7.12.1998
234  */
235 {
236  FloatArray help1, help2;
237 
238  help1.beTProductOf(l, v);
239  help2.beProductOf(l1, help1);
240  help1.beProductOf(l, help2);
241 
242  v.subtract(help1);
243 }
244 
245 
246 
247 
248 int
250 {
251  int result = 1;
252  int i, ir, size;
253  int j, ndofs, eqNum;
254  IntArray const *toSendMap = processComm.giveToSendMap();
255  CommunicationBuffer *send_buff = processComm.giveProcessCommunicatorBuff()->giveSendBuff();
256  IntArray locationArray;
258 
259  size = toSendMap->giveSize();
260  for ( i = 1; i <= size; i++ ) {
261  domain->giveDofManager( toSendMap->at(i) )->giveCompleteLocationArray(locationArray, dn);
262  ndofs = locationArray.giveSize();
263  for ( j = 1; j <= ndofs; j++ ) {
264  if ( ( eqNum = locationArray.at(j) ) ) {
265  for ( ir = 1; ir <= nse; ir++ ) {
266  result &= send_buff->write( rbm.at(eqNum, ir) );
267  }
268 
269  if ( nse == 0 ) {
270  result &= send_buff->write(0.0);
271  }
272  }
273  }
274  }
275 
276  return result;
277 }
278 
279 
280 int
282 {
283  int to, result = 1;
284  int size, receivedRank;
285  int nshared, part, eqNum;
286  IntArray const *toRecvMap = processComm.giveToRecvMap();
287  CommunicationBuffer *recv_buff = processComm.giveProcessCommunicatorBuff()->giveRecvBuff();
288  IntArray locationArray;
289  double value;
290 
291  receivedRank = processComm.giveRank();
292  size = toRecvMap->giveSize();
293  if ( receivedRank != 0 ) {
294  // for (irbm = 1; irbm <= nsem.at(receivedRank+1); irbm++) {
295  for ( int i = 1; i <= size; i++ ) {
296  to = toRecvMap->at(i);
297  //
298  // loop over all dofs
299  for ( int idof = 1; idof <= masterCommunicator->giveDofManager(to)->giveNumberOfDofs(); idof++ ) {
300  for ( int irbm = 1; irbm <= nsem.at(receivedRank + 1); irbm++ ) {
301  // unpack contribution
302  result &= recv_buff->read(value);
303  if ( masterCommunicator->giveDofManager(to)->giveReferencePratition() == receivedRank ) { // contribution from reference partition localizes to all
304  // corresponding DOFs in boundary node
305 
306  // localize to corresponding places
308  for ( int j = 1; j <= nshared; j++ ) {
310  if ( part == processComm.giveRank() ) {
311  continue;
312  }
313 
314  eqNum = masterCommunicator->giveDofManager(to)->giveCodeNumber(part, idof);
315  l.at(eqNum, rbmAddr.at(receivedRank + 1) + irbm - 1) = value;
316  }
317  } else { // no reference partition
318  eqNum = masterCommunicator->giveDofManager(to)->giveCodeNumber(receivedRank, idof);
319  l.at(eqNum, rbmAddr.at(receivedRank + 1) + irbm - 1) = ( -1.0 ) * value;
320  }
321  }
322  }
323  }
324  }
325 
326  return result;
327 }
328 
329 int
331 {
332  int to, from, receivedRank = 0, nshared, part, eqNum, result;
333  IntArray locationArray;
334  double value;
335  int size = masterCommMap.giveSize();
336  // master will map its own values directly
337  int locpos;
339 
340  for ( int irbm = 1; irbm <= nsem.at(1); irbm++ ) {
341  for ( int i = 1; i <= size; i++ ) {
343  // use receive map, send map is empty to prevent master to send
344  // itself any data. Note, however, that send and receive maps are same.
345  from = masterCommMap.at(i);
346 
347  domain->giveDofManager(from)->giveCompleteLocationArray(locationArray, dn);
348  locpos = 1;
349  //
350  // ndofs = locationArray.giveSize(); // including supported
351  // for (j=1; j<=ndofs; j++) {
352  // if (eqNum = locationArray.at(j)) {
353 
354  //
355  // loop over all dofs
356  for ( int idof = 1; idof <= masterCommunicator->giveDofManager(to)->giveNumberOfDofs(); idof++, locpos++ ) {
357  // extract source value
358  while ( locationArray.at(locpos) == 0 ) {
359  locpos++;
360  // test if position of nonzero dof is allowed
361  if ( locpos > locationArray.giveSize() ) {
362  OOFEM_ERROR("Consistency dof error");
363  }
364  }
365 
366  value = rbm.at(locationArray.at(locpos), irbm);
367  if ( masterCommunicator->giveDofManager(to)->giveReferencePratition() == receivedRank ) { // contribution from reference partition localizes to all
368  // corresponding DOFs in boundary node
369 
370  // localize to corresponding places
372  for ( int j = 1; j <= nshared; j++ ) {
374  if ( part == 0 ) {
375  continue;
376  }
377 
378  eqNum = masterCommunicator->giveDofManager(to)->giveCodeNumber(part, idof);
379  l.at(eqNum, rbmAddr.at(receivedRank + 1) + irbm - 1) = value;
380  }
381  } else { // no reference partition
382  eqNum = masterCommunicator->giveDofManager(to)->giveCodeNumber(receivedRank, idof);
383  l.at(eqNum, rbmAddr.at(receivedRank + 1) + irbm - 1) = ( -1.0 ) * value;
384  }
385  }
386  }
387  }
388 
389  result = 1;
390  return result;
391 }
392 
393 
394 int
396 {
397  int result = 1;
398  CommunicationBuffer *send_buff = processComm.giveProcessCommunicatorBuff()->giveSendBuff();
399  IntArray locationArray;
400 
401  for ( int i = 1; i <= nse; i++ ) {
402  result &= send_buff->write( qq.at(i) );
403  }
404 
405  return result;
406 }
407 
408 
409 int
411 {
412  int result = 1;
413  int receivedRank;
414  //IntArray const* toRecvMap = processComm.giveToRecvMap();
415  CommunicationBuffer *recv_buff = processComm.giveProcessCommunicatorBuff()->giveRecvBuff();
416  IntArray locationArray;
417  //double value;
418 
419  receivedRank = processComm.giveRank();
420 
421  if ( receivedRank != 0 ) {
422  for ( int i = 1; i <= nsem.at(receivedRank + 1); i++ ) {
423  result &= recv_buff->read( q.at(rbmAddr.at(receivedRank + 1) + i - 1) );
424  }
425  }
426 
427  return result;
428 }
429 
430 
431 int
433 {
434  int result = 0;
435 
436  for ( int i = 1; i <= nsem.at(1); i++ ) {
437  q.at(rbmAddr.at(1) + i - 1) = qq.at(i);
438  result = 1;
439  }
440 
441  return result;
442 }
443 
444 
445 int
447 {
448  // master
449 
450  int result = 1;
451  int size;
452  int ndofs, eqNum, nshared, part, from;
453  double val;
454  IntArray const *toSendMap = processComm.giveToSendMap();
455  CommunicationBuffer *send_buff = processComm.giveProcessCommunicatorBuff()->giveSendBuff();
456  IntArray locationArray;
457 
458  int rank = processComm.giveRank();
459  size = toSendMap->giveSize();
460  for ( int i = 1; i <= size; i++ ) {
461  from = toSendMap->at(i);
463  if ( rank == masterCommunicator->giveDofManager(from)->giveReferencePratition() ) {
 464  // sum corresponding values (multipliers)
466  for ( int k = 1; k <= ndofs; k++ ) {
467  val = 0.0;
468  for ( int j = 1; j <= nshared; j++ ) {
470  if ( part == processComm.giveRank() ) {
471  continue;
472  }
473 
474  eqNum = masterCommunicator->giveDofManager(from)->giveCodeNumber(part, k);
475  val += w.at(eqNum);
476  }
477 
478  result &= send_buff->write(val);
479  }
480  } else {
481  masterCommunicator->giveDofManager(from)->giveCompleteLocationArray(rank, locationArray);
482  for ( int j = 1; j <= ndofs; j++ ) {
483  if ( ( eqNum = locationArray.at(j) ) ) {
484  result &= send_buff->write( ( -1.0 ) * w.at(eqNum) );
485  }
486  }
487  }
488  }
489 
490  return result;
491 }
492 
493 
494 int
496 {
 497  // slaves unpack their solution contributions
498  int result = 1, to;
499  int size;
500  int ndofs, eqNum;
501  double value;
502  IntArray const *toRecvMap = processComm.giveToRecvMap();
503  CommunicationBuffer *recv_buff = processComm.giveProcessCommunicatorBuff()->giveRecvBuff();
504  IntArray locationArray;
506 
507  size = toRecvMap->giveSize();
508  for ( int i = 1; i <= size; i++ ) {
509  to = toRecvMap->at(i);
510  domain->giveDofManager(to)->giveCompleteLocationArray(locationArray, dn);
511  ndofs = locationArray.giveSize();
512  for ( int j = 1; j <= ndofs; j++ ) {
513  if ( ( eqNum = locationArray.at(j) ) ) {
514  result &= recv_buff->read(value);
515  dd.at(eqNum) = value;
516 
517 #ifdef __VERBOSE_PARALLEL
518  OOFEM_LOG_DEBUG("[process rank %3d]: %-30s: Unpacking solution value %f at %d\n",
519  this->giveEngngModel()->giveRank(), "FETISolver :: solveYourselfAt", value, eqNum);
520 #endif
521  }
522  }
523  }
524 
525  return result;
526 }
527 
528 int
530 {
531  int to, from, receivedRank = 0, nshared, part, eqNum, result, locpos;
532  IntArray locationArray;
533  double value;
534  int size = masterCommMap.giveSize();
536 
537  for ( int i = 1; i <= size; i++ ) {
539  // use receive map, send map is empty to prevent master to send
540  // itself any data. Note, however, that send and receive maps are same.
541  to = masterCommMap.at(i);
542 
543  domain->giveDofManager(to)->giveCompleteLocationArray(locationArray, dn);
544  locpos = 1;
545  //
546  // loop over all dofs
547  for ( int idof = 1; idof <= masterCommunicator->giveDofManager(from)->giveNumberOfDofs(); idof++, locpos++ ) {
548  // extract source value
549  while ( locationArray.at(locpos) == 0 ) {
550  locpos++;
551  // test if position of nonzero dof is allowed
552  if ( locpos > locationArray.giveSize() ) {
553  OOFEM_ERROR("Consistency dof error");
554  }
555  }
556 
557  value = 0.0;
558  if ( masterCommunicator->giveDofManager(from)->giveReferencePratition() == receivedRank ) { // contribution from reference partition localizes to all
559  // corresponding DOFs in boundary node
560 
561  // localize to corresponding places
563  for ( int j = 1; j <= nshared; j++ ) {
565  if ( part == 0 ) {
566  continue;
567  }
568 
569  eqNum = masterCommunicator->giveDofManager(from)->giveCodeNumber(part, idof);
570  value += w.at(eqNum);
571  }
572  } else { // no reference partition
573  eqNum = masterCommunicator->giveDofManager(from)->giveCodeNumber(receivedRank, idof);
574  value = ( -1.0 ) * w.at(eqNum);
575  }
576 
 577  // unpack to local corresponding equation
578  dd.at( locationArray.at(locpos) ) = value;
579  }
580  }
581 
582  result = 1;
583  return result;
584 }
585 
586 
587 
588 int
590 {
591  // slaves
592 
593  int result = 1;
594  int size;
595  int ndofs, eqNum;
596  IntArray const *toSendMap = processComm.giveToSendMap();
597  CommunicationBuffer *send_buff = processComm.giveProcessCommunicatorBuff()->giveSendBuff();
598  IntArray locationArray;
600 
601  size = toSendMap->giveSize();
602  for ( int i = 1; i <= size; i++ ) {
603  domain->giveDofManager( toSendMap->at(i) )->giveCompleteLocationArray(locationArray, dn);
604  ndofs = locationArray.giveSize();
605  for ( int j = 1; j <= ndofs; j++ ) {
606  if ( ( eqNum = locationArray.at(j) ) ) {
607  result &= send_buff->write( pp.at(eqNum) );
608  }
609  }
610  }
611 
612  return result;
613 }
614 
615 
616 int
618 {
619  // master
620 
621  int result = 1;
622  int size, receivedRank, to;
623  int nshared, part, eqNum;
624  IntArray const *toRecvMap = processComm.giveToRecvMap();
625  CommunicationBuffer *recv_buff = processComm.giveProcessCommunicatorBuff()->giveRecvBuff();
626  IntArray locationArray;
627  double value;
628 
629  receivedRank = processComm.giveRank();
630 
631  size = toRecvMap->giveSize();
632  if ( receivedRank != 0 ) {
633  for ( int i = 1; i <= size; i++ ) {
634  to = toRecvMap->at(i);
635  //
636  // loop over all dofs
637  for ( int idof = 1; idof <= masterCommunicator->giveDofManager(to)->giveNumberOfDofs(); idof++ ) {
638  // unpack contribution
639  result &= recv_buff->read(value);
640  if ( masterCommunicator->giveDofManager(to)->giveReferencePratition() == receivedRank ) { // contribution from reference partition localizes to all
641  // corresponding DOFs in boundary node
642 
643  // localize to corresponding places
645  for ( int j = 1; j <= nshared; j++ ) {
647  if ( part == processComm.giveRank() ) {
648  continue;
649  }
650 
651  eqNum = masterCommunicator->giveDofManager(to)->giveCodeNumber(part, idof);
652  g.at(eqNum) += value;
653  }
654  } else { // no reference partition
655  eqNum = masterCommunicator->giveDofManager(to)->giveCodeNumber(receivedRank, idof);
656  g.at(eqNum) += ( -1.0 ) * value;
657  }
658  }
659  }
660  }
661 
662  return result;
663 }
664 
665 int
667 { // master will map its own values directly
668  int to, from, receivedRank = 0, nshared, part, eqNum, result, locpos;
669  IntArray locationArray;
670  double value;
671  int size = masterCommMap.giveSize();
673 
674  for ( int i = 1; i <= size; i++ ) {
676  // use receive map, send map is empty to prevent master to send
677  // itself any data. Note, however, that send and receive maps are same.
678  from = masterCommMap.at(i);
679 
680  domain->giveDofManager(from)->giveCompleteLocationArray(locationArray, dn);
681  locpos = 1;
682  //
683  // ndofs = locationArray.giveSize(); // including supported
684  // for (j=1; j<=ndofs; j++) {
685  // if (eqNum = locationArray.at(j)) {
686 
687  //
688  // loop over all dofs
689  for ( int idof = 1; idof <= masterCommunicator->giveDofManager(to)->giveNumberOfDofs(); idof++, locpos++ ) {
690  // extract source value
691  while ( locationArray.at(locpos) == 0 ) {
692  locpos++;
693  // test if position of nonzero dof is allowed
694  if ( locpos > locationArray.giveSize() ) {
695  OOFEM_ERROR("Consistency dof error");
696  }
697  }
698 
699  value = pp.at( locationArray.at(locpos) );
700  if ( masterCommunicator->giveDofManager(to)->giveReferencePratition() == receivedRank ) { // contribution from reference partition localizes to all
701  // corresponding DOFs in boundary node
702 
703  // localize to corresponding places
705  for ( int j = 1; j <= nshared; j++ ) {
707  if ( part == 0 ) {
708  continue;
709  }
710 
711  eqNum = masterCommunicator->giveDofManager(to)->giveCodeNumber(part, idof);
712  g.at(eqNum) += value;
713  }
714  } else { // no reference partition
715  eqNum = masterCommunicator->giveDofManager(to)->giveCodeNumber(receivedRank, idof);
716  g.at(eqNum) += ( -1.0 ) * value;
717  }
718  }
719  }
720 
721  result = 1;
722  return result;
723 }
724 
725 
726 
727 
728 int
730 {
731  // master
732 
733  int result = 1;
734  int size;
735  int ndofs, eqNum, nshared, part, from;
736  double val;
737  IntArray const *toSendMap = processComm.giveToSendMap();
738  CommunicationBuffer *send_buff = processComm.giveProcessCommunicatorBuff()->giveSendBuff();
739  IntArray locationArray;
740 
741  int rank = processComm.giveRank();
742  size = toSendMap->giveSize();
743  for ( int i = 1; i <= size; i++ ) {
744  from = toSendMap->at(i);
745 
747  if ( rank == masterCommunicator->giveDofManager(from)->giveReferencePratition() ) {
 748  // sum corresponding values (multipliers)
750  for ( int k = 1; k <= ndofs; k++ ) {
751  val = 0.0;
752  for ( int j = 1; j <= nshared; j++ ) {
754  if ( part == processComm.giveRank() ) {
755  continue;
756  }
757 
758  eqNum = masterCommunicator->giveDofManager(from)->giveCodeNumber(part, k);
759  val += d.at(eqNum);
760  }
761 
762  result &= send_buff->write(val);
763  }
764  } else {
765  masterCommunicator->giveDofManager(from)->giveCompleteLocationArray(rank, locationArray);
766  for ( int j = 1; j <= ndofs; j++ ) {
767  if ( ( eqNum = locationArray.at(j) ) ) {
768  result &= send_buff->write( ( -1.0 ) * d.at(eqNum) );
769  }
770  }
771  }
772  }
773 
774  return result;
775 }
776 
777 
778 int
780 {
 781  // slaves unpack their solution contributions
782  int result = 1;
783  int size;
784  int ndofs, eqNum;
785  IntArray const *toRecvMap = processComm.giveToRecvMap();
786  CommunicationBuffer *recv_buff = processComm.giveProcessCommunicatorBuff()->giveRecvBuff();
787  IntArray locationArray;
788  // int receivedRank = domainComm.giveRank();
790 
791  size = toRecvMap->giveSize();
792  // if (receivedRank != 0) {
793  for ( int i = 1; i <= size; i++ ) {
794  domain->giveDofManager( toRecvMap->at(i) )->giveCompleteLocationArray(locationArray, dn);
795  ndofs = locationArray.giveSize();
796  for ( int j = 1; j <= ndofs; j++ ) {
797  if ( ( eqNum = locationArray.at(j) ) ) {
798  result &= recv_buff->read( dd.at(eqNum) );
799  }
800  }
801  }
802 
803  // }
804  return result;
805 }
806 
807 int
809 {
810  int to, from, receivedRank = 0, nshared, part, eqNum, result, locpos;
811  IntArray locationArray;
812  double value;
813  int size = masterCommMap.giveSize();
815 
816  for ( int i = 1; i <= size; i++ ) {
818  // use receive map, send map is empty to prevent master to send
819  // itself any data. Note, however, that send and receive maps are same.
820  to = masterCommMap.at(i);
821 
822  domain->giveDofManager(to)->giveCompleteLocationArray(locationArray, dn);
823  locpos = 1;
824  //
825  // loop over all dofs
826  for ( int idof = 1; idof <= masterCommunicator->giveDofManager(from)->giveNumberOfDofs(); idof++, locpos++ ) {
827  // extract source value
828  while ( locationArray.at(locpos) == 0 ) {
829  locpos++;
830  // test if position of nonzero dof is allowed
831  if ( locpos > locationArray.giveSize() ) {
832  OOFEM_ERROR("Consistency dof error");
833  }
834  }
835 
836  value = 0.0;
837  if ( masterCommunicator->giveDofManager(from)->giveReferencePratition() == receivedRank ) { // contribution from reference partition localizes to all
838  // corresponding DOFs in boundary node
839 
840  // localize to corresponding places
842  for ( int j = 1; j <= nshared; j++ ) {
844  if ( part == 0 ) {
845  continue;
846  }
847 
848  eqNum = masterCommunicator->giveDofManager(from)->giveCodeNumber(part, idof);
849  value += d.at(eqNum);
850  }
851  } else { // no reference partition
852  eqNum = masterCommunicator->giveDofManager(from)->giveCodeNumber(receivedRank, idof);
853  value = ( -1.0 ) * d.at(eqNum);
854  }
855 
 856  // unpack to local corresponding equation
857  dd.at( locationArray.at(locpos) ) = value;
858  }
859  }
860 
861  result = 1;
862  return result;
863 }
864 
865 
866 int
868 {
869  // slaves
870 
871  int result = 1;
872  int size;
873  int ndofs, eqNum;
874  IntArray const *toSendMap = processComm.giveToSendMap();
875  CommunicationBuffer *send_buff = processComm.giveProcessCommunicatorBuff()->giveSendBuff();
876  IntArray locationArray;
878 
879  size = toSendMap->giveSize();
880  for ( int i = 1; i <= size; i++ ) {
881  domain->giveDofManager( toSendMap->at(i) )->giveCompleteLocationArray(locationArray, dn);
882  ndofs = locationArray.giveSize();
883  for ( int j = 1; j <= ndofs; j++ ) {
884  if ( ( eqNum = locationArray.at(j) ) ) {
885  result &= send_buff->write( pp.at(eqNum) );
886  }
887  }
888  }
889 
890  return result;
891 }
892 
893 
894 int
896 {
897  // master
898 
899  int result = 1;
900  int size, receivedRank, to;
901  int nshared, part, eqNum;
902  IntArray const *toRecvMap = processComm.giveToRecvMap();
903  CommunicationBuffer *recv_buff = processComm.giveProcessCommunicatorBuff()->giveRecvBuff();
904  IntArray locationArray;
905  double value;
906 
907  receivedRank = processComm.giveRank();
908 
909  size = toRecvMap->giveSize();
910  if ( receivedRank != 0 ) {
911  for ( int i = 1; i <= size; i++ ) {
912  to = toRecvMap->at(i);
913  //
914  // loop over all dofs
915  for ( int idof = 1; idof <= masterCommunicator->giveDofManager(to)->giveNumberOfDofs(); idof++ ) {
916  // unpack contribution
917  result &= recv_buff->read(value);
918  if ( masterCommunicator->giveDofManager(to)->giveReferencePratition() == receivedRank ) { // contribution from reference partition localizes to all
919  // corresponding DOFs in boundary node
920 
921  // localize to corresponding places
923  for ( int j = 1; j <= nshared; j++ ) {
925  if ( part == processComm.giveRank() ) {
926  continue;
927  }
928 
929  eqNum = masterCommunicator->giveDofManager(to)->giveCodeNumber(part, idof);
930  p.at(eqNum) += value;
931  }
932  } else { // no reference partition
933  eqNum = masterCommunicator->giveDofManager(to)->giveCodeNumber(receivedRank, idof);
934  p.at(eqNum) += ( -1.0 ) * value;
935  }
936  }
937  }
938  }
939 
940  return result;
941 }
942 
943 int
945 { // master will map its own values directly
946  int to, from, receivedRank = 0, nshared, part, eqNum, result, locpos;
947  IntArray locationArray;
948  double value;
949  int size = masterCommMap.giveSize();
951 
952  for ( int i = 1; i <= size; i++ ) {
954  // use receive map, send map is empty to prevent master to send
955  // itself any data. Note, however, that send and receive maps are same.
956  from = masterCommMap.at(i);
957 
958  domain->giveDofManager(from)->giveCompleteLocationArray(locationArray, dn);
959  locpos = 1;
960  //
961  // ndofs = locationArray.giveSize(); // including supported
962  // for (j=1; j<=ndofs; j++) {
963  // if (eqNum = locationArray.at(j)) {
964 
965  //
966  // loop over all dofs
967  for ( int idof = 1; idof <= masterCommunicator->giveDofManager(to)->giveNumberOfDofs(); idof++, locpos++ ) {
968  // extract source value
969  while ( locationArray.at(locpos) == 0 ) {
970  locpos++;
971  // test if position of nonzero dof is allowed
972  if ( locpos > locationArray.giveSize() ) {
973  OOFEM_ERROR("Consistency dof error");
974  }
975  }
976 
977  value = pp.at( locationArray.at(locpos) );
978  if ( masterCommunicator->giveDofManager(to)->giveReferencePratition() == receivedRank ) { // contribution from reference partition localizes to all
979  // corresponding DOFs in boundary node
980 
981  // localize to corresponding places
983  for ( int j = 1; j <= nshared; j++ ) {
985  if ( part == 0 ) {
986  continue;
987  }
988 
989  eqNum = masterCommunicator->giveDofManager(to)->giveCodeNumber(part, idof);
990  p.at(eqNum) += value;
991  }
992  } else { // no reference partition
993  eqNum = masterCommunicator->giveDofManager(to)->giveCodeNumber(receivedRank, idof);
994  p.at(eqNum) += ( -1.0 ) * value;
995  }
996  }
997  }
998 
999  result = 1;
1000  return result;
1001 }
1002 
1003 
1004 int
1006 {
1007  // master
1008 
1009  int result = 1;
1010  int rank = processComm.giveRank();
1011  CommunicationBuffer *send_buff = processComm.giveProcessCommunicatorBuff()->giveSendBuff();
1012 
1013  for ( int irbm = 1; irbm <= nsem.at(rank + 1); irbm++ ) {
1014  result &= send_buff->write( gamma.at(rbmAddr.at(rank + 1) + irbm - 1) );
1015  }
1016 
1017  return result;
1018 }
1019 
1020 
1021 int
1023 {
1024  // slaves
1025  int result = 1;
1026  CommunicationBuffer *recv_buff = processComm.giveProcessCommunicatorBuff()->giveRecvBuff();
1027 
1029  for ( int irbm = 1; irbm <= nse; irbm++ ) {
1030  result &= recv_buff->read( localGammas.at(irbm) );
1031  }
1032 
1033  return result;
1034 }
1035 
1036 int
1038 {
1039  // slaves
1040  int result = 1;
1041 
1043  for ( int irbm = 1; irbm <= nse; irbm++ ) {
1044  localGammas.at(irbm) = gamma.at(rbmAddr.at(1) + irbm - 1);
1045  }
1046 
1047  return result;
1048 }
1049 
1050 
1051 NM_Status
1052 FETISolver :: solve(SparseMtrx &A, FloatArray &partitionLoad, FloatArray &partitionSolution)
1053 {
1054  int tnse = 0, rank = domain->giveEngngModel()->giveRank();
1055  int source, tag;
1056  int masterLoopStatus;
1057  double nom = 0.0, denom, alpha, beta, energyNorm = 0.0;
1058  FloatMatrix l1;
1059  StaticCommunicationBuffer commBuff(MPI_COMM_WORLD);
1060  Skyline *partitionStiffness = dynamic_cast< Skyline * >(&A);
1061  if ( !partitionStiffness ) {
1062  OOFEM_ERROR("unsuported sparse matrix type");
1063  }
1064 
1065 
1066  if ( ( partitionSolution.giveSize() ) != partitionLoad.giveSize() ) {
1067  OOFEM_ERROR("size mismatch");
1068  }
1069 
1070  int neq = partitionStiffness->giveNumberOfRows();
1071  int size = domain->giveEngngModel()->giveNumberOfProcesses();
1072  nsem.resize(size);
1073 
1074  // initialize communication maps
1075  this->setUpCommunicationMaps();
1076  if ( rank == 0 ) {
1077  this->commBuff = new CommunicatorBuff(size);
1078  masterCommunicator = new FETICommunicator(engngModel, this->commBuff, engngModel->giveRank(), size);
1080  }
1081 
1082  MPI_Barrier(MPI_COMM_WORLD);
1083 
1084 #ifdef __VERBOSE_PARALLEL
1085  OOFEM_LOG_DEBUG("[process rank %3d]: %-30s: Barrier reached\n",
1086  this->giveEngngModel()->giveRank(), "FETISolver :: solveYourselfAt");
1087 #endif
1088 
 1089  /*********************************/
 1090  /* factorization of the matrix */
 1091  /* computation of rigid body modes */
 1092  /*********************************/
 1093  /* array of indices of dependent equations */
1094  se.resize(6);
1095  se.zero();
 1096  /* array of rigid body displacements */
1097  // rbm.resize (neq, 6); rbm.zero();
1098 
1099 #ifdef __VERBOSE_PARALLEL
1100  OOFEM_LOG_DEBUG("[process rank %3d]: %-30s: rbmodes computation startup, lneq is %d\n",
1101  rank, "FETISolver :: solveYourselfAt", neq);
1102 #endif
1103 
1104  partitionStiffness->rbmodes(rbm, nse, se, limit, 3);
1105 
1106 #ifdef __VERBOSE_PARALLEL
1107  OOFEM_LOG_DEBUG("[process rank %3d]: %-30s: Number of Rigid body modes %3d\n",
1108  rank, "FETISolver :: solveYourselfAt", nse);
1109 
1110  //fprintf(stdout, "\n[process rank %3d]: %-30s: RBM dump",
1111  // rank,"FETISolver :: solveYourselfAt");
1112  //rbm.printYourself();
1113 
1114 #endif
1115 
 1116  /*****************************/
 1117  /* computation of the product R^T . f */
 1118  /*****************************/
1119 
1120  if ( nse != 0 ) {
1121  qq.resize(nse);
1122  qq.zero();
1123 
1124  qq.beTProductOf(rbm, partitionLoad);
1125  qq.negated();
1126  }
1127 
 1128  /***************************************************************************/
 1129  /* determine dimensions of matrix L, of vector q, and the numbers of unknowns on subdomains */
 1130  /***************************************************************************/
1131 
1132  // commBuff.init();
1133  if ( rank == 0 ) {
1134  commBuff.resize( commBuff.givePackSizeOfInt(1) );
1135  //
1136  // receive data
1137  //
1138  for ( int i = 1; i < size; i++ ) {
1139  commBuff.iRecv(MPI_ANY_SOURCE, FETISolver :: NumberOfRBMMsg);
1140  while ( !commBuff.testCompletion(source, tag) ) {
1141  ;
1142  }
1143 
1144  // unpack data
1145 #ifdef __VERBOSE_PARALLEL
1146  OOFEM_LOG_DEBUG("[process rank %3d]: %-30s: Received data from partition %3d\n",
1147  rank, "FETICommunicator :: setUpCommunicationMaps : received number of partition rbm", source);
1148 #endif
1149  commBuff.init();
1150  commBuff.read( nsem.at(source + 1) );
1151  tnse += nsem.at(source + 1);
1152  }
1153 
1154  nsem.at(1) = nse;
1155  tnse += nse;
1156 
1157  OOFEM_LOG_INFO("Number of RBM per partion\npart. rbm\n-------------------------------\n");
1158  for ( int i = 1; i <= size; i++ ) {
1159  OOFEM_LOG_INFO( "%-4d %8d\n", i - 1, nsem.at(i) );
1160  }
1161 
1162 
1163  // initialize partition start indexes into rbm array
1164  rbmAddr.resize(size);
1165  rbmAddr.zero();
1166  rbmAddr.at(1) = 1;
1167  for ( int i = 2; i <= size; i++ ) {
1168  rbmAddr.at(i) = rbmAddr.at(i - 1) + nsem.at(i - 1);
1169  }
1170  } else { // slave code
1171 #ifdef __VERBOSE_PARALLEL
1172  OOFEM_LOG_DEBUG("[process rank %3d]: %-30s: Sending number of Rigid body modes %3d\n",
1173  rank, "FETISolver :: solveYourselfAt", nse);
1174 #endif
1175 
1176  commBuff.resize( commBuff.givePackSizeOfInt(1) );
1177  commBuff.write(nse);
1178  commBuff.iSend(0, FETISolver :: NumberOfRBMMsg);
1179  }
1180 
1181  MPI_Barrier(MPI_COMM_WORLD);
1182 
1183  /*****************************/
1184  /* sestaveni cele matice L */
1185  /*****************************/
1186 
1187  if ( rank == 0 ) {
1188  // assemble l matrix containing rbm localized from partition contributions
1189  if ( tnse ) {
1191  l.zero();
1192  }
1193 
1196  this->masterMapRBM();
1197 
1198  if ( tnse ) {
1199  l.negated();
1200  }
1201  } else {
1202  // pack RBMs
1205  MPI_Barrier(MPI_COMM_WORLD);
1206 #ifdef __VERBOSE_PARALLEL
1207  OOFEM_LOG_DEBUG("[process rank %3d]: %-30s: RBMMessage Barrier finished",
1208  rank, "FETISolver :: solveYourselfAt");
1209 #endif
1210  }
1211 
1212  if ( rank == 0 ) {
1213  // receive products of rbm * LoadVector
1214  if ( tnse ) {
1215  q.resize(tnse);
1216  q.zero();
1217  }
1218 
1221  this->masterMapQQProduct();
1222  } else {
1223  // pack QQ products
1226  MPI_Barrier(MPI_COMM_WORLD);
1227  }
1228 
1229  /*******************************************/
1230  /*******************************************/
1231  /* sestaveni matice L^T.L a jeji inverze */
1232  /*******************************************/
1233  /*******************************************/
1234 
1235  if ( ( rank == 0 ) && ( tnse != 0 ) ) {
1236  FloatMatrix l2;
1237  // compute l1, the inverse of (l^Tl)
1238  l2.beTProductOf(l, l);
1239  l1.beInverseOf(l2);
1240  }
1241 
1242  /****************************************************************************/
1243  /****************************************************************************/
1244  /* vypocet pocatecni aproximace neznamych do modifikovane met. sdr. grad. */
1245  /****************************************************************************/
1246  /****************************************************************************/
1247 
1248  /****************************/
1249  /* alokace nekterych poli */
1250  /****************************/
1251  /* vektor smeru na jedne podoblasti */
1252  dd.resize(neq);
1253  /* pomocny vektor na jedne podoblasti */
1254  pp.resize(neq);
1255 
1256 
1257  if ( rank == 0 ) {
1258  /* vektor smeru */
1260  /* pomocny vektor */
1262  /* vektor gradientu */
1264  /* vektor neznamych */
1266 
1267  /* soucin (L^T.L)^{-1}.q */
1268  if ( tnse ) {
1269  FloatArray help(tnse);
1270  help.beProductOf(l1, q);
1271  w.beProductOf(l, help);
1272  } else {
1273  w.zero();
1274  }
1275 
1276  // send approximation of solution to slaves
1279  this->masterMapSolution();
1280  } else {
1281  // receive approximation of solution
1283  while ( !processCommunicator.receiveCompleted() ) {
1284  ;
1285  }
1286 
1288  }
1289 
1290  MPI_Barrier(MPI_COMM_WORLD);
1291 
1292 #ifdef __VERBOSE_PARALLEL
1293  OOFEM_LOG_DEBUG("[process rank %3d]: %-30s: Solution Approx Barrier finished\n",
1294  rank, "FETISolver :: solveYourselfAt");
1295 #endif
1296 
1297  dd.subtract(partitionLoad);
1298  partitionStiffness->ldl_feti_sky(pp, dd, nse, limit, se);
1299 
1300  if ( rank == 0 ) {
1301  // receive contributions (to residuals) from slaves
1302 
1305  this->masterMapResiduals();
1306  } else {
1307  // send contributions (to residuals) to master
1308 #ifdef __VERBOSE_PARALLEL
1309  OOFEM_LOG_DEBUG("[process rank %3d]: %-30s: Residual contribution packing initiated\n",
1310  rank, "FETISolver :: solveYourselfAt");
1311 #endif
1313 #ifdef __VERBOSE_PARALLEL
1314  OOFEM_LOG_DEBUG("[process rank %3d]: %-30s: Residual contribution send initiated\n",
1315  rank, "FETISolver :: solveYourselfAt");
1316 #endif
1318 
1319  MPI_Barrier(MPI_COMM_WORLD);
1320 #ifdef __VERBOSE_PARALLEL
1321  OOFEM_LOG_DEBUG("[process rank %3d]: %-30s: Residual contribution Barrier finished\n",
1322  rank, "FETISolver :: solveYourselfAt");
1323 #endif
1324  }
1325 
1326 
1327  /* v tuto chvili je vypocten nulty vektor gradientu */
1328 
1329  /***********************************/
1330  /* vypocet nulteho vektoru smeru */
1331  /***********************************/
1332  if ( rank == 0 ) {
1333  if ( tnse ) {
1334  this->projection(g, l, l1);
1335  }
1336 
1337  d = g;
1338  d.negated();
1339 
1340  /* vypocet nulteho citatele */
1341  nom = g.computeSquaredNorm();
1342 
1343  //#ifdef __VERBOSE_PARALLEL
1344  OOFEM_LOG_DEBUG("\nIteration process\n");
1345  if ( energyNorm_comput_flag ) {
1346  OOFEM_LOG_DEBUG("iteration gradient vector norm energy norm\n=====================================================================\n");
1347  } else {
1348  OOFEM_LOG_DEBUG("iteration gradient vector norm\n================================================\n");
1349  }
1350 
1351  //#endif
1352  }
1353 
1354  /* cyklus vyjadrujici pocet iteraci */
1355  for ( int i = 0; i < ni; i++ ) {
1356  dd.zero();
1357  if ( rank == 0 ) {
1358  /***************************************/
1359  /* vypocet pomocneho vektoru K.d = p */
1360  /***************************************/
1361 
1362  // send direction vector to slaves
1365  this->masterMapDirectionVector();
1366  } else {
1367  // receive direction vector on slaves
1369  while ( !processCommunicator.receiveCompleted() ) {
1370  ;
1371  }
1372 
1374  }
1375 
1376  MPI_Barrier(MPI_COMM_WORLD);
1377 
1378 
1379  pp.zero();
1380  partitionStiffness->ldl_feti_sky(pp, dd, nse, limit, se);
1381 
1382  if ( rank == 0 ) {
1383  p.zero();
1384 
1385  // receive contributions (to residuals) from slaves
1386 
1389  this->masterMapPPVector();
1390  } else {
1391  // pack pp on all partitions
1394  MPI_Barrier(MPI_COMM_WORLD);
1395  }
1396 
1397  if ( rank == 0 ) {
1398  /* calculation of the denominator of the fraction defining Alpha */
1399  denom = d.dotProduct(p);
1400 
1401  if ( fabs(denom) < FETISOLVER_ZERONUM ) {
1402  OOFEM_LOG_RELEVANT("FETISolver::solve : v modifikovane metode sdruzenych gradientu je nulovy jmenovatel u soucinitele alpha\n");
1403  commBuff.resize( commBuff.givePackSizeOfInt(1) );
1404  commBuff.init();
1405  commBuff.write(FETISolverIterationBreak);
1406  commBuff.bcast(0);
1407  break;
1408  }
1409 
1410  /*******************/
1411  /* vypocet alpha */
1412  /*******************/
1413  alpha = nom / denom;
1414 
1415  /**************************************************************/
1416  /* vypocet noveho gradientu g a nove aproximace neznamych x */
1417  /**************************************************************/
1418  for ( int j = 1; j <= masterCommunicator->giveNumberOfDomainEquations(); j++ ) {
1419  w.at(j) += alpha * d.at(j);
1420  g.at(j) += alpha * p.at(j);
1421  }
1422 
1423 
1424 
1425  if ( tnse ) {
1426  this->projection(g, l, l1);
1427  }
1428 
1429  denom = nom;
1430  if ( fabs(denom) < FETISOLVER_ZERONUM ) {
1431  OOFEM_LOG_RELEVANT("FETISolver::solve : v modifikovane metode sdruzenych gradientu je nulovy jmenovatel u soucinitele beta\n");
1432  commBuff.resize( commBuff.givePackSizeOfInt(1) );
1433  commBuff.init();
1434  commBuff.write(FETISolverIterationBreak);
1435  commBuff.bcast(0);
1436  break;
1437  }
1438 
1439  nom = g.computeSquaredNorm();
1440 
1441  if ( nom < err ) {
1442  commBuff.resize( commBuff.givePackSizeOfInt(1) );
1443  commBuff.init();
1444  commBuff.write(FETISolverIterationBreak);
1445  commBuff.bcast(0);
1446  break;
1447  }
1448 
1449 
1450  /******************/
1451  /* vypocet beta */
1452  /******************/
1453  beta = nom / denom;
1454 
1455  /****************************/
1456  /* vypocet noveho smeru d */
1457  /****************************/
1458  for ( int j = 1; j <= masterCommunicator->giveNumberOfDomainEquations(); j++ ) {
1459  d.at(j) = beta * d.at(j) - g.at(j);
1460  }
1461  }
1462 
1463 
1464  if ( rank == 0 ) {
1465  commBuff.resize( commBuff.givePackSizeOfInt(1) );
1466  commBuff.init();
1468  commBuff.bcast(0);
1469  } else {
1470  commBuff.resize( commBuff.givePackSizeOfInt(1) );
1471  commBuff.init();
1472  commBuff.bcast(0);
1473  commBuff.read(masterLoopStatus);
1474  if ( masterLoopStatus == FETISolverIterationBreak ) {
1475 #ifdef __VERBOSE_PARALLEL
1476  OOFEM_LOG_DEBUG("[process rank %3d]: %-30s: Received Loop break signal from master\n",
1477  rank, "FETISolver :: solveYourselfAt");
1478 #endif
1479 
1480  break;
1481  }
1482  }
1483 
1484 
1485  // computation of energy norm
1486  if ( energyNorm_comput_flag ) {
1487  dd.zero();
1488  if ( rank == 0 ) {
1489  // send approximation of solution to slaves
1492  this->masterMapSolution();
1493  } else {
1494  // receive approximation of solution
1496  while ( !processCommunicator.receiveCompleted() ) {
1497  ;
1498  }
1499 
1501  }
1502 
1503  MPI_Barrier(MPI_COMM_WORLD);
1504 
1505  pp.zero();
1506  partitionStiffness->ldl_feti_sky(pp, dd, nse, limit, se);
1507 
1508  if ( rank == 0 ) {
1509  p.zero();
1510 
1511  // receive contributions (to residuals) from slaves
1512 
1515  this->masterMapPPVector();
1516  } else {
1517  // pack pp on all partitions
1520  MPI_Barrier(MPI_COMM_WORLD);
1521  }
1522 
1523  if ( rank == 0 ) {
1524  energyNorm = p.dotProduct(w);
1525  }
1526  }
1527 
1528  // end of energy norm computation
1529 
1530  //#ifdef __VERBOSE_PARALLEL
1531  if ( rank == 0 ) {
1532  if ( energyNorm_comput_flag ) {
1533  OOFEM_LOG_DEBUG("%-9d%15e %15e\n", i, nom, energyNorm);
1534  } else {
1535  OOFEM_LOG_DEBUG("%-9d%15e\n", i, nom);
1536  }
1537  }
1538 #ifdef __VERBOSE_PARALLEL
1539  if ( rank == 0 ) {
1540  OOFEM_LOG_DEBUG("Konec metody sdruzenych gradientu, nite %d, err %e\n", i, nom);
1541  }
1542 #endif
1543 
1544  } // end iterative loop
1545 
1546 
1547  /* prelom (break) */
1548 
1549  /****************************************************/
1550  /* konec modifikovane metody sdruzenych gradientu */
1551  /****************************************************/
1552 
1553  if ( rank == 0 ) {
1554  OOFEM_LOG_INFO("End of iteration, reached norm %15e\n", nom);
1555 #ifdef __VERBOSE_PARALLEL
1556  OOFEM_LOG_DEBUG("\nVysledne Lagrangeovy multiplikatory\n");
1557  for ( int i = 1; i <= masterCommunicator->giveNumberOfDomainEquations(); i++ ) {
1558  OOFEM_LOG_DEBUG( "lambda %4d %f\n", i, w.at(i) );
1559  }
1560 
1561 #endif
1562  }
1563 
1564  dd.zero();
1565  if ( rank == 0 ) {
1566  // send solution to slaves
1569  this->masterMapSolution();
1570  } else {
1571  // receive solution
1573  while ( !processCommunicator.receiveCompleted() ) {
1574  ;
1575  }
1576 
1578  }
1579 
1580  MPI_Barrier(MPI_COMM_WORLD);
1581 
1582 
1583 #ifdef __VERBOSE_PARALLEL
1584  //fprintf(stdout, "\n[process rank %3d]: %-30s: Partition lagrange multipliers dump:\n",
1585  // rank,"FETISolver :: solveYourselfAt");
1586  //dd.printYourself();
1587 #endif
1588 
1589  partitionLoad.subtract(dd);
1590 #ifdef __VERBOSE_PARALLEL
1591  // fprintf(stdout, "\n[process rank %3d]: %-30s: Partition load dump:\n",
1592  // rank,"FETISolver :: solveYourselfAt");
1593  //partitionLoad.printYourself();
1594 #endif
1595  partitionStiffness->ldl_feti_sky(partitionSolution, partitionLoad, nse, limit, se);
1596  pp = partitionSolution;
1597 
1598 #ifdef __VERBOSE_PARALLEL
1599  //fprintf(stdout, "\n[process rank %3d]: %-30s: Partition solution dump:\n",
1600  // rank,"FETISolver :: solveYourselfAt");
1601  //pp.printYourself();
1602 #endif
1603 
1604  if ( rank == 0 ) {
1605  g.zero();
1606  // receive contributions (to residuals) from slaves
1607 
1610 
1611 #ifdef __VERBOSE_PARALLEL
1612  OOFEM_LOG_DEBUG("[process rank %3d]: %-30s: Received residuals from slaves\n",
1613  rank, "FETISolver :: solveYourselfAt");
1614 #endif
1615 
1616  this->masterMapResiduals();
1617  } else {
1618  // send contributions (to residuals) to master
1619 
1620 #ifdef __VERBOSE_PARALLEL
1621  OOFEM_LOG_DEBUG("[process rank %3d]: %-30s: Sending residuals to master\n",
1622  rank, "FETISolver :: solveYourselfAt");
1623 #endif
1624 
1627  MPI_Barrier(MPI_COMM_WORLD);
1628  }
1629 
1630 #ifdef __VERBOSE_PARALLEL
1631  if ( rank == 0 ) {
1632  //fprintf(stdout, "\n[process rank %3d]: %-30s: Partition residuals dump:\n",
1633  // rank,"FETISolver :: solveYourselfAt");
1634  //g.printYourself();
1635  }
1636 
1637 #endif
1638 
1639  if ( rank == 0 ) {
1640  if ( tnse ) {
1641  FloatArray help1;
1642  help1.beTProductOf(l, g);
1643  // coefficient of linear combinations
1644  gamma.beProductOf(l1, help1);
1645  } else {
1646  gamma.zero();
1647  }
1648 
1649  // send corresponding gammas to slaves
1652  this->masterMapGammas();
1653  } else {
1654  // receive gammas
1656  while ( !processCommunicator.receiveCompleted() ) {
1657  ;
1658  }
1659 
1661  }
1662 
1663  MPI_Barrier(MPI_COMM_WORLD);
1664 
1665  if ( nse != 0 ) {
1666  FloatArray help;
1667  help.beProductOf(rbm, localGammas);
1668 
1669  partitionSolution.add(help);
1670  }
1671 
1672  return NM_Success;
1673 }
1674 } // end namespace oofem
int packData(T *emodel, int(T::*packFunc)(ProcessCommunicator &))
Pack nodal data to send buff.
Definition: processcomm.h:259
FloatMatrix l
Rigid body motions of all partitions. On master only.
Definition: fetisolver.h:80
The representation of EngngModel default unknown numbering.
void subtract(const FloatArray &src)
Subtracts array src to receiver.
Definition: floatarray.C:258
#define NM_Success
Numerical method exited with success.
Definition: nmstatus.h:47
#define _IFT_FETISolver_maxerr
Definition: fetisolver.h:49
FloatArray d
Definition: fetisolver.h:88
Class and object Domain.
Definition: domain.h:115
FloatArray qq
Definition: fetisolver.h:88
int giveGlobalNumber() const
Definition: dofmanager.h:501
int giveReferencePratition()
Returns reference partition number of receiver.
int giveNumberOfDofManagers() const
Returns number of dof managers in domain.
Definition: domain.h:432
Base class for all matrices stored in sparse format.
Definition: sparsemtrx.h:60
int packGammas(ProcessCommunicator &processComm)
Definition: fetisolver.C:1005
int packAllData(T *ptr, int(T::*packFunc)(ProcessCommunicator &))
Pack all problemCommunicators data to their send buffers.
Definition: communicator.h:223
The purpose of DataStream abstract class is to allow to store/restore context to different streams...
Definition: datastream.h:54
int masterUnpackRBM(ProcessCommunicator &processComm)
Definition: fetisolver.C:281
void zero()
Sets all component to zero.
Definition: intarray.C:52
double & at(int i)
Coefficient access function.
Definition: floatarray.h:131
virtual int iRecv(int source, int tag, int count=0)
Starts standard mode, nonblocking receive.
Definition: combuff.h:346
void setToSendArry(T *emodel, const IntArray &src, int packUnpackType)
Sets receiver toSend array to src.
Definition: processcomm.h:378
This base class is an abstraction for all numerical methods solving sparse linear system of equations...
int giveCodeNumber(int partition_num, int dof_num)
Returns code number corresponding to partition number partition_num and to dof_num-th DOF...
ProcessCommunicator processCommunicator
Definition: fetisolver.h:93
int packQQProducts(ProcessCommunicator &processComm)
Definition: fetisolver.C:395
int packResiduals(ProcessCommunicator &processComm)
Definition: fetisolver.C:589
int estimateMaxPackSize(IntArray &, DataStream &, int &)
Definition: fetisolver.C:67
EngngModel * giveEngngModel()
Returns engineering model to which receiver is associated.
Definition: domain.C:433
IntArray se
Indices of singular equations.
Definition: fetisolver.h:82
int unpackGammas(ProcessCommunicator &processComm)
Definition: fetisolver.C:1022
void ldl_feti_sky(FloatArray &x, FloatArray &y, int nse, double limit, IntArray &se)
Solves the singular system of equations, the receiver should be factorized using rbmodes service...
Definition: skyline.C:932
int giveNumberOfProcesses() const
Returns the number of collaborating processes.
Definition: engngm.h:1060
int unpackSolution(ProcessCommunicator &processComm)
Definition: fetisolver.C:495
void negated()
Changes sign of receiver values.
Definition: floatmatrix.C:1605
CommunicationBuffer * giveRecvBuff()
Returns receive buffer of receiver.
Definition: processcomm.h:167
virtual ~FETISolver()
Definition: fetisolver.C:57
IntArray masterCommMap
List of local nodes (at master) participating in communication (list of boundary dof managers)...
Definition: fetisolver.h:98
int giveRank()
Returns corresponding rank of associated partition.
Definition: processcomm.h:207
unsigned long NM_Status
Mask defining NumMetod Status; which can be asked after finishing computation by Numerical Method...
Definition: nmstatus.h:44
#define OOFEM_LOG_DEBUG(...)
Definition: logger.h:128
int giveCompleteLocationArray(int rank, IntArray &locationArray)
Returns code numbers for all DOFs associated with shared partition.
Class implementing an array of integers.
Definition: intarray.h:61
int & at(int i)
Coefficient access function.
Definition: intarray.h:103
#define OOFEM_LOG_RELEVANT(...)
Definition: logger.h:126
Class implementing sparse matrix stored in skyline form.
Definition: skyline.h:68
const IntArray * giveToSendMap()
Returns receiver to send map.
Definition: processcomm.h:223
Domain * domain
Pointer to domain.
Definition: nummet.h:84
FloatArray q
Definition: fetisolver.h:88
int giveSharedPartition(int i)
Returns number of i-th shared partition of receiver.
int unpackPPVector(ProcessCommunicator &processComm)
Definition: fetisolver.C:895
IntArray rbmAddr
Addresses of initial partition contribution to rbm matrix.
Definition: fetisolver.h:84
int masterUnpackQQProduct(ProcessCommunicator &processComm)
Definition: fetisolver.C:410
virtual int testCompletion()
Tests if the operation identified by this->request is complete.
Definition: combuff.h:348
int giveNumberOfRows() const
Returns number of rows of receiver.
Definition: sparsemtrx.h:114
void setUpCommunicationMaps()
Sets up the communication maps.
Definition: fetisolver.C:118
#define OOFEM_LOG_INFO(...)
Definition: logger.h:127
int ni
Max number of iterations.
Definition: fetisolver.h:72
double dotProduct(const FloatArray &x) const
Computes the dot product (or inner product) of receiver and argument.
Definition: floatarray.C:463
Class CommunicationBuffer provides abstraction for communication buffer.
Definition: combuff.h:208
#define _IFT_FETISolver_maxiter
Definition: fetisolver.h:48
int packDirectionVector(ProcessCommunicator &processComm)
Definition: fetisolver.C:729
virtual int givePackSizeOfInt(int count)
Definition: combuff.C:271
FETICommunicator * masterCommunicator
Definition: fetisolver.h:96
#define OOFEM_ERROR(...)
Definition: error.h:61
virtual int resize(int newSize)
Resizes buffer to given size.
Definition: combuff.h:314
double computeSquaredNorm() const
Computes the square of the norm.
Definition: floatarray.C:846
int giveNumberOfDofs()
Returns number of DOFs (with associated equation) of receiver.
int packPPVector(ProcessCommunicator &processComm)
Definition: fetisolver.C:867
void setUpCommunicationMaps(EngngModel *pm)
Service for setting up the communication patterns with other remote processes.
int unpackResiduals(ProcessCommunicator &processComm)
Definition: fetisolver.C:617
virtual int iSend(int dest, int tag)
Starts standard mode, nonblocking send.
Definition: combuff.h:344
ProcessCommunicatorBuff * giveProcessCommunicatorBuff()
Returns communication buffer.
Definition: processcomm.h:210
int masterMapDirectionVector()
Definition: fetisolver.C:808
virtual int write(bool data)
Writes a bool value.
Definition: combuff.C:265
const IntArray * giveToRecvMap()
Returns receiver to receive map.
Definition: processcomm.h:227
int masterMapQQProduct()
Definition: fetisolver.C:432
virtual void init()
Initializes buffer to empty state.
Definition: combuff.h:316
Class representing process communicator for engineering model.
Definition: processcomm.h:176
void beProductOf(const FloatMatrix &aMatrix, const FloatArray &anArray)
Receiver becomes the result of the product of aMatrix and anArray.
Definition: floatarray.C:676
#define _IFT_FETISolver_energynormflag
Definition: fetisolver.h:51
FloatMatrix rbm
Rigid body motions.
Definition: fetisolver.h:78
void beTProductOf(const FloatMatrix &aMatrix, const FloatArray &anArray)
Receiver becomes the result of the product of aMatrix^T and anArray.
Definition: floatarray.C:708
virtual int read(int *dest, int n)
Reads count integer values into array pointed by data.
Definition: combuff.h:333
double at(int i, int j) const
Coefficient access function.
Definition: floatmatrix.h:176
void resize(int n)
Checks size of receiver towards requested bounds.
Definition: intarray.C:124
double limit
Linear dep./indep. trigger.
Definition: fetisolver.h:76
virtual IRResultType initializeFrom(InputRecord *ir)
Definition: fetisolver.C:93
int giveNumberOfSharedPartitions()
Returns number of partitions sharing receiver.
void rbmodes(FloatMatrix &r, int &nse, IntArray &se, double limit, int tc)
Splits the receiver to LDLT form, and computes the rigid body motions.
Definition: skyline.C:785
FloatArray p
Definition: fetisolver.h:88
Initializes the variable VERBOSE, in order to get a few intermediate messages on screen: beginning an...
int initSend(int tag)
Initialize the send data exchange with associate problem.
Definition: processcomm.C:74
virtual NM_Status solve(SparseMtrx &A, FloatArray &b, FloatArray &x)
Solves the given linear system by LDL^T factorization.
Definition: fetisolver.C:1052
Class representing vector of real numbers.
Definition: floatarray.h:82
int unpackAllData(T *ptr, int(T::*unpackFunc)(ProcessCommunicator &))
Unpack all problemCommuncators data from recv buffers.
Definition: communicator.h:262
int masterMapSolution()
Definition: fetisolver.C:529
#define FETISOLVER_MAX_RBM
Definition: fetisolver.h:59
int energyNorm_comput_flag
Flag indicating computation of energy norm.
Definition: fetisolver.h:100
int unpackDirectionVector(ProcessCommunicator &processComm)
Definition: fetisolver.C:779
Implementation of matrix containing floating point numbers.
Definition: floatmatrix.h:94
FloatArray dd
Definition: fetisolver.h:88
FloatArray g
Definition: fetisolver.h:88
IRResultType
Type defining the return values of InputRecord reading operations.
Definition: irresulttype.h:47
int packSolution(ProcessCommunicator &processComm)
Definition: fetisolver.C:446
virtual int givePackSizeOfDouble(int count)=0
FETISolver(Domain *d, EngngModel *m)
Definition: fetisolver.C:49
void giveCompleteLocationArray(IntArray &locationArray, const UnknownNumberingScheme &s) const
Returns full location array of receiver containing equation numbers of all dofs of receiver...
Definition: dofmanager.C:229
void resize(int rows, int cols)
Checks size of receiver towards requested bounds.
Definition: floatmatrix.C:1358
Class representing communicator for FETI solver.
Class representing the general Input Record.
Definition: inputrecord.h:101
FloatArray pp
Definition: fetisolver.h:88
CommunicationBuffer * giveSendBuff()
Returns send buffer of receiver.
Definition: processcomm.h:163
void setToRecvArry(T *emodel, const IntArray &src, int packUnpackType)
Sets receiver toRecv array to src.
Definition: processcomm.h:388
void zero()
Zeroes all coefficients of receiver.
Definition: floatarray.C:658
REGISTER_SparseLinSolver(IMLSolver, ST_IML)
Definition: imlsolver.C:56
void beTProductOf(const FloatMatrix &a, const FloatMatrix &b)
Assigns to the receiver product of .
Definition: floatmatrix.C:367
virtual int bcast(int root)
Initializes broadcast over collaborating processes.
Definition: combuff.h:354
int giveRank() const
Returns domain rank in a group of collaborating processes (0..groupSize-1)
Definition: engngm.h:1058
int masterMapResiduals()
Definition: fetisolver.C:666
virtual int write(const int *src, int n)
Writes count integer values from array pointed by data.
Definition: combuff.h:321
int masterMapPPVector()
Definition: fetisolver.C:944
FETIBoundaryDofManager * giveDofManager(int i)
Returns reference to i-th boundary dof manager.
#define FETISOLVER_ZERONUM
computer zero
Definition: fetisolver.h:61
void zero()
Zeroes all coefficient of receiver.
Definition: floatmatrix.C:1326
int initSend(int tag)
Initializes data send exchange with all problems.
Definition: communicator.C:128
int packRBM(ProcessCommunicator &processComm)
Definition: fetisolver.C:249
The Communicator and corresponding buffers (represented by this class) are separated in order to allo...
Definition: communicator.h:60
Abstract base class representing the "problem" under consideration.
Definition: engngm.h:181
FloatArray localGammas
Definition: fetisolver.h:88
#define IR_GIVE_OPTIONAL_FIELD(__ir, __value, __id)
Macro facilitating the use of input record reading methods.
Definition: inputrecord.h:78
int unpackData(T *emodel, int(T::*unpackFunc)(ProcessCommunicator &))
Unpack nodal data from recv buff.
Definition: processcomm.h:292
int giveSize() const
Definition: intarray.h:203
int giveSize() const
Returns the size of receiver.
Definition: floatarray.h:218
the oofem namespace is to define a context or scope in which all oofem names are defined.
virtual int read(bool &data)
Reads a bool value from data.
Definition: combuff.C:257
#define IR_GIVE_FIELD(__ir, __value, __id)
Macro facilitating the use of input record reading methods.
Definition: inputrecord.h:69
Abstract class Dof represents Degree Of Freedom in finite element mesh.
Definition: dof.h:93
DofManager * giveDofManager(int n)
Service for accessing particular domain dof manager.
Definition: domain.C:314
void beInverseOf(const FloatMatrix &src)
Modifies receiver to become inverse of given parameter.
Definition: floatmatrix.C:835
void negated()
Switches the sign of every coefficient of receiver.
Definition: floatarray.C:739
EngngModel * giveEngngModel()
Definition: nummet.h:99
double err
Max allowed error.
Definition: fetisolver.h:74
IntArray * giveMasterCommMapPtr()
Returns pointer to master comm map stored in receiver.
EngngModel * engngModel
Pointer to engineering model.
Definition: nummet.h:86
int initReceive(int tag)
Initializes data receive exchange with all problems.
Definition: communicator.C:139
#define _IFT_FETISolver_limit
Definition: fetisolver.h:50
DofManager is shared by neighboring partitions, it is necessary to sum contributions from all contrib...
Definition: dofmanager.h:82
dofManagerParallelMode giveParallelMode() const
Return dofManagerParallelMode of receiver.
Definition: dofmanager.h:512
void projection(FloatArray &v, FloatMatrix &l, FloatMatrix &l1)
Definition: fetisolver.C:221
void add(const FloatArray &src)
Adds array src to receiver.
Definition: floatarray.C:156
int initReceive(int tag)
Initialize the receive data exchange with associate problem.
Definition: processcomm.C:89
FloatArray w
Primary unknowns.
Definition: fetisolver.h:86
CommunicatorBuff * commBuff
Common Communicator buffer.
Definition: fetisolver.h:95
void resize(int s)
Resizes receiver towards requested size.
Definition: floatarray.C:631
FloatArray gamma
Definition: fetisolver.h:88

This page is part of the OOFEM documentation. Copyright (c) 2011 Borek Patzak
Project e-mail: info@oofem.org
Generated at Tue Jan 2 2018 20:07:28 for OOFEM by doxygen 1.8.11 written by Dimitri van Heesch, © 1997-2011