OOFEM 3.0
parmetisloadbalancer.C
/*
 *
 *                 #####    #####   ######  ######  ###   ###
 *               ##   ##  ##   ##  ##      ##      ## ### ##
 *              ##   ##  ##   ##  ####    ####    ##  #  ##
 *             ##   ##  ##   ##  ##      ##      ##     ##
 *            ##   ##  ##   ##  ##      ##      ##     ##
 *            #####    #####   ##      ######  ##     ##
 *
 *
 *             OOFEM : Object Oriented Finite Element Code
 *
 *               Copyright (C) 1993 - 2025   Borek Patzak
 *
 *
 *
 *       Czech Technical University, Faculty of Civil Engineering,
 *   Department of Structural Mechanics, 166 29 Prague, Czech Republic
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2.1 of the License, or (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "parmetisloadbalancer.h"
#include "domain.h"
#include "engngm.h"
#include "element.h"
#include "dofmanager.h"
#include "connectivitytable.h"
#include "error.h"
#include "parallel.h"
#include "processcomm.h"
#include "communicator.h"
#include "classfactory.h"

#include <set>
#include <stdlib.h>

namespace oofem {
//#define ParmetisLoadBalancer_DEBUG_PRINT

REGISTER_LoadBalancer(ParmetisLoadBalancer);

ParmetisLoadBalancer :: ParmetisLoadBalancer(Domain *d) : LoadBalancer(d)
{
    elmdist = NULL;
    tpwgts = NULL;
}

ParmetisLoadBalancer :: ~ParmetisLoadBalancer()
{
    if ( elmdist ) {
        delete[] elmdist;
    }

    if ( tpwgts ) {
        delete[] tpwgts;
    }
}


void
ParmetisLoadBalancer :: calculateLoadTransfer()
{
    idx_t *eind, *eptr, *xadj, *adjncy, *vwgt, *vsize;
    idx_t *part;
    int i, nlocalelems, eind_size, nelem = domain->giveNumberOfElements();
    int ndofman, idofman, numflag, ncommonnodes, options [ 4 ], ie, nproc;
    int edgecut, wgtflag, ncon;
    real_t ubvec [ 1 ], itr;
    Element *ielem;
    MPI_Comm communicator = MPI_COMM_WORLD;
    LoadBalancerMonitor *lbm = domain->giveEngngModel()->giveLoadBalancerMonitor();

    nproc = domain->giveEngngModel()->giveNumberOfProcesses();
    // init parmetis element numbering
    this->initGlobalParmetisElementNumbering();
    // prepare data structures for ParMETIS_V3_Mesh2Dual
    // count the size of eind array
    eind_size = 0;
    nlocalelems = 0;
    for ( i = 1; i <= nelem; i++ ) {
        ielem = domain->giveElement(i);
        if ( ielem->giveParallelMode() == Element_local ) {
            nlocalelems++;
            eind_size += ielem->giveNumberOfDofManagers();
        }
    }

    // allocate eind and eptr arrays
    eind = new idx_t [ eind_size ];
    eptr = new idx_t [ nlocalelems + 1 ];
    if ( ( eind == NULL ) || ( eptr == NULL ) ) {
        OOFEM_ERROR("failed to allocate eind and eptr arrays");
    }

    // fill in the eind and eptr (mesh graph)
    int eind_pos = 0, eptr_pos = 0;
    for ( i = 1; i <= nelem; i++ ) {
        ielem = domain->giveElement(i);
        if ( ielem->giveParallelMode() == Element_local ) {
            eptr [ eptr_pos ] = eind_pos;
            ndofman = ielem->giveNumberOfDofManagers();
            for ( idofman = 1; idofman <= ndofman; idofman++ ) {
                eind [ eind_pos++ ] = ielem->giveDofManager(idofman)->giveGlobalNumber() - 1;
            }

            eptr_pos++;
        }
    }

    // last record closes the CSR structure
    eptr [ nlocalelems ] = eind_pos;
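
    /* illustrative example (hypothetical numbers): with two local elements, a triangle with
     * global nodes { 4, 7, 9 } and a quadrilateral with global nodes { 7, 9, 12, 13 }, the
     * zero-based arrays become
     *   eptr = { 0, 3, 7 }
     *   eind = { 3, 6, 8,   6, 8, 11, 12 }
     * i.e. eind[ eptr[e] .. eptr[e+1]-1 ] lists the nodes of local element e
     */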

    // call ParMETIS_V3_Mesh2Dual to construct dual graph (in parallel)
    // dual graph: elements are vertices; element edges are graph edges
    // this is necessary, since the cut runs through graph edges
    numflag = 0;
    ncommonnodes = 2;
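    // per the ParMETIS manual: numflag = 0 selects zero-based (C-style) numbering, and
    // ncommonnodes = 2 makes two elements adjacent in the dual graph whenever they share
    // at least two nodes (i.e. a common edge)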
    ParMETIS_V3_Mesh2Dual(elmdist, eptr, eind, & numflag, & ncommonnodes, & xadj, & adjncy, & communicator);

 #ifdef ParmetisLoadBalancer_DEBUG_PRINT
    int myrank = domain->giveEngngModel()->giveRank();
    // DEBUG PRINT
    fprintf(stderr, "[%d] xadj:", myrank);
    for ( i = 0; i <= nlocalelems; i++ ) {
        fprintf(stderr, " %d", xadj [ i ]);
    }

    fprintf(stderr, "\n[%d] adjncy:", myrank);
    for ( i = 0; i < xadj [ nlocalelems ]; i++ ) {
        fprintf(stderr, " %d", adjncy [ i ]);
    }

    fprintf(stderr, "\n");
 #endif


    // setup imbalance tolerance for each vertex weight - ubvec param
    ubvec [ 0 ] = 1.05;
    // setup options array
    options [ 0 ] = 1; // set to zero for default
    options [ 1 ] = 1; // get timings
    options [ 2 ] = 15; // random seed
    options [ 3 ] = 1; // sub-domains and processors are coupled
    // set ratio of inter-proc communication compared to data redistribution time
    itr = 1000.0;
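    // per the ParMETIS manual, itr is the ratio of inter-processor communication time to data
    // redistribution time; a large value such as 1000.0 asks the repartitioner to favour a small
    // edge cut even at the price of migrating more elements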
    // set partition weights by querying the load balancer monitor
    const FloatArray &_procweights = lbm->giveProcessorWeights();
    if ( tpwgts == NULL ) {
        if ( ( tpwgts = new real_t [ nproc ] ) == NULL ) {
            OOFEM_ERROR("failed to allocate tpwgts");
        }
    }

    for ( i = 0; i < nproc; i++ ) {
        tpwgts [ i ] = _procweights(i);
    }
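    // note: ParMETIS expects tpwgts to hold ncon * nparts fractions summing to 1.0 per constraint;
    // the processor weights returned by the monitor are assumed to be normalized accordingly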

    /*
     * // log processor weights
     * OOFEM_LOG_RELEVANT ("[%d] ParmetisLoadBalancer: proc weights: ", myrank);
     * for (i=0; i<nproc; i++) OOFEM_LOG_RELEVANT ("%4.3f ",tpwgts[i]);
     * OOFEM_LOG_RELEVANT ("\n");
     */

    // obtain vertex weights (element weights) representing relative computational cost
    if ( ( vwgt = new idx_t [ nlocalelems ] ) == NULL ) {
        OOFEM_ERROR("failed to allocate vwgt");
    }

    if ( ( vsize = new idx_t [ nlocalelems ] ) == NULL ) {
        OOFEM_ERROR("failed to allocate vsize");
    }

    for ( ie = 0, i = 0; i < nelem; i++ ) {
        ielem = domain->giveElement(i + 1);
        if ( ielem->giveParallelMode() == Element_local ) {
            vwgt [ ie ] = ( int ) ( ielem->predictRelativeComputationalCost() * 100.0 );
            vsize [ ie++ ] = 1; //ielem->predictRelativeRedistributionCost();
        }
    }

    wgtflag = 2;
    numflag = 0;
    ncon = 1;
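    // per the ParMETIS manual: wgtflag = 2 supplies vertex weights only (no edge weights),
    // numflag = 0 keeps zero-based numbering, and ncon = 1 declares a single balance
    // constraint (one weight per vertex)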
    if ( ( part = new idx_t [ nlocalelems ] ) == NULL ) {
        OOFEM_ERROR("failed to allocate part");
    }

    // call ParMETIS balancing routine ParMETIS_V3_AdaptiveRepart
    ParMETIS_V3_AdaptiveRepart(elmdist, xadj, adjncy, vwgt, vsize, NULL, & wgtflag, & numflag, & ncon, & nproc,
                               tpwgts, ubvec, & itr, options, & edgecut, part, & communicator);

    // part contains the partition vector for the local elements on the receiver
    // we need to map it to domain elements (this is not the same, since the
    // domain may contain not only its local elements but remote elements as well)
    int loc_num = 0;
    this->elementPart.resize(nelem);
    for ( i = 1; i <= nelem; i++ ) {
        ielem = domain->giveElement(i);
        if ( ielem->giveParallelMode() == Element_local ) {
            this->elementPart.at(i) = part [ loc_num++ ];
        } else {
            // we cannot say anything about remote elements; this information is available on the
            // partition that has its local counterpart
            this->elementPart.at(i) = -1;
        }
    }

    if ( part ) {
        delete[] part;
    }

 #ifdef ParmetisLoadBalancer_DEBUG_PRINT
    // debug
    fprintf(stderr, "[%d] edgecut: %d elementPart:", myrank, edgecut);
    for ( i = 1; i <= nelem; i++ ) {
        fprintf( stderr, " %d", elementPart.at(i) );
    }

    fprintf(stderr, "\n");
 #endif

    // release work arrays; xadj and adjncy were allocated by ParMETIS and are released with free()
    delete[] eind;
    delete[] eptr;
    delete[] vwgt;
    delete[] vsize;
    free(xadj);
    free(adjncy);

    this->labelDofManagers();
}

void
ParmetisLoadBalancer :: initGlobalParmetisElementNumbering()
{
    int nproc = domain->giveEngngModel()->giveNumberOfProcesses();
    int myrank = domain->giveEngngModel()->giveRank();
    IntArray procElementCounts(nproc);

    //if (procElementCounts) delete procElementCounts;
    if ( elmdist == NULL ) {
        elmdist = new idx_t [ nproc + 1 ];
        if ( elmdist == NULL ) {
            OOFEM_ERROR("failed to allocate elmdist array");
        }
    }

    // determine number of local elements for the receiver
    int i, nlocelem = 0, nelem = domain->giveNumberOfElements();
    int globnum;

    for ( i = 1; i <= nelem; i++ ) {
        if ( domain->giveElement(i)->giveParallelMode() == Element_local ) {
            nlocelem++;
        }
    }

    procElementCounts(myrank) = nlocelem;

    // gather the local element counts from all ranks and build the element distribution array
    MPI_Allgather(& nlocelem, 1, MPI_INT, procElementCounts.givePointer(), 1, MPI_INT, MPI_COMM_WORLD);
    elmdist [ 0 ] = 0;
    for ( i = 0; i < nproc; i++ ) {
        elmdist [ i + 1 ] = elmdist [ i ] + procElementCounts(i);
    }
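
    /* illustrative example (hypothetical counts): with nproc = 3 and 10, 12 and 8 local elements
     * on ranks 0, 1 and 2, elmdist becomes { 0, 10, 22, 30 }; rank r then owns the contiguous
     * zero-based global element numbers elmdist[r] .. elmdist[r+1]-1, which is the element
     * distribution format expected by ParMETIS
     */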

    // we need to number elements sequentially on each partition (and we start from rank 0)
    // compute local offset
    myGlobNumOffset = 0;
    for ( i = 0; i < myrank; i++ ) {
        myGlobNumOffset += procElementCounts(i);
    }

    /* assemble maps between local and global (ParMETIS) element numbering;
     * the maps are necessary since the domain may contain remote elements that are not
     * part of the local domain for load-balancing purposes
     */
    globnum = myGlobNumOffset + 1;
    lToGMap.resize(nelem);
    gToLMap.resize(nelem);
    for ( i = 1; i <= nelem; i++ ) {
        if ( domain->giveElement(i)->giveParallelMode() == Element_local ) {
            lToGMap.at(i) = globnum;
            gToLMap.at(globnum - myGlobNumOffset) = i;
            globnum++;
        } else {
            lToGMap.at(i) = 0;
        }
    }
}

void
ParmetisLoadBalancer :: labelDofManagers()
{
    int idofman, ndofman = domain->giveNumberOfDofManagers();
    ConnectivityTable *ct = domain->giveConnectivityTable();
    const IntArray *dofmanconntable;
    DofManager *dofman;
    Element *ielem;
    dofManagerParallelMode dmode;
    std :: set< int, std :: less< int > > __dmanpartitions;
    int myrank = domain->giveEngngModel()->giveRank();
    int nproc = domain->giveEngngModel()->giveNumberOfProcesses();
    int ie, npart;

    // resize label array
    dofManState.resize(ndofman);
    dofManState.zero();
    // resize dof man partitions
    dofManPartitions.clear();
    dofManPartitions.resize(ndofman);

 #ifdef ParmetisLoadBalancer_DEBUG_PRINT
    int _cols = 0;
    fprintf(stderr, "[%d] DofManager labels:\n", myrank);
 #endif

    // loop over local dof managers
    for ( idofman = 1; idofman <= ndofman; idofman++ ) {
        dofman = domain->giveDofManager(idofman);
        dmode = dofman->giveParallelMode();
        if ( ( dmode == DofManager_local ) || ( dmode == DofManager_shared ) ) {
            dofmanconntable = ct->giveDofManConnectivityArray(idofman);
            __dmanpartitions.clear();
            for ( ie = 1; ie <= dofmanconntable->giveSize(); ie++ ) {
                ielem = domain->giveElement( dofmanconntable->at(ie) );
                // assemble list of partitions sharing idofman dofmanager
                // set is used to include possibly repeated partition only once
                if ( ielem->giveParallelMode() == Element_local ) {
                    __dmanpartitions.insert( giveElementPartition( dofmanconntable->at(ie) ) );
                }
            }

            npart = __dmanpartitions.size();
            dofManPartitions [ idofman - 1 ].resize( __dmanpartitions.size() );
            int i = 1;
            for ( auto &dm: __dmanpartitions ) {
                dofManPartitions [ idofman - 1 ].at(i++) = dm;
            }
        }
    }

    // handle master slave links between dofmans (master and slave required on same partition)
    this->handleMasterSlaveDofManLinks();


    /* Exchange new partitions for shared nodes */
    CommunicatorBuff cb(nproc, CBT_dynamic);
    Communicator com(domain->giveEngngModel(), &cb, myrank, nproc, CommMode_Dynamic);
    com.packAllData(this, & ParmetisLoadBalancer :: packSharedDmanPartitions);
    com.initExchange(SHARED_DOFMAN_PARTITIONS_TAG);
    com.unpackAllData(this, & ParmetisLoadBalancer :: unpackSharedDmanPartitions);
    com.finishExchange();
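
    // (exchange format: for every shared dof manager, packSharedDmanPartitions below writes its
    //  global number followed by its new partition list, and terminates each message with the
    //  PARMETISLB_END_DATA sentinel; unpackSharedDmanPartitions merges the received lists into
    //  dofManPartitions of the local counterparts)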

    /* label dof managers */
    for ( idofman = 1; idofman <= ndofman; idofman++ ) {
        dofman = domain->giveDofManager(idofman);
        dmode = dofman->giveParallelMode();
        npart = dofManPartitions [ idofman - 1 ].giveSize();
        if ( ( dmode == DofManager_local ) || ( dmode == DofManager_shared ) ) {
            // determine its state after balancing -> label
            dofManState.at(idofman) = this->determineDofManState(idofman, myrank, npart, & dofManPartitions [ idofman - 1 ]);
        } else {
            dofManState.at(idofman) = DM_NULL;
        }
    }


 #ifdef ParmetisLoadBalancer_DEBUG_PRINT
    for ( idofman = 1; idofman <= ndofman; idofman++ ) {
        fprintf(stderr, " | %d: ", idofman);
        if ( dofManState.at(idofman) == DM_NULL ) {
            fprintf(stderr, "NULL ");
        } else if ( dofManState.at(idofman) == DM_Local ) {
            fprintf(stderr, "Local ");
        } else if ( dofManState.at(idofman) == DM_Shared ) {
            fprintf(stderr, "Shared");
        } else if ( dofManState.at(idofman) == DM_Remote ) {
            fprintf(stderr, "Remote");
        } else {
            fprintf(stderr, "Unknown");
        }

        //else if (dofManState.at(idofman) == DM_SharedExclude) fprintf (stderr, "ShdExc");
        //else if (dofManState.at(idofman) == DM_SharedNew) fprintf (stderr, "ShdNew");
        //else if (dofManState.at(idofman) == DM_SharedUpdate) fprintf (stderr, "ShdUpd");

        if ( ( ( ++_cols % 4 ) == 0 ) || ( idofman == ndofman ) ) {
            fprintf(stderr, "\n");
        }
    }

 #endif
}

int
ParmetisLoadBalancer :: determineDofManState(int idofman, int myrank, int npart, IntArray *dofManPartitions)
{
    dofManagerParallelMode dmode = domain->giveDofManager(idofman)->giveParallelMode();
    int answer = DM_Local;

    if ( ( dmode == DofManager_local ) || ( dmode == DofManager_shared ) ) {
        if ( ( npart == 1 ) && ( dofManPartitions->at(1) == myrank ) ) {
            // local remains local
            answer = DM_Local;
        } else if ( npart == 1 ) {
            // local goes to remote partition
            answer = DM_Remote;
        } else { // npart > 1
            // local becomes newly shared
            answer = DM_Shared;
        }
    } else {
        answer = DM_NULL;
    }

    /*
     * if (dmode == DofManager_local) {
     *   if ((npart == 1) && (dofManPartitions->at(1) == myrank)) {
     *     // local remains local
     *     answer = DM_Local;
     *   } else if (npart == 1) {
     *     // local goes to remote partition
     *     answer = DM_Remote;
     *   } else { // npart > 1
     *     // local becomes newly shared
     *     answer = DM_SharedNew;
     *   }
     * } else if (dmode == DofManager_shared) {
     *   // compare old and new partition list
     *   int i, _same = true, containsMyRank = dofManPartitions->findFirstIndexOf (myrank);
     *   const IntArray* oldpart = domain->giveDofManager(idofman)->givePartitionList();
     *   for (i=1; i<=dofManPartitions->giveSize(); i++) {
     *     if ((dofManPartitions->at(i)!= myrank) &&
     *         (!oldpart->findFirstIndexOf(dofManPartitions->at(i)))) {
     *       _same=false; break;
     *     }
     *   }
     *   if (_same && containsMyRank) {
     *     answer = DM_Shared;
     *   } else if (containsMyRank) {
     *     answer = DM_SharedUpdate;
     *   } else { // !containsMyRank
     *     answer = DM_SharedExclude;
     *   }
     * } else {
     *   answer = DM_NULL;
     * }
     */
    return answer;
}


LoadBalancer :: DofManMode
ParmetisLoadBalancer :: giveDofManState(int idofman)
{
    return ( LoadBalancer :: DofManMode ) dofManState.at(idofman);
}


IntArray *
ParmetisLoadBalancer :: giveDofManPartitions(int idofman)
{
    return & dofManPartitions [ idofman - 1 ];
}

int
ParmetisLoadBalancer :: giveElementPartition(int ielem)
{
    return elementPart.at(ielem);
}

int
ParmetisLoadBalancer :: packSharedDmanPartitions(ProcessCommunicator &pc)
{
    int myrank = domain->giveEngngModel()->giveRank();
    int iproc = pc.giveRank();
    int ndofman, idofman;
    DofManager *dofman;

    if ( iproc == myrank ) {
        return 1; // skip local partition
    }

    // query process communicator to use
    ProcessCommunicatorBuff *pcbuff = pc.giveProcessCommunicatorBuff();
    // loop over dofManagers and pack shared dofMan data
    ndofman = domain->giveNumberOfDofManagers();
    for ( idofman = 1; idofman <= ndofman; idofman++ ) {
        dofman = domain->giveDofManager(idofman);
        // test if iproc is in list of existing shared partitions
        if ( ( dofman->giveParallelMode() == DofManager_shared ) &&
             ( dofman->givePartitionList()->findFirstIndexOf(iproc) ) ) {
            // send new partitions to remote representation
            // fprintf (stderr, "[%d] sending shared plist of %d to [%d]\n", myrank, dofman->giveGlobalNumber(), iproc);
            pcbuff->write( dofman->giveGlobalNumber() );
            this->giveDofManPartitions(idofman)->storeYourself(*pcbuff);
        }
    }

    pcbuff->write((int)PARMETISLB_END_DATA);
    return 1;
}

int
ParmetisLoadBalancer :: unpackSharedDmanPartitions(ProcessCommunicator &pc)
{
    int myrank = domain->giveEngngModel()->giveRank();
    int iproc = pc.giveRank();
    int _globnum, _locnum;
    IntArray _partitions;

    if ( iproc == myrank ) {
        return 1; // skip local partition
    }

    // query process communicator to use
    ProcessCommunicatorBuff *pcbuff = pc.giveProcessCommunicatorBuff();
    // init domain global2local map
    domain->initGlobalDofManMap();

    pcbuff->read(_globnum);
    // unpack dofman data
    while ( _globnum != PARMETISLB_END_DATA ) {
        _partitions.restoreYourself(*pcbuff);
        if ( ( _locnum = domain->dofmanGlobal2Local(_globnum) ) ) {
            this->addSharedDofmanPartitions(_locnum, _partitions);
        } else {
            OOFEM_ERROR("internal error, unknown global dofman %d", _globnum);
        }

        /*
         * fprintf (stderr,"[%d] Received shared plist of %d ", myrank, _globnum);
         * for (int _i=1; _i<=dofManPartitions[_locnum-1].giveSize(); _i++)
         *   fprintf (stderr,"%d ", dofManPartitions[_locnum-1].at(_i));
         * fprintf (stderr,"\n");
         */
        pcbuff->read(_globnum);
    }

    return 1;
}


void ParmetisLoadBalancer :: addSharedDofmanPartitions(int _locnum, IntArray _partitions)
{
    for ( int part: _partitions ) {
        dofManPartitions [ _locnum - 1 ].insertOnce( part );
    }
}

void ParmetisLoadBalancer :: handleMasterSlaveDofManLinks()
{
    int idofman, ndofman = domain->giveNumberOfDofManagers();
    DofManager *dofman;
    //int myrank = domain->giveEngngModel()->giveRank();
    int __i, __j, __partition, _master;
    bool isSlave;
    IntArray slaveMastersDofMans;

    /*
     * We assume that in the old partitioning the master-slave consistency was assured, i.e. the master is present
     * on the same partition as its slaves. The master can be local (then all its slaves are local) or shared (then
     * the slaves live on the partitions sharing the master).
     *
     * If the master was local, its new partitioning can be resolved locally (as all slaves were local).
     * If the master was shared, the new partitioning of the master has to be communicated between the old sharing partitions.
     */
    // handle master slave links between dofmans (master and slave required on same partition)

    for ( idofman = 1; idofman <= ndofman; idofman++ ) {
        dofman = domain->giveDofManager(idofman);
        isSlave = dofman->hasAnySlaveDofs();

        if ( isSlave ) {
            // ok, have a look at its masters
            dofman->giveMasterDofMans(slaveMastersDofMans);
            for ( __i = 1; __i <= slaveMastersDofMans.giveSize(); __i++ ) {
                // loop over all slave masters
                _master = slaveMastersDofMans.at(__i);

                // now loop over all of the slave's new partitions and add them to the master's partitions
                for ( __j = 1; __j <= dofManPartitions [ idofman - 1 ].giveSize(); __j++ ) {
                    __partition = dofManPartitions [ idofman - 1 ].at(__j);
                    // add slave partition to master
                    dofManPartitions [ _master - 1 ].insertOnce(__partition);
                }
            }
        }
    }
}

} // end namespace oofem