OOFEM 3.0
Loading...
Searching...
No Matches
loadbalancer.C
Go to the documentation of this file.
1/*
2 *
3 * ##### ##### ###### ###### ### ###
4 * ## ## ## ## ## ## ## ### ##
5 * ## ## ## ## #### #### ## # ##
6 * ## ## ## ## ## ## ## ##
7 * ## ## ## ## ## ## ## ##
8 * ##### ##### ## ###### ## ##
9 *
10 *
11 * OOFEM : Object Oriented Finite Element Code
12 *
13 * Copyright (C) 1993 - 2025 Borek Patzak
14 *
15 *
16 *
17 * Czech Technical University, Faculty of Civil Engineering,
18 * Department of Structural Mechanics, 166 29 Prague, Czech Republic
19 *
20 * This library is free software; you can redistribute it and/or
21 * modify it under the terms of the GNU Lesser General Public
22 * License as published by the Free Software Foundation; either
23 * version 2.1 of the License, or (at your option) any later version.
24 *
25 * This program is distributed in the hope that it will be useful,
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
28 * Lesser General Public License for more details.
29 *
30 * You should have received a copy of the GNU Lesser General Public
31 * License along with this library; if not, write to the Free Software
32 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
33 */
34
#include "loadbalancer.h"
#include "domain.h"
#include "engngm.h"
#include "timer.h"
#include "mathfem.h"
#include "timestep.h"
#include "floatarray.h"
#include "intarray.h"
#include "classfactory.h"
#include "element.h"

#ifdef __MPI_PARALLEL_MODE
#include "processcomm.h"
#include "datastream.h"
#include "communicator.h"
#include "domaintransactionmanager.h"
#include "nonlocalmatwtp.h"
#endif
54
55namespace oofem {
56 //#define __VERBOSE_PARALLEL
57 //#define LoadBalancer_debug_print
58
59#ifndef __MPI_PARALLEL_MODE
60
61LoadBalancer :: LoadBalancer(Domain *d) : wtpList()
62{
63 domain = d;
64}
65
// Serial (non-MPI) build: load balancing is meaningless on a single process,
// so the public operations are provided as empty stubs to keep callers
// compilable regardless of the build configuration.
void LoadBalancer::migrateLoad(Domain *d) {}
void LoadBalancer::printStatistics() const {}
void LoadBalancer::initializeFrom(InputRecord &ir) { }
void LoadBalancerMonitor::initializeFrom(InputRecord &ir) { }
70
71#else
72
73
74LoadBalancer :: LoadBalancer(Domain *d) : wtpList()
75{
76 domain = d;
77}
78
79
80void
81LoadBalancer :: initializeFrom(InputRecord &ir)
82{
83 IntArray wtp;
85
86 this->initializeWtp(wtp);
87}
88
89void
90LoadBalancer :: initializeWtp(IntArray &wtp)
91{
92 int size = wtp.giveSize();
93
94 if ( size ) {
95 wtpList.clear();
96 wtpList.reserve(size);
97 for ( int iwtp: wtp ) {
98 std :: unique_ptr< WorkTransferPlugin > plugin;
99 if ( iwtp == 1 ) {
100 plugin = std::make_unique<NonlocalMaterialWTP>(this);
101 } else {
102 OOFEM_ERROR("Unknown work transfer plugin type");
103 }
104
105 wtpList.push_back(std :: move( plugin ));
106 }
107 }
108}
109
110
111void
112LoadBalancer :: migrateLoad(Domain *d)
113{
114 // domain->migrateLoad(this);
115 int nproc = d->giveEngngModel()->giveNumberOfProcesses();
116 int myrank = d->giveEngngModel()->giveRank();
117
118 OOFEM_LOG_RELEVANT("[%d] LoadBalancer: migrateLoad: migrating load\n", myrank);
119
120 // initialize work transfer plugins before any transfer
121 for ( auto &wtp: wtpList ) {
122 wtp->init(d);
123 }
124
125 CommunicatorBuff cb(nproc, CBT_dynamic);
126 Communicator com(d->giveEngngModel(), &cb, myrank, nproc, CommMode_Dynamic);
127
128 // move existing dofmans and elements, that will be local on current partition,
129 // into local map
130 com.packAllData(this, d, & LoadBalancer :: packMigratingData);
132
133 // do something in between
136
138 this->deleteRemoteElements(d);
139
140 // receive remote data
141 com.unpackAllData(this, d, & LoadBalancer :: unpackMigratingData);
142 com.finishExchange();
143
145
146
147#ifdef LoadBalancer_debug_print
148 // debug print
149 int nnodes = d->giveNumberOfDofManagers(), nelems = d->giveNumberOfElements();
150 fprintf(stderr, "\n[%d] Nodal Table\n", myrank);
151 for ( int i = 1; i <= nnodes; i++ ) {
153 fprintf( stderr, "[%d]: %5d[%d] local\n", myrank, i, d->giveDofManager(i)->giveGlobalNumber() );
154 } else if ( d->giveDofManager(i)->giveParallelMode() == DofManager_shared ) {
155 fprintf( stderr, "[%d]: %5d[%d] shared ", myrank, i, d->giveDofManager(i)->giveGlobalNumber() );
156 for ( int j = 1; j <= d->giveDofManager(i)->givePartitionList()->giveSize(); j++ ) {
157 fprintf( stderr, "%d ", d->giveDofManager(i)->givePartitionList()->at(j) );
158 }
159
160 fprintf(stderr, "\n");
161 }
162 }
163
164 fprintf(stderr, "\n[%d] Element Table\n", myrank);
165 for ( int i = 1; i <= nelems; i++ ) {
166 fprintf(stderr, "%5d {", i);
167 for ( int j = 1; j <= d->giveElement(i)->giveNumberOfDofManagers(); j++ ) {
168 fprintf( stderr, "%d ", d->giveElement(i)->giveDofManager(j)->giveNumber() );
169 }
170
171 fprintf(stderr, "}\n");
172 }
173
174#endif
175
176 // migrate work transfer plugin data
177 for ( auto &wtp: wtpList ) {
178 wtp->migrate();
179 }
180
181 // update work transfer plugin data
182 for ( auto &wtp: wtpList ) {
183 wtp->update();
184 }
185
186
187#ifdef LoadBalancer_debug_print
188 // debug print
189 nnodes = d->giveNumberOfDofManagers();
190 nelems = d->giveNumberOfElements();
191 fprintf(stderr, "LB Debug print (after wtp update):\n");
192 fprintf(stderr, "\n[%d] Nodal Table\n", myrank);
193 for ( int i = 1; i <= nnodes; i++ ) {
195 fprintf( stderr, "[%d]: %5d[%d] local\n", myrank, i, d->giveDofManager(i)->giveGlobalNumber() );
196 } else if ( d->giveDofManager(i)->giveParallelMode() == DofManager_shared ) {
197 fprintf( stderr, "[%d]: %5d[%d] shared ", myrank, i, d->giveDofManager(i)->giveGlobalNumber() );
198 for ( int j = 1; j <= d->giveDofManager(i)->givePartitionList()->giveSize(); j++ ) {
199 fprintf( stderr, "%d ", d->giveDofManager(i)->givePartitionList()->at(j) );
200 }
201
202 fprintf(stderr, "\n");
203 }
204 }
205
206 fprintf(stderr, "\n[%d] Element Table\n", myrank);
207 for ( int i = 1; i <= nelems; i++ ) {
208 fprintf(stderr, "[%d] %5d [%d]{", myrank, i, d->giveElement(i)->giveGlobalNumber());
209 for ( int j = 1; j <= d->giveElement(i)->giveNumberOfDofManagers(); j++ ) {
210 fprintf( stderr, "%d[%d] ", d->giveElement(i)->giveDofManager(j)->giveNumber(), d->giveElement(i)->giveDofManager(j)->giveGlobalNumber());
211 }
212
213 fprintf(stderr, "}\n");
214 }
215
216#endif
217
218
219
220 // print some local statistics
221 int nelem = domain->giveNumberOfElements();
222 int nnode = domain->giveNumberOfDofManagers();
223 int lnode = 0, lelem = 0;
224
225 for ( int i = 1; i <= nnode; i++ ) {
226 if ( domain->giveDofManager(i)->giveParallelMode() == DofManager_local ) {
227 lnode++;
228 }
229 }
230
231 for ( int i = 1; i <= nelem; i++ ) {
232 if ( domain->giveElement(i)->giveParallelMode() == Element_local ) {
233 lelem++;
234 }
235 }
236
237 OOFEM_LOG_RELEVANT("[%d] LB Statistics: local elem=%d local node=%d\n", myrank, lelem, lnode);
238}
239
240int
241LoadBalancer :: packMigratingData(Domain *d, ProcessCommunicator &pc)
242{
243 int myrank = d->giveEngngModel()->giveRank();
244 int iproc = pc.giveRank();
245
246 // **************************************************
247 // Pack migrating data to remote partition
248 // **************************************************
249
250 // pack dofManagers
251 if ( iproc == myrank ) {
252 return 1; // skip local partition
253 }
254
255 // query process communicator to use
257 // loop over dofManagers
258 int ndofman = d->giveNumberOfDofManagers();
259 for ( int idofman = 1; idofman <= ndofman; idofman++ ) {
260 DofManager *dofman = d->giveDofManager(idofman);
261 // sync data to remote partition
262 // if dofman already present on remote partition then there is no need to sync
263 //if ((this->giveDofManPartitions(idofman)->findFirstIndexOf(iproc))) {
264 if ( ( this->giveDofManPartitions(idofman)->findFirstIndexOf(iproc) ) &&
265 ( !dofman->givePartitionList()->findFirstIndexOf(iproc) ) ) {
266 pcbuff->write( dofman->giveInputRecordName() );
267 pcbuff->write( this->giveDofManState(idofman) );
268 pcbuff->write( dofman->giveGlobalNumber() );
269
270 // pack dofman state (this is the local dofman, not available on remote)
271 /* this is a potential performance leak, sending shared dofman to a partition,
272 * in which is already shared does not require to send context (is already there)
273 * here for simplicity it is always send */
275 // send list of new partitions
276 this->giveDofManPartitions(idofman)->storeYourself(*pcbuff);
277 }
278 }
279
280 // pack end-of-dofman-section record
281 pcbuff->write("");
282
283 int nelem = d->giveNumberOfElements(), nsend = 0;
284
285 for ( int ielem = 1; ielem <= nelem; ielem++ ) { // begin loop over elements
286 Element *elem = d->giveElement(ielem);
287 if ( ( elem->giveParallelMode() == Element_local ) &&
288 ( this->giveElementPartition(ielem) == iproc ) ) {
289 // pack local element (node numbers should be global ones!!!)
290 // pack type
291 pcbuff->write( elem->giveInputRecordName() );
292 // nodal numbers should be packed as global !!
294 nsend++;
295 }
296 } // end loop over elements
297
298 // pack end-of-element-record
299 pcbuff->write("");
300
301 OOFEM_LOG_RELEVANT("[%d] LoadBalancer:: sending %d migrating elements to %d\n", myrank, nsend, iproc);
302
303 return 1;
304}
305
306
307int
308LoadBalancer :: unpackMigratingData(Domain *d, ProcessCommunicator &pc)
309{
310 // create temp space for dofManagers and elements
311 // merging should be made by domain ?
312 // maps of new dofmanagers and elements indexed by global number
313
314 // we can put local dofManagers and elements into maps (should be done before unpacking)
315 // int nproc=this->giveEngngModel()->giveNumberOfProcesses();
316 int myrank = d->giveEngngModel()->giveRank();
317 int iproc = pc.giveRank();
318 int _mode, _globnum;
319 bool _newentry;
320 std :: string _type;
321 IntArray _partitions, local_partitions;
322 //LoadBalancer::DofManMode dmode;
323 DofManager *dofman;
325
326 // **************************************************
327 // Unpack migrating data to remote partition
328 // **************************************************
329
330 if ( iproc == myrank ) {
331 return 1; // skip local partition
332 }
333
334 // query process communicator to use
336
337 // unpack dofman data
338 do {
339 if ( !pcbuff->read(_type) ) {
340 OOFEM_ERROR("Internal error in load balancing.");
341 }
342 if ( _type.size() == 0 ) { // Empty string marks end of data
343 break;
344 }
345 pcbuff->read(_mode);
346 switch ( _mode ) {
347 case LoadBalancer :: DM_Remote:
348 // receiving new local dofManager
349 pcbuff->read(_globnum);
350 /*
351 * _newentry = false;
352 * if ( ( dofman = dtm->giveDofManager(_globnum) ) == NULL ) {
353 * // data not available -> create a new one
354 * _newentry = true;
355 * dofman = classFactory.createDofManager(_etype, 0, d);
356 * }
357 */
358 _newentry = true;
359 dofman = classFactory.createDofManager(_type.c_str(), 0, d).release();
360
361 dofman->setGlobalNumber(_globnum);
362 // unpack dofman state (this is the local dofman, not available on remote)
364 // unpack list of new partitions
365 _partitions.restoreYourself(*pcbuff);
366 dofman->setPartitionList(& _partitions);
368 // add transaction if new entry allocated; otherwise existing one has been modified via returned dofman
369 if ( _newentry ) {
370 dtm->addDofManTransaction(DomainTransactionManager :: DTT_ADD, _globnum, dofman);
371 }
372
373 //dmanMap[_globnum] = dofman;
374 break;
375
376 case LoadBalancer :: DM_Shared:
377 // receiving new shared dofManager, that was local on sending partition
378 // should be received only once (from partition where was local)
379 pcbuff->read(_globnum);
380 /*
381 * _newentry = false;
382 * if ( ( dofman = dtm->giveDofManager(_globnum) ) == NULL ) {
383 * // data not available -> mode should be SharedUpdate
384 * _newentry = true;
385 * dofman = classFactory.createDofManager(_etype, 0, d);
386 * }
387 */
388 _newentry = true;
389 dofman = classFactory.createDofManager(_type.c_str(), 0, d).release();
390
391 dofman->setGlobalNumber(_globnum);
392 // unpack dofman state (this is the local dofman, not available on remote)
394 // unpack list of new partitions
395 _partitions.restoreYourself(*pcbuff);
396 dofman->setPartitionList(& _partitions);
398#ifdef __VERBOSE_PARALLEL
399 fprintf(stderr, "[%d] received Shared new dofman [%d]\n", myrank, _globnum);
400#endif
401 // add transaction if new entry allocated; otherwise existing one has been modified via returned dofman
402 if ( _newentry ) {
403 dtm->addDofManTransaction(DomainTransactionManager :: DTT_ADD, _globnum, dofman);
404 }
405
406 //dmanMap[_globnum] = dofman;
407 break;
408
409 default:
410 OOFEM_ERROR("unexpected dof manager mode (%d)", _mode);
411 }
412 } while ( 1 );
413
414 // unpack element data
415 Element *elem;
416 int nrecv = 0;
417 do {
418 pcbuff->read(_type);
419 if ( _type.size() == 0 ) {
420 break;
421 }
422
423 elem = classFactory.createElement(_type.c_str(), 0, d).release();
424 elem->restoreContext(*pcbuff, CM_Definition | CM_State);
425 elem->initForNewStep();
426 dtm->addElementTransaction(DomainTransactionManager :: DTT_ADD, elem->giveGlobalNumber(), elem);
427 nrecv++;
428 //recvElemList.push_back(elem);
429 } while ( 1 );
430
431 OOFEM_LOG_RELEVANT("[%d] LoadBalancer:: receiving %d migrating elements from %d\n", myrank, nrecv, iproc);
432
433 return 1;
434}
435
436
437/* will delete those dofmanagers, that were sent to remote partition and are locally owned here
438 * so they are no longer necessary (those with state equal to DM_Remote and DM_SharedMerge)
439 * This will update domain DofManager list as well as global dmanMap and physically deletes the remote dofManager
440 */
441void
442LoadBalancer :: deleteRemoteDofManagers(Domain *d)
443{
444 int ndofman = d->giveNumberOfDofManagers();
445 //LoadBalancer* lb = this->giveLoadBalancer();
446 LoadBalancer :: DofManMode dmode;
447 DofManager *dman;
448 int myrank = d->giveEngngModel()->giveRank();
450 // loop over local nodes
451
452 for ( int i = 1; i <= ndofman; i++ ) {
453 dmode = this->giveDofManState(i);
454 if ( dmode == LoadBalancer :: DM_Remote ) {
455 // positive candidate found
456 dtm->addDofManTransaction(DomainTransactionManager :: DTT_Remove, d->giveDofManager(i)->giveGlobalNumber(), NULL);
457 // dmanMap.erase (d->giveDofManager (i)->giveGlobalNumber());
458 //dman = dofManagerList->unlink (i);
459 //delete dman;
460 } else if ( dmode == LoadBalancer :: DM_NULL ) {
461 // positive candidate found; we delete all null dof managers
462 // they will be created by nonlocalmatwtp if necessary.
463 // potentially, they can be reused, but this will make the code too complex
464 dtm->addDofManTransaction(DomainTransactionManager :: DTT_Remove, d->giveDofManager(i)->giveGlobalNumber(), NULL);
465 } else if ( dmode == LoadBalancer :: DM_Shared ) {
466 dman = d->giveDofManager(i);
467 dman->setPartitionList( this->giveDofManPartitions(i) );
469 if ( !dman->givePartitionList()->findFirstIndexOf(myrank) ) {
470 dtm->addDofManTransaction(DomainTransactionManager :: DTT_Remove, d->giveDofManager(i)->giveGlobalNumber(), NULL);
471 //dmanMap.erase (this->giveDofManager (i)->giveGlobalNumber());
472 //dman = dofManagerList->unlink (i);
473 //delete dman;
474 }
475 } else if ( dmode == LoadBalancer :: DM_Local ) {
476 IntArray _empty(0);
477 dman = d->giveDofManager(i);
478 dman->setPartitionList(& _empty);
480 } else {
481 OOFEM_ERROR("unknown dmode encountered");
482 }
483 }
484}
485
/* will delete those elements, that were sent to remote partition and are locally owned here,
 * so they are no longer necessary (those whose new partition differs from the local rank,
 * plus any non-local remote copies).
 * This will update the domain element list as well as the global element map and physically deletes the remote elements.
 */
490void
491LoadBalancer :: deleteRemoteElements(Domain *d)
492{
493 int nelem = d->giveNumberOfElements();
494 int myrank = d->giveEngngModel()->giveRank();
495 //LoadBalancer* lb = this->giveLoadBalancer();
497 //Element* elem;
498
499 // loop over local nodes
500
501 for ( int i = 1; i <= nelem; i++ ) {
502 if ( this->giveElementPartition(i) != myrank ) {
503 // positive candidate found
504 // this->deleteElement (i); // delete and set entry to NULL
505 dtm->addElementTransaction(DomainTransactionManager :: DTT_Remove, d->giveElement(i)->giveGlobalNumber(), NULL);
506 //elem = elementList->unlink (i);
507 //dmanMap.erase (elem->giveGlobalNumber());
508 //delete (elem);
509 } else if ( d->giveElement(i)->giveParallelMode() != Element_local ) {
510 dtm->addElementTransaction(DomainTransactionManager :: DTT_Remove, d->giveElement(i)->giveGlobalNumber(), NULL);
511 }
512 }
513}
514
515
516void
517LoadBalancer :: printStatistics() const
518{
519 EngngModel *emodel = domain->giveEngngModel();
520 int nelem, nnode;
521 int lelem = 0, lnode = 0;
522 int myrank = emodel->giveRank();
523
524 nelem = domain->giveNumberOfElements();
525 nnode = domain->giveNumberOfDofManagers();
526
527 for ( int i = 1; i <= nnode; i++ ) {
528 if ( domain->giveDofManager(i)->giveParallelMode() == DofManager_local ) {
529 lnode++;
530 }
531 }
532
533 for ( int i = 1; i <= nelem; i++ ) {
534 if ( domain->giveElement(i)->giveParallelMode() == Element_local ) {
535 lelem++;
536 }
537 }
538
539 double mySolutionWTime = emodel->giveTimer()->getWtime(EngngModelTimer :: EMTT_AnalysisTimer);
540 double mySolutionUTime = emodel->giveTimer()->getUtime(EngngModelTimer :: EMTT_AnalysisTimer);
541
542 OOFEM_LOG_RELEVANT("[%d] LB Statistics: wt=%.1f ut=%.1f nelem=%d nnode=%d\n", myrank,
543 mySolutionWTime, mySolutionUTime, lelem, lnode);
544}
545
546
547void
548LoadBalancerMonitor :: initializeFrom(InputRecord &ir)
549{
550 int nproc = emodel->giveNumberOfProcesses();
551 int nodeWeightMode = 0;
552
553 nodeWeights.resize(nproc);
554 for ( int i = 0; i < nproc; i++ ) {
555 nodeWeights(i) = 1.0 / nproc;
556 }
557
559 if ( nodeWeightMode == 0 ) { // default, dynamic weights
560 staticNodeWeightFlag = false;
561 } else if ( nodeWeightMode == 1 ) { // equal weights for all nodes
563 } else if ( nodeWeightMode == 2 ) { // user defined static weights
565 if ( nodeWeights.giveSize() != nproc ) {
566 OOFEM_ERROR("nodeWeights size not equal to number of processors");
567 }
568
570 } else {
571 OOFEM_ERROR("unsupported node weight type, using default value");
572 staticNodeWeightFlag = false;
573 }
574}
575
576#endif // end __MPI_PARALLEL_MODE
577} // end namespace oofem
int unpackAllData(T *ptr, int(T ::*unpackFunc)(ProcessCommunicator &))
int initExchange(int tag)
int packAllData(T *ptr, int(T ::*packFunc)(ProcessCommunicator &))
void setPartitionList(const IntArray *_p)
Definition dofmanager.h:535
int giveGlobalNumber() const
Definition dofmanager.h:515
const char * giveInputRecordName() const override
Definition dofmanager.h:558
void saveContext(DataStream &stream, ContextMode mode) override
Definition dofmanager.C:540
void restoreContext(DataStream &stream, ContextMode mode) override
Definition dofmanager.C:595
const IntArray * givePartitionList()
Definition dofmanager.h:533
void setGlobalNumber(int newNumber)
Definition dofmanager.h:521
dofManagerParallelMode giveParallelMode() const
Definition dofmanager.h:526
void setParallelMode(dofManagerParallelMode _mode)
Definition dofmanager.h:528
int addDofManTransaction(DomainTransactionType, int, DofManager *)
int addElementTransaction(DomainTransactionType, int, Element *)
DomainTransactionManager * giveTransactionManager()
Definition domain.C:1697
int commitTransactions(DomainTransactionManager *tm)
Definition domain.C:1710
void initGlobalElementMap(bool forceinit=false)
Definition domain.C:1847
void initGlobalDofManMap(bool forceinit=false)
Definition domain.C:1826
int giveNumberOfElements() const
Returns number of elements in domain.
Definition domain.h:463
int giveNumberOfDofManagers() const
Returns number of dof managers in domain.
Definition domain.h:461
DofManager * giveDofManager(int n)
Definition domain.C:317
Element * giveElement(int n)
Definition domain.C:165
EngngModel * giveEngngModel()
Definition domain.C:419
int giveGlobalNumber() const
Definition element.h:1129
void saveContext(DataStream &stream, ContextMode mode) override
Definition element.C:923
elementParallelMode giveParallelMode() const
Definition element.h:1139
virtual void initForNewStep()
Definition element.C:879
DofManager * giveDofManager(int i) const
Definition element.C:553
void restoreContext(DataStream &stream, ContextMode mode) override
Definition element.C:999
double getUtime(EngngModelTimerType t)
Returns total user time elapsed.
Definition timer.C:154
double getWtime(EngngModelTimerType t)
Returns elapsed wall clock time.
Definition timer.C:159
int giveNumberOfProcesses() const
Returns the number of collaborating processes.
Definition engngm.h:1156
int giveRank() const
Returns domain rank in a group of collaborating processes (0..groupSize-1).
Definition engngm.h:1154
EngngModelTimer * giveTimer()
Returns reference to receiver timer (EngngModelTimer).
Definition engngm.h:793
virtual const char * giveInputRecordName() const =0
int giveNumber() const
Definition femcmpnn.h:104
contextIOResultType restoreYourself(DataStream &stream)
Definition intarray.C:254
int findFirstIndexOf(int value) const
Definition intarray.C:280
int & at(std::size_t i)
Definition intarray.h:104
int giveSize() const
Definition intarray.h:211
virtual IntArray * giveDofManPartitions(int idofman)=0
Returns the partition list of given dofmanager after load balancing.
void initializeWtp(IntArray &wtp)
virtual DofManMode giveDofManState(int idofman)=0
Returns the label of dofmanager after load balancing.
virtual int giveElementPartition(int ielem)=0
Returns the new partition number assigned to local element after LB.
void deleteRemoteDofManagers(Domain *)
std ::vector< std ::unique_ptr< WorkTransferPlugin > > wtpList
List of work transfer plugins.
void deleteRemoteElements(Domain *)
int read(int *data, std::size_t count) override
Reads count integer values into array pointed by data.
Definition processcomm.h:94
int write(const int *data, std::size_t count) override
Writes count integer values from array pointed by data.
Definition processcomm.h:86
ProcessCommunicatorBuff * giveProcessCommunicatorBuff()
Returns communication buffer.
#define CM_UnknownDictState
Definition contextmode.h:49
#define CM_State
Definition contextmode.h:46
#define CM_DefinitionGlobal
Definition contextmode.h:48
#define CM_Definition
Definition contextmode.h:47
#define OOFEM_ERROR(...)
Definition error.h:79
#define IR_GIVE_OPTIONAL_FIELD(__ir, __value, __id)
Definition inputrecord.h:75
#define _IFT_LoadBalancer_wtp
#define MIGRATE_LOAD_TAG
#define _IFT_LoadBalancerMonitor_initialnodeweights
#define _IFT_LoadBalancerMonitor_nodeWeightMode
#define OOFEM_LOG_RELEVANT(...)
Definition logger.h:142
@ Element_local
Element is local, there are no contributions from other domains to this element.
Definition element.h:88
@ DofManager_local
Definition dofmanager.h:67
@ DofManager_shared
Definition dofmanager.h:68
@ CBT_dynamic
ClassFactory & classFactory

This page is part of the OOFEM-3.0 documentation. Copyright (C) 1994-2025 Bořek Patzák
Project e-mail: oofem@fsv.cvut.cz
Generated at for OOFEM by doxygen 1.15.0 written by Dimitri van Heesch, © 1997-2011