OOFEM 3.0
nonlocalmatwtp.C
/*
 *
 *                 #####    #####   ######  ######  ###   ###
 *               ##   ##  ##   ##  ##      ##      ## ### ##
 *              ##   ##  ##   ##  ####    ####    ##  #  ##
 *             ##   ##  ##   ##  ##      ##      ##     ##
 *            ##   ##  ##   ##  ##      ##      ##     ##
 *            #####    #####   ##      ######  ##     ##
 *
 *
 *             OOFEM : Object Oriented Finite Element Code
 *
 *                    Copyright (C) 1993 - 2025   Borek Patzak
 *
 *
 *
 *       Czech Technical University, Faculty of Civil Engineering,
 *   Department of Structural Mechanics, 166 29 Prague, Czech Republic
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2.1 of the License, or (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "nonlocalmatwtp.h"
#include "nonlocalmaterialext.h"
#include "element.h"
#include "dofmanager.h"
#include "engngm.h"
#include "gausspoint.h"
#include "material.h"
#include "communicator.h"
#include "datastream.h"
#include "domaintransactionmanager.h"
#include "classfactory.h"

#include <set>

namespace oofem {
#define NonlocalMaterialWTP_DEBUG_PRINT 0
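
/*
 * Nonlocal material work transfer plugin (WTP): keeps nonlocal interaction
 * tables consistent across dynamic load balancing. init() records, for every
 * local element, the elements it depends on nonlocally (before migration);
 * migrate() ships mirror copies of the remote elements needed to satisfy
 * those dependencies; update() rebuilds the integration point interaction
 * tables on the new partitioning.
 */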

/*
 * Returns array storing nonlocal dependency
 * (in terms of global element numbers) for the given element.
 */
void
NonlocalMaterialWTP :: giveElementNonlocalDepArry(IntArray &answer, Domain *d, int num)
{
    std :: set< int >relems;
    Element *ielem = d->giveElement(num);

    if ( ielem->giveMaterial()->giveInterface(NonlocalMaterialExtensionInterfaceType) ) {
        relems.clear();
        // loop over element IRules and their IPs to retrieve remote (nonlocal) elements;
        // store their global numbers in the relems set (to avoid redundancy)
        // and then keep them in the nonlocTables array
        ielem->ipEvaluator(this, & NonlocalMaterialWTP :: giveNonlocalDepArryElementPlugin, relems);

        // convert the relems set into an int array and store it
        answer.resize( relems.size() );
        int _i = 1;
        for ( int relem: relems ) {
            answer.at(_i++) = relem;
        }
    } else {
        answer.clear();
    }
}
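
/*
 * The dependency is recorded in global element numbers on purpose: global
 * numbers stay valid when elements migrate between partitions, whereas local
 * numbers do not (fastRebuildNonlocalTables translates them back to local
 * numbers once the new partitioning is in place).
 */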


void
NonlocalMaterialWTP :: giveNonlocalDepArryElementPlugin(GaussPoint *gp, std :: set< int > &s)
{
    int remoteElemNum;

    NonlocalMaterialStatusExtensionInterface *interface =
        static_cast< NonlocalMaterialStatusExtensionInterface * >( gp->giveMaterialStatus()->giveInterface(NonlocalMaterialStatusExtensionInterfaceType) );
    if ( interface ) {
        auto lir = interface->giveIntegrationDomainList();

        for ( auto &intdom: *lir ) {
            remoteElemNum = ( intdom.nearGp )->giveElement()->giveGlobalNumber();
            s.insert(remoteElemNum);
        }
    }
}
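
/*
 * The plugin above is invoked through Element::ipEvaluator once per
 * integration point; each record in the integration domain list refers
 * (via its nearGp member) to a remote integration point whose element
 * contributes nonlocally to the evaluated point.
 */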


/*
 * Prepares the communication maps for remote elements;
 * should be called immediately after load balancing,
 * before any work transfer.
 */
void
NonlocalMaterialWTP :: init(Domain *domain)
{
    int ie, gie, nelem = domain->giveNumberOfElements();
    EngngModel *emodel = domain->giveEngngModel();
    Element *elem;
    int nproc = emodel->giveNumberOfProcesses();
    int myrank = emodel->giveRank();
    CommunicatorBuff cb(nproc, CBT_dynamic);
    Communicator com(emodel, &cb, myrank, nproc, CommMode_Dynamic);
    this->nonlocElementDependencyMap.clear();

    // build nonlocal element dependency array for each local element
    for ( ie = 1; ie <= nelem; ie++ ) {
        elem = domain->giveElement(ie);
        if ( elem->giveParallelMode() == Element_local ) {
            gie = elem->giveGlobalNumber();
            this->giveElementNonlocalDepArry(nonlocElementDependencyMap [ gie ], domain, ie);
        }
    }

    /* send and receive nonlocal element dependency arrays of migrating elements to/from remote partitions */
    com.packAllData(this, domain, & NonlocalMaterialWTP :: packMigratingElementDependencies);
    com.initExchange(MIGRATE_NONLOCALDEP_TAG);
    com.unpackAllData(this, domain, & NonlocalMaterialWTP :: unpackMigratingElementDependencies);
    com.finishExchange();
}


/*
 * Should be called after basic local migration is finalized,
 * when all local elements are already available.
 */
void
NonlocalMaterialWTP :: migrate()
{
    Domain *domain = this->lb->giveDomain();
    EngngModel *emodel = domain->giveEngngModel();
    int nproc = emodel->giveNumberOfProcesses();
    int myrank = emodel->giveRank();
    CommunicatorBuff cb(nproc, CBT_dynamic);
    Communicator com(emodel, &cb, myrank, nproc, CommMode_Dynamic);
    StaticCommunicationBuffer commBuff(MPI_COMM_WORLD);

    /*
     * Build the domain nonlocal element dependency list, then exclude local elements - what remains are unsatisfied
     * remote dependencies that have to be broadcast and received from the partitions owning the relevant elements.
     */
    int _locsize, i, _i, ie, _size, _globnum, result, nelems = domain->giveNumberOfElements();
    int _globsize, _val;
    Element *elem;
    std :: set< int >domainElementDepSet;
    // loop over each element dependency list to assemble the domain list
    for ( ie = 1; ie <= nelems; ie++ ) {
        elem = domain->giveElement(ie);
        if ( elem->giveParallelMode() == Element_local ) {
            _globnum = elem->giveGlobalNumber();
            IntArray &iedep = nonlocElementDependencyMap [ _globnum ];
            _size = iedep.giveSize();
            for ( _i = 1; _i <= _size; _i++ ) {
                domainElementDepSet.insert( iedep.at(_i) );
            }

#if NonlocalMaterialWTP_DEBUG_PRINT
            fprintf(stderr, "[%d] element %d dependency:", myrank, _globnum);
            for ( _i = 1; _i <= _size; _i++ ) {
                fprintf( stderr, "%d ", iedep.at(_i) );
            }

            fprintf(stderr, "\n");
#endif
        }
    }

#if NonlocalMaterialWTP_DEBUG_PRINT
    fprintf(stderr, "[%d] nonlocal domain dependency:", myrank);
    for ( int eldep: domainElementDepSet ) {
        fprintf(stderr, "%d ", eldep);
    }

    fprintf(stderr, "\n");
#endif

    // now exclude local elements (local dependency is always satisfied)
    for ( _i = 1; _i <= nelems; _i++ ) {
        elem = domain->giveElement(_i);
        if ( elem->giveParallelMode() == Element_local ) {
            domainElementDepSet.erase( elem->giveGlobalNumber() );
        }
    }

#if NonlocalMaterialWTP_DEBUG_PRINT
    fprintf(stderr, "[%d] remote elem wish list:", myrank);
    for ( int eldep: domainElementDepSet ) {
        fprintf(stderr, "%d ", eldep);
    }

    fprintf(stderr, "\n");
#endif

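    /*
     * Wish-list exchange: every partition broadcasts its own wish list in
     * turn. A single static buffer is reused for all the broadcasts, so it is
     * sized via MPI_Allreduce(MAX) to fit the largest list on any rank; the
     * "+ 1" below accounts for the leading integer carrying the list size.
     */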
    // broadcast remaining elements (unsatisfied domain nonlocal dependency) to the other partitions
    _locsize = domainElementDepSet.size() + 1;
    result = MPI_Allreduce(& _locsize, & _globsize, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
    if ( result != MPI_SUCCESS ) {
        OOFEM_ERROR("MPI_Allreduce to determine broadcast buffer size failed");
    }

    commBuff.resize( commBuff.givePackSizeOfInt(_globsize) );
    // remote domain wish list
    std :: set< int >remoteWishSet;

    toSendList.resize(nproc);
    for ( i = 0; i < nproc; i++ ) { // loop over partitions
        commBuff.init();
        toSendList [ i ].clear();
        if ( i == myrank ) {
            // current domain has to send its receive wish list to all domains
            commBuff.write(_locsize);
            for ( int eldep: domainElementDepSet ) {
                commBuff.write(eldep);
            }

            result = commBuff.bcast(i);
        } else {
            // unpack remote domain wish list
            remoteWishSet.clear();
            result = commBuff.bcast(i);
            // unpack size
            commBuff.read(_size);
            for ( _i = 1; _i < _size; _i++ ) {
                commBuff.read(_val);
                remoteWishSet.insert(_val);
            }

            // determine which local elements are to be sent to the remote partition
            for ( _i = 1; _i <= nelems; _i++ ) {
                elem = domain->giveElement(_i);
                if ( elem->giveParallelMode() == Element_local ) {
                    if ( remoteWishSet.find( elem->giveGlobalNumber() ) != remoteWishSet.end() ) {
                        // store local element number
                        toSendList [ i ].push_back(_i);
                    }
                }
            }
        }
    } // end loop over partitions broadcast
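
    /*
     * At this point toSendList [ j ] holds the local numbers of all elements
     * that partition j asked for; their data (and the data of their nodes)
     * are shipped below through the dynamic communicator.
     */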

#if NonlocalMaterialWTP_DEBUG_PRINT
    for ( i = 0; i < nproc; i++ ) { // loop over partitions
        // print some info
        fprintf(stderr, "[%d] elements scheduled for mirroring at [%d]:",
                myrank, i);
        for ( int elnum: toSendList [ i ] ) {
            fprintf( stderr, "%d[%d] ", elnum, domain->giveElement(elnum)->giveGlobalNumber() );
        }

        fprintf(stderr, "\n");
    }
#endif

    com.packAllData(this, domain, & NonlocalMaterialWTP :: packRemoteElements);
    com.initExchange(MIGRATE_REMOTE_ELEMENTS_TAG);
    com.unpackAllData(this, domain, & NonlocalMaterialWTP :: unpackRemoteElements);
    com.finishExchange();

    domain->commitTransactions( domain->giveTransactionManager() );

#ifdef __VERBOSE_PARALLEL
    VERBOSEPARALLEL_PRINT("NonlocalMaterialWTP::migrate", "Finished migrating remote elements", myrank);
#endif
}


void
NonlocalMaterialWTP :: update()
{
    /* Use nonlocElementDependencyMap, which is available for each element,
     * to quickly reinitialize the nonlocal integration tables.
     * The map is no longer needed afterwards, so it is deleted to free memory.
     */
    this->fastRebuildNonlocalTables();
    // delete element dependency arrays
    nonlocElementDependencyMap.clear();
}


int NonlocalMaterialWTP :: packMigratingElementDependencies(Domain *d, ProcessCommunicator &pc)
{
    int myrank = d->giveEngngModel()->giveRank();
    int iproc = pc.giveRank();

    if ( iproc == myrank ) {
        return 1; // skip local partition
    }

    // query process communicator buffer to use
    ProcessCommunicatorBuff *pcbuff = pc.giveProcessCommunicatorBuff();

    int ielem, nelem = d->giveNumberOfElements();
    int _globnum;
    Element *elem;

    for ( ielem = 1; ielem <= nelem; ielem++ ) { // begin loop over elements
        elem = d->giveElement(ielem);
        if ( ( elem->giveParallelMode() == Element_local ) &&
             ( lb->giveElementPartition(ielem) == iproc ) ) {
            // pack local element (node numbers should be global ones!!!)
            // pack type
            _globnum = elem->giveGlobalNumber();
            pcbuff->write(_globnum);
            nonlocElementDependencyMap [ _globnum ].storeYourself(*pcbuff);
        }
    } // end loop over elements

    // pack end-of-element-record
    pcbuff->write(NonlocalMaterialWTP_END_DATA);

    return 1;
}
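
/*
 * Wire format of the dependency stream: a sequence of (global element number,
 * dependency IntArray) records, closed by the NonlocalMaterialWTP_END_DATA
 * sentinel that the unpack routine below watches for.
 */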

int NonlocalMaterialWTP :: unpackMigratingElementDependencies(Domain *d, ProcessCommunicator &pc)
{
    int myrank = d->giveEngngModel()->giveRank();
    int iproc = pc.giveRank();
    int _globnum;

    if ( iproc == myrank ) {
        return 1; // skip local partition
    }

    // query process communicator buffer to use
    ProcessCommunicatorBuff *pcbuff = pc.giveProcessCommunicatorBuff();

    // unpack element data
    do {
        pcbuff->read(_globnum);
        if ( _globnum == NonlocalMaterialWTP_END_DATA ) {
            break;
        }

        nonlocElementDependencyMap [ _globnum ].restoreYourself(*pcbuff);
    } while ( 1 );

    return 1;
}

int NonlocalMaterialWTP :: packRemoteElements(Domain *d, ProcessCommunicator &pc)
{
    int myrank = d->giveEngngModel()->giveRank();
    int iproc = pc.giveRank();
    int nnodes, inode;
    DofManager *node, *dofman;
    Element *elem;

    if ( iproc == myrank ) {
        return 1; // skip local partition
    }

    // query process communicator buffer to use
    ProcessCommunicatorBuff *pcbuff = pc.giveProcessCommunicatorBuff();

    // here we have to pack also nodes that are shared by the packed elements !!!
    // assemble the set of nodes needed by those elements;
    // these have to be sent (except those already shared with the target partition)
    std :: set< int >nodesToSend;
    for ( int ie: toSendList [ iproc ] ) {
        //ie = d->elementGlobal2Local(gie);
        elem = d->giveElement(ie);
        nnodes = elem->giveNumberOfDofManagers();
        for ( int i = 1; i <= nnodes; i++ ) {
            node = elem->giveDofManager(i);
            if ( ( node->giveParallelMode() == DofManager_local ) ||
                 ( node->isShared() && !node->givePartitionList()->contains(iproc) ) ) {
                nodesToSend.insert( node->giveGlobalNumber() );
            }
        }
    }
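
    /*
     * A node is scheduled for sending if it is local here, or if it is shared
     * but partition iproc is not among its owners - in either case iproc has
     * no copy of it yet, so it must receive the node as a null (mirror) node.
     */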

    // pack nodes that become null nodes on remote partition
    for ( int in: nodesToSend ) {
        inode = d->dofmanGlobal2Local(in);
        dofman = d->giveDofManager(inode);
        pcbuff->write( dofman->giveInputRecordName() );
        dofman->saveContext(*pcbuff, CM_Definition | CM_DefinitionGlobal | CM_State | CM_UnknownDictState);
    }

    pcbuff->write("");

    for ( int ie: toSendList [ iproc ] ) {
        //ie = d->elementGlobal2Local(gie);
        elem = d->giveElement(ie);
        // pack local element (node numbers should be global ones!!!)
        // pack type
        pcbuff->write( elem->giveInputRecordName() );
        elem->saveContext(*pcbuff, CM_Definition | CM_DefinitionGlobal | CM_State);
    }

    pcbuff->write("");

    return 1;
}
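
/*
 * The remote-element stream thus consists of two blocks - dofmans first, then
 * elements - each a sequence of (input record name, saved context) records
 * terminated by an empty string, mirrored exactly by unpackRemoteElements below.
 */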

int NonlocalMaterialWTP :: unpackRemoteElements(Domain *d, ProcessCommunicator &pc)
{
    int myrank = d->giveEngngModel()->giveRank();
    int iproc = pc.giveRank();
    std :: string _type;
    IntArray _partitions;

    if ( iproc == myrank ) {
        return 1; // skip local partition
    }

    // query process communicator buffer to use
    ProcessCommunicatorBuff *pcbuff = pc.giveProcessCommunicatorBuff();

    // unpack dofman data
    do {
        pcbuff->read(_type);
        if ( _type.size() == 0 ) {
            break;
        }
        auto dofman = classFactory.createDofManager(_type.c_str(), 0, d);
        dofman->restoreContext(*pcbuff, CM_Definition | CM_State | CM_UnknownDictState);
        dofman->setParallelMode(DofManager_null);
        if ( d->dofmanGlobal2Local( dofman->giveGlobalNumber() ) ) {
            // record already exists; discard the unpacked copy
        } else {
            d->giveTransactionManager()->addDofManTransaction(DomainTransactionManager :: DTT_ADD,
                                                              dofman->giveGlobalNumber(),
                                                              dofman.release());
        }
    } while ( 1 );

    // unpack element data
    _partitions.resize(1);
    _partitions.at(1) = iproc;
    do {
        pcbuff->read(_type);
        if ( _type.size() == 0 ) {
            break;
        }

        auto elem = classFactory.createElement(_type.c_str(), 0, d).release();
        elem->restoreContext(*pcbuff, CM_Definition | CM_State);
        elem->setParallelMode(Element_remote);
        elem->setPartitionList(_partitions);
        d->giveTransactionManager()->addElementTransaction(DomainTransactionManager :: DTT_ADD,
                                                           elem->giveGlobalNumber(), elem);
    } while ( 1 );

    return 1;
}

/*
 * Use nonlocElementDependencyMap, which is available for each local element,
 * to quickly reinitialize the nonlocal integration tables after migration:
 * the existing data are cleared first, and the new tables are initialized
 * profiting from nonlocElementDependencyMap.
 */

void
NonlocalMaterialWTP :: fastRebuildNonlocalTables()
{
    Domain *d = lb->giveDomain();
    int n, i, globnum, ie, nelem = d->giveNumberOfElements();
    IntArray localElementDep;
    Element *elem;

    // rebuild nonlocal element dependency array for each local element
    for ( ie = 1; ie <= nelem; ie++ ) {
        elem = d->giveElement(ie);
        if ( elem->giveParallelMode() == Element_local ) {
            // translate nonlocElementDependencyMap [ globnum ] to the corresponding local numbers
            globnum = elem->giveGlobalNumber();
            n = nonlocElementDependencyMap [ globnum ].giveSize();
            localElementDep.resize(n);
            for ( i = 1; i <= n; i++ ) {
                localElementDep.at(i) = d->elementGlobal2Local( nonlocElementDependencyMap [ globnum ].at(i) );
            }

            elem->ipEvaluator(this, & NonlocalMaterialWTP :: fastElementIPNonlocTableUpdater, localElementDep);
        }
    }
}


void
NonlocalMaterialWTP :: fastElementIPNonlocTableUpdater(GaussPoint *gp, IntArray &map)
{
    Element *elem = gp->giveElement();
    NonlocalMaterialExtensionInterface *iface =
        static_cast< NonlocalMaterialExtensionInterface * >( elem->giveMaterial()->giveInterface(NonlocalMaterialExtensionInterfaceType) );
    if ( iface ) {
        iface->rebuildNonlocalPointTable(gp, & map);
    }
}
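
/*
 * Supplying the precomputed list of contributing elements is what makes the
 * rebuild "fast": each integration point only has to inspect the listed
 * candidates instead of searching the whole domain for nonlocal neighbours.
 */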
} // end namespace oofem