OOFEM 3.0
problemcomm.C
1/*
2 *
3 * ##### ##### ###### ###### ### ###
4 * ## ## ## ## ## ## ## ### ##
5 * ## ## ## ## #### #### ## # ##
6 * ## ## ## ## ## ## ## ##
7 * ## ## ## ## ## ## ## ##
8 * ##### ##### ## ###### ## ##
9 *
10 *
11 * OOFEM : Object Oriented Finite Element Code
12 *
13 * Copyright (C) 1993 - 2025 Borek Patzak
14 *
15 *
16 *
17 * Czech Technical University, Faculty of Civil Engineering,
18 * Department of Structural Mechanics, 166 29 Prague, Czech Republic
19 *
20 * This library is free software; you can redistribute it and/or
21 * modify it under the terms of the GNU Lesser General Public
22 * License as published by the Free Software Foundation; either
23 * version 2.1 of the License, or (at your option) any later version.
24 *
25 * This program is distributed in the hope that it will be useful,
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
28 * Lesser General Public License for more details.
29 *
30 * You should have received a copy of the GNU Lesser General Public
31 * License along with this library; if not, write to the Free Software
32 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
33 */
34
35#include "problemcomm.h"
36#include "intarray.h"
37#include "error.h"
38#include "engngm.h"
39#include "element.h"
40#include "dofmanager.h"
41
42#ifdef __USE_MPI
43 #include <mpi.h>
44#endif
45
46#define __VERBOSE_PARALLEL
47
48namespace oofem {
49ProblemCommunicator :: ProblemCommunicator(EngngModel *emodel, CommunicatorBuff *b, int rank, int size) :
50 Communicator(emodel, b, rank, size)
51{
52 this->initialized = false;
53}
54
55
56ProblemCommunicator :: ~ProblemCommunicator()
57{ }
58
59
60NodeCommunicator :: NodeCommunicator(EngngModel *emodel, CommunicatorBuff *b, int rank, int size) :
61 ProblemCommunicator(emodel, b, rank, size)
62{ }
63
64ElementCommunicator :: ElementCommunicator(EngngModel *emodel, CommunicatorBuff *b, int rank, int size) :
65 ProblemCommunicator(emodel, b, rank, size)
66{ }
67
68void
69NodeCommunicator :: setUpCommunicationMaps(EngngModel *pm, bool excludeSelfCommFlag, bool forceReinit)
70{
71#ifdef __VERBOSE_PARALLEL
72 VERBOSEPARALLEL_PRINT("NodeCommunicator :: setUpCommunicationMaps", "Setting up communication maps", rank);
73#endif
74
75 if ( !forceReinit && initialized ) {
76 return;
77 }
78
79 Domain *domain = pm->giveDomain(1);
80 int nnodes = domain->giveNumberOfDofManagers();
81 int size = this->processComms.size();
82 //
83 // receive and send maps are the same and are assembled locally
84 // using DofManager's partition lists.
85 //
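// (The exchange for a shared DofManager is symmetric: every partition in its
// partition list both sends its own contribution and receives the others',
// which is why a single map can serve as both the send and the receive map.)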
86
87 IntArray domainNodeSendCount(size);
88
89 for ( int i = 1; i <= nnodes; i++ ) {
90 DofManager *dman = domain->giveDofManager(i);
91 const IntArray *partitionList = dman->givePartitionList();
92 if ( dman->giveParallelMode() == DofManager_shared ) {
93 for ( int j = 1; j <= partitionList->giveSize(); j++ ) {
94 if ( !( excludeSelfCommFlag && ( this->rank == partitionList->at(j) ) ) ) {
95 domainNodeSendCount.at(partitionList->at(j) + 1)++;
96 }
97 }
98 }
99 }
100
101 // build maps simultaneously
102 IntArray pos(size);
103 std :: vector< IntArray >maps( size );
104 for ( int i = 0; i < size; i++ ) {
105 maps [ i ].resize( domainNodeSendCount.at ( i + 1 ) );
106 }
107
108
109 for ( int i = 1; i <= nnodes; i++ ) {
110 DofManager *dman = domain->giveDofManager(i);
111 // if a combined node & element cut can occur, test for the shared DofMan mode
112 const IntArray *partitionList = dman->givePartitionList();
113 if ( dman->giveParallelMode() == DofManager_shared ) {
114 for ( int j = 1; j <= partitionList->giveSize(); j++ ) {
115 int partition = partitionList->at(j);
116 if ( !( excludeSelfCommFlag && ( this->rank == partition ) ) ) {
117 maps [ partition ].at( ++pos.at(partition + 1) ) = i;
118 }
119 }
120 }
121 }
122
123 // set up domain communicators maps
124 for ( int i = 0; i < size; i++ ) {
125 this->setProcessCommunicatorToSendArry(this->giveProcessCommunicator(i), maps [ i ]);
126 this->setProcessCommunicatorToRecvArry(this->giveProcessCommunicator(i), maps [ i ]);
127 //this->giveDomainCommunicator(i)->setToSendArry (this->engngModel, maps[i]);
128 //this->giveDomainCommunicator(i)->setToRecvArry (this->engngModel, maps[i]);
129 }
130
131 initialized = true;
132}
133
134
135void
136ElementCommunicator :: setUpCommunicationMaps(EngngModel *pm, bool excludeSelfCommFlag, bool forceReinit)
137{
138#ifdef __VERBOSE_PARALLEL
139 VERBOSEPARALLEL_PRINT("ElementCommunicator :: setUpCommunicationMaps", "Setting up communication maps", rank);
140#endif
141
142 if ( !forceReinit && initialized ) {
143 return;
144 }
145
146 OOFEM_LOG_RELEVANT("[%d] ElementCommunicator :: Setting up communication maps\n", rank);
147
148 Domain *domain = pm->giveDomain(1);
149 int size = this->processComms.size();
150
151 /*
152 * Initially, each partition knows for which nodes a receive
153 * is needed (and can therefore easily compute the recv map),
154 * but does not know for which nodes it should send data to which
155 * partition. Hence, the communication setup is performed by
156 * broadcasting "send request" lists of nodes for which
157 * a partition expects to receive data (i.e. of those nodes
158 * which the partition uses, but does not own) to all
159 * collaborating processes. The "send request" lists are then
160 * converted into send maps.
161 */
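// Concrete example of the steps below: if this partition holds a remote
// (mirror) element whose master copy lives on partition 2, the element's
// global number is placed in domainRecvList and the element itself in the
// receive map kept for partition 2; when partition 2 receives that broadcast,
// it looks up its local master element with the same global number and adds
// it to the send map it keeps for this partition.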
162
163 // receive maps can be built locally,
164 // but send maps should be assembled from broadcasted lists (containing
165 // expected receive nodes) of remote partitions.
166
167 // first build local receive map
168 IntArray domainNodeRecvCount(size);
169 int domainRecvListSize = 0, domainRecvListPos = 0;
170 int nelems;
171 int result = 1;
172
173 nelems = domain->giveNumberOfElements();
174 for ( int i = 1; i <= nelems; i++ ) {
175 Element *element = domain->giveElement(i);
176 const IntArray *partitionList = element->givePartitionList();
177 if ( element->giveParallelMode() == Element_remote ) {
178 // size of partitionList should be 1 <== only one master
179 for ( int j = 1; j <= partitionList->giveSize(); j++ ) {
180 if ( !( excludeSelfCommFlag && ( this->rank == partitionList->at(j) ) ) ) {
181 domainRecvListSize++;
182 domainNodeRecvCount.at(partitionList->at(j) + 1)++;
183 }
184 }
185 }
186 }
187
188 // build maps simultaneously
189 IntArray pos(size);
190 std :: vector< IntArray >maps( size );
191 for ( int i = 0; i < size; i++ ) {
192 maps [ i ].resize( domainNodeRecvCount.at ( i + 1 ) );
193 }
194
195 // allocate also domain receive list to be broadcasted
196 IntArray domainRecvList(domainRecvListSize);
197
198 if ( domainRecvListSize ) {
199 for ( int i = 1; i <= nelems; i++ ) {
200 // test if element is remote one
201 Element *element = domain->giveElement(i);
202 if ( element->giveParallelMode() == Element_remote ) {
203 domainRecvList.at(++domainRecvListPos) = element->giveGlobalNumber();
204
205 const IntArray *partitionList = element->givePartitionList();
206 // size of partitionList should be 1 <== only one master
207 for ( int j = 1; j <= partitionList->giveSize(); j++ ) {
208 if ( !( excludeSelfCommFlag && ( this->rank == partitionList->at(j) ) ) ) {
209 int partition = partitionList->at(j);
210 maps [ partition ].at( ++pos.at(partition + 1) ) = i;
211 }
212 }
213 }
214 }
215 }
216
217 // set up domains recv communicator maps
218 for ( int i = 0; i < size; i++ ) {
219 this->setProcessCommunicatorToRecvArry(this->giveProcessCommunicator(i), maps [ i ]);
220 //this->giveDomainCommunicator(i)->setToRecvArry(this->engngModel, maps [ i ]);
221 }
222
223
224#ifdef __VERBOSE_PARALLEL
225 for ( int i = 0; i < size; i++ ) {
226 fprintf(stderr, "domain %d-%d: domainCommRecvsize is %d\n", rank, i, this->giveProcessCommunicator(i)->giveToRecvMap().giveSize() );
227 printf("domain %d-%d: recv map:", rank, i);
228 this->giveProcessCommunicator(i)->giveToRecvMap().printYourself();
229 }
230#endif
231
232
233 // to assemble send maps, we must analyze broadcasted remote domain send lists
234 // and we must also broadcast our send list.
235
236#ifdef __VERBOSE_PARALLEL
237 VERBOSEPARALLEL_PRINT("ProblemCommunicator::setUpCommunicationMaps", "Remote Element-cut broadcasting started", rank);
238#endif
239
240
241 StaticCommunicationBuffer commBuff(MPI_COMM_WORLD);
242 IntArray remoteDomainRecvList;
243 IntArray toSendMap;
244 int localExpectedSize, globalRecvSize;
245 int sendMapPos, sendMapSize, globalDofManNum;
246
247 // determine the size of receive buffer using AllReduce operation
248#ifndef IBM_MPI_IMPLEMENTATION
249 localExpectedSize = domainRecvList.givePackSize(commBuff);
250#else
251 localExpectedSize = domainRecvList.givePackSize(commBuff) + 1;
252#endif
253
254
255#ifdef __USE_MPI
256 result = MPI_Allreduce(& localExpectedSize, & globalRecvSize, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
257 if ( result != MPI_SUCCESS ) {
258 OOFEM_ERROR("MPI_Allreduce failed");
259 }
260
261#else
262WARNING: NOT SUPPORTED MESSAGE PASSING LIBRARY
263#endif
264
265#ifdef __VERBOSE_PARALLEL
266 VERBOSEPARALLEL_PRINT("ProblemCommunicator::setUpCommunicationMaps", "Finished reducing receiveBufferSize", rank);
267#endif
268
269
270 // resize to fit largest received message
271 commBuff.resize(globalRecvSize);
272
273 // resize toSend map to max possible size
274 toSendMap.resize(globalRecvSize);
275
276 for ( int i = 0; i < size; i++ ) { // loop over domains
277 commBuff.init();
278 if ( i == rank ) {
279 //current domain has to send its receive list to all domains
280 // broadcast domainRecvList
281
282#ifdef __VERBOSE_PARALLEL
283 VERBOSEPARALLEL_PRINT("ProblemCommunicator::setUpCommunicationMaps", "Broadcasting own send list", rank);
284#endif
285
286 domainRecvList.storeYourself(commBuff);
287 result = commBuff.bcast(i);
288 if ( result != MPI_SUCCESS ) {
289 OOFEM_ERROR("commBuff broadcast failed");
290 }
291
292#ifdef __VERBOSE_PARALLEL
293 VERBOSEPARALLEL_PRINT("ProblemCommunicator::setUpCommunicationMaps", "Broadcasting own send list finished", rank);
294#endif
295 } else {
296#ifdef __VERBOSE_PARALLEL
297 OOFEM_LOG_DEBUG("[process rank %3d]: %-30s: Receiving broadcasted send map from partition %3d\n",
298 rank, "ProblemCommunicator :: setUpCommunicationMaps", i);
299#endif
300 // receive broadcasted lists
301 result = commBuff.bcast(i);
302 if ( result != MPI_SUCCESS ) {
303 OOFEM_ERROR("commBuff broadcast failed");
304 }
305
306#ifdef __VERBOSE_PARALLEL
307 OOFEM_LOG_DEBUG("[process rank %3d]: %-30s: Receiving broadcasted send map from partition %3d finished\n",
308 rank, "ProblemCommunicator :: setUpCommunicationMaps", i);
309#endif
310
311
312 // unpack remote receive list
313 if ( remoteDomainRecvList.restoreYourself(commBuff) != CIO_OK ) {
314 OOFEM_ERROR("unpack remote receive list failed");
315 }
316
317 // find if remote elements are in local partition
318 // if yes, add them into the send map for the corresponding i-th partition
319 sendMapPos = 0;
320 sendMapSize = 0;
321 // determine sendMap size
322 for ( int j = 1; j <= nelems; j++ ) { // loop over local elements
323 Element *element = domain->giveElement(j);
324 if ( element->giveParallelMode() == Element_local ) {
325 globalDofManNum = element->giveGlobalNumber();
326 // test if globalDofManNum is in remoteDomainRecvList
327 if ( remoteDomainRecvList.findFirstIndexOf(globalDofManNum) ) {
328 sendMapSize++;
329 }
330 }
331 }
332
333 toSendMap.resize(sendMapSize);
334
335 for ( int j = 1; j <= nelems; j++ ) { // loop over local elements
336 Element *element = domain->giveElement(j);
337 if ( element->giveParallelMode() == Element_local ) {
338 globalDofManNum = element->giveGlobalNumber();
339 // test if globalDofManNum is in remoteDomainRecvList
340 if ( remoteDomainRecvList.findFirstIndexOf(globalDofManNum) ) {
341 // add this local element number to the send map for the active partition
342 toSendMap.at(++sendMapPos) = j;
343 }
344 }
345 } // end loop over local elements
346
347 // set send map to i-th process communicator
348 this->setProcessCommunicatorToSendArry(this->giveProcessCommunicator(i), toSendMap);
349
350#ifdef __VERBOSE_PARALLEL
351 fprintf(stderr, "domain %d-%d: domainCommSendsize is %d\n", rank, i, this->giveProcessCommunicator(i)->giveToSendMap().giveSize() );
352 printf("domain %d-%d: send map:", rank, i);
353 this->giveProcessCommunicator(i)->giveToSendMap().printYourself();
354
355#endif
356
357
358 //this->giveDomainCommunicator(i)->setToSendArry (this->engngModel, toSendMap);
359 } // end receiving broadcasted lists
360
361#ifdef __VERBOSE_PARALLEL
362 VERBOSEPARALLEL_PRINT("ProblemCommunicator::setUpCommunicationMaps", "Receiving broadcasted send maps finished", rank);
363#endif
364 } // end loop over domains
365
366 initialized = true;
367}
368
369int
370NodeCommunicator :: setProcessCommunicatorToSendArry(ProcessCommunicator *processComm, IntArray &map)
371{
372 sortCommMap(map, & ProblemCommunicator :: DofManCmp);
373 processComm->setToSendArry(engngModel, map, 0);
374 return 1;
375}
376
377int
378NodeCommunicator :: setProcessCommunicatorToRecvArry(ProcessCommunicator *processComm, IntArray &map)
379{
380 sortCommMap(map, & ProblemCommunicator :: DofManCmp);
381 processComm->setToRecvArry(engngModel, map, 0);
382 return 1;
383}
384
385int
386ElementCommunicator :: setProcessCommunicatorToSendArry(ProcessCommunicator *processComm, IntArray &map)
387{
388 sortCommMap(map, & ProblemCommunicator :: ElemCmp);
389 processComm->setToSendArry(engngModel, map, 1);
390 return 1;
391}
392
393int
394ElementCommunicator :: setProcessCommunicatorToRecvArry(ProcessCommunicator *processComm, IntArray &map)
395{
396 sortCommMap(map, & ProblemCommunicator :: ElemCmp);
397 processComm->setToRecvArry(engngModel, map, 1);
398 return 1;
399}
400
401
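// Note: the comparators below order map entries by global number, so all
// partitions traverse a shared map in the same order and the packed message
// layouts on the sending and receiving sides match.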
402void
403ProblemCommunicator :: sortCommMap( IntArray &map, int ( ProblemCommunicator :: *cmp )( int, int ) )
404{
405 this->quickSortCommMap(map, 1, map.giveSize(), cmp);
406}
407
408
409void
410ProblemCommunicator :: quickSortCommMap( IntArray &map, int l, int r, int ( ProblemCommunicator :: *cmp )( int, int ) )
411{
412 if ( r <= l ) {
413 return;
414 }
415
416 int i = quickSortPartition(map, l, r, cmp);
417 quickSortCommMap(map, l, i - 1, cmp);
418 quickSortCommMap(map, i + 1, r, cmp);
419}
420
421
422int
423ProblemCommunicator :: quickSortPartition( IntArray &map, int l, int r, int ( ProblemCommunicator :: *cmp )( int, int ) )
424{
425 int i = l - 1, j = r;
426 int v = map.at(r);
427
428 for ( ; ; ) {
429 while ( ( ( this->*cmp )(map.at(++i), v) ) < 0 ) {
430 ;
431 }
432
433 while ( ( ( this->*cmp )( v, map.at(--j) ) ) < 0 ) {
434 if ( j == l ) {
435 break;
436 }
437 }
438
439 if ( i >= j ) {
440 break;
441 }
442
443 std::swap(map.at(i), map.at(j));
444 }
445
446 std::swap(map.at(i), map.at(r));
447 return i;
448}
449
450
451int
452ProblemCommunicator :: DofManCmp(int i, int j)
453{
454 return ( engngModel->giveDomain(1)->giveDofManager(i)->giveGlobalNumber() -
455 engngModel->giveDomain(1)->giveDofManager(j)->giveGlobalNumber() );
456}
457int
458ProblemCommunicator :: ElemCmp(int i, int j)
459{
460 return ( engngModel->giveDomain(1)->giveElement(i)->giveGlobalNumber() -
461 engngModel->giveDomain(1)->giveElement(j)->giveGlobalNumber() );
462}
463} // end namespace oofem
464
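A minimal usage sketch of how the communicators defined above are typically wired together. It uses only the constructors and setUpCommunicationMaps() shown in this file; the helper name exampleSetup and the way the CommunicatorBuff, rank and size are obtained are illustrative assumptions, not the actual OOFEM call sites.

// Hedged sketch: assumes emodel and buff are supplied by the owning
// engineering model (buffer allocation details live in combuff.h).
void exampleSetup(EngngModel *emodel, CommunicatorBuff *buff, int myrank, int nproc)
{
    // Node-cut exchange: data for shared DofManagers. An ElementCommunicator
    // for remote (mirror) elements would be set up the same way, with its own buffer.
    NodeCommunicator nodeComm(emodel, buff, myrank, nproc);

    // Build the send/receive maps once: exclude self-communication,
    // do not force re-initialization if they are already set up.
    nodeComm.setUpCommunicationMaps(emodel, true, false);
}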

This page is part of the OOFEM-3.0 documentation. Copyright (C) 1994-2025 Bořek Patzák.
Project e-mail: oofem@fsv.cvut.cz
Generated for OOFEM by doxygen 1.15.0 (written by Dimitri van Heesch).