OOFEM 3.0
parallelordering.C
/*
 *
 *                 #####    #####   ######  ######  ###   ###
 *               ##   ##  ##   ##  ##      ##      ## ### ##
 *              ##   ##  ##   ##  ####    ####    ##  #  ##
 *             ##   ##  ##   ##  ##      ##      ##     ##
 *            ##   ##  ##   ##  ##      ##      ##     ##
 *            #####    #####   ##      ######  ##     ##
 *
 *
 *             OOFEM : Object Oriented Finite Element Code
 *
 *               Copyright (C) 1993 - 2025   Borek Patzak
 *
 *
 *
 *       Czech Technical University, Faculty of Civil Engineering,
 *   Department of Structural Mechanics, 166 29 Prague, Czech Republic
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2.1 of the License, or (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "parallelordering.h"
#include "engngm.h"
#include "domain.h"
#include "dofmanager.h"
#include "combuff.h"
#include "mathfem.h"

namespace oofem {
bool
ParallelOrdering :: isLocal(DofManager *dman)
{
    int myrank = dman->giveDomain()->giveEngngModel()->giveRank();
    if ( dman->giveParallelMode() == DofManager_local ) {
        return true;
    }

    if ( dman->giveParallelMode() == DofManager_shared ) {
        // determine whether this partition has the lowest rank among those sharing the dofman;
        // if so, the receiver is responsible for assigning (delivering) its number
        return myrank <= dman->givePartitionList()->minimum();
    }

    return false;
}
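
/*
 * Example (illustrative): a node shared by partitions {1, 3} is "local" on
 * rank 1 and remote on rank 3; rank 1 assigns its global equation numbers
 * and ships them to rank 3 during Natural2GlobalOrdering :: init below.
 */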

bool
ParallelOrdering :: isShared(DofManager *dman)
{
    return dman->giveParallelMode() == DofManager_shared;
}

Natural2GlobalOrdering :: Natural2GlobalOrdering() : ParallelOrdering(), locGlobMap(), globLocMap()
{
    l_neqs = g_neqs = 0;
}
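
/*
 * Usage sketch (illustrative, not part of the original file): a typical
 * caller builds the ordering once per domain, then translates local
 * equation numbers to the global, partition-spanning numbering. The
 * numbering scheme EModelDefaultEquationNumbering is an assumption here.
 *
 *   Natural2GlobalOrdering n2g;
 *   n2g.init(emodel, 1, EModelDefaultEquationNumbering());
 *   int geq = n2g.giveNewEq(leq); // 0-based global eq of local (1-based) eq 'leq'
 */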


void
Natural2GlobalOrdering :: init(EngngModel *emodel, int di, const UnknownNumberingScheme &n)
{
    Domain *d = emodel->giveDomain(di);
    int ndofman = d->giveNumberOfDofManagers();
    int myrank = emodel->giveRank();
    DofManager *dman;
    // determine the number of local eqs + those shared DOFs which are numbered by the receiver;
    // a shared dofman is numbered on the partition with the lowest rank

#ifdef __VERBOSE_PARALLEL
    VERBOSEPARALLEL_PRINT("Natural2GlobalOrdering :: init", "initializing N2G ordering", myrank);
#endif

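    /*
     * Overview of the numbering exchange implemented below (summarizing the
     * code that follows): (1) count equations owned by this partition (local
     * dofmans plus shared ones whose lowest sharing rank is this one);
     * (2) gather these counts from all processes to compute this partition's
     * global offset and the total number of equations; (3) number the owned
     * equations; (4) send the assigned numbers of shared dofmans to the other
     * sharing partitions and receive the numbers owned elsewhere.
     */
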
    l_neqs = 0;
    for ( int i = 1; i <= ndofman; i++ ) {
        dman = d->giveDofManager(i);
#if 0
        if ( dman->giveParallelMode() == DofManager_local ) { // count all dofman eqs
            for ( Dof *dof: *dman ) {
                if ( dof->isPrimaryDof() ) {
                    if ( dof->giveEquationNumber() ) {
                        l_neqs++;
                    }
                }
            }
        } else if ( dman->giveParallelMode() == DofManager_shared ) {
            // determine whether this partition has the lowest rank among those sharing the dofman;
            // if so, the receiver is responsible for assigning its number
            IntArray *plist = dman->givePartitionList();
            int n = plist->giveSize();
            int minrank = myrank;
            for ( int j = 1; j <= n; j++ ) {
                minrank = min( minrank, plist->at(j) );
            }
            if ( minrank == myrank ) { // count eqs
                for ( Dof *dof: *dman ) {
                    if ( dof->isPrimaryDof() ) {
                        if ( dof->giveEquationNumber() ) {
                            l_neqs++;
                        }
                    }
                }
            }
        } // end shared dman
#endif
        if ( isLocal(dman) ) {
            for ( auto &dof: *dman ) {
                if ( dof->isPrimaryDof() ) {
                    if ( dof->giveEquationNumber(n) ) {
                        l_neqs++;
                    }
                }
            }
        }
    }

    // exchange with the other procs the number of eqs numbered on each proc
    int *leqs = new int [ emodel->giveNumberOfProcesses() ];
    MPI_Allgather(& l_neqs, 1, MPI_INT, leqs, 1, MPI_INT, MPI_COMM_WORLD);
    // compute local offset
    int offset = 0;
    for ( int j = 0; j < myrank; j++ ) {
        offset += leqs [ j ];
    }
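
    /*
     * Worked example (illustrative): with three processes and gathered
     * counts leqs = { 4, 2, 3 }, rank 0 gets offset 0, rank 1 offset 4,
     * rank 2 offset 6; the global numbering is thus contiguous and
     * 0-based across partitions (g_neqs = 9 below).
     */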

    // count global number of eqs
    g_neqs = 0;
    for ( int j = 0; j < emodel->giveNumberOfProcesses(); j++ ) {
        g_neqs += leqs [ j ];
    }

    // send numbered shared ones
    locGlobMap.resize( emodel->giveNumberOfDomainEquations(di, n) );

    // determine shared dofs
    int psize, nproc = emodel->giveNumberOfProcesses();
    IntArray sizeToSend(nproc), sizeToRecv(nproc), nrecToReceive(nproc);
#ifdef __VERBOSE_PARALLEL
    IntArray nrecToSend(nproc);
#endif
    const IntArray *plist;
    for ( int i = 1; i <= ndofman; i++ ) {
        // if (domain->giveDofManager(i)->giveParallelMode() == DofManager_shared) {
        if ( isShared( d->giveDofManager(i) ) ) {
            int ndofs = d->giveDofManager(i)->giveNumberOfDofs();
            plist = d->giveDofManager(i)->givePartitionList();
            psize = plist->giveSize();
            int minrank = myrank;
            for ( int j = 1; j <= psize; j++ ) {
                minrank = min( minrank, plist->at(j) );
            }

            if ( minrank == myrank ) { // count to send
                for ( int j = 1; j <= psize; j++ ) {
#ifdef __VERBOSE_PARALLEL
                    nrecToSend( plist->at(j) )++;
#endif
                    sizeToSend( plist->at(j) ) += ( 1 + ndofs ); // dofman number + ndofs eq slots
                }
            } else {
                nrecToReceive(minrank)++;
                sizeToRecv(minrank) += ( 1 + ndofs ); // dofman number + ndofs eq slots
            }
        }
    }
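
    /*
     * Example (illustrative): a dofman with two dofs shared by partitions
     * {0, 2} contributes 1 + 2 = 3 ints to sizeToSend(2) on rank 0 (the
     * dofman's global number followed by up to two equation numbers), and
     * the same amount to sizeToRecv(0) on rank 2.
     */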

#ifdef __VERBOSE_PARALLEL
    for ( int i = 0; i < nproc; i++ ) {
        OOFEM_LOG_INFO("[%d] Record Statistics: Sending %d Receiving %d to %d\n",
                       myrank, nrecToSend(i), nrecToReceive(i), i);
    }

#endif


    std :: map< int, int > globloc; // global->local mapping for shared dofmans
    // number the locally owned dofs
    int globeq = offset;
    for ( int i = 1; i <= ndofman; i++ ) {
        dman = d->giveDofManager(i);
        //if (dman->giveParallelMode() == DofManager_shared) {
        if ( isShared(dman) ) {
            globloc [ dman->giveGlobalNumber() ] = i; // build global->local mapping for shared

            plist = dman->givePartitionList();
            psize = plist->giveSize();
            int minrank = myrank;
            for ( int j = 1; j <= psize; j++ ) {
                minrank = min( minrank, plist->at(j) );
            }

            if ( minrank == myrank ) { // local
                for ( Dof *dof: *dman ) {
                    if ( dof->isPrimaryDof() ) {
                        int eq = dof->giveEquationNumber(n);

                        if ( eq ) {
                            locGlobMap.at(eq) = globeq++;
                        }
                    }
                }
            }

        //} else if (dman->giveParallelMode() == DofManager_local) {
        } else {
            for ( auto &dof: *dman ) {
                if ( dof->isPrimaryDof() ) {
                    int eq = dof->giveEquationNumber(n);

                    if ( eq ) {
                        locGlobMap.at(eq) = globeq++;
                    }
                }
            }
        }
    }
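
    /*
     * At this point every equation owned by this partition carries its
     * global number; entries of locGlobMap belonging to shared dofmans
     * owned by a lower-rank partition are still unset and are filled in
     * by the exchange below.
     */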


    /*
     * fprintf (stderr, "[%d] locGlobMap: ", myrank);
     * for (i=1; i<=locGlobMap.giveSize(); i++)
     * fprintf (stderr, "%d ",locGlobMap.at(i));
     */

    // pack data for remote procs
    CommunicationBuffer **buffs = new CommunicationBuffer * [ nproc ];
    for ( int p = 0; p < nproc; p++ ) {
        buffs [ p ] = new StaticCommunicationBuffer(MPI_COMM_WORLD, 0);
        buffs [ p ]->resize( buffs [ p ]->givePackSizeOfInt(1) * sizeToSend(p) );

#if 0
        OOFEM_LOG_INFO( "[%d]Natural2GlobalOrdering :: init: Send buffer[%d] size %d\n",
                        myrank, p, sizeToSend(p) );
#endif
    }


    for ( int i = 1; i <= ndofman; i++ ) {
        if ( isShared( d->giveDofManager(i) ) ) {
            dman = d->giveDofManager(i);
            plist = dman->givePartitionList();
            psize = plist->giveSize();
            int minrank = myrank;
            for ( int j = 1; j <= psize; j++ ) {
                minrank = min( minrank, plist->at(j) );
            }

            if ( minrank == myrank ) { // do send
                for ( int j = 1; j <= psize; j++ ) {
                    int p = plist->at(j);
                    if ( p == myrank ) {
                        continue;
                    }

#if 0
                    OOFEM_LOG_INFO("[%d]Natural2GlobalOrdering :: init: Sending localShared node %d[%d] to proc %d\n",
                                   myrank, i, dman->giveGlobalNumber(), p);
#endif
                    buffs [ p ]->write( dman->giveGlobalNumber() );
                    for ( Dof *dof: *dman ) {
                        if ( dof->isPrimaryDof() ) {
                            int eq = dof->giveEquationNumber(n);

                            if ( eq ) {
                                buffs [ p ]->write( locGlobMap.at(eq) );
                            }
                        }
                    }
                }
            }
        }
    }


    //fprintf (stderr, "[%d] Sending glob nums ...", myrank);
    // send buffers
    for ( int p = 0; p < nproc; p++ ) {
        if ( p != myrank ) {
            buffs [ p ]->iSend(p, 999);
        }
    }

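    /*
     * Note: iSend posts a non-blocking send (tag 999) for every other rank;
     * each is matched by the corresponding iRecv below, and receive
     * completion is polled via testCompletion() before the buffers are freed.
     */
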

#if 0
    for ( int p = 0; p < nproc; p++ ) {
        if ( p == myrank ) {
            continue;
        }
        for ( int i = 1; i <= ndofman; i++ ) {
            //if (domain->giveDofManager(i)->giveParallelMode() == DofManager_shared) {
            if ( isShared( d->giveDofManager(i) ) ) {
                dman = d->giveDofManager(i);
                plist = dman->givePartitionList();
                psize = plist->giveSize();
                int minrank = myrank;
                for ( int j = 1; j <= psize; j++ ) {
                    minrank = min( minrank, plist->at(j) );
                }
                if ( minrank == myrank ) { // do send
                    buffs [ p ]->write( dman->giveGlobalNumber() );
                    for ( Dof *dof: *dman ) {
                        if ( dof->isPrimaryDof() ) {
                            buffs [ p ]->write( locGlobMap.at( dof->giveEquationNumber() ) );
                        }
                    }
                }
            }
        }
        // send buffer
        buffs [ p ]->iSend(p, 999);
    }
#endif

    // receive remote eqs and complete global numbering
    CommunicationBuffer **rbuffs = new CommunicationBuffer * [ nproc ];
    for ( int p = 0; p < nproc; p++ ) {
        rbuffs [ p ] = new StaticCommunicationBuffer(MPI_COMM_WORLD, 0);
        rbuffs [ p ]->resize( rbuffs [ p ]->givePackSizeOfInt(1) * sizeToRecv(p) );
#if 0
        OOFEM_LOG_INFO( "[%d]Natural2GlobalOrdering :: init: Receive buffer[%d] size %d\n",
                        myrank, p, sizeToRecv(p) );
#endif
    }


    //fprintf (stderr, "[%d] Receiving glob nums ...", myrank);
    for ( int p = 0; p < nproc; p++ ) {
        if ( p != myrank ) {
            rbuffs [ p ]->iRecv(p, 999);
        }
    }


    IntArray finished(nproc);
    finished.zero();
    int fin = 1;
    finished.at(emodel->giveRank() + 1) = 1;
    do {
        for ( int p = 0; p < nproc; p++ ) {
            if ( finished.at(p + 1) == 0 ) {
                if ( rbuffs [ p ]->testCompletion() ) {
                    // data have arrived; unpack them
                    int nite = nrecToReceive(p);
                    int shdm, ldm;
                    for ( int i = 1; i <= nite; i++ ) {
                        rbuffs [ p ]->read(shdm);

#if 0
                        OOFEM_LOG_INFO("[%d]Natural2GlobalOrdering :: init: Received shared node [%d] from proc %d\n",
                                       myrank, shdm, p);
#endif
                        // find the local dofman corresponding to shdm
                        if ( globloc.find(shdm) != globloc.end() ) {
                            ldm = globloc [ shdm ];
                        } else {
                            // should not happen; skipping the record would desynchronize
                            // subsequent buffer reads, so fail loudly
                            OOFEM_ERROR("[%d] invalid shared dofman received, globnum %d\n", myrank, shdm);
                        }

                        dman = d->giveDofManager(ldm);
                        for ( Dof *dof: *dman ) {
                            if ( dof->isPrimaryDof() ) {
                                int eq = dof->giveEquationNumber(n);

                                if ( eq ) {
                                    int val;
                                    rbuffs [ p ]->read(val);
                                    locGlobMap.at(eq) = val;
                                }
                            }
                        }
                    }

                    finished.at(p + 1) = 1;
                    fin++;
                }
            }
        }
    } while ( fin < nproc );


    /*
     * fprintf (stderr, "[%d] Finished receiving glob nums ...", myrank);
     *
     * fprintf (stderr, "[%d] locGlobMap:", myrank);
     * for (i=1; i<=locGlobMap.giveSize(); i++)
     * fprintf (stderr, "%d ",locGlobMap.at(i));
     */

#ifdef __VERBOSE_PARALLEL
    int _eq;
    char *ptr;
    char locname[] = "local", shname[] = "shared", unkname[] = "unknown";
    for ( int i = 1; i <= ndofman; i++ ) {
        dman = d->giveDofManager(i);
        if ( dman->giveParallelMode() == DofManager_local ) {
            ptr = locname;
        } else if ( dman->giveParallelMode() == DofManager_shared ) {
            ptr = shname;
        } else {
            ptr = unkname;
        }

        for ( Dof *dof: *dman ) {
            DofIDItem id = dof->giveDofID();
            if ( ( _eq = dof->giveEquationNumber(n) ) ) {
                fprintf( stderr, "[%d] n:%6s %d[%d] (%d), leq = %d, geq = %d\n", emodel->giveRank(), ptr, i, dman->giveGlobalNumber(), id, _eq, locGlobMap.at(_eq) );
            } else {
                fprintf(stderr, "[%d] n:%6s %d[%d] (%d), leq = %d, geq = %d\n", emodel->giveRank(), ptr, i, dman->giveGlobalNumber(), id, _eq, 0);
            }
        }
    }

#endif


    // build reverse map
    int lneq = emodel->giveNumberOfDomainEquations(di, n);

    globLocMap.clear();
    for ( int i = 1; i <= lneq; i++ ) {
        globLocMap [ locGlobMap.at(i) ] = i;
    }
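
    /*
     * Example (illustrative): after init, giveNewEq(leq) returns the global
     * number of local equation leq, and giveOldEq(geq) performs the inverse
     * lookup, returning 0 when the global equation geq is not present on
     * this partition.
     */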

    for ( int p = 0; p < nproc; p++ ) {
        delete rbuffs [ p ];
        delete buffs [ p ];
    }

    delete[] rbuffs;
    delete[] buffs;
    delete[] leqs;

    MPI_Barrier(MPI_COMM_WORLD);
#ifdef __VERBOSE_PARALLEL
    VERBOSEPARALLEL_PRINT("Natural2GlobalOrdering :: init", "done", myrank);
#endif
}


int
Natural2GlobalOrdering :: giveNewEq(int leq)
{
    return locGlobMap.at(leq);
}

int
Natural2GlobalOrdering :: giveOldEq(int eq)
{
    auto i = globLocMap.find(eq);
    if ( i != globLocMap.end() ) {
        return i->second;
    } else {
        return 0;
    }
}

void
Natural2GlobalOrdering :: map2New(IntArray &answer, const IntArray &src, int baseOffset)
{
    int indx, n = src.giveSize();
    answer.resize(n);

    //for (i=1; i<=n; i++) answer.at(i)=locGlobMap.at(src.at(i))-baseOffset;
    for ( int i = 1; i <= n; i++ ) {
        if ( ( indx = src.at(i) ) ) {
            answer.at(i) = locGlobMap.at(indx) + baseOffset;
        } else {
            answer.at(i) = ( -1 ) + baseOffset;
        }
    }
}
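
/*
 * Example (illustrative): with baseOffset = 0, src = {3, 0, 7} maps to
 * { locGlobMap.at(3), -1, locGlobMap.at(7) }; a zero (unnumbered) entry
 * becomes -1, which downstream consumers can recognize and skip.
 */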


void
Natural2GlobalOrdering :: map2Old(IntArray &answer, const IntArray &src, int baseOffset)
{
    int n = src.giveSize();
    int offset = baseOffset - 1;
    answer.resize(n);
    for ( int i = 1; i <= n; i++ ) {
        answer.at(i) = giveOldEq( src.at(i) ) + offset;
    }
}




Natural2LocalOrdering :: Natural2LocalOrdering() : ParallelOrdering(), n2l() { }
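
/*
 * Usage sketch (illustrative, not part of the original file): the
 * natural-to-local ordering marks which natural equations are maintained
 * by this partition; remote (non-local) equations map to 0. The numbering
 * scheme EModelDefaultEquationNumbering is an assumption here.
 *
 *   Natural2LocalOrdering n2lOrd;
 *   n2lOrd.init(emodel, 1, EModelDefaultEquationNumbering());
 *   int loc = n2lOrd.giveNewEq(neq); // local eq number, or 0 if 'neq' is remote
 */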

void
Natural2LocalOrdering :: init(EngngModel *emodel, int di, const UnknownNumberingScheme &n)
{
    Domain *d = emodel->giveDomain(di);
    int ndofman = d->giveNumberOfDofManagers(), loc_eq = 1;

    // determine the number of local eqs + those shared DOFs which are numbered by the receiver;
    // a shared dofman is numbered on the partition with the lowest rank

    n2l.resize( emodel->giveNumberOfDomainEquations(di, n) );

    for ( int i = 1; i <= ndofman; i++ ) {
        DofManager *dman = d->giveDofManager(i);
        bool lFlag = isLocal(dman);
        for ( Dof *dof: *dman ) {
            if ( dof->isPrimaryDof() ) {
                int n_eq = dof->giveEquationNumber(n);

                if ( n_eq == 0 ) {
                    continue;
                }

                if ( lFlag ) {
                    n2l.at(n_eq) = loc_eq++;
                } else {
                    n2l.at(n_eq) = 0;
                }
            }
        }
    }
}

int
Natural2LocalOrdering :: giveNewEq(int leq)
{
    return n2l.at(leq);
}

int
Natural2LocalOrdering :: giveOldEq(int eq)
{
    // Not really efficient: queries are assumed to go only in the opposite
    // direction. Provided for completeness only; this is a linear search.
    return ( n2l.findFirstIndexOf(eq) );
}

void
Natural2LocalOrdering :: map2New(IntArray &answer, const IntArray &src, int baseOffset)
{
    int indx, n = src.giveSize();
    answer.resize(n);
    for ( int i = 1; i <= n; i++ ) {
        if ( ( indx = src.at(i) ) ) {
            answer.at(i) = n2l.at(indx) - baseOffset;
        } else {
            answer.at(i) = 0 - baseOffset;
        }
    }
}

void
Natural2LocalOrdering :: map2Old(IntArray &answer, const IntArray &src, int baseOffset)
{
    int n = src.giveSize();
    answer.resize(n);
    for ( int i = 1; i <= n; i++ ) {
        answer.at(i) = giveOldEq( src.at(i) ) - baseOffset;
    }
}
} // end namespace oofem