OOFEM 3.0
feticommunicator.C
/*
 *
 *                 #####    #####   ######  ######  ###   ###
 *               ##   ##  ##   ##  ##      ##      ## ### ##
 *              ##   ##  ##   ##  ####    ####    ##  #  ##
 *             ##   ##  ##   ##  ##      ##      ##     ##
 *            ##   ##  ##   ##  ##      ##      ##     ##
 *            #####    #####   ##      ######  ##     ##
 *
 *
 *             OOFEM : Object Oriented Finite Element Code
 *
 *               Copyright (C) 1993 - 2025   Borek Patzak
 *
 *
 *
 *       Czech Technical University, Faculty of Civil Engineering,
 *   Department of Structural Mechanics, 166 29 Prague, Czech Republic
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2.1 of the License, or (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "feticommunicator.h"
#include "engngm.h"
#include "intarray.h"
#include "dofmanager.h"
#include "combuff.h"
#include "domain.h"
#include "unknownnumberingscheme.h"

#include <map>

#ifdef __USE_MPI
 #include <mpi.h>
#endif

namespace oofem {
FETICommunicator :: FETICommunicator(EngngModel *emodel, CommunicatorBuff *b, int rank, int size) :
    Communicator(emodel, b, rank, size)
{
    if ( rank != 0 ) {
        OOFEM_ERROR("bad rank number, expected rank 0 for master");
    }
}


FETICommunicator :: ~FETICommunicator()
{ }


void
FETICommunicator :: setUpCommunicationMaps(EngngModel *pm)
{
    int i, j, maxRec;
    int globaldofmannum, localNumber, ndofs;
    int numberOfBoundaryDofMans;
    int source, tag;
    int size = this->processComms.size();
    IntArray numberOfPartitionBoundaryDofMans(size);
    StaticCommunicationBuffer commBuff(MPI_COMM_WORLD);
    // equation numbering scheme used when collecting complete location arrays
    EModelDefaultEquationNumbering dn;
    // FETIBoundaryDofManager *dofmanrec;
    // Map containing boundary dof manager records; the key is the corresponding global number,
    // the value is the corresponding local master dof manager number
    std::map< int, int > BoundaryDofManagerMap;
    // communication maps of slaves
    std::vector<IntArray> commMaps(size);
    // location array
    IntArray locNum;
    Domain *domain = pm->giveDomain(1);

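    // Overview: the master (rank 0) builds the global interface description in four steps:
    //  1) receive from every slave partition the number of its boundary (shared) dof managers,
    //  2) register its own shared dof managers and then the boundary records of each slave,
    //     merging entries by global number into boundaryDofManList,
    //  3) assign master-level code numbers to all boundary dofs,
    //  4) hand the resulting per-partition communication maps to the process communicators.
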
    // check if receiver is master
    if ( this->rank != 0 ) {
        OOFEM_ERROR("rank 0 (master) expected as receiver");
    }

    // resize receive buffer
    commBuff.resize( commBuff.givePackSizeOfInt(1) );

    //
    // receive data
    //
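    // each slave partition reports how many boundary dof managers it owns; the message is
    // received from MPI_ANY_SOURCE and testCompletion() returns the actual source rank,
    // so the replies may arrive in any order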
    for ( int i = 1; i < size; i++ ) {
        commBuff.iRecv(MPI_ANY_SOURCE, FETICommunicator :: NumberOfBoundaryDofManagersMsg);
        while ( !commBuff.testCompletion(source, tag) ) {
            ;
        }

        // unpack data
        commBuff.read(j);
#ifdef __VERBOSE_PARALLEL
        OOFEM_LOG_DEBUG("[process rank %3d]: %-30s: Received data from partition %3d (received %d)\n",
                        rank, "FETICommunicator :: setUpCommunicationMaps : received number of boundary dofMans", source, j);
#endif
        numberOfPartitionBoundaryDofMans.at(source + 1) = j;
        commBuff.init();
    }

    MPI_Barrier(MPI_COMM_WORLD);

    // determine the total number of boundary dof managers at master
    int nnodes = domain->giveNumberOfDofManagers();
    j = 0;
    for ( int i = 1; i <= nnodes; i++ ) {
        if ( domain->giveDofManager(i)->giveParallelMode() == DofManager_shared ) {
            j++;
        }
    }

    numberOfPartitionBoundaryDofMans.at(1) = j;

    //
    // receive the list of boundary dof managers, with the corresponding number of dofs, from each partition
    //

    // resize the receive buffer to fit all messages
    maxRec = 0;
    for ( int i = 0; i < size; i++ ) {
        if ( numberOfPartitionBoundaryDofMans.at(i + 1) > maxRec ) {
            maxRec = numberOfPartitionBoundaryDofMans.at(i + 1);
        }
    }

    commBuff.resize( 2 * maxRec * commBuff.givePackSizeOfInt(1) );
    // resize communication maps accordingly
    for ( int i = 0; i < size; i++ ) {
        j = numberOfPartitionBoundaryDofMans.at(i + 1);
        commMaps [ i ].resize(j);
    }

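    // Note: a boundary dof manager may be shared by several partitions. BoundaryDofManagerMap
    // keys the records by global number, so each boundary dof manager gets exactly one entry
    // in boundaryDofManList and every sharing partition is appended to it via addPartition().
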
    // add local master contribution first
    // loop over all dofmanager data received
    i = 0;
    for ( int j = 1; j <= numberOfPartitionBoundaryDofMans.at(1); j++ ) {
        // find next shared dofman
        while ( !( domain->giveDofManager(++i)->giveParallelMode() == DofManager_shared ) ) {
            ;
        }

        globaldofmannum = domain->giveDofManager(i)->giveGlobalNumber();
        domain->giveDofManager(i)->giveCompleteLocationArray(locNum, dn);
        ndofs = 0;
        for ( int l = 1; l <= locNum.giveSize(); l++ ) {
            if ( locNum.at(l) ) {
                ndofs++;
            }
        }

        // add the corresponding entry to the master map of boundary dof managers
        if ( ( localNumber = BoundaryDofManagerMap [ globaldofmannum ] ) == 0 ) { // no local counterpart exists
            // create it
            boundaryDofManList.push_back( FETIBoundaryDofManager(globaldofmannum, 0, ndofs) );
            // remember the local number; the actual position in the vector is localNumber-1
            localNumber = BoundaryDofManagerMap [ globaldofmannum ] = ( boundaryDofManList.size() );
            boundaryDofManList.back().addPartition(0);
        } else { // update the corresponding record
            boundaryDofManList [ localNumber - 1 ].addPartition(0);
            if ( boundaryDofManList [ localNumber - 1 ].giveNumberOfDofs() != ndofs ) {
                OOFEM_ERROR("ndofs size mismatch");
            }
        }

        // remember the communication map entry for this partition
        commMaps [ 0 ].at(j) = localNumber;
    }

    //
    // receive data from slave partitions
    //

    for ( int i = 1; i < size; i++ ) {
        commBuff.iRecv(MPI_ANY_SOURCE, FETICommunicator :: BoundaryDofManagersRecMsg);
        while ( !commBuff.testCompletion(source, tag) ) {
            ;
        }

        // unpack data
#ifdef __VERBOSE_PARALLEL
        OOFEM_LOG_DEBUG("[process rank %3d]: %-30s: Received data from partition %3d\n",
                        rank, "FETICommunicator :: setUpCommunicationMaps : received boundary dofMans records", source);
#endif

        // loop over all dofmanager data received
        for ( int j = 1; j <= numberOfPartitionBoundaryDofMans.at(source + 1); j++ ) {
            commBuff.read(globaldofmannum);
            commBuff.read(ndofs);

            // add the corresponding entry to the master map of boundary dof managers
            if ( ( localNumber = BoundaryDofManagerMap [ globaldofmannum ] ) == 0 ) { // no local counterpart exists
                // create it
                boundaryDofManList.push_back( FETIBoundaryDofManager(globaldofmannum, 0, ndofs) );
                // remember the local number; the actual position in the vector is localNumber-1
                localNumber = BoundaryDofManagerMap [ globaldofmannum ] = ( boundaryDofManList.size() );
                boundaryDofManList.back().addPartition(source);
            } else { // update the corresponding record
                boundaryDofManList [ localNumber - 1 ].addPartition(source);
                if ( boundaryDofManList [ localNumber - 1 ].giveNumberOfDofs() != ndofs ) {
                    OOFEM_ERROR("ndofs size mismatch");
                }
            }

            // remember the communication map entry for this partition
            commMaps [ source ].at(j) = localNumber;
        }

        commBuff.init();
    }

    MPI_Barrier(MPI_COMM_WORLD);
    //
    // assign code numbers to boundary dofs
    //
    numberOfBoundaryDofMans = boundaryDofManList.size();
    for ( int i = 1; i <= numberOfBoundaryDofMans; i++ ) {
        boundaryDofManList [ i - 1 ].setCodeNumbers(numberOfEquations); // updates numberOfEquations
    }

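    // after this loop, numberOfEquations holds the number of equations at master level,
    // i.e. the total number of dofs located on the partition boundaries
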
    // store the commMaps: each slave partition's map becomes both the send and the receive
    // map of its process communicator, while the master keeps its own map in masterCommMap
    for ( int i = 0; i < size; i++ ) {
        if ( i != 0 ) {
            this->giveProcessCommunicator(i)->setToSendArry(engngModel, commMaps [ i ], 0);
            this->giveProcessCommunicator(i)->setToRecvArry(engngModel, commMaps [ i ], 0);
        } else {
            masterCommMap = commMaps [ i ];
        }
    }

    MPI_Barrier(MPI_COMM_WORLD);

#ifdef __VERBOSE_PARALLEL
    VERBOSEPARALLEL_PRINT("FETICommunicator::setUpCommunicationMaps", "communication maps setup finished", rank);
#endif
}
} // end namespace oofem
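
For orientation, a minimal usage sketch (not part of this file) of how a master-rank solver might create the communicator and build the maps. The variables emodel (the EngngModel), commBuff (a CommunicatorBuff*) and nProc (the number of processes) are assumed to be prepared elsewhere and are used here for illustration only.

    // sketch only: runs on rank 0; emodel, commBuff and nProc are assumed context
    FETICommunicator masterComm(emodel, commBuff, 0, nProc);  // the constructor accepts rank 0 only
    masterComm.setUpCommunicationMaps(emodel);                // gathers boundary dofman records and builds the maps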