#define __VERBOSE_PARALLEL

// ...

ProblemCommunicator :: ~ProblemCommunicator()
{
    // ...
}
void
NodeCommunicator :: setUpCommunicationMaps(EngngModel *pm, bool excludeSelfCommFlag, bool forceReinit)
{
#ifdef __VERBOSE_PARALLEL
    // ...
#endif
    // ...
    // First pass: for each local node, count how many entries each
    // partition's send map will need.
    for ( int i = 1; i <= nnodes; i++ ) {
        // ... fetch the node's partition list ...
        for ( int j = 1; j <= partitionList->giveSize(); j++ ) {
            if ( !( excludeSelfCommFlag && ( this->rank == partitionList->at(j) ) ) ) {
                domainNodeSendCount.at(partitionList->at(j) + 1)++;
            }
        }
    }
    // ...
    // Allocate one send map per partition, sized from the counts above.
    std :: vector< IntArray > maps(size);
    for ( int i = 0; i < size; i++ ) {
        maps [ i ].resize( domainNodeSendCount.at(i + 1) );
    }
    // ...
    // Second pass: fill each partition's send map with local node numbers,
    // tracking the insertion position per partition in `pos`.
    for ( int i = 1; i <= nnodes; i++ ) {
        // ...
        for ( int j = 1; j <= partitionList->giveSize(); j++ ) {
            int partition = partitionList->at(j);
            if ( !( excludeSelfCommFlag && ( this->rank == partition ) ) ) {
                maps [ partition ].at( ++pos.at(partition + 1) ) = i;
            }
        }
    }

    // Install the finished maps on the per-process communicators.
    for ( int i = 0; i < size; i++ ) {
        // ...
    }
    // ...
}
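/*
 * A minimal, self-contained sketch of the count-then-fill pattern used
 * above, with plain std::vector in place of OOFEM's IntArray. The function
 * name buildSendMaps and the nodePartitions input (per-node list of
 * partition ranks) are illustrative assumptions, not the library's API.
 */
#include <vector>

// Build, for each of `size` partitions, the list of local node numbers
// (1-based, as in the listing above) that reference that partition.
std::vector< std::vector< int > >
buildSendMaps(int nnodes, int size,
              const std::vector< std::vector< int > > &nodePartitions)
{
    // Pass 1: count entries per partition so each map is allocated once.
    std::vector< int > count(size, 0);
    for ( int i = 1; i <= nnodes; i++ ) {
        for ( int p : nodePartitions [ i - 1 ] ) {
            count [ p ]++;
        }
    }

    // Allocate exact-size maps, then pass 2: fill them in node order.
    std::vector< std::vector< int > > maps(size);
    for ( int p = 0; p < size; p++ ) {
        maps [ p ].reserve(count [ p ]);
    }
    for ( int i = 1; i <= nnodes; i++ ) {
        for ( int p : nodePartitions [ i - 1 ] ) {
            maps [ p ].push_back(i);
        }
    }
    return maps;
}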
void
ElementCommunicator :: setUpCommunicationMaps(EngngModel *pm, bool excludeSelfCommFlag, bool forceReinit)
{
#ifdef __VERBOSE_PARALLEL
    // ...
#endif
    // ...
    int domainRecvListSize = 0, domainRecvListPos = 0;
    // ...
    // First pass: count, per partition, the remote elements this rank will
    // receive, and the total length of the flat receive list.
    for ( int i = 1; i <= nelems; i++ ) {
        // ...
        for ( int j = 1; j <= partitionList->giveSize(); j++ ) {
            if ( !( excludeSelfCommFlag && ( this->rank == partitionList->at(j) ) ) ) {
                domainRecvListSize++;
                domainNodeRecvCount.at(partitionList->at(j) + 1)++;
            }
        }
    }
    // ...
    // Allocate one receive map per partition, sized from the counts above.
    std :: vector< IntArray > maps(size);
    for ( int i = 0; i < size; i++ ) {
        maps [ i ].resize( domainNodeRecvCount.at(i + 1) );
    }
    // ...
    // Flat list of all received elements, used later to derive send maps.
    IntArray domainRecvList(domainRecvListSize);
    // ...
    if ( domainRecvListSize ) {
        // Second pass: fill each partition's receive map (and the flat
        // receive list) with local element numbers.
        for ( int i = 1; i <= nelems; i++ ) {
            // ...
            for ( int j = 1; j <= partitionList->giveSize(); j++ ) {
                if ( !( excludeSelfCommFlag && ( this->rank == partitionList->at(j) ) ) ) {
                    int partition = partitionList->at(j);
                    maps [ partition ].at( ++pos.at(partition + 1) ) = i;
                }
            }
        }
    }

    // Install the receive maps on the per-process communicators.
    for ( int i = 0; i < size; i++ ) {
        // ...
    }
#ifdef __VERBOSE_PARALLEL
    for ( int i = 0; i < size; i++ ) {
        fprintf( stderr, "domain %d-%d: domainCommRecvsize is %d\n",
                 rank, i, this->giveProcessCommunicator(i)->giveToRecvMap().giveSize() );
        printf("domain %d-%d: recv map:", rank, i);
        // ... print the map entries ...
    }
#endif
#ifdef __VERBOSE_PARALLEL
    // ...
#endif
    // ...
    // Agree on a common buffer size: each rank computes the pack size of
    // its receive list and the global maximum is taken over all ranks.
    int localExpectedSize, globalRecvSize;
    int sendMapPos, sendMapSize, globalDofManNum;
    // ...
#ifdef __USE_MPI
 #ifndef IBM_MPI_IMPLEMENTATION
    localExpectedSize = domainRecvList.givePackSize(commBuff);
 #else
    localExpectedSize = domainRecvList.givePackSize(commBuff) + 1;
 #endif
    // ...
    result = MPI_Allreduce(& localExpectedSize, & globalRecvSize, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
    if ( result != MPI_SUCCESS ) {
        // ...
    }
#else
    // Deliberately invalid code: fails the build when no supported
    // message-passing library is configured.
WARNING: NOT SUPPORTED MESSAGE PARSING LIBRARY
#endif
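/*
 * A minimal sketch of the size-agreement step above: every rank computes
 * the buffer size it needs and all ranks settle on the global maximum, so
 * a single broadcast buffer fits any partition's data. Plain MPI with a
 * hypothetical agreeOnBufferSize wrapper; not OOFEM's DataStream machinery.
 */
#include <mpi.h>

int agreeOnBufferSize(int localExpectedSize)
{
    int globalRecvSize = 0;
    // MPI_MAX: the result on every rank is the largest local request.
    MPI_Allreduce(& localExpectedSize, & globalRecvSize, 1,
                  MPI_INT, MPI_MAX, MPI_COMM_WORLD);
    return globalRecvSize;
}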
#ifdef __VERBOSE_PARALLEL
    // ...
#endif
    // ...
    // Size the shared buffer and the scratch send map to the agreed maximum.
    commBuff.resize(globalRecvSize);
    // ...
    toSendMap.resize(globalRecvSize);
    // ...
    // Each partition in turn acts as broadcast root for its packed list of
    // needed entries; every other rank unpacks it to derive its send map.
    for ( int i = 0; i < size; i++ ) {
        if ( i == rank ) {
            // ... pack this rank's receive list into commBuff ...
#ifdef __VERBOSE_PARALLEL
            // ...
#endif
            result = commBuff.bcast(i);
            if ( result != MPI_SUCCESS ) {
                // ... error handling ...
            }
#ifdef __VERBOSE_PARALLEL
            // ...
#endif
        } else {
#ifdef __VERBOSE_PARALLEL
            OOFEM_LOG_DEBUG("[process rank %3d]: %-30s: Receiving broadcasted send map from partition %3d\n",
                            rank, "ProblemCommunicator :: unpackAllData", i);
#endif
            result = commBuff.bcast(i);
            if ( result != MPI_SUCCESS ) {
                // ... error handling ...
            }
#ifdef __VERBOSE_PARALLEL
            OOFEM_LOG_DEBUG("[process rank %3d]: %-30s: Receiving broadcasted send map from partition %3d finished\n",
                            rank, "ProblemCommunicator :: unpackAllData", i);
#endif
            // Count the broadcast entries this rank owns, size the send
            // map accordingly, then fill it (count-then-fill again).
            for ( int j = 1; j <= nelems; j++ ) {
                // ... increment sendMapSize for each matching element ...
            }
            // ...
            toSendMap.resize(sendMapSize);
            // ...
            for ( int j = 1; j <= nelems; j++ ) {
                // ...
                toSendMap.at(++sendMapPos) = j;
                // ...
            }
            // ...
#ifdef __VERBOSE_PARALLEL
            fprintf( stderr, "domain %d-%d: domainCommSendsize is %d\n",
                     rank, i, this->giveProcessCommunicator(i)->giveToSendMap().giveSize() );
            printf("domain %d-%d: send map:", rank, i);
            // ... print the map entries ...
#endif
        }
    }
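/*
 * A minimal sketch of the exchange pattern above: rank `root` broadcasts
 * the global ids it needs; every other rank intersects that list with the
 * ids it owns to obtain its send map toward `root`. Uses plain MPI and
 * std::vector; the function name buildSendMapTo and the myRecvIds/ownedIds
 * arguments are illustrative assumptions, and OOFEM's packed
 * CommunicationBuffer is replaced by a raw int broadcast.
 */
#include <mpi.h>
#include <algorithm>
#include <vector>

std::vector< int >
buildSendMapTo(int root, int myRank, const std::vector< int > &myRecvIds,
               const std::vector< int > &ownedIds, int maxListSize)
{
    // Fixed-size buffer (agreed maximum), padded with -1; ids are assumed
    // positive, and myRecvIds.size() must not exceed maxListSize.
    std::vector< int > buf(maxListSize, -1);
    if ( myRank == root ) {
        std::copy(myRecvIds.begin(), myRecvIds.end(), buf.begin());
    }
    MPI_Bcast(buf.data(), maxListSize, MPI_INT, root, MPI_COMM_WORLD);

    std::vector< int > sendMap;
    if ( myRank != root ) {
        for ( int id : ownedIds ) {
            // The root wants this id and we own it: schedule it for send.
            if ( std::find(buf.begin(), buf.end(), id) != buf.end() ) {
                sendMap.push_back(id);
            }
        }
    }
    return sendMap;
}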
#ifdef __VERBOSE_PARALLEL
    // ...
#endif
    // ...
}

// The setProcessCommunicatorTo{Send,Recv}Arry overrides of both
// communicators (bodies largely elided in this listing) sort their maps by
// global number before installing them, so that sender and receiver
// traverse matching entries in the same order:
    sortCommMap(map, & ProblemCommunicator :: DofManCmp);
    // ...
    sortCommMap(map, & ProblemCommunicator :: DofManCmp);
    // ...
    sortCommMap(map, & ProblemCommunicator :: ElemCmp);
    // ...
    sortCommMap(map, & ProblemCommunicator :: ElemCmp);
// Partition step of the quicksort (quickSortPartition): v holds the pivot
// value; scan inward from both ends using the member-function comparator.
    int i = l - 1, j = r;
    // ...
    for ( ; ; ) {
        // Advance from the left past elements ordered before the pivot.
        while ( ( ( this->*cmp )( map.at(++i), v ) ) < 0 ) {
            ;
        }
        // Retreat from the right past elements ordered after the pivot,
        // guarding against running off the left end.
        while ( ( ( this->*cmp )( v, map.at(--j) ) ) < 0 ) {
            if ( j == l ) {
                break;
            }
        }
        if ( i >= j ) {
            break;
        }
        std::swap(map.at(i), map.at(j));
    }
    // Put the pivot into its final position.
    std::swap(map.at(i), map.at(r));
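/*
 * A minimal sketch of sorting through a pointer-to-member comparator, the
 * mechanism quickSortCommMap relies on. The Sorter struct and its key
 * field are illustrative stand-ins, not OOFEM types; the comparator is
 * strcmp-style (negative / zero / positive), and std::sort stands in for
 * the handwritten quicksort above.
 */
#include <algorithm>
#include <vector>

struct Sorter {
    std::vector< int > key; // key[i] plays the role of a global number

    // Tri-valued comparator, analogous to DofManCmp/ElemCmp below.
    int cmp(int a, int b) { return key [ a ] - key [ b ]; }

    void sort(std::vector< int > &map, int ( Sorter :: *c )( int, int )) {
        std::sort(map.begin(), map.end(),
                  [this, c](int a, int b) { return ( this->*c )(a, b) < 0; });
    }
};

// Usage: Sorter s{ { 5, 3, 9, 1 } }; std::vector< int > m{ 0, 1, 2, 3 };
//        s.sort(m, & Sorter :: cmp);   // m becomes { 3, 1, 0, 2 }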
int
ProblemCommunicator :: DofManCmp(int i, int j)
{
    // Negative / zero / positive, ordering dof managers by global number.
    return ( engngModel->giveDomain(1)->giveDofManager(i)->giveGlobalNumber() -
             engngModel->giveDomain(1)->giveDofManager(j)->giveGlobalNumber() );
}
int
ProblemCommunicator :: ElemCmp(int i, int j)
{
    // Negative / zero / positive, ordering elements by global number.
    return ( engngModel->giveDomain(1)->giveElement(i)->giveGlobalNumber() -
             engngModel->giveDomain(1)->giveElement(j)->giveGlobalNumber() );
}