#ifdef __MPI_PARALLEL_MODE
#ifdef __MPI_PARALLEL_MODE
    comm = MPI_COMM_SELF;
EngngModel :: ~EngngModel()
#ifdef __MPI_PARALLEL_MODE
void EngngModel :: setParallelMode(bool newParallelFlag)
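// --- Initialization: Instanciate_init() pre-allocates the problem domains; the instanciation
// routines below then read the output file name, fields, MPM records, domains and metasteps
// from the input records.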
EngngModel :: Instanciate_init()
    for ( int i = 1; i <= ndomains; i++ ) {
        domainList.push_back(std::make_unique<Domain>(i, 0, this));
    std::shared_ptr<InputRecord> irPtr(ir.ptr());
    bool inputReaderFinish = true;
    this->dataOutputFileName.append(".oofeg");
        inputReaderFinish = false;
    this->giveContext()->giveFieldManager()->instanciateYourself(dr, *irPtr);
    this->instanciateMPM(dr, *irPtr);
        inputReaderFinish = false;
    if ( inputReaderFinish ) {
        OOFEM_ERROR("Error initializing from user input: %s\n", e.what());
    if ( eeTypeId >= 0 ) {
#ifdef __MPI_PARALLEL_MODE
#ifdef __MPI_PARALLEL_MODE
        result &= (*Idomain)->instanciateYourself(dr, rec);
    for ( int i = 1; i <= this->nMetaSteps; i++ ) {
    std::shared_ptr<InputRecord> irPtr(ir.ptr());
        std::unique_ptr< Variable > var = std :: make_unique< Variable >();
        var->initializeFrom(mir);
        variableMap[name] = std::move(var);
        term->initializeFrom(mir, this);
        termList.push_back(std::move(term));
        std::unique_ptr< Integral > integral = std :: make_unique< Integral >(nullptr, &dummySet, nullptr);
        integral->initializeFrom(mir, this);
        this->addIntegral(std::move(integral));
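// --- Equation numbering: every dof manager, element-internal and bc-internal dof manager is asked
// for new equation numbers; with profile optimization enabled, the initial and optimal profiles are
// compared (see the "Nominal profile" debug message below).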
EngngModel :: forceEquationNumbering(int id)
        node->askNewEquationNumbers(currStep);
        int nnodes = elem->giveNumberOfInternalDofManagers();
        for ( int k = 1; k <= nnodes; k++ ) {
            elem->giveInternalDofManager(k)->askNewEquationNumbers(currStep);
    for ( auto &bc : domain->giveBcs() ) {
        int nnodes = bc->giveNumberOfInternalDofManagers();
        for ( int k = 1; k <= nnodes; k++ ) {
            bc->giveInternalDofManager(k)->askNewEquationNumbers(currStep);
    int initialProfile, optimalProfile;
        OOFEM_LOG_DEBUG("Nominal profile %d (old) %d (new)\n", initialProfile, optimalProfile);
EngngModel :: forceEquationNumbering()
        domain->giveBCTracker()->initialize();
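// --- Solution driver: solveYourself() loops over metasteps and their solution steps, timing each
// step; when the time-step reduction strategy flags a convergence problem the step is restarted
// with a smaller increment, up to the metastep's maximum number of reductions.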
EngngModel :: solveYourself()
    this->timer.startTimer(EngngModelTimer :: EMTT_AnalysisTimer);
    for ( int imstep = smstep; imstep <= timeStepController->giveNumberOfMetaSteps(); imstep++ ) {
        double msFinalTime = activeMStep->giveFinalTime() - this->giveInitialTime();
            this->timer.startTimer(EngngModelTimer :: EMTT_SolutionStepTimer);
            this->timer.initTimer(EngngModelTimer :: EMTT_NetComputationalStepTimer);
            auto nReductions = 0;
            if ( timeStepController->giveCurrentMetaStep()->giveTimeStepReductionStrategy()->giveReductionFlag() ) {
                OOFEM_LOG_INFO("--------------------------------------------------------------------------------------\nRestarting step with new time step increment %e due to convergence problem \n--------------------------------------------------------------------------------------\n", this->giveCurrentStep()->giveTimeIncrement() );
                if ( nReductions > activeMStep->giveNumberOfMaxTimeStepReductions() ) {
                    OOFEM_ERROR("Maximum number of time step reductions has been reached.");
            this->timer.stopTimer(EngngModelTimer :: EMTT_SolutionStepTimer);
            OOFEM_LOG_INFO("EngngModel info: user time consumed by solution step %d: %.2fs\n",
            fprintf( this->giveOutputStream(), "\nUser time consumed by solution step %d: %.3f [s]\n\n",
#ifdef __MPI_PARALLEL_MODE
#ifdef __MPI_PARALLEL_MODE
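// --- End-of-step update: dof managers, the XFEM manager (if present), elements and boundary
// conditions are updated for the converged step.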
    for ( auto &dman : domain->giveDofManagers() ) {
        dman->updateYourself(tStep);
    if ( domain->hasXfemManager() ) {
        domain->giveXfemManager()->updateYourself(tStep);
    VERBOSE_PRINT0("Updated nodes ", domain->giveNumberOfDofManagers())
    for ( auto &elem : domain->giveElements() ) {
        elem->updateYourself(tStep);
    VERBOSE_PRINT0("Updated Elements ", domain->giveNumberOfElements())
    for ( auto &bc : domain->giveBcs() ) {
        bc->updateYourself(tStep);
    VERBOSE_PRINT0("Updated BCs ", domain->giveNumberOfBoundaryConditions())
EngngModel :: printOutputAt(FILE *file, TimeStep *tStep)
        domCount += domain->giveOutputManager()->testTimeStepOutput(tStep);
    if ( domCount == 0 ) {
    fprintf(file, "\n==============================================================");
    fprintf(file, "\n==============================================================\n");
        fprintf( file, "Output for domain %3d\n", domain->giveNumber() );
        domain->giveOutputManager()->doDofManOutput(file, tStep);
        domain->giveOutputManager()->doElementOutput(file, tStep);
        int dnum = domain->giveNumber();
        fprintf( file, "Output for domain %3d\n", dnum );
        int nset = nodeSets.giveSize() < dnum ? 0 : nodeSets.at(dnum);
        int eset = elementSets.giveSize() < dnum ? 0 : elementSets.at(dnum);
        this->outputReactionForces(file, *domain, tStep, nset);
EngngModel :: outputNodes(FILE *file, Domain &domain, TimeStep *tStep, int setNum)
    fprintf(file, "\n\nNode output:\n------------------\n");
            dman->printOutputAt(file, tStep);
        for ( int inode : nodes ) {
    fprintf(file, "\n\n");
EngngModel :: outputElements(FILE *file, Domain &domain, TimeStep *tStep, int setNum)
    fprintf(file, "\n\nElement output:\n---------------\n");
            elem->printOutputAt(file, tStep);
        for ( int ielem : elements ) {
    fprintf(file, "\n\n");
void EngngModel :: printYourself()
    printf( "\nEngineeringModel: instance %s\n", this->giveClassName() );
void EngngModel :: printDofOutputAt(FILE *stream, Dof *iDof, TimeStep *tStep)
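// --- Matrix assembly (OpenMP): element and boundary-condition contributions are computed in
// parallel, rotated to the global system when a rotation matrix exists, and assembled into the
// sparse matrix under an omp lock or critical section; remote-mirror and deactivated elements
// are skipped. Two drivers follow: one using a single location array (loc), one using separate
// row/column numbering (r_loc, c_loc).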
    omp_lock_t writelock;
    omp_init_lock(&writelock);
    this->timer.resumeTimer(EngngModelTimer :: EMTT_NetComputationalStepTimer);
#pragma omp parallel for shared(answer) private(mat, R, loc)
    for ( int ielem = 1; ielem <= nelem; ielem++ ) {
        if ( element->giveParallelMode() == Element_remote || !element->isActivated(tStep) || !this->isElementActivated(element) ) {
        if ( element->giveRotationMatrix(R) ) {
        if ( answer.assemble(loc, mat) == 0 ) {
#pragma omp parallel for shared(answer) private(mat, R, loc)
    for ( size_t i = 0; i < domain->giveBcs().size(); i++ ) {
        auto &bc = domain->giveBcs()[i];
        } else if ( bc->giveSetNumber() ) {
            if ( !bc->isImposed(tStep) ) continue;
            auto load = dynamic_cast< Load * >(bc.get());
            if ( !load ) continue;
            Set *set = domain->giveSet( bc->giveSetNumber() );
            if ( ( bodyLoad = dynamic_cast< BodyLoad * >(load) ) ) {
                for ( auto ielem : elements ) {
                    if ( element->giveRotationMatrix(R) ) {
                    omp_set_lock(&writelock);
                    omp_unset_lock(&writelock);
            } else if ( ( sLoad = dynamic_cast< SurfaceLoad * >(load) ) ) {
                for ( int ibnd = 1; ibnd <= surfaces.giveSize() / 2; ++ibnd ) {
                    auto element = domain->giveElement( surfaces.at(ibnd * 2 - 1) );
                    int boundary = surfaces.at(ibnd * 2);
                    bNodes = element->giveInterpolation()->boundaryGiveNodes(boundary, element->giveGeometryType());
                    if ( element->computeDofTransformationMatrix(R, bNodes, true) ) {
                    omp_set_lock(&writelock);
                    omp_unset_lock(&writelock);
            } else if ( ( eLoad = dynamic_cast< EdgeLoad * >(load) ) ) {
                for ( int ibnd = 1; ibnd <= edges.giveSize() / 2; ++ibnd ) {
                    auto element = domain->giveElement( edges.at(ibnd * 2 - 1) );
                    int boundary = edges.at(ibnd * 2);
                    bNodes = element->giveInterpolation()->boundaryEdgeGiveNodes(boundary, element->giveGeometryType());
                    if ( element->computeDofTransformationMatrix(R, bNodes, true) ) {
                    omp_set_lock(&writelock);
                    omp_unset_lock(&writelock);
    this->timer.pauseTimer(EngngModelTimer :: EMTT_NetComputationalStepTimer);
    omp_lock_t writelock;
    omp_init_lock(&writelock);
    this->timer.resumeTimer(EngngModelTimer :: EMTT_NetComputationalStepTimer);
#pragma omp parallel for shared(answer) private(mat, R, r_loc, c_loc)
    for ( int ielem = 1; ielem <= nelem; ielem++ ) {
        #pragma omp critical
        if ( answer.assemble(r_loc, c_loc, mat) == 0 ) {
#pragma omp parallel for shared(answer) private(mat, R, r_loc, c_loc)
    for ( size_t i = 0; i < domain->giveBcs().size(); i++ ) {
        auto &gbc = domain->giveBcs()[i];
    this->timer.pauseTimer(EngngModelTimer :: EMTT_NetComputationalStepTimer);
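// --- Nodal-load vector assembly: in parallel mode the maximum dof id is synchronized with
// MPI_Allreduce before eNorms is sized; contributions are then accumulated per dof manager
// inside an omp critical section.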
#ifdef __MPI_PARALLEL_MODE
    MPI_Allreduce(& maxdofids, & val, 1, MPI_INT, MPI_MAX, this->comm);
    eNorms->resize(maxdofids);
    this->timer.resumeTimer(EngngModelTimer :: EMTT_NetComputationalStepTimer);
#pragma omp parallel for shared(answer, eNorms) private(R, charVec, loc, dofids)
    for ( int i = 1; i <= nnode; i++ ) {
        #pragma omp critical
    this->timer.pauseTimer(EngngModelTimer :: EMTT_NetComputationalStepTimer);
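// --- Load vector from boundary conditions: body loads loop over the set's elements, surface and
// edge loads over boundary/edge pairs, and nodal loads over dof managers; a shared omp lock guards
// assembly into the answer vector and the error norms.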
    omp_lock_t writelock;
    omp_init_lock(&writelock);
    this->timer.resumeTimer(EngngModelTimer :: EMTT_NetComputationalStepTimer);
#pragma omp parallel for shared(answer, eNorms)
    for ( int i = 1; i <= nbc; ++i ) {
        if ( ( bodyLoad = dynamic_cast< BodyLoad * >(load) ) ) {
            for ( int ielem = 1; ielem <= elements.giveSize(); ++ielem ) {
                if ( element->isActivated(tStep) && this->isElementActivated(element) ) {
                    omp_set_lock(&writelock);
                    omp_unset_lock(&writelock);
            for ( int ibnd = 1; ibnd <= boundaries.giveSize() / 2; ++ibnd ) {
                if ( element->isActivated(tStep) && this->isElementActivated(element) ) {
                    int boundary = boundaries.at(ibnd * 2);
                    omp_set_lock(&writelock);
                    omp_unset_lock(&writelock);
            for ( int ibnd = 1; ibnd <= edgeBoundaries.giveSize() / 2; ++ibnd ) {
                if ( element->isActivated(tStep) && this->isElementActivated(element) ) {
                    int boundary = edgeBoundaries.at(ibnd * 2);
                    omp_set_lock(&writelock);
                    omp_unset_lock(&writelock);
        } else if ( ( nLoad = dynamic_cast< NodalLoad * >(load) ) ) {
            for ( int idman = 1; idman <= nodes.giveSize(); ++idman ) {
                omp_set_lock(&writelock);
                omp_unset_lock(&writelock);
    this->timer.pauseTimer(EngngModelTimer :: EMTT_NetComputationalStepTimer);
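// --- Load vector from elements: separate OpenMP loops handle element vectors, element body loads
// and element boundary loads (surface or edge, otherwise an error); inactive elements are skipped
// and assembleFlag marks boundary contributions that are actually assembled.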
    bool assembleFlag = false;
    this->timer.resumeTimer(EngngModelTimer :: EMTT_NetComputationalStepTimer);
#pragma omp parallel for shared(answer, eNorms) private(R, charVec, loc, dofids)
    for ( int i = 1; i <= nelem; i++ ) {
        if ( !element->isActivated(tStep) || !this->isElementActivated(element) ) {
#pragma omp parallel for shared(answer, eNorms) private(R, charVec, loc, dofids)
    for ( int i = 1; i <= nelem; i++ ) {
        if ( !element->isActivated(tStep) || !this->isElementActivated(element) ) {
        for ( int iload = 1; iload <= list.giveSize(); iload++ ) {
#pragma omp parallel for shared(answer, eNorms) private(R, charVec, loc, dofids, assembleFlag)
    for ( int i = 1; i <= nelem; i++ ) {
        if ( !element->isActivated(tStep) || !this->isElementActivated(element) ) {
        for ( int j = 1; j <= list2.giveSize() / 2; j++ ) {
            int iload = list2.at(j * 2 - 1);
            int boundary = list2.at(j * 2);
            assembleFlag = false;
                assembleFlag = true;
                assembleFlag = true;
                OOFEM_ERROR("Unsupported element boundary load type");
            if ( assembleFlag ) {
    this->timer.pauseTimer(EngngModelTimer :: EMTT_NetComputationalStepTimer);
    this->timer.resumeTimer(EngngModelTimer :: EMTT_NetComputationalStepTimer);
    for ( int i = 1; i <= nelems; i++ ) {
        if ( !element->isActivated(tStep) || !this->isElementActivated(element) ) {
    this->timer.pauseTimer(EngngModelTimer :: EMTT_NetComputationalStepTimer);
    this->timer.resumeTimer(EngngModelTimer :: EMTT_NetComputationalStepTimer);
    for ( int i = 1; i <= nelems; i++ ) {
        #pragma omp critical
    this->timer.pauseTimer(EngngModelTimer :: EMTT_NetComputationalStepTimer);
    OOFEM_ERROR("updateSolution is not implemented.");
    OOFEM_ERROR("updateInternalRHS is not implemented.");
EngngModel :: initStepIncrements()
        for ( auto &elem : domain->giveElements() ) {
            elem->initForNewStep();
    auto s = solutionVector;
EngngModel :: updateDomainLinks()
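// --- Context I/O: saveContext()/restoreContext() stream the solution step and each domain; on
// restore the current TimeStep is re-created from the stored step number and the matching
// metastep is located via isStepValid().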
        domain->saveContext(stream, mode);
    if ( !stream.read(istep) ) {
        currentStep = std::make_unique<TimeStep>(istep, this, 0, 0., 0., 0);
        if ( !this->giveMetaStep(pmstep)->isStepValid(istep - 1) ) {
        domain->restoreContext(stream, mode);
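// --- Service routines: metastep access, context/domain file-name construction, errorInfo(),
// domain accessors, parallel contexts, output stream and timer queries.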
EngngModel :: giveCurrentMetaStep()
EngngModel :: giveContextFileName(int tStepNumber, int stepVersion) const
    sprintf(fext, ".%d.%d.osf", tStepNumber, stepVersion);
    return fname + fext;
EngngModel :: giveDomainFileName(int domainNum, int domainSerNum) const
    sprintf(fext, ".domain.%d.%d.din", domainNum, domainSerNum);
    return fname + fext;
EngngModel :: errorInfo(const char *func) const
    return std::string(this->giveClassName()) + "::" + func + ", Rank: " + std::to_string(rank);
EngngModel :: giveDomain(int i)
    if ( ( i > 0 ) && ( i <= (int)this->domainList.size() ) ) {
EngngModel :: setDomain(int i, Domain *ptr, bool iDeallocateOld)
    if ( i < 1 || i > (int)this->domainList.size() ) {
    if ( !iDeallocateOld ) {
EngngModel :: giveParallelContext(int i)
        OOFEM_ERROR("context not initialized for this problem");
EngngModel :: initParallelContexts()
EngngModel :: letOutputBaseFileNameBe(const std :: string &src)
EngngModel :: giveOutputStream()
EngngModel :: giveSolutionStepTime()
    return this->timer.getUtime(EngngModelTimer :: EMTT_SolutionStepTimer);
EngngModel :: giveAnalysisTime(int &rhrs, int &rmin, int &rsec, int &uhrs, int &umin, int &usec)
    double rtsec = this->timer.getWtime(EngngModelTimer :: EMTT_AnalysisTimer);
    double utsec = this->timer.getUtime(EngngModelTimer :: EMTT_AnalysisTimer);
    rsec = rmin = rhrs = 0;
    usec = umin = uhrs = 0;
    this->timer.convert2HMS(rhrs, rmin, rsec, rtsec);
    this->timer.convert2HMS(uhrs, umin, usec, utsec);
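// --- Termination and checks: terminateAnalysis() stops the analysis timer and reports real and
// user time to the output file and the log; checkProblemConsistency() and postInitialize()
// delegate to every domain.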
EngngModel :: terminateAnalysis()
    int rsec = 0, rmin = 0, rhrs = 0;
    int usec = 0, umin = 0, uhrs = 0;
    time_t endTime = time(NULL);
    this->timer.stopTimer(EngngModelTimer :: EMTT_AnalysisTimer);
        fprintf(out, "\nFinishing analysis on: %s\n", ctime(& endTime) );
        fprintf(out, "Real time consumed: %03dh:%02dm:%02ds\n", rhrs, rmin, rsec);
        fprintf(out, "User time consumed: %03dh:%02dm:%02ds\n\n\n", uhrs, umin, usec);
    OOFEM_LOG_FORCED("Real time consumed: %03dh:%02dm:%02ds\n", rhrs, rmin, rsec);
    OOFEM_LOG_FORCED("User time consumed: %03dh:%02dm:%02ds\n", uhrs, umin, usec);
EngngModel :: checkProblemConsistency()
        result &= domain->checkConsistency();
EngngModel :: postInitialize()
        domain->postInitialize();
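// --- Parallel support: initParallel() queries the rank within MPI_COMM_WORLD; initializeCommMaps()
// builds the communication maps; the pack/unpack helpers below exchange dof-manager and remote
// element data between partitions (errors are raised when parallel support is not compiled in).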
EngngModel :: initParallel()
    this->comm = MPI_COMM_WORLD;
    MPI_Comm_rank(this->comm, & this->rank);
    OOFEM_ERROR("Can't do it, only compiled for sequential runs");
    #ifdef __VERBOSE_PARALLEL
        elem->drawYourself(gc, tStep);
        dman->drawYourself(gc, tStep);
EngngModel :: initializeCommMaps(bool forceInit)
#ifdef __MPI_PARALLEL_MODE
    communicator->setUpCommunicationMaps(this, true, forceInit);
    OOFEM_ERROR("Can't set up comm maps, parallel support not compiled");
#ifdef __MPI_PARALLEL_MODE
    #ifdef __VERBOSE_PARALLEL
    tmp.array = & answer;
    result &= communicator->packAllData(this, & tmp, & EngngModel :: packDofManagers);
    #ifdef __VERBOSE_PARALLEL
    #ifdef __VERBOSE_PARALLEL
    result &= communicator->unpackAllData(this, & tmp, & EngngModel :: unpackDofManagers);
    OOFEM_ERROR("Support for parallel mode not compiled in.");
EngngModel :: exchangeRemoteElementData(int ExchangeTag)
#ifdef __MPI_PARALLEL_MODE
    #ifdef __VERBOSE_PARALLEL
    result &= nonlocCommunicator->packAllData(this, & EngngModel :: packRemoteElementData);
    #ifdef __VERBOSE_PARALLEL
    #ifdef __VERBOSE_PARALLEL
    if ( !( result &= nonlocCommunicator->unpackAllData(this, & EngngModel :: unpackRemoteElementData) ) ) {
        OOFEM_ERROR("Receiving and unpacking remote element data");
    OOFEM_ERROR("Support for parallel mode not compiled in.");
#ifdef __MPI_PARALLEL_MODE
        OOFEM_WARNING("No load balancer found, skipping load balancing step");
    lb->printStatistics();
    LoadBalancerMonitor :: LoadBalancerDecisionType _d = lbm->decide(tStep);
    if ( ( _d == LoadBalancerMonitor :: LBD_RECOVER ) ||
        this->timer.startTimer(EngngModelTimer :: EMTT_LoadBalancingTimer);
        lb->calculateLoadTransfer();
    #ifdef __VERBOSE_PARALLEL
        int nnodes = giveDomain(1)->giveNumberOfDofManagers();
        fprintf(stderr, "\n[%d] Nodal Table\n", myrank);
        for ( int i = 1; i <= nnodes; i++ ) {
                fprintf( stderr, "[%d]: %5d[%d] local ", myrank, i, giveDomain(1)->giveDofManager(i)->giveGlobalNumber() );
                fprintf( stderr, "[%d]: %5d[%d] shared ", myrank, i, giveDomain(1)->giveDofManager(i)->giveGlobalNumber() );
                fprintf( stderr, "(%d)", dof->giveEquationNumber(dn) );
            fprintf(stderr, "\n");
        this->timer.stopTimer(EngngModelTimer :: EMTT_LoadBalancingTimer);
        double _steptime = this->timer.getUtime(EngngModelTimer :: EMTT_LoadBalancingTimer);
        OOFEM_LOG_INFO("[%d] EngngModel info: user time consumed by load rebalancing %.2fs\n",
    for ( int ielem : toSendMap ) {
    for ( int ielem : toRecvMap ) {
    for ( int inode : toSendMap ) {
        for ( auto &jdof : *dman ) {
            if ( jdof->isPrimaryDof() ) {
                int eqNum = jdof->giveEquationNumber(s);
                result &= pcbuff->write( src->at(eqNum) );
    for ( int inode : toRecvMap ) {
        for ( auto &jdof : *dman ) {
            int eqNum = jdof->giveEquationNumber(s);
            if ( jdof->isPrimaryDof() && eqNum ) {
                result &= pcbuff->read(value);
                    dest->at(eqNum) += value;
                    dest->at(eqNum) = value;