From 5cd8398bcda711a88296be0b6ecdf760b527997e Mon Sep 17 00:00:00 2001 From: Weiqun Zhang Date: Tue, 17 Sep 2024 16:54:30 -0700 Subject: [PATCH 1/6] FillPatchSingleLevel and FillPatchTwoLevels for ERF These new functions do not take PhysBC functors. So it's the caller's responsibility to fill domain ghost cells before calling. --- Src/AmrCore/AMReX_FillPatchUtil.H | 69 ++++++++++ Src/AmrCore/AMReX_FillPatchUtil_I.H | 198 +++++++++++++++++++++++++++- 2 files changed, 266 insertions(+), 1 deletion(-) diff --git a/Src/AmrCore/AMReX_FillPatchUtil.H b/Src/AmrCore/AMReX_FillPatchUtil.H index 11a8eb560e..b71102b58d 100644 --- a/Src/AmrCore/AMReX_FillPatchUtil.H +++ b/Src/AmrCore/AMReX_FillPatchUtil.H @@ -728,6 +728,75 @@ namespace amrex const IntVect& ratio, Interp* mapper, const Vector& bcs); + /** + * \brief FillPatch with data from the current level + * + * In this version of FillPatchSingleLevel, it's the CALLER's + * responsibility to make sure that smf has `snghost` ghost cells + * already filled before calling this function. The destination + * MultiFab/FabArray is on the same AMR level as the source + * MultiFab/FabArray. If needed, interpolation in time is performed. + * + * \tparam MF the MultiFab/FabArray type + * + * \param mf destination MF + * \param nghost number of ghost cells of mf needed to be filled + * \param time time associated with mf + * \param smf source MFs + * \param snghost number of ghost cells in smf with valid data + * \param stime times associated with smf + * \param scomp starting component of the source MFs + * \param dcomp starting component of the destination MF + * \param ncomp number of components + * \param geom Geometry for this level + */ + template + std::enable_if_t::value> + FillPatchSingleLevel (MF& mf, IntVect const& nghost, Real time, + const Vector& smf, IntVect const& snghost, + const Vector& stime, int scomp, int dcomp, int ncomp, + const Geometry& geom); + + /** + * \brief FillPatch with data from the current level and the level below. + * + * In this version of FillPatchTwoLevels, it's the CALLER's + * responsibility to make sure all ghost cells of the coarse MF needed + * for interpolation are filled already before calling this + * function. It's assumed that the fine level MultiFab mf's BoxArray is + * coarsenable by the refinement ratio. There is no support for EB. + * + * \tparam MF the MultiFab/FabArray type + * \tparam Interp spatial interpolater + * + * \param mf destination MF on the fine level + * \param nghost number of ghost cells of mf inside domain needed to be filled + * \param nghost_outside_domain number of ghost cells of mf outside domain needed to be filled + * \param time time associated with mf + * \param cmf source MFs on the coarse level + * \param ct times associated with cmf + * \param fmf source MFs on the fine level + * \param ft times associated with fmf + * \param scomp starting component of the source MFs + * \param dcomp starting component of the destination MF + * \param ncomp number of components + * \param cgeom Geometry for the coarse level + * \param fgeom Geometry for the fine level + * \param ratio refinement ratio + * \param mapper spatial interpolater + * \param bcs boundary types for each component.
+ */ + template + std::enable_if_t::value> + FillPatchTwoLevels (MF& mf, IntVect const& nghost, + IntVect const& nghost_outside_domain, Real time, + const Vector& cmf, const Vector& ct, + const Vector& fmf, const Vector& ft, + int scomp, int dcomp, int ncomp, + const Geometry& cgeom, const Geometry& fgeom, + const IntVect& ratio, Interp* mapper, + const Vector& bcs); + #ifndef BL_NO_FORT enum InterpEM_t { InterpE, InterpB}; diff --git a/Src/AmrCore/AMReX_FillPatchUtil_I.H b/Src/AmrCore/AMReX_FillPatchUtil_I.H index fa26cc5ea9..4192d75934 100644 --- a/Src/AmrCore/AMReX_FillPatchUtil_I.H +++ b/Src/AmrCore/AMReX_FillPatchUtil_I.H @@ -1075,6 +1075,8 @@ InterpFromCoarseLevel (Array const& mf, IntVect const& ngho const PreInterpHook& pre_interp, const PostInterpHook& post_interp) { + BL_PROFILE("InterpFromCoarseLevel"); + using FAB = typename MF::FABType::value_type; using iFAB = typename iMultiFab::FABType::value_type; @@ -1219,6 +1221,8 @@ InterpFromCoarseLevel (MF& mf, IntVect const& nghost, const IntVect& ratio, Interp* mapper, const Vector& bcs) { + BL_PROFILE("InterpFromCoarseLevel_nobc"); + const BoxArray& ba = mf.boxArray(); const DistributionMapping& dm = mf.DistributionMap(); @@ -1244,11 +1248,203 @@ InterpFromCoarseLevel (MF& mf, IntVect const& nghost, cgeom.periodicity()); Box fdomain_g = amrex::convert(fgeom.Domain(),typ); - fdomain_g.grow(nghost_outside_domain); + for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) { + if (fgeom.isPeriodic(idim)) { + fdomain_g.grow(idim, nghost[idim]); + } else { + fdomain_g.grow(idim, nghost_outside_domain[idim]); + } + } FillPatchInterp(mf, dcomp, mf_crse_patch, 0, ncomp, nghost, cgeom, fgeom, fdomain_g, ratio, mapper, bcs, 0); } +template +std::enable_if_t::value> +FillPatchSingleLevel (MF& mf, IntVect const& nghost, Real time, + const Vector& smf, IntVect const& snghost, + const Vector& stime, int scomp, int dcomp, int ncomp, + const Geometry& geom) +{ + BL_PROFILE("FillPatchSingleLevel_nobc"); + + AMREX_ASSERT(scomp+ncomp <= smf[0]->nComp()); + AMREX_ASSERT(dcomp+ncomp <= mf.nComp()); + AMREX_ASSERT(smf.size() == stime.size()); + AMREX_ASSERT(!smf.empty()); + AMREX_ASSERT(nghost.allLE(mf.nGrowVect())); + + if (smf.size() == 1) + { + if (&mf == smf[0] && scomp == dcomp) { + mf.FillBoundary(dcomp, ncomp, nghost, geom.periodicity()); + } else { + mf.ParallelCopy(*smf[0], scomp, dcomp, ncomp, snghost, nghost, geom.periodicity()); + } + } + else if (smf.size() == 2) + { + BL_ASSERT(smf[0]->boxArray() == smf[1]->boxArray()); + MF raii; + MF * dmf; + int destcomp; + bool sameba; + if (mf.boxArray() == smf[0]->boxArray() && + mf.DistributionMap() == smf[0]->DistributionMap()) + { + dmf = &mf; + destcomp = dcomp; + sameba = true; + } else { + raii.define(smf[0]->boxArray(), smf[0]->DistributionMap(), ncomp, snghost, + MFInfo(), smf[0]->Factory()); + + dmf = &raii; + destcomp = 0; + sameba = false; + } + + if ((dmf != smf[0] && dmf != smf[1]) || scomp != dcomp) + { + IntVect interp_ghost = snghost; + if (sameba) { interp_ghost.min(nghost); } +#ifdef AMREX_USE_OMP +#pragma omp parallel if (Gpu::notInLaunchRegion()) +#endif + for (MFIter mfi(*dmf,TilingIfNotGPU()); mfi.isValid(); ++mfi) + { + const Box& bx = mfi.growntilebox(interp_ghost); + const Real t0 = stime[0]; + const Real t1 = stime[1]; + auto const sfab0 = smf[0]->array(mfi); + auto const sfab1 = smf[1]->array(mfi); + auto dfab = dmf->array(mfi); + + if (time == t0) + { + AMREX_HOST_DEVICE_PARALLEL_FOR_4D ( bx, ncomp, i, j, k, n, + { + dfab(i,j,k,n+destcomp) = sfab0(i,j,k,n+scomp); + }); + } + else 
if (time == t1) + { + AMREX_HOST_DEVICE_PARALLEL_FOR_4D ( bx, ncomp, i, j, k, n, + { + dfab(i,j,k,n+destcomp) = sfab1(i,j,k,n+scomp); + }); + } + else if (! amrex::almostEqual(t0,t1)) + { + Real alpha = (t1-time)/(t1-t0); + Real beta = (time-t0)/(t1-t0); + AMREX_HOST_DEVICE_PARALLEL_FOR_4D ( bx, ncomp, i, j, k, n, + { + dfab(i,j,k,n+destcomp) = alpha*sfab0(i,j,k,n+scomp) + + beta*sfab1(i,j,k,n+scomp); + }); + } + else + { + AMREX_HOST_DEVICE_PARALLEL_FOR_4D ( bx, ncomp, i, j, k, n, + { + dfab(i,j,k,n+destcomp) = sfab0(i,j,k,n+scomp); + }); + } + } + } + + if (sameba) + { + // Note that when sameba is true mf's BoxArray is nonoverlapping. + // So FillBoundary is safe. + mf.FillBoundary(dcomp, ncomp, nghost, geom.periodicity()); + } + else + { + mf.ParallelCopy(*dmf, 0, dcomp, ncomp, snghost, nghost, geom.periodicity()); + } + } + else { + amrex::Abort("FillPatchSingleLevel: high-order interpolation in time not implemented yet"); + } +} + +template +std::enable_if_t::value> +FillPatchTwoLevels (MF& mf, IntVect const& nghost, + IntVect const& nghost_outside_domain, Real time, + const Vector& cmf, const Vector& ct, + const Vector& fmf, const Vector& ft, + int scomp, int dcomp, int ncomp, + const Geometry& cgeom, const Geometry& fgeom, + const IntVect& ratio, Interp* mapper, + const Vector& bcs) +{ + BL_PROFILE("FillPatchTwoLevels_nobc"); + + const IndexType& typ = mf.ixType(); + + AMREX_ALWAYS_ASSERT(mf.ixType.nodeCentered() || mf.cellCentered()); + + if (nghost.max() > 0 || mf.getBDKey() != fmf[0]->getBDKey()) + { + const InterpolaterBoxCoarsener& coarsener = mapper->BoxCoarsener(ratio); + + Box tmp(-nghost, IntVect(32), typ); + Box tmp2 = coarsener.doit(tmp); + // This is the number of coarse ghost cells needed to interpolate + // nghost fine ghost cells inside the domain + IntVect src_ghost = -tmp2.smallEnd(); + + // This is the number of coarse ghost cells needed to interpolate + // nghost_outside_domain fine ghost cells outside the domain + tmp = Box(-nghost_outside_domain, IntVect(32), typ); + tmp2 = coarsener.doit(tmp); + IntVect src_ghost_outside_domain = -tmp2.smallEnd(); + AMREX_ALWAYS_ASSERT(src_ghost_outside_domain.allGE(src_ghost)); + + IntVect cghost = cmf.nGrowVect(); + cghost.min(src_ghost); + // This is the minimum number of ghost cells needed in cmf. + AMREX_ALWAYS_ASSERT(cghost.allGE(src_ghost_outside_domain)); + + const FabArrayBase::FPinfo& fpc = FabArrayBase::TheFPinfo(*fmf[0], mf, + nghost, + coarsener, + fgeom, + cgeom, + nullptr); + + if ( ! 
fpc.ba_crse_patch.empty()) + { + MF mf_crse_patch(fpc.ba_crse_patch, fpc.dm_patch, ncomp, 0); + + FillPatchSingleLevel(mf_crse_patch, IntVect(0), time, cmf, cghost, + ct, scomp, 0, ncomp, cgeom); + + MF mf_fine_patch(fpc.ba_fine_patch, fpc.dm_patch, ncomp, 0); + + Box fdomain_g = amrex::convert(fgeom.Domain(),typ); + for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) { + if (fgeom.isPeriodic(idim)) { + fdomain_g.grow(idim, nghost[idim]); + } else { + fdomain_g.grow(idim, nghost_outside_domain[idim]); + } + } + FillPatchInterp(mf_fine_patch, 0, mf_crse_patch, 0, + ncomp, IntVect(0), cgeom, fgeom, + fdomain_g, ratio, mapper, bcs, 0); + + mf.ParallelCopy(mf_fine_patch, 0, dcomp, ncomp, IntVect{0}, nghost); + } + } + + FillPatchSingleLevel(mf, nghost, time, fmf, IntVect(0), ft, + scomp, dcomp, ncomp, fgeom); +} + template std::enable_if_t::value> FillPatchNLevels (MF& mf, int level, const IntVect& nghost, Real time, From cc713b0424986fc3372bfbffaab76aee8c53e0d1 Mon Sep 17 00:00:00 2001 From: Weiqun Zhang Date: Tue, 17 Sep 2024 21:01:26 -0700 Subject: [PATCH 2/6] Fix compilation --- Src/AmrCore/AMReX_FillPatchUtil_I.H | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/Src/AmrCore/AMReX_FillPatchUtil_I.H b/Src/AmrCore/AMReX_FillPatchUtil_I.H index 4192d75934..d860908a07 100644 --- a/Src/AmrCore/AMReX_FillPatchUtil_I.H +++ b/Src/AmrCore/AMReX_FillPatchUtil_I.H @@ -1385,7 +1385,7 @@ FillPatchTwoLevels (MF& mf, IntVect const& nghost, const IndexType& typ = mf.ixType(); - AMREX_ALWAYS_ASSERT(mf.ixType.nodeCentered() || mf.cellCentered()); + AMREX_ALWAYS_ASSERT(typ.nodeCentered() || typ.cellCentered()); if (nghost.max() > 0 || mf.getBDKey() != fmf[0]->getBDKey()) { @@ -1393,6 +1393,7 @@ FillPatchTwoLevels (MF& mf, IntVect const& nghost, Box tmp(-nghost, IntVect(32), typ); Box tmp2 = coarsener.doit(tmp); + // This is the number of coarse ghost cells needed to interpolate // nghost fine ghost cells inside the domain IntVect src_ghost = -tmp2.smallEnd(); @@ -1402,12 +1403,13 @@ FillPatchTwoLevels (MF& mf, IntVect const& nghost, tmp = Box(-nghost_outside_domain, IntVect(32), typ); tmp2 = coarsener.doit(tmp); IntVect src_ghost_outside_domain = -tmp2.smallEnd(); - AMREX_ALWAYS_ASSERT(src_ghost_outside_domain.allGE(src_ghost)); - IntVect cghost = cmf.nGrowVect(); + IntVect cghost = cmf[0].nGrowVect(); cghost.min(src_ghost); + // This is the minimum number of ghost cells needed in cmf.
- AMREX_ALWAYS_ASSERT(cghost.allGE(src_ghost_outside_domain)); + AMREX_ALWAYS_ASSERT(cghost.allGE(src_ghost_outside_domain) && + cmf[1]->nGrowVect().allGE(cghost)); const FabArrayBase::FPinfo& fpc = FabArrayBase::TheFPinfo(*fmf[0], mf, nghost, From 1d4d15d18f97e90e1549b15a14301f564f8cac98 Mon Sep 17 00:00:00 2001 From: Weiqun Zhang Date: Wed, 18 Sep 2024 10:18:56 -0700 Subject: [PATCH 3/6] Refactor InterpFromCoarseLevel --- Src/AmrCore/AMReX_FillPatchUtil_I.H | 98 +++++++++++++---------------- Src/Base/AMReX_PhysBCFunct.H | 44 +++++++++++++ 2 files changed, 86 insertions(+), 56 deletions(-) diff --git a/Src/AmrCore/AMReX_FillPatchUtil_I.H b/Src/AmrCore/AMReX_FillPatchUtil_I.H index d860908a07..35e9ef6ba2 100644 --- a/Src/AmrCore/AMReX_FillPatchUtil_I.H +++ b/Src/AmrCore/AMReX_FillPatchUtil_I.H @@ -997,6 +997,8 @@ InterpFromCoarseLevel (MF& mf, IntVect const& nghost, Real time, const PreInterpHook& pre_interp, const PostInterpHook& post_interp) { + BL_PROFILE("InterpFromCoarseLevel"); + using FAB = typename MF::FABType::value_type; const InterpolaterBoxCoarsener& coarsener = mapper->BoxCoarsener(ratio); @@ -1010,34 +1012,50 @@ InterpFromCoarseLevel (MF& mf, IntVect const& nghost, Real time, Box fdomain_g( amrex::convert(fgeom.Domain(),mf.ixType()) ); for (int i = 0; i < AMREX_SPACEDIM; ++i) { - if (fgeom.isPeriodic(i)) { - fdomain_g.grow(i,nghost[i]); + if constexpr (std::is_same_v) { + if (fgeom.isPeriodic(i)) { + fdomain_g.grow(i, nghost[i]); + } else { + fdomain_g.grow(i, fbc.nghost_outside_domain[i]); + } + } else { + if (fgeom.isPeriodic(i)) { + fdomain_g.grow(i,nghost[i]); + } } } - BoxArray ba_crse_patch(ba.size()); - { // TODO: later we might want to cache this - for (int i = 0, N = ba.size(); i < N; ++i) - { - Box bx = amrex::convert(amrex::grow(ba[i],nghost), typ); - bx &= fdomain_g; - ba_crse_patch.set(i, coarsener.doit(bx)); + MF mf_crse_patch; + IntVect send_ghost(0), recv_ghost(0); + if constexpr (std::is_same_v) { + mf_crse_patch.define(amrex::coarsen(ba,ratio), dm, ncomp, fbc.src_ghost); + send_ghost = fbc.cghost; + recv_ghost = fbc.src_ghost; + } else { + BoxArray ba_crse_patch(ba.size()); + { // TODO: later we might want to cache this + for (int i = 0, N = ba.size(); i < N; ++i) + { + Box bx = amrex::convert(amrex::grow(ba[i],nghost), typ); + bx &= fdomain_g; + ba_crse_patch.set(i, coarsener.doit(bx)); + } } - } - MF mf_crse_patch; #ifdef AMREX_USE_EB - if (EB2::TopIndexSpaceIfPresent()) { - auto factory = makeEBFabFactory(cgeom, ba_crse_patch, dm, {0,0,0}, EBSupport::basic); - mf_crse_patch.define(ba_crse_patch, dm, ncomp, 0, MFInfo(), *factory); - } else + if (EB2::TopIndexSpaceIfPresent()) { + auto factory = makeEBFabFactory(cgeom, ba_crse_patch, dm, {0,0,0}, EBSupport::basic); + mf_crse_patch.define(ba_crse_patch, dm, ncomp, 0, MFInfo(), *factory); + } else #endif - { - mf_crse_patch.define(ba_crse_patch, dm, ncomp, 0); + { + mf_crse_patch.define(ba_crse_patch, dm, ncomp, 0); + } + detail::mf_set_domain_bndry (mf_crse_patch, cgeom); } - detail::mf_set_domain_bndry (mf_crse_patch, cgeom); - mf_crse_patch.ParallelCopy(cmf, scomp, 0, ncomp, cgeom.periodicity()); + mf_crse_patch.ParallelCopy(cmf, scomp, 0, ncomp, send_ghost, recv_ghost, + cgeom.periodicity()); cbc(mf_crse_patch, 0, ncomp, mf_crse_patch.nGrowVect(), time, cbccomp); @@ -1075,7 +1093,7 @@ InterpFromCoarseLevel (Array const& mf, IntVect const& ngho const PreInterpHook& pre_interp, const PostInterpHook& post_interp) { - BL_PROFILE("InterpFromCoarseLevel"); + BL_PROFILE("InterpFromCoarseLevel(array)"); using FAB 
= typename MF::FABType::value_type; using iFAB = typename iMultiFab::FABType::value_type; @@ -1221,42 +1239,10 @@ InterpFromCoarseLevel (MF& mf, IntVect const& nghost, const IntVect& ratio, Interp* mapper, const Vector& bcs) { - BL_PROFILE("InterpFromCoarseLevel_nobc"); - - const BoxArray& ba = mf.boxArray(); - const DistributionMapping& dm = mf.DistributionMap(); - - const IndexType& typ = ba.ixType(); - BL_ASSERT(typ == cmf.boxArray().ixType()); - - const InterpolaterBoxCoarsener& coarsener = mapper->BoxCoarsener(ratio); - Box tmp(-nghost, IntVect(32), typ); - Box tmp2 = coarsener.doit(tmp); - IntVect src_ghost = -tmp2.smallEnd(); - - tmp = Box(-nghost_outside_domain, IntVect(32), typ); - tmp2 = coarsener.doit(tmp); - IntVect src_ghost_outside_domain = -tmp2.smallEnd(); - - IntVect cghost = cmf.nGrowVect(); - cghost.min(src_ghost); - - AMREX_ALWAYS_ASSERT(cghost.allGE(src_ghost_outside_domain)); - - MF mf_crse_patch(amrex::coarsen(ba,ratio), dm, ncomp, src_ghost); - mf_crse_patch.ParallelCopy(cmf, scomp, 0, ncomp, cghost, src_ghost, - cgeom.periodicity()); - - Box fdomain_g = amrex::convert(fgeom.Domain(),typ); - for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) { - if (fgeom.isPeriodic(idim)) { - fdomain_g.grow(idim, nghost[idim]); - } else { - fdomain_g.grow(idim, nghost_outside_domain[idim]); - } - } - FillPatchInterp(mf, dcomp, mf_crse_patch, 0, ncomp, nghost, cgeom, fgeom, - fdomain_g, ratio, mapper, bcs, 0); + PhysBCFunctUseCoarseGhost erfbc(cmf,nghost,nghost_outside_domain,ratio,mapper); + InterpFromCoarseLevel(mf, nghost, Real(0.0), cmf, scomp, dcomp, ncomp, + cgeom, fgeom, erfbc, 0, erfbc, 0, ratio, mapper, + bcs, 0); } template diff --git a/Src/Base/AMReX_PhysBCFunct.H b/Src/Base/AMReX_PhysBCFunct.H index 699863f062..2a9023b8c5 100644 --- a/Src/Base/AMReX_PhysBCFunct.H +++ b/Src/Base/AMReX_PhysBCFunct.H @@ -123,6 +123,50 @@ public: Real /*time*/, int /*bccomp*/) {} }; +class PhysBCFunctUseCoarseGhost +{ +public: + + template + PhysBCFunctUseCoarseGhost (MF const& cmf, IntVect const& a_nghost, + IntVect const& a_nghost_outside_domain, + IntVect const& ratio, Interp* mapper) + : nghost(a_nghost), nghost_outside_domain(a_nghost_outside_domain) + { + IndexType typ = cmf.ixType(); + + const auto& coarsener = mapper->BoxCoarsener(ratio); + Box tmp(-nghost, IntVect(32), typ); + Box tmp2 = coarsener.doit(tmp); + src_ghost = -tmp2.smallEnd(); + + tmp = Box(-nghost_outside_domain, IntVect(32), typ); + tmp2 = coarsener.doit(tmp); + src_ghost_outside_domain = -tmp2.smallEnd(); + + cghost = cmf.nGrowVect(); + cghost.min(src_ghost); + AMREX_ALWAYS_ASSERT(cghost.allGE(src_ghost_outside_domain)); + } + + void operator() (MultiFab& /*mf*/, int /*dcomp*/, int /*ncomp*/, IntVect const& /*nghost*/, + Real /*time*/, int /*bccomp*/) {} + + IntVect nghost; // # of fine ghosts inside domain to be filled + + IntVect nghost_outside_domain; // # of fine ghosts outside domain to be filled + + // This is the number of coarse ghost cells needed to interpolate fine + // ghost cells inside the domain + IntVect src_ghost; + + // This is the number of coarse ghost cells needed to interpolate + // nghost_outside_domain fine ghost cells outside the domain + IntVect src_ghost_outside_domain; + + // This is the minimum number of ghost cells needed in coarse MF + IntVect cghost; +}; template class PhysBCFunct From c5ffc295dee1aa3c9bced48a288c8087b1572440 Mon Sep 17 00:00:00 2001 From: Weiqun Zhang Date: Wed, 18 Sep 2024 10:55:55 -0700 Subject: [PATCH 4/6] bcscomp --- Src/AmrCore/AMReX_FillPatchUtil.H | 6 ++++-- 
Src/AmrCore/AMReX_FillPatchUtil_I.H | 10 +++++----- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/Src/AmrCore/AMReX_FillPatchUtil.H b/Src/AmrCore/AMReX_FillPatchUtil.H index b71102b58d..b32c3e0294 100644 --- a/Src/AmrCore/AMReX_FillPatchUtil.H +++ b/Src/AmrCore/AMReX_FillPatchUtil.H @@ -718,6 +718,7 @@ namespace amrex * \param ratio refinement ratio * \param mapper spatial interpolater * \param bcs boundar types for each component + * \param bcscomp starting component for bcs */ template std::enable_if_t::value> @@ -726,7 +727,7 @@ namespace amrex const MF& cmf, int scomp, int dcomp, int ncomp, const Geometry& cgeom, const Geometry& fgeom, const IntVect& ratio, Interp* mapper, - const Vector& bcs); + const Vector& bcs, int bcscomp); /** * \brief FillPatch with data from the current level @@ -785,6 +786,7 @@ namespace amrex * \param ratio refinement ratio * \param mapper spatial interpolater * \param bcs boundary types for each component. + * \param bcscomp starting component for bcs */ template std::enable_if_t::value> @@ -795,7 +797,7 @@ namespace amrex int scomp, int dcomp, int ncomp, const Geometry& cgeom, const Geometry& fgeom, const IntVect& ratio, Interp* mapper, - const Vector& bcs); + const Vector& bcs, int bcscomp); #ifndef BL_NO_FORT enum InterpEM_t { InterpE, InterpB}; diff --git a/Src/AmrCore/AMReX_FillPatchUtil_I.H b/Src/AmrCore/AMReX_FillPatchUtil_I.H index 35e9ef6ba2..777f6bbb82 100644 --- a/Src/AmrCore/AMReX_FillPatchUtil_I.H +++ b/Src/AmrCore/AMReX_FillPatchUtil_I.H @@ -1237,12 +1237,12 @@ InterpFromCoarseLevel (MF& mf, IntVect const& nghost, const MF& cmf, int scomp, int dcomp, int ncomp, const Geometry& cgeom, const Geometry& fgeom, const IntVect& ratio, Interp* mapper, - const Vector& bcs) + const Vector& bcs, int bcscomp) { PhysBCFunctUseCoarseGhost erfbc(cmf,nghost,nghost_outside_domain,ratio,mapper); InterpFromCoarseLevel(mf, nghost, Real(0.0), cmf, scomp, dcomp, ncomp, cgeom, fgeom, erfbc, 0, erfbc, 0, ratio, mapper, - bcs, 0); + bcs, bcscomp); } template @@ -1365,7 +1365,7 @@ FillPatchTwoLevels (MF& mf, IntVect const& nghost, int scomp, int dcomp, int ncomp, const Geometry& cgeom, const Geometry& fgeom, const IntVect& ratio, Interp* mapper, - const Vector& bcs) + const Vector& bcs, int bcscomp) { BL_PROFILE("FillPatchTwoLevels_nobc"); @@ -1390,7 +1390,7 @@ FillPatchTwoLevels (MF& mf, IntVect const& nghost, tmp2 = coarsener.doit(tmp); IntVect src_ghost_outside_domain = -tmp2.smallEnd(); - IntVect cghost = cmf[0].nGrowVect(); + IntVect cghost = cmf[0]->nGrowVect(); cghost.min(src_ghost); // This is the minimum number of ghost cells needed in cmf. 
@@ -1423,7 +1423,7 @@ FillPatchTwoLevels (MF& mf, IntVect const& nghost, } FillPatchInterp(mf_fine_patch, 0, mf_crse_patch, 0, ncomp, IntVect(0), cgeom, fgeom, - fdomain_g, ratio, mapper, bcs, 0); + fdomain_g, ratio, mapper, bcs, bcscomp); mf.ParallelCopy(mf_fine_patch, 0, dcomp, ncomp, IntVect{0}, nghost); } From 6bd4b72c4690f938776cd682da8f8776acc59671 Mon Sep 17 00:00:00 2001 From: Weiqun Zhang Date: Wed, 18 Sep 2024 16:09:21 -0700 Subject: [PATCH 5/6] Face data --- Src/AmrCore/AMReX_FillPatchUtil_I.H | 235 ++++++---------------------- Src/Base/AMReX_PhysBCFunct.H | 5 + 2 files changed, 57 insertions(+), 183 deletions(-) diff --git a/Src/AmrCore/AMReX_FillPatchUtil_I.H b/Src/AmrCore/AMReX_FillPatchUtil_I.H index 777f6bbb82..13e6ea8917 100644 --- a/Src/AmrCore/AMReX_FillPatchUtil_I.H +++ b/Src/AmrCore/AMReX_FillPatchUtil_I.H @@ -84,12 +84,17 @@ FillPatchSingleLevel (MF& mf, IntVect const& nghost, Real time, AMREX_ASSERT(!smf.empty()); AMREX_ASSERT(nghost.allLE(mf.nGrowVect())); + IntVect src_ghost(0); + if constexpr (std::is_same_v) { + src_ghost = physbcf.fp1_src_ghost; + } + if (smf.size() == 1) { if (&mf == smf[0] && scomp == dcomp) { mf.FillBoundary(dcomp, ncomp, nghost, geom.periodicity()); } else { - mf.ParallelCopy(*smf[0], scomp, dcomp, ncomp, IntVect{0}, nghost, geom.periodicity()); + mf.ParallelCopy(*smf[0], scomp, dcomp, ncomp, src_ghost, nghost, geom.periodicity()); } } else if (smf.size() == 2) @@ -106,7 +111,7 @@ FillPatchSingleLevel (MF& mf, IntVect const& nghost, Real time, destcomp = dcomp; sameba = true; } else { - raii.define(smf[0]->boxArray(), smf[0]->DistributionMap(), ncomp, 0, + raii.define(smf[0]->boxArray(), smf[0]->DistributionMap(), ncomp, src_ghost, MFInfo(), smf[0]->Factory()); dmf = &raii; @@ -116,12 +121,19 @@ FillPatchSingleLevel (MF& mf, IntVect const& nghost, Real time, if ((dmf != smf[0] && dmf != smf[1]) || scomp != dcomp) { + IntVect interp_ghost(0); + if constexpr (std::is_same_v) { + interp_ghost = physbcf.fp1_src_ghost; + if (sameba) { + interp_ghost.min(nghost); + } + } #ifdef AMREX_USE_OMP #pragma omp parallel if (Gpu::notInLaunchRegion()) #endif for (MFIter mfi(*dmf,TilingIfNotGPU()); mfi.isValid(); ++mfi) { - const Box& bx = mfi.tilebox(); + const Box& bx = mfi.growntilebox(interp_ghost); const Real t0 = stime[0]; const Real t1 = stime[1]; auto const sfab0 = smf[0]->array(mfi); @@ -170,10 +182,7 @@ FillPatchSingleLevel (MF& mf, IntVect const& nghost, Real time, } else { - IntVect src_ngrow = IntVect::TheZeroVector(); - IntVect dst_ngrow = nghost; - - mf.ParallelCopy(*dmf, 0, dcomp, ncomp, src_ngrow, dst_ngrow, geom.periodicity()); + mf.ParallelCopy(*dmf, 0, dcomp, ncomp, src_ghost, nghost, geom.periodicity()); } } else { @@ -504,10 +513,16 @@ namespace detail { auto solve_mask = make_mf_crse_mask(fpc, ncomp, mf.boxArray().ixType(), ratio); mf_set_domain_bndry(mf_crse_patch, cgeom); + if constexpr (std::is_same_v) { + cbc.fp1_src_ghost = cbc.cghost; + } FillPatchSingleLevel(mf_crse_patch, time, cmf, ct, scomp, 0, ncomp, cgeom, cbc, cbccomp); mf_set_domain_bndry(mf_refined_patch, fgeom); + if constexpr (std::is_same_v) { + fbc.fp1_src_ghost = IntVect(0); + } FillPatchSingleLevel(mf_refined_patch, time, fmf, ft, scomp, 0, ncomp, fgeom, fbc, fbccomp); @@ -565,16 +580,29 @@ namespace detail { MF mf_crse_patch = make_mf_crse_patch(fpc, ncomp); mf_set_domain_bndry (mf_crse_patch, cgeom); + if constexpr (std::is_same_v) { + cbc.fp1_src_ghost = cbc.cghost; + } FillPatchSingleLevel(mf_crse_patch, time, cmf, ct, scomp, 0, ncomp, cgeom, cbc, cbccomp); MF 
mf_fine_patch = make_mf_fine_patch(fpc, ncomp); detail::call_interp_hook(pre_interp, mf_crse_patch, 0, ncomp); + Box fdomain_g( amrex::convert(fgeom.Domain(),mf.ixType()) ); + for (int i = 0; i < AMREX_SPACEDIM; ++i) { + if (fgeom.isPeriodic(i)) { + fdomain_g.grow(i, nghost[i]); + } else { + if constexpr (std::is_same_v + ) { + fdomain_g.grow(i, fbc.nghost_outside_domain[i]); + } + } + } FillPatchInterp(mf_fine_patch, 0, mf_crse_patch, 0, ncomp, IntVect(0), cgeom, fgeom, - amrex::grow(amrex::convert(fgeom.Domain(),mf.ixType()),nghost), - ratio, mapper, bcs, bcscomp); + fdomain_g, ratio, mapper, bcs, bcscomp); detail::call_interp_hook(post_interp, mf_fine_patch, 0, ncomp); @@ -583,6 +611,9 @@ namespace detail { } } + if constexpr(std::is_same_v) { + fbc.fp1_src_ghost = IntVect(0); + } FillPatchSingleLevel(mf, nghost, time, fmf, ft, scomp, dcomp, ncomp, fgeom, fbc, fbccomp); @@ -1012,15 +1043,11 @@ InterpFromCoarseLevel (MF& mf, IntVect const& nghost, Real time, Box fdomain_g( amrex::convert(fgeom.Domain(),mf.ixType()) ); for (int i = 0; i < AMREX_SPACEDIM; ++i) { - if constexpr (std::is_same_v) { - if (fgeom.isPeriodic(i)) { - fdomain_g.grow(i, nghost[i]); - } else { - fdomain_g.grow(i, fbc.nghost_outside_domain[i]); - } + if (fgeom.isPeriodic(i)) { + fdomain_g.grow(i, nghost[i]); } else { - if (fgeom.isPeriodic(i)) { - fdomain_g.grow(i,nghost[i]); + if constexpr (std::is_same_v) { + fdomain_g.grow(i, fbc.nghost_outside_domain[i]); } } } @@ -1252,108 +1279,9 @@ FillPatchSingleLevel (MF& mf, IntVect const& nghost, Real time, const Vector& stime, int scomp, int dcomp, int ncomp, const Geometry& geom) { - BL_PROFILE("FillPatchSingleLevel_nobc"); - - AMREX_ASSERT(scomp+ncomp <= smf[0]->nComp()); - AMREX_ASSERT(dcomp+ncomp <= mf.nComp()); - AMREX_ASSERT(smf.size() == stime.size()); - AMREX_ASSERT(!smf.empty()); - AMREX_ASSERT(nghost.allLE(mf.nGrowVect())); - - if (smf.size() == 1) - { - if (&mf == smf[0] && scomp == dcomp) { - mf.FillBoundary(dcomp, ncomp, nghost, geom.periodicity()); - } else { - mf.ParallelCopy(*smf[0], scomp, dcomp, ncomp, snghost, nghost, geom.periodicity()); - } - } - else if (smf.size() == 2) - { - BL_ASSERT(smf[0]->boxArray() == smf[1]->boxArray()); - MF raii; - MF * dmf; - int destcomp; - bool sameba; - if (mf.boxArray() == smf[0]->boxArray() && - mf.DistributionMap() == smf[0]->DistributionMap()) - { - dmf = &mf; - destcomp = dcomp; - sameba = true; - } else { - raii.define(smf[0]->boxArray(), smf[0]->DistributionMap(), ncomp, snghost, - MFInfo(), smf[0]->Factory()); - - dmf = &raii; - destcomp = 0; - sameba = false; - } - - if ((dmf != smf[0] && dmf != smf[1]) || scomp != dcomp) - { - IntVect interp_ghost = snghost; - if (sameba) { interp_ghost.min(nghost); } -#ifdef AMREX_USE_OMP -#pragma omp parallel if (Gpu::notInLaunchRegion()) -#endif - for (MFIter mfi(*dmf,TilingIfNotGPU()); mfi.isValid(); ++mfi) - { - const Box& bx = mfi.growntilebox(interp_ghost); - const Real t0 = stime[0]; - const Real t1 = stime[1]; - auto const sfab0 = smf[0]->array(mfi); - auto const sfab1 = smf[1]->array(mfi); - auto dfab = dmf->array(mfi); - - if (time == t0) - { - AMREX_HOST_DEVICE_PARALLEL_FOR_4D ( bx, ncomp, i, j, k, n, - { - dfab(i,j,k,n+destcomp) = sfab0(i,j,k,n+scomp); - }); - } - else if (time == t1) - { - AMREX_HOST_DEVICE_PARALLEL_FOR_4D ( bx, ncomp, i, j, k, n, - { - dfab(i,j,k,n+destcomp) = sfab1(i,j,k,n+scomp); - }); - } - else if (! 
amrex::almostEqual(t0,t1)) - { - Real alpha = (t1-time)/(t1-t0); - Real beta = (time-t0)/(t1-t0); - AMREX_HOST_DEVICE_PARALLEL_FOR_4D ( bx, ncomp, i, j, k, n, - { - dfab(i,j,k,n+destcomp) = alpha*sfab0(i,j,k,n+scomp) - + beta*sfab1(i,j,k,n+scomp); - }); - } - else - { - AMREX_HOST_DEVICE_PARALLEL_FOR_4D ( bx, ncomp, i, j, k, n, - { - dfab(i,j,k,n+destcomp) = sfab0(i,j,k,n+scomp); - }); - } - } - } - - if (sameba) - { - // Note that when sameba is true mf's BoxArray is nonoverlapping. - // So FillBoundary is safe. - mf.FillBoundary(dcomp, ncomp, nghost, geom.periodicity()); - } - else - { - mf.ParallelCopy(*dmf, 0, dcomp, ncomp, snghost, nghost, geom.periodicity()); - } - } - else { - amrex::Abort("FillPatchSingleLevel: high-order interpolation in time not implemented yet"); - } + PhysBCFunctUseCoarseGhost erfbc(snghost); + FillPatchSingleLevel(mf, nghost, time, smf, stime, scomp, dcomp, ncomp, geom, + erfbc, 0); } template @@ -1367,70 +1295,11 @@ FillPatchTwoLevels (MF& mf, IntVect const& nghost, const IntVect& ratio, Interp* mapper, const Vector& bcs, int bcscomp) { - BL_PROFILE("FillPatchTwoLevels_nobc"); - - const IndexType& typ = mf.ixType(); - - AMREX_ALWAYS_ASSERT(typ.nodeCentered() || typ.cellCentered()); - - if (nghost.max() > 0 || mf.getBDKey() != fmf[0]->getBDKey()) - { - const InterpolaterBoxCoarsener& coarsener = mapper->BoxCoarsener(ratio); - - Box tmp(-nghost, IntVect(32), typ); - Box tmp2 = coarsener.doit(tmp); - - // This is the number of coarse ghost cells needed to interpolate - // nghost fine ghost cells inside the domain - IntVect src_ghost = -tmp2.smallEnd(); - - // This is the number of coarse ghost cells needed to interpolate - // nghost_outside_domain fine ghost cells outside the domain - tmp = Box(-nghost_outside_domain, IntVect(32), typ); - tmp2 = coarsener.doit(tmp); - IntVect src_ghost_outside_domain = -tmp2.smallEnd(); - - IntVect cghost = cmf[0]->nGrowVect(); - cghost.min(src_ghost); - - // This is the minimum number of ghost cells needed in cmf. - AMREX_ALWAYS_ASSERT(cghost.allGE(src_ghost_outside_domain) && - cmf[1]->nGrowVect().allGE(cghost)); - - const FabArrayBase::FPinfo& fpc = FabArrayBase::TheFPinfo(*fmf[0], mf, - nghost, - coarsener, - fgeom, - cgeom, - nullptr); - - if ( ! 
fpc.ba_crse_patch.empty()) - { - MF mf_crse_patch(fpc.ba_crse_patch, fpc.dm_patch, ncomp, 0); - - FillPatchSingleLevel(mf_crse_patch, IntVect(0), time, cmf, cghost, - ct, scomp, 0, ncomp, cgeom); - - MF mf_fine_patch(fpc.ba_fine_patch, fpc.dm_patch, ncomp, 0); - - Box fdomain_g = amrex::convert(fgeom.Domain(),typ); - for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) { - if (fgeom.isPeriodic(idim)) { - fdomain_g.grow(idim, nghost[idim]); - } else { - fdomain_g.grow(idim, nghost_outside_domain[idim]); - } - } - FillPatchInterp(mf_fine_patch, 0, mf_crse_patch, 0, - ncomp, IntVect(0), cgeom, fgeom, - fdomain_g, ratio, mapper, bcs, bcscomp); - - mf.ParallelCopy(mf_fine_patch, 0, dcomp, ncomp, IntVect{0}, nghost); - } - } - - FillPatchSingleLevel(mf, nghost, time, fmf, IntVect(0), ft, - scomp, dcomp, ncomp, fgeom); + PhysBCFunctUseCoarseGhost erfbc(*cmf[0], nghost, nghost_outside_domain, ratio, + mapper); + FillPatchTwoLevels(mf, nghost, time, cmf, ct, fmf, ft, scomp, dcomp, ncomp, + cgeom, fgeom, erfbc, 0, erfbc, 0, ratio, mapper, + bcs, bcscomp); } template diff --git a/Src/Base/AMReX_PhysBCFunct.H b/Src/Base/AMReX_PhysBCFunct.H index 2a9023b8c5..90e0537ff3 100644 --- a/Src/Base/AMReX_PhysBCFunct.H +++ b/Src/Base/AMReX_PhysBCFunct.H @@ -127,6 +127,9 @@ class PhysBCFunctUseCoarseGhost { public: + PhysBCFunctUseCoarseGhost (IntVect const& a_fp1_src_ghost) + : fp1_src_ghost(a_fp1_src_ghost) {} + template PhysBCFunctUseCoarseGhost (MF const& cmf, IntVect const& a_nghost, IntVect const& a_nghost_outside_domain, @@ -166,6 +169,8 @@ public: // This is the minimum number of ghost cells needed in coarse MF IntVect cghost; + + IntVect fp1_src_ghost; // Used to pass information into FillPatchSingleLevel }; template From d3783fb5e6296036aa2f079edcbb42b8b9888a20 Mon Sep 17 00:00:00 2001 From: Weiqun Zhang Date: Wed, 18 Sep 2024 16:37:18 -0700 Subject: [PATCH 6/6] fix warning --- Src/Base/AMReX_PhysBCFunct.H | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/Src/Base/AMReX_PhysBCFunct.H b/Src/Base/AMReX_PhysBCFunct.H index 90e0537ff3..c85c617a1f 100644 --- a/Src/Base/AMReX_PhysBCFunct.H +++ b/Src/Base/AMReX_PhysBCFunct.H @@ -134,7 +134,9 @@ public: PhysBCFunctUseCoarseGhost (MF const& cmf, IntVect const& a_nghost, IntVect const& a_nghost_outside_domain, IntVect const& ratio, Interp* mapper) - : nghost(a_nghost), nghost_outside_domain(a_nghost_outside_domain) + : nghost(a_nghost), + nghost_outside_domain(a_nghost_outside_domain), + cghost(cmf.nGrowVect()) { IndexType typ = cmf.ixType(); @@ -147,7 +149,6 @@ public: tmp2 = coarsener.doit(tmp); src_ghost_outside_domain = -tmp2.smallEnd(); - cghost = cmf.nGrowVect(); cghost.min(src_ghost); AMREX_ALWAYS_ASSERT(cghost.allGE(src_ghost_outside_domain)); }
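
Usage sketch (illustrative only, not part of the patches above). The following shows how a caller might use the new no-PhysBC overloads as they stand after PATCH 4/6, i.e. with the trailing bcs/bcscomp arguments. The names fill_fine_patch, fill_level0_patch, S_old, S_new, C_old, C_new, t_old, t_new and the ghost-cell counts are assumptions made for the example; cell_cons_interp is AMReX's standard cell-conservative interpolater. Because no boundary functor is passed, the caller must fill the domain ghost cells of the source data, and enough coarse ghost cells for the interpolation stencil, before these calls.

#include <AMReX_MultiFab.H>
#include <AMReX_FillPatchUtil.H>
#include <AMReX_Interpolater.H>

using namespace amrex;

// Fill dest (a fine-level MultiFab) at "time" from coarse data {C_old, C_new} and
// fine data {S_old, S_new}, interpolating in time on each level and in space from
// the coarse level where the fine level has no data.
void fill_fine_patch (MultiFab& dest, Real time,
                      MultiFab& S_old, MultiFab& S_new,   // fine level at t_old, t_new
                      MultiFab& C_old, MultiFab& C_new,   // coarse level at t_old, t_new
                      Real t_old, Real t_new,
                      const Geometry& fgeom, const Geometry& cgeom,
                      const IntVect& ratio, const Vector<BCRec>& bcs)
{
    const int ncomp = dest.nComp();
    const IntVect nghost = dest.nGrowVect();  // fine ghost cells inside the domain to fill
    const IntVect nghost_outside_domain(1);   // fine ghost cells outside the domain to fill

    Vector<MultiFab*> cmf{&C_old, &C_new};
    Vector<Real>      ct {t_old, t_new};
    Vector<MultiFab*> fmf{&S_old, &S_new};
    Vector<Real>      ft {t_old, t_new};

    // C_old/C_new must already carry enough filled ghost cells for the coarse/fine
    // interpolation stencil; S_old/S_new must have their domain ghost cells filled
    // by the application's own boundary routines before this call.
    FillPatchTwoLevels(dest, nghost, nghost_outside_domain, time,
                       cmf, ct, fmf, ft, 0, 0, ncomp,
                       cgeom, fgeom, ratio, &cell_cons_interp, bcs, 0);
}

// Same-level fill (e.g. on the coarsest level): time interpolation plus parallel
// copy / FillBoundary only. snghost is the number of ghost cells in S_old/S_new
// that already hold valid data; any remaining domain-boundary ghost cells of dest
// remain the caller's responsibility.
void fill_level0_patch (MultiFab& dest, Real time,
                        MultiFab& S_old, MultiFab& S_new, Real t_old, Real t_new,
                        const Geometry& geom)
{
    Vector<MultiFab*> smf{&S_old, &S_new};
    Vector<Real>      st {t_old, t_new};
    const IntVect snghost(0);
    FillPatchSingleLevel(dest, dest.nGrowVect(), time, smf, snghost, st,
                         0, 0, dest.nComp(), geom);
}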