diff --git a/src/Base/CMakeLists.txt b/src/Base/CMakeLists.txt
index be0f50aa..fc06664b 100644
--- a/src/Base/CMakeLists.txt
+++ b/src/Base/CMakeLists.txt
@@ -33,5 +33,6 @@ foreach(D IN LISTS AMReX_SPACEDIM)
       Utility.cpp
       Vector.cpp
       Version.cpp
+      MPMD.cpp
     )
 endforeach()
diff --git a/src/Base/MPMD.cpp b/src/Base/MPMD.cpp
new file mode 100644
index 00000000..155bea34
--- /dev/null
+++ b/src/Base/MPMD.cpp
@@ -0,0 +1,180 @@
+/* Copyright 2021-2022 The AMReX Community
+ *
+ * Authors: Axel Huebl
+ * License: BSD-3-Clause-LBNL
+ */
+#include "pyAMReX.H"
+#include <AMReX_BoxArray.H>
+#include <AMReX_DistributionMapping.H>
+#include <AMReX_MPMD.H>
+#include <AMReX_MultiFab.H>
+#include <AMReX_Vector.H>
+#include <string>
+
+#ifdef AMREX_USE_MPI
+#include <mpi.h>
+
+/** mpi4py communicator wrapper
+ *
+ * refs:
+ * - https://github.com/mpi4py/mpi4py/blob/3.0.0/src/mpi4py/libmpi.pxd#L35-L36
+ * - https://github.com/mpi4py/mpi4py/blob/3.0.0/src/mpi4py/MPI.pxd#L100-L105
+ * - installed: include/mpi4py/mpi4py.MPI.h
+ */
+struct pyAMReX_PyMPICommObject
+{
+    PyObject_HEAD MPI_Comm ob_mpi;
+    unsigned int flags;
+};
+using pyAMReX_PyMPIIntracommObject = pyAMReX_PyMPICommObject;
+
+
+void init_MPMD(py::module &m) {
+    using namespace amrex;
+
+    // Several functions here are copied from AMReX.cpp
+    m.def("MPMD_Initialize_without_split",
+          [](const py::list args) {
+              Vector<std::string> cargs{"amrex"};
+              Vector<char*> argv;
+
+              // Populate the "command line"
+              for (const auto& v: args)
+                  cargs.push_back(v.cast<std::string>());
+              for (auto& v: cargs)
+                  argv.push_back(&v[0]);
+              int argc = argv.size();
+
+              // note: +1 since there is an extra char-string array element,
+              // that ANSI C requires to be a simple NULL entry
+              // https://stackoverflow.com/a/39096006/2719194
+              argv.push_back(NULL);
+              char** tmp = argv.data();
+              MPMD::Initialize_without_split(argc, tmp);
+          });
+
+    // This is AMReX::Initialize when MPMD exists
+    m.def("initialize_when_MPMD",
+          [](const py::list args, py::object &app_comm_py) {
+              Vector<std::string> cargs{"amrex"};
+              Vector<char*> argv;
+
+              // Populate the "command line"
+              for (const auto& v: args)
+                  cargs.push_back(v.cast<std::string>());
+              for (auto& v: cargs)
+                  argv.push_back(&v[0]);
+              int argc = argv.size();
+
+              // note: +1 since there is an extra char-string array element,
+              // that ANSI C requires to be a simple NULL entry
+              // https://stackoverflow.com/a/39096006/2719194
+              argv.push_back(NULL);
+              char** tmp = argv.data();
+
+              const bool build_parm_parse = (cargs.size() > 1);
+
+              //! TODO perform mpi4py import test and check min-version
+              //!      careful: double MPI_Init risk? only import mpi4py.MPI?
+              //!      required C-API init? probably just checks:
+              //! refs:
+              //! -
+              //! https://bitbucket.org/mpi4py/mpi4py/src/3.0.0/demo/wrap-c/helloworld.c
+              //! - installed: include/mpi4py/mpi4py.MPI_api.h
+              //auto m_mpi4py = py::module::import("mpi4py");
+              //amrex::ignore_unused(m_mpi4py);
+
+              if (app_comm_py.ptr() == Py_None)
+                  throw std::runtime_error(
+                      "MPMD: MPI communicator cannot be None.");
+              if (app_comm_py.ptr() == nullptr)
+                  throw std::runtime_error(
+                      "MPMD: MPI communicator is a nullptr.");
+
+              // check type string to see if this is mpi4py
+              //   __str__ (pretty)
+              //   __repr__ (unambiguous)
+              //   mpi4py: <mpi4py.MPI.Intracomm object at 0x...>
+              //   pyMPI:  ... (TODO)
+              py::str const comm_pystr = py::repr(app_comm_py);
+              std::string const comm_str = comm_pystr.cast<std::string>();
+              if (comm_str.substr(0, 12) != std::string("<mpi4py.MPI."))
+                  throw std::runtime_error(
+                      "MPMD: comm is not an mpi4py communicator: " + comm_str);
+              // only checks same layout, e.g. an `int` in `PyObject` could pass this
+              if (!py::isinstance< py::class_<pyAMReX_PyMPIIntracommObject> >(
+                      app_comm_py.get_type()))
+                  // TODO add mpi4py version from above import check to error
+                  // message
+                  throw std::runtime_error(
+                      "MPMD: comm has unexpected type layout in " +
+                      comm_str +
+                      " (Mismatched MPI at compile vs. runtime? "
+                      "Breaking mpi4py release?)");
+
+              // todo other possible implementations:
+              // - pyMPI (inactive since 2008?): import mpi; mpi.WORLD
+
+              // reimplementation of mpi4py's:
+              // MPI_Comm* mpiCommPtr = PyMPIComm_Get(app_comm_py.ptr());
+              MPI_Comm *mpiCommPtr =
+                  &((pyAMReX_PyMPIIntracommObject *)(app_comm_py.ptr()))->ob_mpi;
+
+              if (PyErr_Occurred())
+                  throw std::runtime_error(
+                      "MPMD: MPI communicator access error.");
+              if (mpiCommPtr == nullptr)
+              {
+                  throw std::runtime_error(
+                      "MPMD: MPI communicator cast failed. "
+                      "(Mismatched MPI at compile vs. runtime?)");
+              }
+
+              return Initialize(argc, tmp, build_parm_parse, *mpiCommPtr);
+          }, py::return_value_policy::reference);
+
+    constexpr auto run_gc = []() {
+        // explicitly run the garbage collector, so deleted objects
+        // get freed.
+        // This is a convenience helper/bandage to make working with Python
+        // garbage collectors in various implementations easier.
+        // https://github.com/AMReX-Codes/pyamrex/issues/81
+        auto m_gc = py::module::import("gc");
+        auto collect = m_gc.attr("collect");
+        collect();
+    };
+    m.def("MPMD_Finalize",
+          [run_gc]() {
+              run_gc();
+              MPMD::Finalize();
+          });
+    m.def("MPMD_Initialized", &MPMD::Initialized);
+    m.def("MPMD_MyProc", &MPMD::MyProc);
+    m.def("MPMD_NProcs", &MPMD::NProcs);
+    m.def("MPMD_AppNum", &MPMD::AppNum);
+    m.def("MPMD_MyProgId", &MPMD::MyProgId);
+
+    // Binding MPMD::Copier class
+    py::class_< MPMD::Copier >(m, "MPMD_Copier")
+        //! Construct an MPMD::Copier without BoxArray and DistributionMapping
+        .def(py::init <bool> ())
+        //! Construct an MPMD::Copier with BoxArray and DistributionMapping
+        .def(py::init< BoxArray const&, DistributionMapping const&, bool >(),
+             py::arg("ba"), py::arg("dm"), py::arg("send_ba")=false)
+        // Copier function to send data
+        .def("send", &MPMD::Copier::send)
+        // Copier function to receive data
+        .def("recv", &MPMD::Copier::recv)
+        // Copier's BoxArray
+        .def("box_array", &MPMD::Copier::boxArray,
+             py::return_value_policy::reference_internal)
+        // Copier's DistributionMapping
+        .def("distribution_map", &MPMD::Copier::DistributionMap,
+             py::return_value_policy::reference_internal)
+    ;
+
+}
+
+#endif
diff --git a/src/pyAMReX.cpp b/src/pyAMReX.cpp
index cd997c8f..e952e64b 100644
--- a/src/pyAMReX.cpp
+++ b/src/pyAMReX.cpp
@@ -38,7 +38,9 @@ void init_PODVector(py::module &);
 void init_Utility(py::module &);
 void init_Vector(py::module &);
 void init_Version(py::module &);
-
+#ifdef AMREX_USE_MPI
+void init_MPMD(py::module &);
+#endif
 
 #if AMREX_SPACEDIM == 1
 PYBIND11_MODULE(amrex_1d_pybind, m) {
@@ -108,6 +110,9 @@ PYBIND11_MODULE(amrex_3d_pybind, m) {
     init_ParticleContainer(m);
     init_AmrMesh(m);
+#ifdef AMREX_USE_MPI
+    init_MPMD(m);
+#endif
 
     // Wrappers around standalone functions
     init_PlotFileUtil(m);
     init_Utility(m);
diff --git a/tests/test_MPMD/test_1/GNUmakefile b/tests/test_MPMD/test_1/GNUmakefile
new file mode 100644
index 00000000..a68d0980
--- /dev/null
+++ b/tests/test_MPMD/test_1/GNUmakefile
@@ -0,0 +1,20 @@
+AMREX_HOME ?= ../../../../amrex
+
+DEBUG = TRUE
+
+DIM = 3
+
+COMP = gcc
+
+USE_MPI = TRUE
+
+USE_OMP = FALSE
+USE_CUDA = FALSE
+USE_HIP = FALSE
+
+include $(AMREX_HOME)/Tools/GNUMake/Make.defs
+
+include ./Make.package
+include $(AMREX_HOME)/Src/Base/Make.package
+
+include $(AMREX_HOME)/Tools/GNUMake/Make.rules
diff --git a/tests/test_MPMD/test_1/Make.package b/tests/test_MPMD/test_1/Make.package
new file mode 100644
index 00000000..6b4b865e
--- /dev/null
+++ b/tests/test_MPMD/test_1/Make.package
@@ -0,0 +1 @@
+CEXE_sources += main.cpp
diff --git a/tests/test_MPMD/test_1/main.cpp b/tests/test_MPMD/test_1/main.cpp
new file mode 100644
index 00000000..24e4b81b
--- /dev/null
+++ b/tests/test_MPMD/test_1/main.cpp
@@ -0,0 +1,72 @@
+
+#include <AMReX.H>
+#include <AMReX_Print.H>
+#include <AMReX_MPMD.H>
+#include <AMReX_MultiFab.H>
+#include <AMReX_Geometry.H>
+#include <AMReX_PlotFileUtil.H>
+
+int main(int argc, char* argv[])
+{
+    // Initialize amrex::MPMD to establish communication across the two apps
+    MPI_Comm comm = amrex::MPMD::Initialize(argc, argv);
+    amrex::Initialize(argc, argv, true, comm);
+    {
+        amrex::Print() << "Hello world from AMReX version " << amrex::Version() << "\n";
+        // Number of data components at each grid point in the MultiFab
+        int ncomp = 2;
+        // how many grid cells in each direction over the problem domain
+        int n_cell = 32;
+        // how many grid cells are allowed in each direction over each box
+        int max_grid_size = 16;
+        // BoxArray -- Abstract Domain Setup
+        // integer vector indicating the lower coordinate bounds
+        amrex::IntVect dom_lo(0,0,0);
+        // integer vector indicating the upper coordinate bounds
+        amrex::IntVect dom_hi(n_cell-1, n_cell-1, n_cell-1);
+        // box containing the coordinates of this domain
+        amrex::Box domain(dom_lo, dom_hi);
+        // will contain a list of boxes describing the problem domain
+        amrex::BoxArray ba(domain);
+        // chop the single grid into many small boxes
+        ba.maxSize(max_grid_size);
+        // Distribution Mapping
+        amrex::DistributionMapping dm(ba);
+        // Create an MPMD Copier that
+        // sends the BoxArray information to the other (python) application
+        auto copr = amrex::MPMD::Copier(ba, dm, true);
+        // Define MultiFab
+        amrex::MultiFab mf(ba, dm, ncomp, 0);
+        // Geometry -- Physical Properties for data on our domain
+        amrex::RealBox real_box({0., 0., 0.}, {1., 1., 1.});
+        amrex::Geometry geom(domain, &real_box);
+        // Calculate Cell Sizes
+        amrex::GpuArray<amrex::Real,3> dx = geom.CellSizeArray(); // dx[0] = dx, dx[1] = dy, dx[2] = dz
+        // Fill only the first component of the MultiFab
+        for (amrex::MFIter mfi(mf); mfi.isValid(); ++mfi) {
+            const amrex::Box& bx = mfi.validbox();
+            const amrex::Array4<amrex::Real>& mf_array = mf.array(mfi);
+
+            amrex::ParallelFor(bx, [=] AMREX_GPU_DEVICE(int i, int j, int k){
+
+                amrex::Real x = (i+0.5) * dx[0];
+                amrex::Real y = (j+0.5) * dx[1];
+                amrex::Real z = (k+0.5) * dx[2];
+                amrex::Real r_squared = ((x-0.5)*(x-0.5)+(y-0.5)*(y-0.5)+(z-0.5)*(z-0.5))/0.01;
+
+                mf_array(i,j,k,0) = 1.0 + std::exp(-r_squared);
+
+            });
+        }
+        // Send ONLY the first populated MultiFab component to the other app
+        copr.send(mf, 0, 1);
+        // Receive ONLY the second MultiFab component from the other app
+        copr.recv(mf, 1, 1);
+        // Plot MultiFab Data
+        WriteSingleLevelPlotfile("plt_cpp_001", mf, {"comp0","comp1"}, geom, 0., 0);
+
+    }
+    amrex::Finalize();
+    amrex::MPMD::Finalize();
+
+}
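The Python counterpart below mirrors these Copier calls component by component: copr.send(mf, 0, 1) here pairs with copr.recv(mf, 0, 1) there, and copr.recv(mf, 1, 1) here pairs with copr.send(mf, 1, 1) there, the arguments in each case being the MultiFab, the first component, and the number of components to transfer. Condensed to the new bindings only, the sequence the Python side runs is roughly the following sketch; it assumes a 3D, MPI-enabled build of the amrex.space3d module and an installed mpi4py, and main.py below is the full version:

    from mpi4py import MPI
    import amrex.space3d as amr

    amr.MPMD_Initialize_without_split([])   # start MPI via AMReX MPMD, no communicator split yet
    # split MPI_COMM_WORLD by application, using the MPMD queries bound in src/Base/MPMD.cpp
    app_comm = MPI.COMM_WORLD.Split(amr.MPMD_AppNum(), amr.MPMD_MyProc())
    amr.initialize_when_MPMD([], app_comm)  # AMReX initialization on this app's communicator

    copier = amr.MPMD_Copier(True)          # receive the BoxArray/DistributionMapping sent by the C++ app
    mf = amr.MultiFab(copier.box_array(), copier.distribution_map(), 2, 0)
    copier.recv(mf, 0, 1)                   # matches copr.send(mf, 0, 1) in main.cpp
    copier.send(mf, 1, 1)                   # matches copr.recv(mf, 1, 1) in main.cpp

    amr.finalize()
    amr.MPMD_Finalize()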
diff --git a/tests/test_MPMD/test_1/main.py b/tests/test_MPMD/test_1/main.py
new file mode 100644
index 00000000..33eb7440
--- /dev/null
+++ b/tests/test_MPMD/test_1/main.py
@@ -0,0 +1,102 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+# Copyright 2023 The AMReX Community
+#
+# This file is part of AMReX.
+#
+# License: BSD-3-Clause-LBNL
+# Authors: Bhargav Sriram Siddani, Revathi Jambunathan, Edoardo Zoni, Olga Shapoval, David Grote, Axel Huebl
+
+from mpi4py import MPI
+
+import amrex.space3d as amr
+
+
+def load_cupy():
+    if amr.Config.have_gpu:
+        try:
+            import cupy as cp
+
+            xp = cp
+            amr.Print("Note: found and will use cupy")
+        except ImportError:
+            amr.Print(
+                "Warning: GPU found but cupy not available! Trying managed memory in numpy..."
+            )
+            import numpy as np
+
+            xp = np
+        if amr.Config.gpu_backend == "SYCL":
+            amr.Print("Warning: SYCL GPU backend not yet implemented for Python")
+            import numpy as np
+
+            xp = np
+
+    else:
+        import numpy as np
+
+        xp = np
+        amr.Print("Note: found and will use numpy")
+    return xp
+
+
+# Initialize amrex::MPMD to establish communication across the two apps
+# However, leverage MPMD_Initialize_without_split
+# so that the communication split can be performed using mpi4py.MPI
+amr.MPMD_Initialize_without_split([])
+# Leverage MPI from mpi4py to perform the communication split
+app_comm_py = MPI.COMM_WORLD.Split(amr.MPMD_AppNum(), amr.MPMD_MyProc())
+# Initialize AMReX
+amr.initialize_when_MPMD([], app_comm_py)
+
+# CPU/GPU logic
+xp = load_cupy()
+amr.Print(f"Hello world from pyAMReX version {amr.__version__}\n")
+# Create an MPMD Copier that gets the BoxArray information from the other (C++) app
+copr = amr.MPMD_Copier(True)
+# Number of data components at each grid point in the MultiFab
+ncomp = 2
+# Define a MultiFab using the created MPMD_Copier
+mf = amr.MultiFab(copr.box_array(), copr.distribution_map(), ncomp, 0)
+mf.set_val(0.0)
+
+# Receive ONLY the FIRST MultiFab component populated in the other (C++) app
+copr.recv(mf, 0, 1)
+
+# Fill the second MultiFab component based on the first component
+for mfi in mf:
+    # Preferred way to fill array using fast ranged operations:
+    # - xp.array is indexed in reversed order (n,z,y,x),
+    #   .T creates a view into the AMReX (x,y,z,n) order
+    # - indices are local (range from 0 to box size)
+    mf_array = xp.array(mf.array(mfi), copy=False).T
+
+    mf_array[:, :, :, 1] = 10.0 * mf_array[:, :, :, 0]
+
+# Send ONLY the second MultiFab component to the other (C++) app
+copr.send(mf, 1, 1)
+
+# Plot MultiFab data
+# HERE THE DOMAIN INFORMATION IS ONLY BEING UTILIZED TO SAVE A PLOTFILE
+# How many grid cells in each direction over the problem domain
+n_cell = 32
+# Integer vector indicating the lower coordinate bounds
+dom_lo = amr.IntVect(0, 0, 0)
+# Integer vector indicating the upper coordinate bounds
+dom_hi = amr.IntVect(n_cell - 1, n_cell - 1, n_cell - 1)
+# Box containing the coordinates of this domain
+domain = amr.Box(dom_lo, dom_hi)
+# Geometry: physical properties for data on our domain
+real_box = amr.RealBox([0.0, 0.0, 0.0], [1.0, 1.0, 1.0])
+coord = 0  # Cartesian
+is_per = [0, 0, 0]  # periodicity
+geom = amr.Geometry(domain, real_box, coord, is_per)
+plotfile = amr.concatenate(root="plt_py_", num=1, mindigits=3)
+varnames = amr.Vector_string(["comp0", "comp1"])
+amr.write_single_level_plotfile(plotfile, mf, varnames, geom, time=0.0, level_step=0)
+
+# Finalize AMReX
+amr.finalize()
+# Finalize AMReX::MPMD
+amr.MPMD_Finalize()
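The two programs have to be started as a single MPMD job so that amrex::MPMD can pair them up. With an Open MPI or MPICH style launcher this would look something like `mpiexec -n 8 ./main3d.gnu.DEBUG.MPI.ex : -n 4 python main.py`, where the executable name is what the GNUmakefile above (DIM = 3, COMP = gcc, DEBUG = TRUE, USE_MPI = TRUE) would typically produce and the rank counts are only an example.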