Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ cmake_policy(VERSION ${CMAKE_VERSION})

# ############
# Define Project
project(mpi VERSION 1.3.0 LANGUAGES CXX)
project(mpi VERSION 2.0.0 LANGUAGES CXX)
get_directory_property(IS_SUBPROJECT PARENT_DIRECTORY)

# Get the git hash & print status
Expand Down
25 changes: 16 additions & 9 deletions c++/mpi/chunk.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -32,41 +32,48 @@
namespace mpi {

/**
* @ingroup utilities
* @addtogroup utilities
* @{
*/

/**
* @brief Get the length of the i<sup>th</sup> subrange after splitting the integer range `[0, end)` as evenly as
* possible across `n` subranges.
*
* @details The optional parameter `min_size` can be used to first divide the range into equal parts of size
* @details The optional parameter `min_size` can be used to first divide the range into equal parts of size
* `min_size` before distributing them as evenly as possible across the number of specified subranges.
*
* It is expected that `min_size > 0` and that `min_size` is a divisor of `end`.
*
* @param end End of the integer range `[0, end)`.
* @param nranges Number of subranges.
* @param n Number of subranges.
* @param i Index of the subrange of interest.
* @param min_size Minimum size of the subranges.
* @return Length of the i<sup>th</sup> subrange.
*/
[[nodiscard]] inline long chunk_length(long end, int nranges, int i, long min_size = 1) {
[[nodiscard]] inline long chunk_length(long end, int n, int i, long min_size = 1) {
EXPECTS_WITH_MESSAGE(min_size > 0 && end % min_size == 0, "Error in mpi::chunk_length: min_size must be a divisor of end");
auto [node_begin, node_end] = itertools::chunk_range(0, end / min_size, nranges, i);
auto [node_begin, node_end] = itertools::chunk_range(0, end / min_size, n, i);
return (node_end - node_begin) * min_size;
}

/**
* @ingroup utilities
* @brief Divide a given range as evenly as possible across the MPI processes in a communicator and get the subrange
* assigned to the calling process.
* @brief Divide a given range as evenly as possible across the MPI processes in a communicator.
*
* @details It calculates the subrange assigned to the calling process based on its rank in the given communicator and
* returns it as a slice of the original range.
*
* @tparam R Range type.
* @param rg Range to divide.
* @param c mpi::communicator.
* @return An itertools::sliced range assigned to the calling process.
* @return An `itertools::sliced` range assigned to the calling process.
*/
// Assign the calling process its share of the range: compute the range's size, chunk the index
// space [0, size) across the communicator, and return the slice belonging to this rank.
template <typename R> [[nodiscard]] auto chunk(R &&rg, communicator c = {}) {
  auto const sz           = itertools::distance(std::cbegin(rg), std::cend(rg));
  auto const [first, last] = itertools::chunk_range(0, sz, c.size(), c.rank());
  return itertools::slice(std::forward<R>(rg), first, last);
}

/** @} */

} // namespace mpi
18 changes: 10 additions & 8 deletions c++/mpi/communicator.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -38,21 +38,23 @@ namespace mpi {
* @ingroup mpi_essentials
* @brief C++ wrapper around `MPI_Comm` providing various convenience functions.
*
* @details It stores an `MPI_Comm` object as its only member which by default is set to `MPI_COMM_WORLD`. The
* underlying `MPI_Comm` object is not freed when a communicator goes out of scope. It is the user's responsibility to
* do so, in case it is needed. Note that copying the communicator simply copies the `MPI_Comm` object, without
* calling `MPI_Comm_dup`.
*
* All functions that make direct calls to the MPI C library throw an exception in case the call fails.
* @details It stores an `MPI_Comm` object as its only member which by default is set to `MPI_COMM_WORLD`.
*
* The underlying `MPI_Comm` object is not freed when a communicator goes out of scope. It is the user's
* responsibility to do so, in case it is needed.
*
* All functions that make direct calls to the MPI C API check their success with mpi::check_mpi_call.
*
* @note Copying the communicator simply copies the `MPI_Comm` object, without calling `MPI_Comm_dup`. Use duplicate()
* or split() if a new communicator is needed.
*/
class communicator {
public:
/// Construct a communicator with `MPI_COMM_WORLD`.
communicator() = default;

/**
* @brief Construct a communicator with a given `MPI_Comm` object.
* @details The `MPI_Comm` object is copied without calling `MPI_Comm_dup`.
* @brief Construct a communicator by wrapping a given `MPI_Comm` object.
* @param c `MPI_Comm` object to wrap.
*/
communicator(MPI_Comm c) : comm_(c) {} // stores the handle as-is; no MPI_Comm_dup is performed
Expand Down
28 changes: 13 additions & 15 deletions c++/mpi/datatypes.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,9 @@ namespace mpi {
* @brief Map C++ datatypes to the corresponding MPI datatypes.
*
* @details C++ types which have a corresponding MPI datatype should specialize this struct. It is assumed that it
* has a static member function `get` which returns the `MPI_Datatype` object for a given C++ type. For example:
* has a static member function `get` which returns the `MPI_Datatype` object for a given C++ type.
*
* For example:
*
* @code{.cpp}
* template <> struct mpi_type<int> {
Expand All @@ -58,7 +60,7 @@ namespace mpi {
template <typename T> struct mpi_type {};

#define D(T, MPI_TY) \
/** @brief Specialization of mpi_type for T. */ \
/** @brief Specialization of mpi_type for `T`. */ \
template <> struct mpi_type<T> { \
[[nodiscard]] static MPI_Datatype get() noexcept { return MPI_TY; } \
}
Expand Down Expand Up @@ -94,15 +96,10 @@ namespace mpi {
/**
* @brief Type trait to check if a type `T` has a corresponding MPI datatype, i.e. if mpi::mpi_type has been
* specialized.
* @tparam `T` Type to be checked.
*/
template <typename T, typename = void> constexpr bool has_mpi_type = false;

/**
* @brief Specialization of mpi::has_mpi_type for types which have a corresponding MPI datatype.
*
* @tparam T Type to be checked.
*/
template <typename T> constexpr bool has_mpi_type<T, std::void_t<decltype(mpi_type<T>::get())>> = true;
template <typename T> constexpr bool has_mpi_type = requires { mpi_type<T>::get(); };

namespace detail {

Expand Down Expand Up @@ -132,7 +129,7 @@ namespace mpi {
* @details The tuple element types must have corresponding MPI datatypes, i.e. they must have mpi::mpi_type
specializations. It uses `MPI_Type_create_struct` to create a new datatype consisting of the tuple element types.
*
* It throws an exception in case a call to the MPI C library fails.
* The success of MPI calls is checked with mpi::check_mpi_call.
*
* @tparam Ts Tuple element types.
* @param tup Tuple object.
Expand Down Expand Up @@ -164,7 +161,7 @@ namespace mpi {
}

/**
* @brief Specialization of mpi::mpi_type for std::tuple.
* @brief Specialization of mpi::mpi_type for `std::tuple`.
* @tparam Ts Tuple element types.
*/
template <typename... Ts> struct mpi_type<std::tuple<Ts...>> {
Expand All @@ -177,8 +174,8 @@ namespace mpi {
/**
* @brief Create an `MPI_Datatype` from some struct.
*
* @details It is assumed that there is a free function `tie_data` which returns a tuple containing the data
* members of the given type. The intended use is as a base class for a specialization of mpi::mpi_type:
* @details It is assumed that there is a free function `tie_data` which returns a tuple containing the data members
* of the given type:
*
* @code{.cpp}
* // type to use for MPI communication
Expand Down Expand Up @@ -234,15 +231,16 @@ namespace mpi {
/**
* @brief Create an `MPI_Datatype` from a serializable type.
*
* @details It is assumed that the type has a member function `serialize`
* which feeds all its class members into an archive using the `operator&`.
* @details It is assumed that the type has a member function `serialize` (and, for receiving, `deserialize`) which
* feeds all its class members into an archive using the `operator&`.
*
* @code{.cpp}
* // type to use for MPI communication
* struct foo {
* double x;
* int y;
* void serialize(auto& ar) const { ar & x & y; }
* void deserialize(auto& ar) { ar & x & y; }
* };
* @endcode
*
Expand Down
33 changes: 22 additions & 11 deletions c++/mpi/environment.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -35,21 +35,32 @@ namespace mpi {
*/

/**
* @brief Check if MPI has been initialized.
* @details It throws an exception in case a call to the MPI C library fails.
* @brief Check if MPI has been initialized by calling `MPI_Initialized`.
* @return True if `MPI_Init` has been called, false otherwise.
*/
// Query the MPI library whether MPI_Init has already been called.
[[nodiscard]] inline bool is_initialized() noexcept {
  int initialized = 0;
  MPI_Initialized(&initialized);
  return initialized != 0;
}

/**
* @brief Boolean variable that is true, if one of the environment variables `OMPI_COMM_WORLD_RANK`, `PMI_RANK`,
* `CRAY_MPICH_VERSION` or `FORCE_MPI_INIT` is set, false otherwise.
* @brief Check if MPI has been finalized by calling `MPI_Finalized`.
* @return True if `MPI_Finalize` has been called, false otherwise.
*/
// Query the MPI library whether MPI_Finalize has already been called.
[[nodiscard]] inline bool is_finalized() noexcept {
  int finalized = 0;
  MPI_Finalized(&finalized);
  return finalized != 0;
}

/**
* @brief Boolean variable that checks if there is an active MPI runtime environment.
*
* @details The environment variables are set, when a program is executed with `mpirun` or `mpiexec`.
* @details It is true if one of the environment variables `OMPI_COMM_WORLD_RANK`, `PMI_RANK`, `CRAY_MPICH_VERSION` or
* `FORCE_MPI_INIT` is set, false otherwise.
*
* @note The environment variables are set when a program is executed with `mpirun` or `mpiexec`.
*/
static const bool has_env = []() {
if (std::getenv("OMPI_COMM_WORLD_RANK") != nullptr or std::getenv("PMI_RANK") != nullptr or std::getenv("CRAY_MPICH_VERSION") != nullptr
Expand All @@ -64,15 +75,15 @@ namespace mpi {
*
* @details Calls `MPI_Init` upon construction and `MPI_Finalize` upon destruction i.e. when the environment object
* goes out of scope. If mpi::has_env is false, this struct does nothing.
*
* All functions that make direct calls to the MPI C library throw an exception in case the call fails.
*/
struct environment {
/**
* @brief Construct a new mpi environment object by calling `MPI_Init`.
*
* @details Checks first if the program is run with an MPI runtime environment and if it has not been initialized
* before to avoid errors.
*
* Direct calls the MPI C API are checked for success with mpi::check_mpi_call.
*
* @param argc Number of command line arguments.
* @param argv Command line arguments.
Expand All @@ -84,11 +95,11 @@ namespace mpi {
/**
* @brief Destroy the mpi environment object by calling `MPI_Finalize`.
*
* @details Checks first if the program is run with an MPI runtime environment. Called automatically when the
* environment object goes out of scope.
* @details Checks first if the program is run with an MPI runtime environment and if it has not been finalized
* before to avoid errors.
*/
// Shut MPI down, but only if we run under an MPI launcher and MPI_Finalize
// has not been called already (calling it twice is an error).
~environment() {
  if (has_env and not is_finalized()) MPI_Finalize();
}
};

Expand Down
Loading
Loading