TRIQS/mpi 1.3.0
C++ interface to MPI
ranges.hpp
1// Copyright (c) 2024 Simons Foundation
2//
3// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
6//
7// http://www.apache.org/licenses/LICENSE-2.0.txt
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
14//
15// Authors: Thomas Hahn, Alexander Hampel, Olivier Parcollet, Nils Wentzell
16
22#pragma once
23
24#include "./chunk.hpp"
25#include "./communicator.hpp"
26#include "./datatypes.hpp"
27#include "./environment.hpp"
28#include "./generic_communication.hpp"
29#include "./macros.hpp"
30#include "./utils.hpp"
31
32#include <itertools/itertools.hpp>
33#include <mpi.h>
34
35#include <algorithm>
36#include <ranges>
37#include <stdexcept>
38#include <vector>
39
40namespace mpi {
41
93 template <contiguous_sized_range R> void broadcast_range(R &&rg, communicator c = {}, int root = 0) { // NOLINT (ranges need not be forwarded)
94 // check the sizes of all ranges
95 using value_t = std::ranges::range_value_t<R>;
96 auto const size = std::ranges::size(rg);
97 EXPECTS_WITH_MESSAGE(all_equal(size, c), "Range sizes are not equal across all processes in mpi::broadcast_range");
98
99 // do nothing if the range is empty, if MPI is not initialized or if the communicator size is < 2
100 if (size == 0 || !has_env || c.size() < 2) return;
101
102 // broadcast the range
103 if constexpr (has_mpi_type<value_t>)
104 // make an MPI C library call for MPI compatible value types
105 check_mpi_call(MPI_Bcast(std::ranges::data(rg), size, mpi_type<value_t>::get(), root, c.get()), "MPI_Bcast");
106 else
107 // otherwise call the specialized mpi_broadcast for each element
108 for (auto &val : rg) broadcast(val, c, root);
109 }
110
154 template <contiguous_sized_range R>
155 void reduce_in_place_range(R &&rg, communicator c = {}, int root = 0, bool all = false, // NOLINT (ranges need not be forwarded)
156 MPI_Op op = MPI_SUM) {
157 // check the sizes of all ranges
158 using value_t = std::ranges::range_value_t<R>;
159 auto const size = std::ranges::size(rg);
160 EXPECTS_WITH_MESSAGE(all_equal(size, c), "Range sizes are not equal across all processes in mpi::reduce_in_place_range");
161
162 // do nothing if the range is empty, if MPI is not initialized or if the communicator size is < 2
163 if (size == 0 || !has_env || c.size() < 2) return;
164
165 // reduce the ranges
166 if constexpr (has_mpi_type<value_t>) {
167 // make an MPI C library call for MPI compatible value types
168 auto data = std::ranges::data(rg);
169 if (!all)
170 check_mpi_call(MPI_Reduce((c.rank() == root ? MPI_IN_PLACE : data), data, size, mpi_type<value_t>::get(), op, root, c.get()), "MPI_Reduce");
171 else
172 check_mpi_call(MPI_Allreduce(MPI_IN_PLACE, data, size, mpi_type<value_t>::get(), op, c.get()), "MPI_Allreduce");
173 } else {
174 // otherwise call the specialized mpi_reduce_in_place for each element
175 for (auto &val : rg) mpi_reduce_in_place(val, c, root, all, op);
176 }
177 }
178
225 template <contiguous_sized_range R1, contiguous_sized_range R2>
226 void reduce_range(R1 &&in_rg, R2 &&out_rg, communicator c = {}, int root = 0, bool all = false, // NOLINT (ranges need not be forwarded)
227 MPI_Op op = MPI_SUM) {
228 // check input and output ranges
229 auto const in_size = std::ranges::size(in_rg);
230 EXPECTS_WITH_MESSAGE(all_equal(in_size, c), "Input range sizes are not equal across all processes in mpi::reduce_range");
231 if (c.rank() == root || all) {
232 EXPECTS_WITH_MESSAGE(in_size == std::ranges::size(out_rg), "Input and output range sizes are not equal in mpi::reduce_range");
233 }
234
235 // do nothing if the input range is empty
236 if (in_size == 0) return;
237
238 // simply copy if there is no active MPI environment or if the communicator size is < 2
239 if (!has_env || c.size() < 2) {
240 std::ranges::copy(std::forward<R1>(in_rg), std::ranges::data(out_rg));
241 return;
242 }
243
244 // reduce the ranges
245 using in_value_t = std::ranges::range_value_t<R1>;
246 using out_value_t = std::ranges::range_value_t<R2>;
247 if constexpr (has_mpi_type<in_value_t> && std::same_as<in_value_t, out_value_t>) {
248 // make an MPI C library call for MPI compatible value types
249 auto const in_data = std::ranges::data(in_rg);
250 auto out_data = std::ranges::data(out_rg);
251 if (!all)
252 check_mpi_call(MPI_Reduce(in_data, out_data, in_size, mpi_type<in_value_t>::get(), op, root, c.get()), "MPI_Reduce");
253 else
254 check_mpi_call(MPI_Allreduce(in_data, out_data, in_size, mpi_type<in_value_t>::get(), op, c.get()), "MPI_Allreduce");
255 } else {
256 // otherwise call the specialized mpi_reduce for each element
257        // the size of the output range is arbitrary on non-receiving ranks, so we cannot use transform on them
258 if (c.rank() == root || all)
259 std::ranges::transform(std::forward<R1>(in_rg), std::ranges::data(out_rg), [&](auto const &val) { return reduce(val, c, root, all, op); });
260 else
261 // the assignment is needed in case a lazy object is returned
262 std::ranges::for_each(std::forward<R1>(in_rg), [&](auto const &val) { [[maybe_unused]] out_value_t ignore = reduce(val, c, root, all, op); });
263 }
264 }
265
315 template <contiguous_sized_range R1, contiguous_sized_range R2>
316 requires(std::same_as<std::ranges::range_value_t<R1>, std::ranges::range_value_t<R2>>)
317 void scatter_range(R1 &&in_rg, R2 &&out_rg, long in_size, communicator c = {}, int root = 0, // NOLINT (ranges need not be forwarded)
318 long chunk_size = 1) {
319 // check the sizes of the input and output ranges
320 if (c.rank() == root) {
321 EXPECTS_WITH_MESSAGE(in_size == std::ranges::size(in_rg), "Input range size not equal to provided size in mpi::scatter_range");
322 }
323 EXPECTS_WITH_MESSAGE(in_size == all_reduce(std::ranges::size(out_rg), c),
324 "Output range sizes don't add up to input range size in mpi::scatter_range");
325
326 // do nothing if the input range is empty
327 if (in_size == 0) return;
328
329 // simply copy if there is no active MPI environment or if the communicator size is < 2
330 if (!has_env || c.size() < 2) {
331 std::ranges::copy(std::forward<R1>(in_rg), std::ranges::data(out_rg));
332 return;
333 }
334
335 // check the size of the output range
336 int recvcount = static_cast<int>(chunk_length(in_size, c.size(), c.rank(), chunk_size));
337 EXPECTS_WITH_MESSAGE(recvcount == std::ranges::size(out_rg), "Output range size is incorrect in mpi::scatter_range");
338
339 // prepare arguments for the MPI call
340 auto sendcounts = std::vector<int>(c.size());
341 auto displs = std::vector<int>(c.size() + 1, 0);
342 for (int i = 0; i < c.size(); ++i) {
343 sendcounts[i] = static_cast<int>(chunk_length(in_size, c.size(), i, chunk_size));
344 displs[i + 1] = sendcounts[i] + displs[i];
345 }
346
347 // scatter the range
348 using in_value_t = std::ranges::range_value_t<R1>;
349 using out_value_t = std::ranges::range_value_t<R2>;
350      if constexpr (has_mpi_type<in_value_t> && has_mpi_type<out_value_t>) {
351        // make an MPI C library call for MPI compatible value types
352 auto const in_data = std::ranges::data(in_rg);
353 auto out_data = std::ranges::data(out_rg);
354 check_mpi_call(MPI_Scatterv(in_data, sendcounts.data(), displs.data(), mpi_type<in_value_t>::get(), out_data, recvcount,
355 mpi_type<out_value_t>::get(), root, c.get()),
356 "MPI_Scatterv");
357 } else {
358 // otherwise throw an exception
359 throw std::runtime_error{"Error in mpi::scatter_range: Types with no corresponding datatype can only be all-gathered"};
360 }
361 }
362
411 template <contiguous_sized_range R1, contiguous_sized_range R2>
412 void gather_range(R1 &&in_rg, R2 &&out_rg, long out_size, communicator c = {}, int root = 0, // NOLINT (ranges need not be forwarded)
413 bool all = false) {
414 // check the sizes of the input and output ranges
415 auto const in_size = std::ranges::size(in_rg);
416      EXPECTS_WITH_MESSAGE(out_size == all_reduce(in_size, c), "Input range sizes don't add up to output range size in mpi::gather_range");
417 if (c.rank() == root || all) {
418 EXPECTS_WITH_MESSAGE(out_size == std::ranges::size(out_rg), "Output range size is incorrect in mpi::gather_range");
419 }
420
421 // do nothing if the output range is empty
422 if (out_size == 0) return;
423
424 // simply copy if there is no active MPI environment or if the communicator size is < 2
425 if (!has_env || c.size() < 2) {
426 std::ranges::copy(std::forward<R1>(in_rg), std::ranges::data(out_rg));
427 return;
428 }
429
430 // prepare arguments for the MPI call
431 auto recvcounts = std::vector<int>(c.size());
432 auto displs = std::vector<int>(c.size() + 1, 0);
433 int sendcount = in_size;
434 if (!all)
435 check_mpi_call(MPI_Gather(&sendcount, 1, mpi_type<int>::get(), recvcounts.data(), 1, mpi_type<int>::get(), root, c.get()), "MPI_Gather");
436 else
437 check_mpi_call(MPI_Allgather(&sendcount, 1, mpi_type<int>::get(), recvcounts.data(), 1, mpi_type<int>::get(), c.get()), "MPI_Allgather");
438 for (int i = 0; i < c.size(); ++i) displs[i + 1] = recvcounts[i] + displs[i];
439
440 // gather the ranges
441 using in_value_t = std::ranges::range_value_t<R1>;
442 using out_value_t = std::ranges::range_value_t<R2>;
443      if constexpr (has_mpi_type<in_value_t> && has_mpi_type<out_value_t>) {
444        // make an MPI C library call for MPI compatible value types
445 auto const in_data = std::ranges::data(in_rg);
446 auto out_data = std::ranges::data(out_rg);
447 if (!all)
448 check_mpi_call(MPI_Gatherv(in_data, sendcount, mpi_type<in_value_t>::get(), out_data, recvcounts.data(), displs.data(),
449 mpi_type<out_value_t>::get(), root, c.get()),
450 "MPI_Gatherv");
451 else
452 check_mpi_call(MPI_Allgatherv(in_data, sendcount, mpi_type<in_value_t>::get(), out_data, recvcounts.data(), displs.data(),
453 mpi_type<out_value_t>::get(), c.get()),
454 "MPI_Allgatherv");
455 } else {
456 if (all) {
457          // if all == true, each process broadcasts its elements to all other ranks
458 for (int i = 0; i < c.size(); ++i) {
459 auto view = std::views::drop(out_rg, displs[i]) | std::views::take(displs[i + 1] - displs[i]);
460 if (c.rank() == i) std::ranges::copy(in_rg, std::ranges::begin(view));
461 broadcast_range(view, c, i);
462 }
463 } else {
464 // otherwise throw an exception
465 throw std::runtime_error{"Error in mpi::gather_range: Types with no corresponding datatype can only be all-gathered"};
466 }
467 }
468 }
469
472} // namespace mpi
Provides utilities to distribute a range across MPI processes.
C++ wrapper around MPI_Comm providing various convenience functions.
Provides a C++ wrapper class for an MPI_Comm object.
Provides utilities to map C++ datatypes to MPI datatypes.
Provides an MPI environment for initializing and finalizing an MPI program.
Provides generic implementations for a subset of collective MPI communications (broadcast,...
void reduce_in_place_range(R &&rg, communicator c={}, int root=0, bool all=false, MPI_Op op=MPI_SUM)
Implementation of an in-place MPI reduce for an mpi::contiguous_sized_range object.
Definition ranges.hpp:155
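
A minimal usage sketch (not part of the generated documentation above): it assumes the umbrella header <mpi/mpi.hpp>, that mpi::environment(argc, argv) initializes and finalizes MPI, and that a default-constructed mpi::communicator refers to MPI_COMM_WORLD.

  #include <mpi/mpi.hpp> // assumed umbrella header of TRIQS/mpi
  #include <iostream>
  #include <vector>

  int main(int argc, char *argv[]) {
    mpi::environment env(argc, argv); // assumed RAII wrapper around MPI_Init/MPI_Finalize
    mpi::communicator world;          // assumed to wrap MPI_COMM_WORLD

    // every rank must provide a range of the same size
    std::vector<int> v{world.rank(), 2 * world.rank()};

    // element-wise sum across all ranks; the result lands in v on the root (rank 0) only
    mpi::reduce_in_place_range(v, world);

    // with all = true every rank would receive the result (MPI_Allreduce under the hood):
    // mpi::reduce_in_place_range(v, world, 0, true);

    if (world.rank() == 0) std::cout << "v = {" << v[0] << ", " << v[1] << "}\n";
  }
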
void reduce_range(R1 &&in_rg, R2 &&out_rg, communicator c={}, int root=0, bool all=false, MPI_Op op=MPI_SUM)
Implementation of an MPI reduce for an mpi::contiguous_sized_range.
Definition ranges.hpp:226
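
A sketch of reduce_range with separate input and output ranges, assuming an already initialized MPI environment; sum_to_root is a hypothetical helper, not part of the library.

  #include <mpi/mpi.hpp> // assumed umbrella header
  #include <vector>

  // element-wise sum of `in` over all ranks; every rank must pass a range of the same size,
  // and the result is meaningful on the root only
  std::vector<double> sum_to_root(std::vector<double> const &in, mpi::communicator world) {
    // the output range only needs the correct size on the receiving rank(s)
    std::vector<double> out(world.rank() == 0 ? in.size() : 0);
    mpi::reduce_range(in, out, world);             // root = 0, op = MPI_SUM
    // mpi::reduce_range(in, out, world, 0, true); // all = true: out must have size in.size() on every rank
    return out;
  }
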
void scatter_range(R1 &&in_rg, R2 &&out_rg, long in_size, communicator c={}, int root=0, long chunk_size=1)
Implementation of an MPI scatter for an mpi::contiguous_sized_range.
Definition ranges.hpp:317
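
A sketch of scatter_range that distributes a vector living on the root across all ranks, sized per rank with chunk_length (see chunk.hpp below); scatter_from_root is a hypothetical helper and an initialized MPI environment is assumed.

  #include <mpi/mpi.hpp> // assumed umbrella header
  #include <vector>

  // distributes n values from the root across all ranks, as evenly as possible
  std::vector<double> scatter_from_root(long n, mpi::communicator world) {
    std::vector<double> in;
    if (world.rank() == 0) in.assign(n, 1.0); // the data only has to exist on the root

    // each rank receives the chunk assigned to it by chunk_length
    std::vector<double> out(mpi::chunk_length(n, world.size(), world.rank()));
    mpi::scatter_range(in, out, n, world); // root = 0, chunk_size = 1
    return out;
  }
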
decltype(auto) reduce(T &&x, communicator c={}, int root=0, bool all=false, MPI_Op op=MPI_SUM)
Generic MPI reduce.
void broadcast_range(R &&rg, communicator c={}, int root=0)
Implementation of an MPI broadcast for an mpi::contiguous_sized_range object.
Definition ranges.hpp:93
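
A sketch of broadcast_range, assuming an initialized MPI environment; sync_with_root is a hypothetical helper.

  #include <mpi/mpi.hpp> // assumed umbrella header
  #include <vector>

  // overwrites v on every rank with the contents of the root's v
  void sync_with_root(std::vector<double> &v, mpi::communicator world) {
    // precondition: v already has the same size on all ranks
    mpi::broadcast_range(v, world, 0);
  }
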
void gather_range(R1 &&in_rg, R2 &&out_rg, long out_size, communicator c={}, int root=0, bool all=false)
Implementation of an MPI gather for an mpi::contiguous_sized_range.
Definition ranges.hpp:412
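
A sketch of gather_range that concatenates the local vectors of all ranks (in rank order) on the root, assuming an initialized MPI environment; gather_to_root is a hypothetical helper.

  #include <mpi/mpi.hpp> // assumed umbrella header
  #include <vector>

  std::vector<int> gather_to_root(std::vector<int> const &local, mpi::communicator world) {
    long const total = mpi::all_reduce(local.size(), world); // total number of elements across ranks
    std::vector<int> out(world.rank() == 0 ? total : 0);     // only the root needs the full size
    mpi::gather_range(local, out, total, world);             // root = 0, all = false
    // mpi::gather_range(local, out, total, world, 0, true); // all-gather: out needs size total on every rank
    return out;
  }
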
bool all_equal(T const &x, communicator c={})
Checks if a given object is equal across all ranks in the given communicator.
decltype(auto) all_reduce(T &&x, communicator c={}, MPI_Op op=MPI_SUM)
Generic MPI all-reduce.
void broadcast(T &&x, communicator c={}, int root=0)
Generic MPI broadcast.
void mpi_reduce_in_place(std::array< T, N > &arr, communicator c={}, int root=0, bool all=false, MPI_Op op=MPI_SUM)
Implementation of an in-place MPI reduce for a std::array.
Definition array.hpp:67
static const bool has_env
Boolean variable that is true, if one of the environment variables OMPI_COMM_WORLD_RANK,...
constexpr bool has_mpi_type
Type trait to check if a type T has a corresponding MPI datatype, i.e. if mpi::mpi_type has been spec...
Definition datatypes.hpp:89
long chunk_length(long end, int nranges, int i, long min_size=1)
Get the length of the ith subrange after splitting the integer range [0, end) as evenly as possible a...
Definition chunk.hpp:50
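
A small illustration of chunk_length, which scatter_range and the sketches above use to size per-rank chunks; it is plain arithmetic and needs no MPI initialization (the umbrella header <mpi/mpi.hpp> is an assumption).

  #include <mpi/mpi.hpp> // assumed umbrella header
  #include <iostream>

  int main() {
    long const end = 10;   // split the integer range [0, 10) ...
    int const nranges = 4; // ... into 4 subranges
    long sum = 0;
    for (int i = 0; i < nranges; ++i) {
      long const len = mpi::chunk_length(end, nranges, i); // default min_size = 1
      sum += len;
      std::cout << "subrange " << i << " has length " << len << "\n";
    }
    std::cout << "total = " << sum << "\n"; // the lengths add up to `end`
  }
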
void check_mpi_call(int errcode, const std::string &mpi_routine)
Check the success of an MPI call.
Definition utils.hpp:72
Macros used in the mpi library.
Provides general utilities related to MPI.