#ifndef GKO_PUBLIC_CORE_BASE_MPI_HPP_
#define GKO_PUBLIC_CORE_BASE_MPI_HPP_

#include <ginkgo/config.hpp>
#include <ginkgo/core/base/exception.hpp>
#include <ginkgo/core/base/exception_helpers.hpp>
#include <ginkgo/core/base/executor.hpp>
#include <ginkgo/core/base/types.hpp>
#include <ginkgo/core/base/utils_helper.hpp>

namespace experimental {

#if GINKGO_HAVE_GPU_AWARE_MPI

#define GKO_REGISTER_MPI_TYPE(input_type, mpi_type)          \
    template <>                                              \
    struct type_impl<input_type> {                           \
        static MPI_Datatype get_type() { return mpi_type; }  \
    }
GKO_REGISTER_MPI_TYPE(char, MPI_CHAR);
GKO_REGISTER_MPI_TYPE(unsigned char, MPI_UNSIGNED_CHAR);
GKO_REGISTER_MPI_TYPE(unsigned, MPI_UNSIGNED);
GKO_REGISTER_MPI_TYPE(int, MPI_INT);
GKO_REGISTER_MPI_TYPE(unsigned short, MPI_UNSIGNED_SHORT);
GKO_REGISTER_MPI_TYPE(unsigned long, MPI_UNSIGNED_LONG);
GKO_REGISTER_MPI_TYPE(long, MPI_LONG);
GKO_REGISTER_MPI_TYPE(long long, MPI_LONG_LONG_INT);
GKO_REGISTER_MPI_TYPE(unsigned long long, MPI_UNSIGNED_LONG_LONG);
GKO_REGISTER_MPI_TYPE(float, MPI_FLOAT);
GKO_REGISTER_MPI_TYPE(double, MPI_DOUBLE);
GKO_REGISTER_MPI_TYPE(long double, MPI_LONG_DOUBLE);
GKO_REGISTER_MPI_TYPE(std::complex<float>, MPI_C_FLOAT_COMPLEX);
GKO_REGISTER_MPI_TYPE(std::complex<double>, MPI_C_DOUBLE_COMPLEX);
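// Usage sketch (illustrative, not part of mpi.hpp): the type_impl
// specializations registered above map C++ types to MPI datatypes, so generic
// code can look up the matching MPI_Datatype without spelling it out.
// `mpi_type_of` is a hypothetical helper, not a Ginkgo function.
template <typename T>
MPI_Datatype mpi_type_of()
{
    return gko::experimental::mpi::type_impl<T>::get_type();
}
// mpi_type_of<double>() yields MPI_DOUBLE, mpi_type_of<int>() yields MPI_INT,
// mpi_type_of<std::complex<float>>() yields MPI_C_FLOAT_COMPLEX, and so on.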
        GKO_ASSERT_NO_MPI_ERRORS(MPI_Type_contiguous(count, old_type, &type_));
        GKO_ASSERT_NO_MPI_ERRORS(MPI_Type_commit(&type_));

        *this = std::move(other);

        if (this != &other) {
            this->type_ = std::exchange(other.type_, MPI_DATATYPE_NULL);
        }

        if (type_ != MPI_DATATYPE_NULL) {
            MPI_Type_free(&type_);
        }

    MPI_Datatype get() const { return type_; }
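// Usage sketch (illustrative, not part of mpi.hpp): wrap several elements of a
// registered type into one contiguous MPI datatype; the wrapper commits the
// type on construction and frees it automatically when it goes out of scope.
void contiguous_type_example()
{
    // a datatype describing 4 consecutive doubles
    gko::experimental::mpi::contiguous_type block(
        4, gko::experimental::mpi::type_impl<double>::get_type());
    MPI_Datatype handle = block.get();  // usable in any MPI call
    // ... use `handle`, e.g. in MPI_Send / MPI_Recv ...
}   // MPI_Type_free is called here by ~contiguous_type()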
enum class thread_type {
    serialized = MPI_THREAD_SERIALIZED,
    funneled = MPI_THREAD_FUNNELED,
    single = MPI_THREAD_SINGLE,
    multiple = MPI_THREAD_MULTIPLE
};

    static bool is_finalized()
    {
        int flag = 0;
        GKO_ASSERT_NO_MPI_ERRORS(MPI_Finalized(&flag));
        return flag;
    }

    static bool is_initialized()
    {
        int flag = 0;
        GKO_ASSERT_NO_MPI_ERRORS(MPI_Initialized(&flag));
        return flag;
    }

    environment(int& argc, char**& argv,
                const thread_type thread_t = thread_type::serialized)
    {
        this->required_thread_support_ = static_cast<int>(thread_t);
        GKO_ASSERT_NO_MPI_ERRORS(
            MPI_Init_thread(&argc, &argv, this->required_thread_support_,
                            &(this->provided_thread_support_)));
    }

    int required_thread_support_;
    int provided_thread_support_;
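// Usage sketch (illustrative, not part of mpi.hpp): the environment RAII class
// calls MPI_Init_thread in its constructor and MPI_Finalize in its destructor,
// so a typical driver only needs to keep it alive for the duration of main().
int main(int argc, char* argv[])
{
    gko::experimental::mpi::environment env(argc, argv);  // MPI_Init_thread
    gko::experimental::mpi::communicator world(MPI_COMM_WORLD);
    // ... distributed work using `world` ...
    return 0;
}   // MPI_Finalize runs here when `env` is destroyed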
    using pointer = MPI_Comm*;

    void operator()(pointer comm) const
    {
        GKO_ASSERT(*comm != MPI_COMM_NULL);
        GKO_ASSERT_NO_MPI_ERRORS(MPI_Comm_free(comm));
    }

    MPI_Status* get() { return &this->status_; }

    template <typename T>
    int get_count(const T* data) const

        this->req_ = std::exchange(o.req_, MPI_REQUEST_NULL);

        if (req_ != MPI_REQUEST_NULL) {
            if (MPI_Request_free(&req_) != MPI_SUCCESS) {
                // error handling elided in this excerpt
            }
        }

    MPI_Request* get() { return &this->req_; }

        GKO_ASSERT_NO_MPI_ERRORS(MPI_Wait(&req_, status.get()));

inline std::vector<status> wait_all(std::vector<request>& req)
{
    std::vector<status> stat;
    for (std::size_t i = 0; i < req.size(); ++i) {
        stat.emplace_back(req[i].wait());
    }
    return stat;
}
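// Usage sketch (illustrative, not part of mpi.hpp): non-blocking operations
// such as communicator::i_send / i_recv return `request` objects; collecting
// them in a vector and calling wait_all blocks until all of them complete and
// returns the corresponding statuses. Needs <vector> and <memory>.
void exchange_example(const gko::experimental::mpi::communicator& comm,
                      std::shared_ptr<const gko::Executor> exec,
                      const double* send_buf, double* recv_buf, int count,
                      int neighbor)
{
    std::vector<gko::experimental::mpi::request> reqs;
    reqs.push_back(comm.i_send(exec, send_buf, count, neighbor, /*tag=*/0));
    reqs.push_back(comm.i_recv(exec, recv_buf, count, neighbor, /*tag=*/0));
    auto statuses = gko::experimental::mpi::wait_all(reqs);  // blocks here
}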
    communicator(const MPI_Comm& comm, bool force_host_buffer = false)
        : comm_(), force_host_buffer_(force_host_buffer)
    {
        this->comm_.reset(new MPI_Comm(comm));
    }

    communicator(const MPI_Comm& comm, int color, int key)
    {
        MPI_Comm comm_out;
        GKO_ASSERT_NO_MPI_ERRORS(MPI_Comm_split(comm, color, key, &comm_out));
        this->comm_.reset(new MPI_Comm(comm_out), comm_deleter{});
    }

    communicator(const communicator& comm, int color, int key)
    {
        MPI_Comm comm_out;
        GKO_ASSERT_NO_MPI_ERRORS(
            MPI_Comm_split(comm.get(), color, key, &comm_out));
        this->comm_.reset(new MPI_Comm(comm_out), comm_deleter{});
    }

    const MPI_Comm& get() const { return *(this->comm_.get()); }

    bool force_host_buffer() const { return force_host_buffer_; }

    int size() const { return get_num_ranks(); }

    int rank() const { return get_my_rank(); }

    bool operator==(const communicator& rhs) const
    {
        return compare(rhs.get());
    }

    void synchronize() const
    {
        GKO_ASSERT_NO_MPI_ERRORS(MPI_Barrier(this->get()));
    }
    template <typename SendType>
    void send(std::shared_ptr<const Executor> exec, const SendType* send_buffer,
              const int send_count, const int destination_rank,
              const int send_tag) const
    {
        auto guard = exec->get_scoped_device_id_guard();
        GKO_ASSERT_NO_MPI_ERRORS(
            MPI_Send(send_buffer, send_count, type_impl<SendType>::get_type(),
                     destination_rank, send_tag, this->get()));
    }

    template <typename SendType>
    request i_send(std::shared_ptr<const Executor> exec,
                   const SendType* send_buffer, const int send_count,
                   const int destination_rank, const int send_tag) const
    {
        auto guard = exec->get_scoped_device_id_guard();
        request req;
        GKO_ASSERT_NO_MPI_ERRORS(
            MPI_Isend(send_buffer, send_count, type_impl<SendType>::get_type(),
                      destination_rank, send_tag, this->get(), req.get()));
        return req;
    }

    template <typename RecvType>
    status recv(std::shared_ptr<const Executor> exec, RecvType* recv_buffer,
                const int recv_count, const int source_rank,
                const int recv_tag) const
    {
        auto guard = exec->get_scoped_device_id_guard();
        status st;
        GKO_ASSERT_NO_MPI_ERRORS(
            MPI_Recv(recv_buffer, recv_count, type_impl<RecvType>::get_type(),
                     source_rank, recv_tag, this->get(), st.get()));
        return st;
    }
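// Usage sketch (illustrative, not part of mpi.hpp): a blocking point-to-point
// exchange; rank 0 sends a value and rank 1 receives it. `exec` can be any
// Ginkgo executor; with GPU-aware MPI the buffers may also live in device
// memory.
void ping_example(const gko::experimental::mpi::communicator& comm,
                  std::shared_ptr<const gko::Executor> exec)
{
    double value = 3.14;
    if (comm.rank() == 0) {
        comm.send(exec, &value, 1, /*destination_rank=*/1, /*send_tag=*/0);
    } else if (comm.rank() == 1) {
        comm.recv(exec, &value, 1, /*source_rank=*/0, /*recv_tag=*/0);
    }
}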
    template <typename RecvType>
    request i_recv(std::shared_ptr<const Executor> exec, RecvType* recv_buffer,
                   const int recv_count, const int source_rank,
                   const int recv_tag) const
    {
        auto guard = exec->get_scoped_device_id_guard();
        request req;
        GKO_ASSERT_NO_MPI_ERRORS(
            MPI_Irecv(recv_buffer, recv_count, type_impl<RecvType>::get_type(),
                      source_rank, recv_tag, this->get(), req.get()));
        return req;
    }

    template <typename BroadcastType>
    void broadcast(std::shared_ptr<const Executor> exec, BroadcastType* buffer,
                   int count, int root_rank) const
    {
        auto guard = exec->get_scoped_device_id_guard();
        GKO_ASSERT_NO_MPI_ERRORS(MPI_Bcast(buffer, count,
                                           type_impl<BroadcastType>::get_type(),
                                           root_rank, this->get()));
    }

    template <typename BroadcastType>
    request i_broadcast(std::shared_ptr<const Executor> exec,
                        BroadcastType* buffer, int count, int root_rank) const
    {
        auto guard = exec->get_scoped_device_id_guard();
        request req;
        GKO_ASSERT_NO_MPI_ERRORS(
            MPI_Ibcast(buffer, count, type_impl<BroadcastType>::get_type(),
                       root_rank, this->get(), req.get()));
        return req;
    }
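// Usage sketch (illustrative, not part of mpi.hpp): rank 0 fills a small
// buffer and broadcasts it to every rank in the communicator. Needs <array>.
void broadcast_example(const gko::experimental::mpi::communicator& comm,
                       std::shared_ptr<const gko::Executor> exec)
{
    std::array<int, 3> data{};
    if (comm.rank() == 0) {
        data = {1, 2, 3};
    }
    comm.broadcast(exec, data.data(), static_cast<int>(data.size()),
                   /*root_rank=*/0);
    // after the call every rank holds {1, 2, 3}
}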
    template <typename ReduceType>
    void reduce(std::shared_ptr<const Executor> exec,
                const ReduceType* send_buffer, ReduceType* recv_buffer,
                int count, MPI_Op operation, int root_rank) const
    {
        auto guard = exec->get_scoped_device_id_guard();
        GKO_ASSERT_NO_MPI_ERRORS(MPI_Reduce(send_buffer, recv_buffer, count,
                                            type_impl<ReduceType>::get_type(),
                                            operation, root_rank, this->get()));
    }

    template <typename ReduceType>
    request i_reduce(std::shared_ptr<const Executor> exec,
                     const ReduceType* send_buffer, ReduceType* recv_buffer,
                     int count, MPI_Op operation, int root_rank) const
    {
        auto guard = exec->get_scoped_device_id_guard();
        request req;
        GKO_ASSERT_NO_MPI_ERRORS(MPI_Ireduce(
            send_buffer, recv_buffer, count, type_impl<ReduceType>::get_type(),
            operation, root_rank, this->get(), req.get()));
        return req;
    }

    template <typename ReduceType>
    void all_reduce(std::shared_ptr<const Executor> exec,
                    ReduceType* recv_buffer, int count, MPI_Op operation) const
    {
        auto guard = exec->get_scoped_device_id_guard();
        GKO_ASSERT_NO_MPI_ERRORS(MPI_Allreduce(
            MPI_IN_PLACE, recv_buffer, count, type_impl<ReduceType>::get_type(),
            operation, this->get()));
    }

    template <typename ReduceType>
    request i_all_reduce(std::shared_ptr<const Executor> exec,
                         ReduceType* recv_buffer, int count,
                         MPI_Op operation) const
    {
        auto guard = exec->get_scoped_device_id_guard();
        request req;
        GKO_ASSERT_NO_MPI_ERRORS(MPI_Iallreduce(
            MPI_IN_PLACE, recv_buffer, count, type_impl<ReduceType>::get_type(),
            operation, this->get(), req.get()));
        return req;
    }

    template <typename ReduceType>
    void all_reduce(std::shared_ptr<const Executor> exec,
                    const ReduceType* send_buffer, ReduceType* recv_buffer,
                    int count, MPI_Op operation) const
    {
        auto guard = exec->get_scoped_device_id_guard();
        GKO_ASSERT_NO_MPI_ERRORS(MPI_Allreduce(
            send_buffer, recv_buffer, count, type_impl<ReduceType>::get_type(),
            operation, this->get()));
    }

    template <typename ReduceType>
    request i_all_reduce(std::shared_ptr<const Executor> exec,
                         const ReduceType* send_buffer, ReduceType* recv_buffer,
                         int count, MPI_Op operation) const
    {
        auto guard = exec->get_scoped_device_id_guard();
        request req;
        GKO_ASSERT_NO_MPI_ERRORS(MPI_Iallreduce(
            send_buffer, recv_buffer, count, type_impl<ReduceType>::get_type(),
            operation, this->get(), req.get()));
        return req;
    }
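// Usage sketch (illustrative, not part of mpi.hpp): sum a local value over all
// ranks; every rank obtains the global sum.
double global_sum_example(const gko::experimental::mpi::communicator& comm,
                          std::shared_ptr<const gko::Executor> exec,
                          double local_value)
{
    double global_value = local_value;
    // in-place variant: recv_buffer holds the input and is overwritten
    comm.all_reduce(exec, &global_value, 1, MPI_SUM);
    return global_value;
}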
    template <typename SendType, typename RecvType>
    void gather(std::shared_ptr<const Executor> exec,
                const SendType* send_buffer, const int send_count,
                RecvType* recv_buffer, const int recv_count,
                int root_rank) const
    {
        auto guard = exec->get_scoped_device_id_guard();
        GKO_ASSERT_NO_MPI_ERRORS(
            MPI_Gather(send_buffer, send_count, type_impl<SendType>::get_type(),
                       recv_buffer, recv_count, type_impl<RecvType>::get_type(),
                       root_rank, this->get()));
    }

    template <typename SendType, typename RecvType>
    request i_gather(std::shared_ptr<const Executor> exec,
                     const SendType* send_buffer, const int send_count,
                     RecvType* recv_buffer, const int recv_count,
                     int root_rank) const
    {
        auto guard = exec->get_scoped_device_id_guard();
        request req;
        GKO_ASSERT_NO_MPI_ERRORS(MPI_Igather(
            send_buffer, send_count, type_impl<SendType>::get_type(),
            recv_buffer, recv_count, type_impl<RecvType>::get_type(), root_rank,
            this->get(), req.get()));
        return req;
    }

    template <typename SendType, typename RecvType>
    void gather_v(std::shared_ptr<const Executor> exec,
                  const SendType* send_buffer, const int send_count,
                  RecvType* recv_buffer, const int* recv_counts,
                  const int* displacements, int root_rank) const
    {
        auto guard = exec->get_scoped_device_id_guard();
        GKO_ASSERT_NO_MPI_ERRORS(MPI_Gatherv(
            send_buffer, send_count, type_impl<SendType>::get_type(),
            recv_buffer, recv_counts, displacements,
            type_impl<RecvType>::get_type(), root_rank, this->get()));
    }

    template <typename SendType, typename RecvType>
    request i_gather_v(std::shared_ptr<const Executor> exec,
                       const SendType* send_buffer, const int send_count,
                       RecvType* recv_buffer, const int* recv_counts,
                       const int* displacements, int root_rank) const
    {
        auto guard = exec->get_scoped_device_id_guard();
        request req;
        GKO_ASSERT_NO_MPI_ERRORS(MPI_Igatherv(
            send_buffer, send_count, type_impl<SendType>::get_type(),
            recv_buffer, recv_counts, displacements,
            type_impl<RecvType>::get_type(), root_rank, this->get(),
            req.get()));
        return req;
    }

    template <typename SendType, typename RecvType>
    void all_gather(std::shared_ptr<const Executor> exec,
                    const SendType* send_buffer, const int send_count,
                    RecvType* recv_buffer, const int recv_count) const
    {
        auto guard = exec->get_scoped_device_id_guard();
        GKO_ASSERT_NO_MPI_ERRORS(MPI_Allgather(
            send_buffer, send_count, type_impl<SendType>::get_type(),
            recv_buffer, recv_count, type_impl<RecvType>::get_type(),
            this->get()));
    }

    template <typename SendType, typename RecvType>
    request i_all_gather(std::shared_ptr<const Executor> exec,
                         const SendType* send_buffer, const int send_count,
                         RecvType* recv_buffer, const int recv_count) const
    {
        auto guard = exec->get_scoped_device_id_guard();
        request req;
        GKO_ASSERT_NO_MPI_ERRORS(MPI_Iallgather(
            send_buffer, send_count, type_impl<SendType>::get_type(),
            recv_buffer, recv_count, type_impl<RecvType>::get_type(),
            this->get(), req.get()));
        return req;
    }
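// Usage sketch (illustrative, not part of mpi.hpp): every rank contributes one
// value and receives the values of all ranks in rank order. Needs <vector>.
void all_gather_example(const gko::experimental::mpi::communicator& comm,
                        std::shared_ptr<const gko::Executor> exec)
{
    int my_value = comm.rank() * 10;
    std::vector<int> all_values(comm.size());
    comm.all_gather(exec, &my_value, 1, all_values.data(), 1);
    // all_values == {0, 10, 20, ...} on every rank
}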
    template <typename SendType, typename RecvType>
    void scatter(std::shared_ptr<const Executor> exec,
                 const SendType* send_buffer, const int send_count,
                 RecvType* recv_buffer, const int recv_count,
                 int root_rank) const
    {
        auto guard = exec->get_scoped_device_id_guard();
        GKO_ASSERT_NO_MPI_ERRORS(MPI_Scatter(
            send_buffer, send_count, type_impl<SendType>::get_type(),
            recv_buffer, recv_count, type_impl<RecvType>::get_type(), root_rank,
            this->get()));
    }

    template <typename SendType, typename RecvType>
    request i_scatter(std::shared_ptr<const Executor> exec,
                      const SendType* send_buffer, const int send_count,
                      RecvType* recv_buffer, const int recv_count,
                      int root_rank) const
    {
        auto guard = exec->get_scoped_device_id_guard();
        request req;
        GKO_ASSERT_NO_MPI_ERRORS(MPI_Iscatter(
            send_buffer, send_count, type_impl<SendType>::get_type(),
            recv_buffer, recv_count, type_impl<RecvType>::get_type(), root_rank,
            this->get(), req.get()));
        return req;
    }

    template <typename SendType, typename RecvType>
    void scatter_v(std::shared_ptr<const Executor> exec,
                   const SendType* send_buffer, const int* send_counts,
                   const int* displacements, RecvType* recv_buffer,
                   const int recv_count, int root_rank) const
    {
        auto guard = exec->get_scoped_device_id_guard();
        GKO_ASSERT_NO_MPI_ERRORS(MPI_Scatterv(
            send_buffer, send_counts, displacements,
            type_impl<SendType>::get_type(), recv_buffer, recv_count,
            type_impl<RecvType>::get_type(), root_rank, this->get()));
    }

    template <typename SendType, typename RecvType>
    request i_scatter_v(std::shared_ptr<const Executor> exec,
                        const SendType* send_buffer, const int* send_counts,
                        const int* displacements, RecvType* recv_buffer,
                        const int recv_count, int root_rank) const
    {
        auto guard = exec->get_scoped_device_id_guard();
        request req;
        GKO_ASSERT_NO_MPI_ERRORS(
            MPI_Iscatterv(send_buffer, send_counts, displacements,
                          type_impl<SendType>::get_type(), recv_buffer,
                          recv_count, type_impl<RecvType>::get_type(),
                          root_rank, this->get(), req.get()));
        return req;
    }
    template <typename RecvType>
    void all_to_all(std::shared_ptr<const Executor> exec, RecvType* recv_buffer,
                    const int recv_count) const
    {
        auto guard = exec->get_scoped_device_id_guard();
        GKO_ASSERT_NO_MPI_ERRORS(MPI_Alltoall(
            MPI_IN_PLACE, recv_count, type_impl<RecvType>::get_type(),
            recv_buffer, recv_count, type_impl<RecvType>::get_type(),
            this->get()));
    }

    template <typename RecvType>
    request i_all_to_all(std::shared_ptr<const Executor> exec,
                         RecvType* recv_buffer, const int recv_count) const
    {
        auto guard = exec->get_scoped_device_id_guard();
        request req;
        GKO_ASSERT_NO_MPI_ERRORS(MPI_Ialltoall(
            MPI_IN_PLACE, recv_count, type_impl<RecvType>::get_type(),
            recv_buffer, recv_count, type_impl<RecvType>::get_type(),
            this->get(), req.get()));
        return req;
    }

    template <typename SendType, typename RecvType>
    void all_to_all(std::shared_ptr<const Executor> exec,
                    const SendType* send_buffer, const int send_count,
                    RecvType* recv_buffer, const int recv_count) const
    {
        auto guard = exec->get_scoped_device_id_guard();
        GKO_ASSERT_NO_MPI_ERRORS(MPI_Alltoall(
            send_buffer, send_count, type_impl<SendType>::get_type(),
            recv_buffer, recv_count, type_impl<RecvType>::get_type(),
            this->get()));
    }

    template <typename SendType, typename RecvType>
    request i_all_to_all(std::shared_ptr<const Executor> exec,
                         const SendType* send_buffer, const int send_count,
                         RecvType* recv_buffer, const int recv_count) const
    {
        auto guard = exec->get_scoped_device_id_guard();
        request req;
        GKO_ASSERT_NO_MPI_ERRORS(MPI_Ialltoall(
            send_buffer, send_count, type_impl<SendType>::get_type(),
            recv_buffer, recv_count, type_impl<RecvType>::get_type(),
            this->get(), req.get()));
        return req;
    }
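// Usage sketch (illustrative, not part of mpi.hpp): each rank sends one entry
// to every rank and receives one entry from every rank, i.e. a transpose of
// the per-rank data layout. Needs <vector>.
void all_to_all_example(const gko::experimental::mpi::communicator& comm,
                        std::shared_ptr<const gko::Executor> exec)
{
    std::vector<int> send(comm.size(), comm.rank());
    std::vector<int> recv(comm.size());
    comm.all_to_all(exec, send.data(), 1, recv.data(), 1);
    // recv[i] == i on every rank
}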
    template <typename SendType, typename RecvType>
    void all_to_all_v(std::shared_ptr<const Executor> exec,
                      const SendType* send_buffer, const int* send_counts,
                      const int* send_offsets, RecvType* recv_buffer,
                      const int* recv_counts, const int* recv_offsets) const
    {
        this->all_to_all_v(std::move(exec), send_buffer, send_counts,
                           send_offsets, type_impl<SendType>::get_type(),
                           recv_buffer, recv_counts, recv_offsets,
                           type_impl<RecvType>::get_type());
    }

    void all_to_all_v(std::shared_ptr<const Executor> exec,
                      const void* send_buffer, const int* send_counts,
                      const int* send_offsets, MPI_Datatype send_type,
                      void* recv_buffer, const int* recv_counts,
                      const int* recv_offsets, MPI_Datatype recv_type) const
    {
        auto guard = exec->get_scoped_device_id_guard();
        GKO_ASSERT_NO_MPI_ERRORS(MPI_Alltoallv(
            send_buffer, send_counts, send_offsets, send_type, recv_buffer,
            recv_counts, recv_offsets, recv_type, this->get()));
    }

    request i_all_to_all_v(std::shared_ptr<const Executor> exec,
                           const void* send_buffer, const int* send_counts,
                           const int* send_offsets, MPI_Datatype send_type,
                           void* recv_buffer, const int* recv_counts,
                           const int* recv_offsets,
                           MPI_Datatype recv_type) const
    {
        auto guard = exec->get_scoped_device_id_guard();
        request req;
        GKO_ASSERT_NO_MPI_ERRORS(MPI_Ialltoallv(
            send_buffer, send_counts, send_offsets, send_type, recv_buffer,
            recv_counts, recv_offsets, recv_type, this->get(), req.get()));
        return req;
    }
    template <typename SendType, typename RecvType>
    request i_all_to_all_v(std::shared_ptr<const Executor> exec,
                           const SendType* send_buffer, const int* send_counts,
                           const int* send_offsets, RecvType* recv_buffer,
                           const int* recv_counts,
                           const int* recv_offsets) const
    {
        return this->i_all_to_all_v(
            std::move(exec), send_buffer, send_counts, send_offsets,
            type_impl<SendType>::get_type(), recv_buffer, recv_counts,
            recv_offsets, type_impl<RecvType>::get_type());
    }

    template <typename ScanType>
    void scan(std::shared_ptr<const Executor> exec, const ScanType* send_buffer,
              ScanType* recv_buffer, int count, MPI_Op operation) const
    {
        auto guard = exec->get_scoped_device_id_guard();
        GKO_ASSERT_NO_MPI_ERRORS(MPI_Scan(send_buffer, recv_buffer, count,
                                          type_impl<ScanType>::get_type(),
                                          operation, this->get()));
    }

    template <typename ScanType>
    request i_scan(std::shared_ptr<const Executor> exec,
                   const ScanType* send_buffer, ScanType* recv_buffer,
                   int count, MPI_Op operation) const
    {
        auto guard = exec->get_scoped_device_id_guard();
        request req;
        GKO_ASSERT_NO_MPI_ERRORS(MPI_Iscan(send_buffer, recv_buffer, count,
                                           type_impl<ScanType>::get_type(),
                                           operation, this->get(), req.get()));
        return req;
    }
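// Usage sketch (illustrative, not part of mpi.hpp): an inclusive prefix sum
// over the ranks; rank r receives the sum of the contributions of ranks 0..r.
void prefix_sum_example(const gko::experimental::mpi::communicator& comm,
                        std::shared_ptr<const gko::Executor> exec)
{
    int local = 1;
    int prefix = 0;
    comm.scan(exec, &local, &prefix, 1, MPI_SUM);
    // prefix == comm.rank() + 1 (MPI_Scan is an inclusive scan)
}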
    std::shared_ptr<MPI_Comm> comm_;
    bool force_host_buffer_;

    int get_my_rank() const
    {
        int my_rank = 0;
        GKO_ASSERT_NO_MPI_ERRORS(MPI_Comm_rank(get(), &my_rank));
        return my_rank;
    }

    int get_node_local_rank() const
    {
        MPI_Comm local_comm;
        int rank;
        GKO_ASSERT_NO_MPI_ERRORS(MPI_Comm_split_type(
            this->get(), MPI_COMM_TYPE_SHARED, 0, MPI_INFO_NULL, &local_comm));
        GKO_ASSERT_NO_MPI_ERRORS(MPI_Comm_rank(local_comm, &rank));
        MPI_Comm_free(&local_comm);
        return rank;
    }

    int get_num_ranks() const
    {
        int size = 0;
        GKO_ASSERT_NO_MPI_ERRORS(MPI_Comm_size(this->get(), &size));
        return size;
    }

    bool compare(const MPI_Comm& other) const
    {
        int flag = 0;
        GKO_ASSERT_NO_MPI_ERRORS(MPI_Comm_compare(get(), other, &flag));
        return flag == MPI_IDENT;
    }
template <typename ValueType>
class window {
    enum class create_type { allocate = 1, create = 2, dynamic_create = 3 };

        window_ = std::exchange(other.window_, MPI_WIN_NULL);

    window(std::shared_ptr<const Executor> exec, ValueType* base, int num_elems,
           const communicator& comm, const int disp_unit = sizeof(ValueType),
           MPI_Info input_info = MPI_INFO_NULL,
           create_type c_type = create_type::create)
    {
        auto guard = exec->get_scoped_device_id_guard();
        unsigned size = num_elems * sizeof(ValueType);
        if (c_type == create_type::create) {
            GKO_ASSERT_NO_MPI_ERRORS(MPI_Win_create(
                base, size, disp_unit, input_info, comm.get(), &this->window_));
        } else if (c_type == create_type::dynamic_create) {
            GKO_ASSERT_NO_MPI_ERRORS(
                MPI_Win_create_dynamic(input_info, comm.get(), &this->window_));
        } else if (c_type == create_type::allocate) {
            GKO_ASSERT_NO_MPI_ERRORS(MPI_Win_allocate(
                size, disp_unit, input_info, comm.get(), base, &this->window_));
        } else {
            GKO_NOT_IMPLEMENTED;
        }
    }
        GKO_ASSERT_NO_MPI_ERRORS(MPI_Win_fence(assert, this->window_));

    void lock(int rank, lock_type lock_t = lock_type::shared,
              int assert = 0) const
    {
        if (lock_t == lock_type::shared) {
            GKO_ASSERT_NO_MPI_ERRORS(
                MPI_Win_lock(MPI_LOCK_SHARED, rank, assert, this->window_));
        } else if (lock_t == lock_type::exclusive) {
            GKO_ASSERT_NO_MPI_ERRORS(
                MPI_Win_lock(MPI_LOCK_EXCLUSIVE, rank, assert, this->window_));
        } else {
            GKO_NOT_IMPLEMENTED;
        }
    }

        GKO_ASSERT_NO_MPI_ERRORS(MPI_Win_unlock(rank, this->window_));

        GKO_ASSERT_NO_MPI_ERRORS(MPI_Win_lock_all(assert, this->window_));

        GKO_ASSERT_NO_MPI_ERRORS(MPI_Win_unlock_all(this->window_));

        GKO_ASSERT_NO_MPI_ERRORS(MPI_Win_flush(rank, this->window_));

        GKO_ASSERT_NO_MPI_ERRORS(MPI_Win_flush_local(rank, this->window_));

        GKO_ASSERT_NO_MPI_ERRORS(MPI_Win_flush_all(this->window_));

        GKO_ASSERT_NO_MPI_ERRORS(MPI_Win_flush_local_all(this->window_));

    void sync() const { GKO_ASSERT_NO_MPI_ERRORS(MPI_Win_sync(this->window_)); }

        if (this->window_ && this->window_ != MPI_WIN_NULL) {
            MPI_Win_free(&this->window_);
        }
    template <typename PutType>
    void put(std::shared_ptr<const Executor> exec, const PutType* origin_buffer,
             const int origin_count, const int target_rank,
             const unsigned int target_disp, const int target_count) const
    {
        auto guard = exec->get_scoped_device_id_guard();
        GKO_ASSERT_NO_MPI_ERRORS(
            MPI_Put(origin_buffer, origin_count, type_impl<PutType>::get_type(),
                    target_rank, target_disp, target_count,
                    type_impl<PutType>::get_type(), this->get_window()));
    }

    template <typename PutType>
    request r_put(std::shared_ptr<const Executor> exec,
                  const PutType* origin_buffer, const int origin_count,
                  const int target_rank, const unsigned int target_disp,
                  const int target_count) const
    {
        auto guard = exec->get_scoped_device_id_guard();
        request req;
        GKO_ASSERT_NO_MPI_ERRORS(MPI_Rput(
            origin_buffer, origin_count, type_impl<PutType>::get_type(),
            target_rank, target_disp, target_count,
            type_impl<PutType>::get_type(), this->get_window(), req.get()));
        return req;
    }

    template <typename PutType>
    void accumulate(std::shared_ptr<const Executor> exec,
                    const PutType* origin_buffer, const int origin_count,
                    const int target_rank, const unsigned int target_disp,
                    const int target_count, MPI_Op operation) const
    {
        auto guard = exec->get_scoped_device_id_guard();
        GKO_ASSERT_NO_MPI_ERRORS(MPI_Accumulate(
            origin_buffer, origin_count, type_impl<PutType>::get_type(),
            target_rank, target_disp, target_count,
            type_impl<PutType>::get_type(), operation, this->get_window()));
    }

    template <typename PutType>
    request r_accumulate(std::shared_ptr<const Executor> exec,
                         const PutType* origin_buffer, const int origin_count,
                         const int target_rank, const unsigned int target_disp,
                         const int target_count, MPI_Op operation) const
    {
        auto guard = exec->get_scoped_device_id_guard();
        request req;
        GKO_ASSERT_NO_MPI_ERRORS(MPI_Raccumulate(
            origin_buffer, origin_count, type_impl<PutType>::get_type(),
            target_rank, target_disp, target_count,
            type_impl<PutType>::get_type(), operation, this->get_window(),
            req.get()));
        return req;
    }
    template <typename GetType>
    void get(std::shared_ptr<const Executor> exec, GetType* origin_buffer,
             const int origin_count, const int target_rank,
             const unsigned int target_disp, const int target_count) const
    {
        auto guard = exec->get_scoped_device_id_guard();
        GKO_ASSERT_NO_MPI_ERRORS(
            MPI_Get(origin_buffer, origin_count, type_impl<GetType>::get_type(),
                    target_rank, target_disp, target_count,
                    type_impl<GetType>::get_type(), this->get_window()));
    }

    template <typename GetType>
    request r_get(std::shared_ptr<const Executor> exec, GetType* origin_buffer,
                  const int origin_count, const int target_rank,
                  const unsigned int target_disp, const int target_count) const
    {
        auto guard = exec->get_scoped_device_id_guard();
        request req;
        GKO_ASSERT_NO_MPI_ERRORS(MPI_Rget(
            origin_buffer, origin_count, type_impl<GetType>::get_type(),
            target_rank, target_disp, target_count,
            type_impl<GetType>::get_type(), this->get_window(), req.get()));
        return req;
    }

    template <typename GetType>
    void get_accumulate(std::shared_ptr<const Executor> exec,
                        GetType* origin_buffer, const int origin_count,
                        GetType* result_buffer, const int result_count,
                        const int target_rank, const unsigned int target_disp,
                        const int target_count, MPI_Op operation) const
    {
        auto guard = exec->get_scoped_device_id_guard();
        GKO_ASSERT_NO_MPI_ERRORS(MPI_Get_accumulate(
            origin_buffer, origin_count, type_impl<GetType>::get_type(),
            result_buffer, result_count, type_impl<GetType>::get_type(),
            target_rank, target_disp, target_count,
            type_impl<GetType>::get_type(), operation, this->get_window()));
    }

    template <typename GetType>
    request r_get_accumulate(std::shared_ptr<const Executor> exec,
                             GetType* origin_buffer, const int origin_count,
                             GetType* result_buffer, const int result_count,
                             const int target_rank,
                             const unsigned int target_disp,
                             const int target_count, MPI_Op operation) const
    {
        auto guard = exec->get_scoped_device_id_guard();
        request req;
        GKO_ASSERT_NO_MPI_ERRORS(MPI_Rget_accumulate(
            origin_buffer, origin_count, type_impl<GetType>::get_type(),
            result_buffer, result_count, type_impl<GetType>::get_type(),
            target_rank, target_disp, target_count,
            type_impl<GetType>::get_type(), operation, this->get_window(),
            req.get()));
        return req;
    }

    template <typename GetType>
    void fetch_and_op(std::shared_ptr<const Executor> exec,
                      GetType* origin_buffer, GetType* result_buffer,
                      const int target_rank, const unsigned int target_disp,
                      MPI_Op operation) const
    {
        auto guard = exec->get_scoped_device_id_guard();
        GKO_ASSERT_NO_MPI_ERRORS(MPI_Fetch_and_op(
            origin_buffer, result_buffer, type_impl<GetType>::get_type(),
            target_rank, target_disp, operation, this->get_window()));
    }
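// Usage sketch (illustrative, not part of mpi.hpp): expose a local array as an
// RMA window and read rank 0's first element from every rank inside a passive
// target epoch. Needs <vector>.
void window_example(const gko::experimental::mpi::communicator& comm,
                    std::shared_ptr<const gko::Executor> exec)
{
    std::vector<double> local(16, static_cast<double>(comm.rank()));
    gko::experimental::mpi::window<double> win(exec, local.data(),
                                               static_cast<int>(local.size()),
                                               comm);
    double remote_value = 0.0;
    win.lock_all();
    win.get(exec, &remote_value, 1, /*target_rank=*/0, /*target_disp=*/0, 1);
    win.flush(0);
    win.unlock_all();
    // remote_value now holds rank 0's local[0] (== 0.0)
}   // ~window frees the underlying MPI window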
gko::experimental::mpi::communicator
A thin wrapper of MPI_Comm that supports most MPI calls.
Definition mpi.hpp:408
status recv(std::shared_ptr< const Executor > exec, RecvType *recv_buffer, const int recv_count, const int source_rank, const int recv_tag) const
Receive data from source rank.
Definition mpi.hpp:582
void scatter_v(std::shared_ptr< const Executor > exec, const SendType *send_buffer, const int *send_counts, const int *displacements, RecvType *recv_buffer, const int recv_count, int root_rank) const
Scatter data from root rank to all ranks in the communicator with offsets.
Definition mpi.hpp:1099
request i_broadcast(std::shared_ptr< const Executor > exec, BroadcastType *buffer, int count, int root_rank) const
(Non-blocking) Broadcast data from calling process to all ranks in the communicator.
Definition mpi.hpp:660
void gather(std::shared_ptr< const Executor > exec, const SendType *send_buffer, const int send_count, RecvType *recv_buffer, const int recv_count, int root_rank) const
Gather data onto the root rank from all ranks in the communicator.
Definition mpi.hpp:847
request i_recv(std::shared_ptr< const Executor > exec, RecvType *recv_buffer, const int recv_count, const int source_rank, const int recv_tag) const
Receive (Non-blocking, Immediate return) data from source rank.
Definition mpi.hpp:610
request i_scatter_v(std::shared_ptr< const Executor > exec, const SendType *send_buffer, const int *send_counts, const int *displacements, RecvType *recv_buffer, const int recv_count, int root_rank) const
(Non-blocking) Scatter data from root rank to all ranks in the communicator with offsets.
Definition mpi.hpp:1132
void all_to_all(std::shared_ptr< const Executor > exec, const SendType *send_buffer, const int send_count, RecvType *recv_buffer, const int recv_count) const
Communicate data from all ranks to all other ranks (MPI_Alltoall).
Definition mpi.hpp:1222
request i_all_to_all(std::shared_ptr< const Executor > exec, const SendType *send_buffer, const int send_count, RecvType *recv_buffer, const int recv_count) const
(Non-blocking) Communicate data from all ranks to all other ranks (MPI_Ialltoall).
Definition mpi.hpp:1252
request i_all_to_all_v(std::shared_ptr< const Executor > exec, const void *send_buffer, const int *send_counts, const int *send_offsets, MPI_Datatype send_type, void *recv_buffer, const int *recv_counts, const int *recv_offsets, MPI_Datatype recv_type) const
Communicate data from all ranks to all other ranks with offsets (MPI_Ialltoallv).
Definition mpi.hpp:1342
bool operator!=(const communicator &rhs) const
Compare two communicator objects for non-equality.
Definition mpi.hpp:502
void scatter(std::shared_ptr< const Executor > exec, const SendType *send_buffer, const int send_count, RecvType *recv_buffer, const int recv_count, int root_rank) const
Scatter data from root rank to all ranks in the communicator.
Definition mpi.hpp:1035
void synchronize() const
This function is used to synchronize the ranks in the communicator.
Definition mpi.hpp:508
int rank() const
Return the rank of the calling process in the communicator.
Definition mpi.hpp:478
request i_reduce(std::shared_ptr< const Executor > exec, const ReduceType *send_buffer, ReduceType *recv_buffer, int count, MPI_Op operation, int root_rank) const
(Non-blocking) Reduce data into root from all calling processes on the same communicator.
Definition mpi.hpp:713
int size() const
Return the size of the communicator (number of ranks).
Definition mpi.hpp:471
void send(std::shared_ptr< const Executor > exec, const SendType *send_buffer, const int send_count, const int destination_rank, const int send_tag) const
Send (Blocking) data from calling process to destination rank.
Definition mpi.hpp:527
request i_all_to_all_v(std::shared_ptr< const Executor > exec, const SendType *send_buffer, const int *send_counts, const int *send_offsets, RecvType *recv_buffer, const int *recv_counts, const int *recv_offsets) const
Communicate data from all ranks to all other ranks with offsets (MPI_Ialltoallv).
Definition mpi.hpp:1378
request i_gather(std::shared_ptr< const Executor > exec, const SendType *send_buffer, const int send_count, RecvType *recv_buffer, const int recv_count, int root_rank) const
(Non-blocking) Gather data onto the root rank from all ranks in the communicator.
Definition mpi.hpp:879
void all_to_all(std::shared_ptr< const Executor > exec, RecvType *recv_buffer, const int recv_count) const
(In-place) Communicate data from all ranks to all other ranks in place (MPI_Alltoall).
Definition mpi.hpp:1164
void all_to_all_v(std::shared_ptr< const Executor > exec, const SendType *send_buffer, const int *send_counts, const int *send_offsets, RecvType *recv_buffer, const int *recv_counts, const int *recv_offsets) const
Communicate data from all ranks to all other ranks with offsets (MPI_Alltoallv).
Definition mpi.hpp:1285
request i_all_reduce(std::shared_ptr< const Executor > exec, ReduceType *recv_buffer, int count, MPI_Op operation) const
(In-place, non-blocking) Reduce data from all calling processes on the same communicator.
Definition mpi.hpp:764
request i_all_to_all(std::shared_ptr< const Executor > exec, RecvType *recv_buffer, const int recv_count) const
(In-place, Non-blocking) Communicate data from all ranks to all other ranks in place (MPI_Ialltoall).
Definition mpi.hpp:1193
void all_to_all_v(std::shared_ptr< const Executor > exec, const void *send_buffer, const int *send_counts, const int *send_offsets, MPI_Datatype send_type, void *recv_buffer, const int *recv_counts, const int *recv_offsets, MPI_Datatype recv_type) const
Communicate data from all ranks to all other ranks with offsets (MPI_Alltoallv).
Definition mpi.hpp:1311
int node_local_rank() const
Return the node local rank of the calling process in the communicator.
Definition mpi.hpp:485
void broadcast(std::shared_ptr< const Executor > exec, BroadcastType *buffer, int count, int root_rank) const
Broadcast data from calling process to all ranks in the communicator.
Definition mpi.hpp:635
const MPI_Comm & get() const
Return the underlying MPI_Comm object.
Definition mpi.hpp:462
communicator(const MPI_Comm &comm, int color, int key)
Create a communicator object from an existing MPI_Comm object using color and key.
Definition mpi.hpp:434
void all_reduce(std::shared_ptr< const Executor > exec, ReduceType *recv_buffer, int count, MPI_Op operation) const
(In-place) Reduce data from all calling processes on the same communicator.
Definition mpi.hpp:739
void all_gather(std::shared_ptr< const Executor > exec, const SendType *send_buffer, const int send_count, RecvType *recv_buffer, const int recv_count) const
Gather data onto all ranks from all ranks in the communicator.
Definition mpi.hpp:976
request i_all_gather(std::shared_ptr< const Executor > exec, const SendType *send_buffer, const int send_count, RecvType *recv_buffer, const int recv_count) const
(Non-blocking) Gather data onto all ranks from all ranks in the communicator.
Definition mpi.hpp:1006
bool operator==(const communicator &rhs) const
Compare two communicator objects for equality.
Definition mpi.hpp:492
void all_reduce(std::shared_ptr< const Executor > exec, const ReduceType *send_buffer, ReduceType *recv_buffer, int count, MPI_Op operation) const
Reduce data from all calling processes on the same communicator.
Definition mpi.hpp:791
request i_gather_v(std::shared_ptr< const Executor > exec, const SendType *send_buffer, const int send_count, RecvType *recv_buffer, const int *recv_counts, const int *displacements, int root_rank) const
(Non-blocking) Gather data onto the root rank from all ranks in the communicator with offsets.
Definition mpi.hpp:945
request i_all_reduce(std::shared_ptr< const Executor > exec, const ReduceType *send_buffer, ReduceType *recv_buffer, int count, MPI_Op operation) const
(Non-blocking) Reduce data from all calling processes on the same communicator.
Definition mpi.hpp:818
communicator(const MPI_Comm &comm, bool force_host_buffer=false)
Non-owning constructor for an existing communicator of type MPI_Comm.
Definition mpi.hpp:420
request i_scan(std::shared_ptr< const Executor > exec, const ScanType *send_buffer, ScanType *recv_buffer, int count, MPI_Op operation) const
(Non-blocking) Does a scan operation with the given operator.
Definition mpi.hpp:1431
void reduce(std::shared_ptr< const Executor > exec, const ReduceType *send_buffer, ReduceType *recv_buffer, int count, MPI_Op operation, int root_rank) const
Reduce data into root from all calling processes on the same communicator.
Definition mpi.hpp:686
request i_scatter(std::shared_ptr< const Executor > exec, const SendType *send_buffer, const int send_count, RecvType *recv_buffer, const int recv_count, int root_rank) const
(Non-blocking) Scatter data from root rank to all ranks in the communicator.
Definition mpi.hpp:1066
void scan(std::shared_ptr< const Executor > exec, const ScanType *send_buffer, ScanType *recv_buffer, int count, MPI_Op operation) const
Does a scan operation with the given operator.
Definition mpi.hpp:1405
void gather_v(std::shared_ptr< const Executor > exec, const SendType *send_buffer, const int send_count, RecvType *recv_buffer, const int *recv_counts, const int *displacements, int root_rank) const
Gather data onto the root rank from all ranks in the communicator with offsets.
Definition mpi.hpp:912
request i_send(std::shared_ptr< const Executor > exec, const SendType *send_buffer, const int send_count, const int destination_rank, const int send_tag) const
Send (Non-blocking, Immediate return) data from calling process to destination rank.
Definition mpi.hpp:554
communicator(const communicator &comm, int color, int key)
Create a communicator object from an existing MPI_Comm object using color and key.
Definition mpi.hpp:449
MPI_Datatype get() const
Access the underlying MPI_Datatype.
Definition mpi.hpp:170
contiguous_type(int count, MPI_Datatype old_type)
Constructs a wrapper for a contiguous MPI_Datatype.
Definition mpi.hpp:109
contiguous_type()
Constructs empty wrapper with MPI_DATATYPE_NULL.
Definition mpi.hpp:118
contiguous_type(const contiguous_type &)=delete
Disallow copying of wrapper type.
contiguous_type(contiguous_type &&other) noexcept
Move constructor, leaves other with MPI_DATATYPE_NULL.
Definition mpi.hpp:135
contiguous_type & operator=(contiguous_type &&other) noexcept
Move assignment, leaves other with MPI_DATATYPE_NULL.
Definition mpi.hpp:147
contiguous_type & operator=(const contiguous_type &)=delete
Disallow copying of wrapper type.
~contiguous_type()
Destructs object by freeing wrapped MPI_Datatype.
Definition mpi.hpp:158
gko::experimental::mpi::environment
Class that sets up and finalizes the MPI environment.
Definition mpi.hpp:198
~environment()
Call MPI_Finalize at the end of the scope of this class.
Definition mpi.hpp:241
int get_provided_thread_support() const
Return the provided thread support.
Definition mpi.hpp:219
environment(int &argc, char **&argv, const thread_type thread_t=thread_type::serialized)
Call MPI_Init_thread and initialize the MPI environment.
Definition mpi.hpp:229
gko::experimental::mpi::request
The request class is a light, move-only wrapper around the MPI_Request handle.
Definition mpi.hpp:319
request()
The default constructor.
Definition mpi.hpp:325
MPI_Request * get()
Get a pointer to the underlying MPI_Request handle.
Definition mpi.hpp:356
status wait()
Allows a rank to wait on a particular request handle.
Definition mpi.hpp:364
gko::experimental::mpi::window
This class wraps the MPI_Window class with RAII functionality.
Definition mpi.hpp:1506
void get(std::shared_ptr< const Executor > exec, GetType *origin_buffer, const int origin_count, const int target_rank, const unsigned int target_disp, const int target_count) const
Get data from the target window.
Definition mpi.hpp:1819
request r_put(std::shared_ptr< const Executor > exec, const PutType *origin_buffer, const int origin_count, const int target_rank, const unsigned int target_disp, const int target_count) const
Put data (with handle) into the target window.
Definition mpi.hpp:1741
window()
The default constructor.
Definition mpi.hpp:1521
void get_accumulate(std::shared_ptr< const Executor > exec, GetType *origin_buffer, const int origin_count, GetType *result_buffer, const int result_count, const int target_rank, const unsigned int target_disp, const int target_count, MPI_Op operation) const
Get Accumulate data from the target window.
Definition mpi.hpp:1870
void put(std::shared_ptr< const Executor > exec, const PutType *origin_buffer, const int origin_count, const int target_rank, const unsigned int target_disp, const int target_count) const
Put data into the target window.
Definition mpi.hpp:1717
~window()
The deleter which calls MPI_Win_free when the window leaves its scope.
Definition mpi.hpp:1699
lock_type
The lock type for passive target synchronization of the windows.
Definition mpi.hpp:1516
window & operator=(window &&other)
The move assignment operator.
Definition mpi.hpp:1542
request r_accumulate(std::shared_ptr< const Executor > exec, const PutType *origin_buffer, const int origin_count, const int target_rank, const unsigned int target_disp, const int target_count, MPI_Op operation) const
(Non-blocking) Accumulate data into the target window.
Definition mpi.hpp:1793
request r_get_accumulate(std::shared_ptr< const Executor > exec, GetType *origin_buffer, const int origin_count, GetType *result_buffer, const int result_count, const int target_rank, const unsigned int target_disp, const int target_count, MPI_Op operation) const
(Non-blocking) Get Accumulate data (with handle) from the target window.
Definition mpi.hpp:1900
void fetch_and_op(std::shared_ptr< const Executor > exec, GetType *origin_buffer, GetType *result_buffer, const int target_rank, const unsigned int target_disp, MPI_Op operation) const
Fetch and operate on data from the target window (An optimized version of Get_accumulate).
Definition mpi.hpp:1929
void sync() const
Synchronize the public and private buffers for the window object.
Definition mpi.hpp:1694
void unlock(int rank) const
Close the epoch using MPI_Win_unlock for the window object.
Definition mpi.hpp:1626
void fence(int assert=0) const
The active target synchronization using MPI_Win_fence for the window object.
Definition mpi.hpp:1593
void flush(int rank) const
Flush the existing RDMA operations on the target rank for the calling process for the window object.
Definition mpi.hpp:1657
void unlock_all() const
Close the epoch on all ranks using MPI_Win_unlock_all for the window object.
Definition mpi.hpp:1646
create_type
The create type for the window object.
Definition mpi.hpp:1511
window(std::shared_ptr< const Executor > exec, ValueType *base, int num_elems, const communicator &comm, const int disp_unit=sizeof(ValueType), MPI_Info input_info=MPI_INFO_NULL, create_type c_type=create_type::create)
Create a window object with a given data pointer and type.
Definition mpi.hpp:1559
void accumulate(std::shared_ptr< const Executor > exec, const PutType *origin_buffer, const int origin_count, const int target_rank, const unsigned int target_disp, const int target_count, MPI_Op operation) const
Accumulate data into the target window.
Definition mpi.hpp:1767
void lock_all(int assert=0) const
Create the epoch on all ranks using MPI_Win_lock_all for the window object.
Definition mpi.hpp:1637
void lock(int rank, lock_type lock_t=lock_type::shared, int assert=0) const
Create an epoch using MPI_Win_lock for the window object.
Definition mpi.hpp:1606
void flush_all_local() const
Flush all the local existing RDMA operations on the calling rank for the window object.
Definition mpi.hpp:1686
window(window &&other)
The move constructor.
Definition mpi.hpp:1533
void flush_local(int rank) const
Flush the existing RDMA operations on the calling rank from the target rank for the window object.
Definition mpi.hpp:1668
MPI_Win get_window() const
Get the underlying window object of MPI_Win type.
Definition mpi.hpp:1585
request r_get(std::shared_ptr< const Executor > exec, GetType *origin_buffer, const int origin_count, const int target_rank, const unsigned int target_disp, const int target_count) const
Get data (with handle) from the target window.
Definition mpi.hpp:1843
void flush_all() const
Flush all the existing RDMA operations for the calling process for the window object.
Definition mpi.hpp:1677
The mpi namespace, containing wrappers for many MPI functions.
Definition mpi.hpp:35
int map_rank_to_device_id(MPI_Comm comm, int num_devices)
Maps each MPI rank to a single device id in a round robin manner.
bool requires_host_buffer(const std::shared_ptr< const Executor > &exec, const communicator &comm)
Checks if the combination of Executor and communicator requires passing MPI buffers from host memory.
double get_walltime()
Get the current wall time.
Definition mpi.hpp:1494
constexpr bool is_gpu_aware()
Return if GPU aware functionality is available.
Definition mpi.hpp:41
thread_type
This enum specifies the threading type to be used when creating an MPI environment.
Definition mpi.hpp:181
std::vector< status > wait_all(std::vector< request > &req)
Allows a rank to wait on multiple request handles.
Definition mpi.hpp:384
The Ginkgo namespace.
Definition abstract_factory.hpp:20
gko::experimental::mpi::status
The status struct is a light wrapper around the MPI_Status struct.
Definition mpi.hpp:279
int get_count(const T *data) const
Get the count of the number of elements received by the communication call.
Definition mpi.hpp:303
status()
The default constructor.
Definition mpi.hpp:283
MPI_Status * get()
Get a pointer to the underlying MPI_Status object.
Definition mpi.hpp:290
gko::experimental::mpi::type_impl
A struct that is used to determine the MPI_Datatype of a specified type.
Definition mpi.hpp:76