#ifndef comm_helpers_hpp
#define comm_helpers_hpp


#include <schwarz/config.hpp>

#include <communicate.hpp>
#include <gather.hpp>
#include <scatter.hpp>
#include <settings.hpp>


/**
 * The Schwarz wrappers namespace.
 */
namespace schwz {
/**
 * The namespace containing the communication helpers used to pack, transfer
 * and unpack the boundary data between the subdomains.
 */
namespace CommHelpers {
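
/**
 * Transfers the boundary elements one by one with a separate one-sided
 * MPI_Put or MPI_Get call per element, flushing the RDMA window for each
 * neighbor according to the flush settings.
 *
 * @param settings  The solver settings.
 * @param comm_struct  The struct containing the communication data.
 * @param buffer  The local buffer holding the values to be transferred.
 * @param offset  Per-neighbor index lists; offset[p][0] is the element count.
 * @param num_neighbors  The number of neighboring subdomains.
 * @param neighbors  The ranks of the neighboring subdomains.
 */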
template <typename ValueType, typename IndexType, typename MixedValueType>
void transfer_one_by_one(
    const Settings &settings,
    typename Communicate<ValueType, IndexType, MixedValueType>::comm_struct
        &comm_struct,
    ValueType *buffer, IndexType **offset, int num_neighbors,
    IndexType *neighbors)
{
    auto mpi_vtype = schwz::mpi::get_mpi_datatype(buffer[0]);
    for (auto p = 0; p < num_neighbors; p++) {
        // offset[p][0] holds the number of elements exchanged with neighbor
        // p; the element indices follow from position 1 onwards.
        if ((offset[p])[0] > 0) {
            if (settings.comm_settings.enable_put) {
                for (auto i = 0; i < (offset[p])[0]; i++) {
                    MPI_Put(&buffer[(offset[p])[i + 1]], 1, mpi_vtype,
                            neighbors[p], (offset[p])[i + 1], 1, mpi_vtype,
                            comm_struct.window_x);
                }
            } else if (settings.comm_settings.enable_get) {
                for (auto i = 0; i < (offset[p])[0]; i++) {
                    MPI_Get(&buffer[(offset[p])[i + 1]], 1, mpi_vtype,
                            neighbors[p], (offset[p])[i + 1], 1, mpi_vtype,
                            comm_struct.window_x);
                }
            }
            if (settings.comm_settings.enable_flush_all) {
                MPI_Win_flush(neighbors[p], comm_struct.window_x);
            } else if (settings.comm_settings.enable_flush_local) {
                MPI_Win_flush_local(neighbors[p], comm_struct.window_x);
            }
        }
    }
}
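
/**
 * Packs the elements to be sent to one neighboring subdomain into a
 * contiguous chunk of the send buffer, gathering either directly on the
 * device (CUDA executor without host staging) or on the host.
 *
 * @param settings  The solver settings.
 * @param buffer  The local vector to gather from.
 * @param send_buffer  The contiguous send buffer.
 * @param num_send_elems  The (device) index lists of the elements to send.
 * @param host_num_send_elems  The host copies of the index lists.
 * @param offset  The offset of this chunk within the send buffer.
 * @param send_subd  The index of the subdomain being sent to.
 */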
template <typename ValueType, typename IndexType>
void pack_buffer(const Settings &settings, const ValueType *buffer,
                 ValueType *send_buffer, IndexType **num_send_elems,
                 IndexType **host_num_send_elems, int offset, int send_subd)
{
    using vec_vtype = gko::matrix::Dense<ValueType>;
    using arr = gko::Array<IndexType>;
    using varr = gko::Array<ValueType>;
    if (settings.executor_string == "cuda" &&
        !settings.comm_settings.stage_through_host) {
        // Gather the boundary values into a temporary contiguous vector on
        // the device and copy it into the send buffer without host staging.
        auto tmp_send_buf = vec_vtype::create(
            settings.executor,
            gko::dim<2>((host_num_send_elems[send_subd])[0], 1));
        settings.executor->run(Gather<ValueType, IndexType>(
            (host_num_send_elems[send_subd])[0],
            (num_send_elems[send_subd]) + 1, buffer, tmp_send_buf->get_values(),
            copy));
        settings.executor->copy((host_num_send_elems[send_subd])[0],
                                tmp_send_buf->get_values(),
                                &(send_buffer[offset]));
    } else {
        // Gather directly into the (host) send buffer.
        settings.executor->get_master()->run(Gather<ValueType, IndexType>(
            (host_num_send_elems[send_subd])[0],
            (host_num_send_elems[send_subd]) + 1,
            buffer, &(send_buffer[offset]), copy));
    }
}
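
/**
 * Transfers one packed, contiguous buffer to or from a neighboring
 * subdomain with a single MPI_Put or MPI_Get, optionally guarded by a
 * local lock and followed by a window flush, as selected by the settings.
 *
 * @param settings  The solver settings.
 * @param window  The RDMA window used for the transfer.
 * @param target_buffer  The packed buffer to put from or get into.
 * @param host_num_elems  The host copies of the per-subdomain element counts.
 * @param offset  The offset of this chunk within the buffer.
 * @param target_subd  The index of the target subdomain.
 * @param neighbors  The ranks of the neighboring subdomains.
 * @param displacements  The displacements into the remote window.
 */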
template <typename ValueType, typename IndexType>
void transfer_buffer(const Settings &settings, MPI_Win &window,
                     ValueType *target_buffer, IndexType **host_num_elems,
                     int offset, int target_subd, IndexType *neighbors,
                     IndexType *displacements)
{
    auto mpi_vtype = schwz::mpi::get_mpi_datatype(target_buffer[0]);
    if (settings.comm_settings.enable_lock_local) {
        MPI_Win_lock(MPI_LOCK_SHARED, neighbors[target_subd], 0, window);
    }
    if (settings.comm_settings.enable_put) {
        MPI_Put(&target_buffer[offset], (host_num_elems[target_subd])[0],
                mpi_vtype, neighbors[target_subd],
                displacements[neighbors[target_subd]],
                (host_num_elems[target_subd])[0], mpi_vtype, window);
    } else if (settings.comm_settings.enable_get) {
        MPI_Get(&target_buffer[offset], (host_num_elems[target_subd])[0],
                mpi_vtype, neighbors[target_subd],
                displacements[neighbors[target_subd]],
                (host_num_elems[target_subd])[0], mpi_vtype, window);
    }
    if (settings.comm_settings.enable_flush_all) {
        MPI_Win_flush(neighbors[target_subd], window);
    } else if (settings.comm_settings.enable_flush_local) {
        MPI_Win_flush_local(neighbors[target_subd], window);
    }
    if (settings.comm_settings.enable_lock_local) {
        MPI_Win_unlock(neighbors[target_subd], window);
    }
}
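
/**
 * Unpacks the chunk received from one neighboring subdomain, scattering it
 * from the contiguous receive buffer into the local vector, either directly
 * on the device (CUDA executor without host staging) or on the host.
 *
 * @param settings  The solver settings.
 * @param buffer  The local vector to scatter into.
 * @param recv_buffer  The contiguous receive buffer.
 * @param num_recv_elems  The (device) index lists of the received elements.
 * @param host_num_recv_elems  The host copies of the index lists.
 * @param offset  The offset of this chunk within the receive buffer.
 * @param recv_subd  The index of the subdomain received from.
 */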
template <typename ValueType, typename IndexType>
void unpack_buffer(const Settings &settings, ValueType *buffer,
                   const ValueType *recv_buffer, IndexType **num_recv_elems,
                   IndexType **host_num_recv_elems, int offset, int recv_subd)
{
    using vec_vtype = gko::matrix::Dense<ValueType>;
    using arr = gko::Array<IndexType>;
    using varr = gko::Array<ValueType>;
    auto num_elems = (host_num_recv_elems[recv_subd])[0];
    if (settings.executor_string == "cuda" &&
        !settings.comm_settings.stage_through_host) {
        // Copy the received chunk into a temporary contiguous vector on the
        // device and scatter it into the local vector there.
        auto tmp_recv_buf = vec_vtype::create(
            settings.executor,
            gko::dim<2>((host_num_recv_elems[recv_subd])[0], 1));
        settings.executor->copy(num_elems, &(recv_buffer[offset]),
                                tmp_recv_buf->get_values());
        settings.executor->run(Scatter<ValueType, IndexType>(
            num_elems, (num_recv_elems[recv_subd]) + 1,
            tmp_recv_buf->get_values(), buffer, copy));
    } else {
        // Scatter directly from the (host) receive buffer.
        settings.executor->get_master()->run(Scatter<ValueType, IndexType>(
            num_elems, (num_recv_elems[recv_subd]) + 1, &(recv_buffer[offset]),
            buffer, copy));
    }
}


}  // namespace CommHelpers
}  // namespace schwz


#endif  // comm_helpers.hpp
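
For reference, the following minimal, self-contained sketch (plain MPI, not part of the header above; the neighbor rank, buffer sizes, and contents are made up for illustration) shows the lock / put / flush / unlock sequence that transfer_buffer issues when enable_lock_local, enable_put, and enable_flush_all are selected in the settings.

#include <mpi.h>
#include <vector>

int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);
    int rank, size;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    // Each rank exposes a small window; rank 0 puts its data into rank 1.
    const int num_elems = 4;
    std::vector<double> win_buf(num_elems, 0.0);
    std::vector<double> send_buf(num_elems, rank + 1.0);

    MPI_Win window;
    MPI_Win_create(win_buf.data(), num_elems * sizeof(double), sizeof(double),
                   MPI_INFO_NULL, MPI_COMM_WORLD, &window);

    if (size > 1 && rank == 0) {
        const int target = 1;     // the "neighbor" rank
        const MPI_Aint disp = 0;  // displacement into the remote window
        // Lock -> put -> flush -> unlock, mirroring transfer_buffer with
        // enable_lock_local, enable_put and enable_flush_all enabled.
        MPI_Win_lock(MPI_LOCK_SHARED, target, 0, window);
        MPI_Put(send_buf.data(), num_elems, MPI_DOUBLE, target, disp,
                num_elems, MPI_DOUBLE, window);
        MPI_Win_flush(target, window);
        MPI_Win_unlock(target, window);
    }

    MPI_Barrier(MPI_COMM_WORLD);
    MPI_Win_free(&window);
    MPI_Finalize();
    return 0;
}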