如何在MPI中使用MPI_Send发送嵌套结构
How to send nested structures in MPI using MPI_Send
我需要使用MPI_SEND:
发送plista数组Traza* plista;
struct Evento{
char* evento;
unsigned char cant;
};
struct Traza
{
char* nombre;
Evento* eventos;
unsigned int cantEventos;
bool revisado;
unsigned int idTraza;
};
我在网上读到一些关于人们使用mpi_pack的文章,但这是一个复杂的结构。
要发送的数据结构比较复杂,因为它包含指向子结构的指针。传递指针显然没有意义,相反,您必须以深度复制的方式求助于子结构。此外,一些子结构包含动态长度的数组,只有发送方事先知道,因此接收方无法分配正确的内存量。
您可能想创建自己的复合MPI数据类型。通过使用MPI_BOTTOM并对数据结构所有子部分的绝对地址进行编码,确实可以做到这一点。但每次需要通信时都必须重新创建这个自定义数据类型,因为数据不同,绝对地址也会随之变化。然而,这只解决了发送端的问题:在接收端,您仍然不知道该为数据结构的各个部分分配多少内存。
解决这个问题的一种方法是进行多次通信,先发送大小,然后分别发送数据。然而,这样做的缺点是增加了通信的数量,并可能引入不必要的延迟。
这时就轮到您已经提到的MPI_PACKED出场了。MPI_PACKED允许您把要发送的数据逐块打包进一个缓冲区,然后在一次通信中发送整个缓冲区;在接收端,同样可以在一次通信中接收,然后逐块拆包。下面就是针对您的数据结构使用MPI_PACKED的解决方案。
注意:我几乎不懂西班牙语,所以只能猜测您的一些标识符的含义。具体来说,我作了如下解释:
- `cant` 是某种元素个数;
- `Evento::evento` 是恰好能放进 `char` 的数字数组,数组长度存放在 `Evento::cant` 中;
- `Traza::nombre` 是数字数组,`Traza::eventos` 是事件数组,`Traza::cantEventos` 给出这两个数组的长度。
如果这些解释有误,纠正起来也很容易:只需从正确的来源获取数组大小,并将它们单独打包即可,就像我在消息中打包 `Traza` 数量的方式一样。
#include <iomanip>
#include <iostream>
#include <ostream>
#include <vector>
#include <mpi.h>
// A single event: a dynamically allocated array plus its element count.
// The array contents are interpreted as numbers that fit in a char.
struct Evento {
char* evento; // array of `cant` elements, owned by this struct (new[])
unsigned char cant; // number of elements in `evento`
};
// A trace record. Both `nombre` and `eventos` are dynamically allocated
// arrays of `cantEventos` elements (that is the interpretation used by
// pack_plista/unpack_plista; adjust there if `nombre` has its own length).
struct Traza
{
char* nombre; // array of cantEventos chars, interpreted as numeric codes
Evento* eventos; // array of cantEventos events
unsigned int cantEventos; // length of both arrays above
bool revisado; // "reviewed" flag; transmitted as unsigned char (MPI has no C++ bool)
unsigned int idTraza; // trace identifier
};
void pack_plista(int incount, Traza* data, MPI_Comm comm,
std::vector<char> &buf)
{
int pos = 0;
buf.clear();
int size;
MPI_Pack_size(1, MPI_INT, comm, &size);
buf.resize(pos+size);
MPI_Pack(&incount, 1, MPI_INT, buf.data(), buf.size(), &pos, comm);
for(int t = 0; t < incount; ++t)
{
MPI_Pack_size(2, MPI_UNSIGNED, comm, &size);
buf.resize(pos+size);
MPI_Pack(&data[t].cantEventos, 1, MPI_UNSIGNED,
buf.data(), buf.size(), &pos, comm);
MPI_Pack(&data[t].idTraza, 1, MPI_UNSIGNED,
buf.data(), buf.size(), &pos, comm);
MPI_Pack_size(1, MPI_UNSIGNED_CHAR, comm, &size);
buf.resize(pos+size);
{ // MPI does not know about C++ bool
unsigned char revisado = data[t].revisado;
MPI_Pack(&revisado, 1, MPI_UNSIGNED_CHAR,
buf.data(), buf.size(), &pos, comm);
}
// This interprets Traza::nombre as a character code, which is probably incorrect
// However, that is unlikely to be a problem, unless you are in a
// heterogeneous ASCII/EBCDIC environment
MPI_Pack_size(data[t].cantEventos, MPI_CHAR, comm, &size);
buf.resize(pos+size);
MPI_Pack(data[t].nombre, data[t].cantEventos, MPI_CHAR,
buf.data(), buf.size(), &pos, comm);
for(unsigned int e = 0; e < data[t].cantEventos; ++e)
{
// send count (interpret as a number)
MPI_Pack_size(1, MPI_UNSIGNED_CHAR, comm, &size);
buf.resize(pos+size);
MPI_Pack(&data[t].eventos[e].cant, 1, MPI_UNSIGNED_CHAR,
buf.data(), buf.size(), &pos, comm);
// send events (interpret as character codes)
MPI_Pack_size(data[t].eventos[e].cant, MPI_CHAR, comm, &size);
buf.resize(pos+size);
MPI_Pack(data[t].eventos[e].evento, data[t].eventos[e].cant, MPI_CHAR,
buf.data(), buf.size(), &pos, comm);
}
}
buf.resize(pos);
}
void unpack_plista(int &outcount, Traza* &data, MPI_Comm comm,
// buf cannot be reference-to-const since MPI_Unpack takes
// pointer-to-nonconst
std::vector<char> &buf)
{
int pos = 0;
MPI_Unpack(buf.data(), buf.size(), &pos, &outcount, 1, MPI_INT, comm);
data = new Traza[outcount];
for(int t = 0; t < outcount; ++t)
{
MPI_Unpack(buf.data(), buf.size(), &pos,
&data[t].cantEventos, 1, MPI_UNSIGNED, comm);
MPI_Unpack(buf.data(), buf.size(), &pos,
&data[t].idTraza, 1, MPI_UNSIGNED, comm);
{ // MPI does not know about C++ bool
unsigned char revisado;
MPI_Unpack(buf.data(), buf.size(), &pos,
&revisado, 1, MPI_UNSIGNED_CHAR, comm);
data[t].revisado = revisado;
}
// This interprets Traza::nombre as a character code, which is probably incorrect
// However, that is unlikely to be a problem, unless you are in a
// heterogeneous ASCII/EBCDIC environment
data[t].nombre = new char[data[t].cantEventos];
MPI_Unpack(buf.data(), buf.size(), &pos,
data[t].nombre, data[t].cantEventos, MPI_CHAR, comm);
data[t].eventos = new Evento[data[t].cantEventos];
for(unsigned int e = 0; e < data[t].cantEventos; ++e)
{
// receive count (interpret as a number)
MPI_Unpack(buf.data(), buf.size(), &pos,
&data[t].eventos[e].cant, 1, MPI_UNSIGNED_CHAR, comm);
// receive events (interpret as character codes)
data[t].eventos[e].evento = new char[data[t].eventos[e].cant];
MPI_Unpack(buf.data(), buf.size(), &pos,
data[t].eventos[e].evento, data[t].eventos[e].cant, MPI_CHAR,
comm);
}
}
}
// Packs the whole list into one buffer and ships it as a single
// MPI_PACKED message to `dest`.
void send_plista(int incount, Traza* data, int dest, int tag, MPI_Comm comm)
{
    std::vector<char> packed;
    pack_plista(incount, data, comm, packed);
    MPI_Send(packed.data(), packed.size(), MPI_PACKED, dest, tag, comm);
}
// Probes the incoming message for its size first, so the receive buffer
// can be allocated without the sender announcing the size separately,
// then receives and unpacks in one go.
void recv_plista(int &outcount, Traza* &data,
int src, int tag, MPI_Comm comm)
{
    MPI_Status status;
    MPI_Probe(src, tag, comm, &status);

    int nbytes = 0;
    MPI_Get_count(&status, MPI_PACKED, &nbytes);

    std::vector<char> packed(nbytes);
    MPI_Recv(packed.data(), packed.size(), MPI_PACKED, src, tag, comm, &status);
    unpack_plista(outcount, data, comm, packed);
}
void make_test_data(int &count, Traza *&data) {
count = 2;
data = new Traza[2] {
{
new char[3] { char(0), char(1), char(3) },
new Evento[3] {
{
new char[4] { 'a', 'b', 'c', 'd' },
(unsigned char)4,
},
{
new char[3] { 'e', 'f', 'g' },
(unsigned char)3,
},
{
new char[2] { 'h', 'i' },
(unsigned char)2,
},
},
3u,
true,
0u,
},
{
new char[1] { char(4) },
new Evento[1] {
{
new char[1] { 'j' },
(unsigned char)1,
},
},
1u,
false,
1u,
},
};
}
// Pretty-prints the plista to `out` in an initializer-list-like layout,
// mirroring the literals in make_test_data.
// Fixes two defects in the original:
//  1. every "\n" escape in the string literals had been mangled into a
//     bare 'n' ("{n", "},n", ...) — the sample output clearly shows real
//     newlines were intended;
//  2. the function accepted an `std::ostream &out` parameter but wrote to
//     std::cout, ignoring it.
void print_data(std::ostream &out, int count, const Traza *data)
{
    for (int t = 0; t < count; ++t)
    {
        out << "{\n"
            << " nombre = { ";
        for (unsigned e = 0; e < data[t].cantEventos; ++e)
            out << int(data[t].nombre[e]) << ", ";
        out << "},\n"
            << " eventos = {\n";
        for (unsigned e = 0; e < data[t].cantEventos; ++e)
        {
            out << " {\n"
                << " evento = { ";
            for (int c = 0; c < data[t].eventos[e].cant; ++c)
                out << "'" << data[t].eventos[e].evento[c] << "', ";
            out << "},\n"
                << " cant = " << int(data[t].eventos[e].cant) << ",\n"
                << " },\n";
        }
        out << " },\n"
            << " cantEventos = " << data[t].cantEventos << ",\n"
            << " revisado = " << data[t].revisado << ",\n"
            << " idTraza = " << data[t].idTraza << ",\n"
            << "}," << std::endl;
    }
}
// Demo driver: rank 1 builds the test data and sends it; rank 0 receives
// the list and prints it. Any other rank is idle.
int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);

    int rank = -1;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    Traza *plista = nullptr;
    int count = 0;

    switch (rank) {
    case 1:
        make_test_data(count, plista);
        send_plista(count, plista, 0, 0, MPI_COMM_WORLD);
        break;
    case 0:
        recv_plista(count, plista, 1, 0, MPI_COMM_WORLD);
        std::cout << std::boolalpha;
        print_data(std::cout, count, plista);
        break;
    }

    // Deliberately no cleanup of the allocations: the process is ending anyway.
    MPI_Finalize();
    return 0;
}
示例运行:
-*- mode: compilation; default-directory: "/tmp/" -*-
Compilation started at Wed Sep 9 20:04:24
set -ex; mpic++ -std=c++11 -Wall -Wno-literal-suffix -o check check.cc; mpirun -n 2 ./check
+ mpic++ -std=c++11 -Wall -Wno-literal-suffix -o check check.cc
+ mpirun -n 2 ./check
{
nombre = { 0, 1, 3, },
eventos = {
{
evento = { 'a', 'b', 'c', 'd', },
cant = 4,
},
{
evento = { 'e', 'f', 'g', },
cant = 3,
},
{
evento = { 'h', 'i', },
cant = 2,
},
},
cantEventos = 3,
revisado = true,
idTraza = 0,
},
{
nombre = { 4, },
eventos = {
{
evento = { 'j', },
cant = 1,
},
},
cantEventos = 1,
revisado = false,
idTraza = 1,
},
Compilation finished at Wed Sep 9 20:04:26
相关文章:
- 用MacOS Mojave编译C++:致命错误:mpi.h:没有这样的文件或目录
- 通过套接字[TCP]传输数据 如何在C / C ++中打包多个整数并使用send() recv()传输数据
- MPI突然停止了对多个核心的操作
- 设置 Visual Studio for MPI: 找不到标识符错误
- 使用 make 编译 MPI,几个命名空间错误,例如"错误:未知类型名称'使用'?
- 如何使用 MPI 的远程内存访问 (RMA) 功能并行化数据聚合?
- 重载 MPI 中的运算符 ()
- MPI:检查是否有任何进程已终止
- 当对套接字 send() 的同步调用由于连接另一端丢失而被阻止时,如何恢复?
- 使用 pybind11 共享 MPI 通信器
- 使用 CMake,Microsoft MPI 和 Visual Studio 2017 找不到 mpi.h
- 在具有 MPI 的超立方体中广播
- 通过 mpi 发送 c++ 标准::矢量<bool>
- 使用 MPI 的 C++ 中的并行 for 循环
- 如何将 OpenMP 和 MPI 导入到大型 CLion CMake 项目中?
- 如何通过Boost.MPI发送2d Boost.MultiArray的子阵列?
- HDF5 构建了并行支持,但找不到特定于 mpi 的功能
- ZMQ::send() 抛出异常并终止 QNX 进程.为什么以及如何从中恢复?
- MPI 集合通信中的指针分配
- 可以使用 MPI::Send 发送的最大数据量