ZeroMQ 在使用 std::thread 创建工作线程时崩溃
ZeroMQ crashed when created workers with std::thread
我正在将 lbbroker.c 示例从 ZeroMQ 指南手册移植到更高级别的 APIczmq
。
出于某种原因,使用zsock
创建并连接到终结点的std::thread
总是崩溃。 以下是使用 C++ 编写的负载平衡代理的代码:
// Load-balancing broker
// Demonstrates use of the CZMQ API
#include <czmq.h>
#include <cstdio>
#include <thread>
#include <iostream>
#include <vector>
#define NBR_CLIENTS 10
#define NBR_WORKERS 3
#define WORKER_READY "READY" // Signals worker is ready
// Basic request-reply client using REQ socket
//
void client_task()
{
std::cout << "Start client...n";
zsock_t *client = zsock_new_req("tcp://*:5672");
// zsock_t *client = zsock_new_req("ipc://frontend.ipc");
std::cout << "Client connect...n";
// zsock_connect(client, "ipc://frontend.ipc");
// Send request, get reply
std::cout << "Client send HELLOn";
zstr_send(client, "HELLO");
char *reply = zstr_recv(client);
if (reply) {
printf("Client: %sn", reply);
free(reply);
}
}
// Worker using REQ socket to do load-balancing
//
void worker_task()
{
std::cout << "Start work task...n";
zsock_t *worker = zsock_new(ZMQ_REQ);
#if (defined (WIN32))
// worker = zsock_new_req("tcp://localhost:5673"); // backend
#else
// worker = zsock_new_req("ipc://backend.ipc");
#endif
std::cout << "Worker connectn";
zsock_connect(worker, "tcp::/localhost:5673");
// zsock_connect(worker, "ipc://backend.ipc");
// Tell broker we're ready for work
zframe_t *frame = zframe_new(WORKER_READY, strlen(WORKER_READY));
zframe_send(&frame, worker, 0);
// Process messages as they arrive
while (true) {
zmsg_t *msg = zmsg_recv(worker);
if (!msg)
break; // Interrupted
zframe_print(zmsg_last(msg), "Worker: ");
zframe_reset(zmsg_last(msg), "OK", 2);
zmsg_send(&msg, worker);
}
}
// .split main task
// Now we come to the main task. This has the identical functionality to
// the previous {{lbbroker}} broker example, but uses CZMQ to start child
// threads, to hold the list of workers, and to read and send messages:
int main(void)
{
zsock_t *frontend = zsock_new_router("tcp://*:5672");
zsock_t *backend = zsock_new_router ("tcp://*:5673");
// zsock_t *frontend = zsock_new_router("ipc://frontend.ipc");
// zsock_t *backend = zsock_new_router ("ipc://backend.ipc");
// printf("Create server threads");
for (int worker_nbr = 0; worker_nbr < NBR_WORKERS; worker_nbr++) {
std::thread t(worker_task);
std::cout << "Created worker...n";
// pthread_t worker;
// pthread_create(&worker, NULL, worker_task, (void *)(intptr_t)worker_nbr);
}
int client_nbr;
std::cout << "Create client threadsn";
for (client_nbr = 0; client_nbr < NBR_CLIENTS; client_nbr++) {
std::thread t(client_task);
// pthread_t client;
// printf("Create client thread %d", client_nbr);
// pthread_create(&client, NULL, client_task, (void *)(intptr_t)client_nbr);
}
// Queue of available workers
zlist_t *workers = zlist_new();
// .split main load-balancer loop
// Here is the main loop for the load balancer. It works the same way
// as the previous example, but is a lot shorter because CZMQ gives
// us an API that does more with fewer calls:
while (true) {
zmq_pollitem_t items[] = {
{ backend, 0, ZMQ_POLLIN, 0 },
{ frontend, 0, ZMQ_POLLIN, 0 }
};
std::cout << "Runningn";
// Poll frontend only if we have available workers
int rc = zmq_poll(items, zlist_size(workers) ? 2 : 1, -1);
if (rc == -1)
break; // Interrupted
// Handle worker activity on backend
if (items[0].revents & ZMQ_POLLIN) {
// Use worker identity for load-balancing
zmsg_t *msg = zmsg_recv(backend);
if (!msg)
break; // Interrupted
#if 0
// zmsg_unwrap is DEPRECATED as over-engineered, poor style
zframe_t *identity = zmsg_unwrap(msg);
#else
zframe_t *identity = zmsg_pop(msg);
zframe_t *delimiter = zmsg_pop(msg);
zframe_destroy(&delimiter);
#endif
zlist_append(workers, identity);
// Forward message to client if it's not a READY
zframe_t *frame = zmsg_first(msg);
if (memcmp(zframe_data(frame), WORKER_READY, strlen(WORKER_READY)) == 0) {
zmsg_destroy(&msg);
} else {
zmsg_send(&msg, frontend);
if (--client_nbr == 0)
break; // Exit after N messages
}
}
if (items[1].revents & ZMQ_POLLIN) {
// Get client request, route to first available worker
zmsg_t *msg = zmsg_recv(frontend);
if (msg) {
#if 0
// zmsg_wrap is DEPRECATED as unsafe
zmsg_wrap(msg, (zframe_t *)zlist_pop(workers));
#else
zmsg_pushmem(msg, NULL, 0); // delimiter
zmsg_push(msg, (zframe_t *)zlist_pop(workers));
#endif
zmsg_send(&msg, backend);
}
}
}
// When we're done, clean up properly
while (zlist_size(workers)) {
zframe_t *frame = (zframe_t *)zlist_pop(workers);
zframe_destroy(&frame);
}
zlist_destroy(&workers);
zsock_destroy(&frontend);
zsock_destroy(&backend);
return 0;
}
您可以使用g++ lbbroker.cpp -o lbbroker -lzmq -lpthread
编译它。它编译成功,但是,在运行时,我遇到了异常并转储了一个核心:
Created worker...
terminate called without an active exception
Start work task...
aborted (core dumped)
它在worker_task
函数中崩溃了。根据GDB的说法,似乎成功创建了1个线程,然后程序崩溃了。C 版本工作正常。
C++具有析构函数,这些析构函数在声明变量的作用域末尾调用。
在std::thread
的情况下,如果使用可连接线程调用析构函数,则将调用std::terminate
。您应该在到达析构函数之前决定是要join
线程还是detach
线程。
因此,要么分离线程(因此它们将自由运行,而无需简单的方法来等待它们完成(,要么增加对象的生存期并在以后加入它们。
相关文章:
- 工作线程在执行太快后永久休眠
- ZeroMQ 在使用 std::thread 创建工作线程时崩溃
- 工作线程一直在等待,condition_variable甚至调用了notify_all
- 使用 std::atomic 标志和 std::condition_variable 在工作线程上等待
- Qt-工作线程崩溃时将cv::Mat转换为QImage
- 无法将接口从主线程封送到工作线程
- 在 C++ 中扩展作业/工作线程多线程系统
- Qt C++ - 如何将数据从工作线程传递到主线程?
- 从线程池工作线程使用 GetQueuedCompletionStatus 的奇怪行为
- QtThread:I/O 队列的工作线程
- 将信号从工作线程类连接到控制器类 - QThreads
- Qt:工作线程和 GUI 事件之间的关系
- 将数据集几乎平均分配给工作线程
- 在为工作线程访问 lambda 中捕获的向量列表中的元素引用时,是否需要互斥锁?
- 如何将C++ dll 在 C# 窗口窗体应用程序下的工作线程中运行
- 在Qt中使用工作线程将数据写入文件的正确方法是什么?
- 在工作线程中使用 QT 主窗口
- C++,pthreads:如何从多个线程停止工作线程
- 同步主线程和工作线程
- 从工作线程更新QtCharts的正确方法