ZeroMQ crashes when creating workers with std::thread


I am porting the lbbroker.c example from the ZeroMQ Guide to the higher-level czmq API.

For some reason, a std::thread that creates a zsock and connects it to an endpoint always crashes. Here is the code for the load-balancing broker, written in C++:

//  Load-balancing broker
//  Demonstrates use of the CZMQ API
#include <czmq.h>
#include <cstdio>
#include <thread>
#include <iostream>
#include <vector>
#define NBR_CLIENTS 10
#define NBR_WORKERS 3
#define WORKER_READY   "READY"      //  Signals worker is ready
//  Basic request-reply client using REQ socket
//
void client_task()
{
    std::cout << "Start client...\n";
    zsock_t *client = zsock_new_req("tcp://*:5672");
    // zsock_t *client = zsock_new_req("ipc://frontend.ipc");
    std::cout << "Client connect...\n";
    // zsock_connect(client, "ipc://frontend.ipc");
    //  Send request, get reply
    std::cout << "Client send HELLO\n";
    zstr_send(client, "HELLO");
    char *reply = zstr_recv(client);
    if (reply) {
        printf("Client: %s\n", reply);
        free(reply);
    }
}
//  Worker using REQ socket to do load-balancing
//
void worker_task()
{
    std::cout << "Start work task...\n";
    zsock_t *worker = zsock_new(ZMQ_REQ);
#if (defined (WIN32))
    // worker = zsock_new_req("tcp://localhost:5673"); // backend
#else
    // worker = zsock_new_req("ipc://backend.ipc");
#endif
    std::cout << "Worker connect\n";
    zsock_connect(worker, "tcp::/localhost:5673");
    // zsock_connect(worker, "ipc://backend.ipc");
    //  Tell broker we're ready for work
    zframe_t *frame = zframe_new(WORKER_READY, strlen(WORKER_READY));
    zframe_send(&frame, worker, 0);
    //  Process messages as they arrive
    while (true) {
        zmsg_t *msg = zmsg_recv(worker);
        if (!msg)
            break;              //  Interrupted
        zframe_print(zmsg_last(msg), "Worker: ");
        zframe_reset(zmsg_last(msg), "OK", 2);
        zmsg_send(&msg, worker);
    }
}
//  .split main task
//  Now we come to the main task. This has the identical functionality to
//  the previous {{lbbroker}} broker example, but uses CZMQ to start child 
//  threads, to hold the list of workers, and to read and send messages:
int main(void)
{
    zsock_t *frontend = zsock_new_router("tcp://*:5672");
    zsock_t *backend = zsock_new_router("tcp://*:5673");
    // zsock_t *frontend = zsock_new_router("ipc://frontend.ipc");
    // zsock_t *backend = zsock_new_router("ipc://backend.ipc");
    // printf("Create server threads");
    for (int worker_nbr = 0; worker_nbr < NBR_WORKERS; worker_nbr++) {
        std::thread t(worker_task);
        std::cout << "Created worker...\n";
        //     pthread_t worker;
        //     pthread_create(&worker, NULL, worker_task, (void *)(intptr_t)worker_nbr);
    }
    int client_nbr;
    std::cout << "Create client threads\n";
    for (client_nbr = 0; client_nbr < NBR_CLIENTS; client_nbr++) {
        std::thread t(client_task);
        //     pthread_t client;
        //     printf("Create client thread %d", client_nbr);
        //     pthread_create(&client, NULL, client_task, (void *)(intptr_t)client_nbr);
    }
    //  Queue of available workers
    zlist_t *workers = zlist_new();
    //  .split main load-balancer loop
    //  Here is the main loop for the load balancer. It works the same way
    //  as the previous example, but is a lot shorter because CZMQ gives
    //  us an API that does more with fewer calls:
    while (true) {
        zmq_pollitem_t items[] = {
            { backend, 0, ZMQ_POLLIN, 0 },
            { frontend, 0, ZMQ_POLLIN, 0 }
        };
        std::cout << "Running\n";
        //  Poll frontend only if we have available workers
        int rc = zmq_poll(items, zlist_size(workers) ? 2 : 1, -1);
        if (rc == -1)
            break;              //  Interrupted
        //  Handle worker activity on backend
        if (items[0].revents & ZMQ_POLLIN) {
            //  Use worker identity for load-balancing
            zmsg_t *msg = zmsg_recv(backend);
            if (!msg)
                break;          //  Interrupted
#if 0
            // zmsg_unwrap is DEPRECATED as over-engineered, poor style
            zframe_t *identity = zmsg_unwrap(msg);
#else
            zframe_t *identity = zmsg_pop(msg);
            zframe_t *delimiter = zmsg_pop(msg);
            zframe_destroy(&delimiter);
#endif
            zlist_append(workers, identity);
            //  Forward message to client if it's not a READY
            zframe_t *frame = zmsg_first(msg);
            if (memcmp(zframe_data(frame), WORKER_READY, strlen(WORKER_READY)) == 0) {
                zmsg_destroy(&msg);
            } else {
                zmsg_send(&msg, frontend);
                if (--client_nbr == 0)
                    break; // Exit after N messages
            }
        }
        if (items[1].revents & ZMQ_POLLIN) {
            //  Get client request, route to first available worker
            zmsg_t *msg = zmsg_recv(frontend);
            if (msg) {
#if 0
                // zmsg_wrap is DEPRECATED as unsafe
                zmsg_wrap(msg, (zframe_t *)zlist_pop(workers));
#else
                zmsg_pushmem(msg, NULL, 0); // delimiter
                zmsg_push(msg, (zframe_t *)zlist_pop(workers));
#endif
                zmsg_send(&msg, backend);
            }
        }
    }
    //  When we're done, clean up properly
    while (zlist_size(workers)) {
        zframe_t *frame = (zframe_t *)zlist_pop(workers);
        zframe_destroy(&frame);
    }
    zlist_destroy(&workers);
    zsock_destroy(&frontend);
    zsock_destroy(&backend);
    return 0;
}

You can compile it with g++ lbbroker.cpp -o lbbroker -lzmq -lpthread. It compiles fine, but at runtime I get an exception and a core dump:

Created worker...
terminate called without an active exception
Start work task...
aborted (core dumped)

It crashes in the worker_task function. According to GDB, it looks like one thread is created successfully and then the program crashes. The C version works fine.

C++ has destructors, which are called at the end of the scope in which a variable is declared.

In the case of std::thread, if the destructor runs while the thread is still joinable, std::terminate is called. You should decide whether to join or detach the thread before its destructor is reached.
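
That is exactly the "terminate called without an active exception" abort shown in the output above. A minimal sketch, not taken from the original post, that reproduces the same behaviour:

#include <thread>

int main()
{
    std::thread t([] {});   // never joined or detached
    // When t goes out of scope while still joinable, its destructor
    // calls std::terminate(), which aborts with
    // "terminate called without an active exception".
}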

So either detach the threads (they will then run freely, with no simple way to wait for them to finish), or extend the lifetime of the thread objects and join them later.
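
For example, a sketch of the second option, assuming the rest of main stays as in the question: keep the std::thread objects in a std::vector so they outlive the creation loops, and join them after the broker loop.

#include <thread>
#include <vector>

std::vector<std::thread> threads;
for (int worker_nbr = 0; worker_nbr < NBR_WORKERS; worker_nbr++)
    threads.emplace_back(worker_task);   // thread objects stay alive past the loop
for (int client_nbr = 0; client_nbr < NBR_CLIENTS; client_nbr++)
    threads.emplace_back(client_task);

// ... broker poll loop as in the question ...

for (auto &t : threads)
    t.join();                            // join before the destructors run

Alternatively, for the first option, detach each thread right after creating it (for example std::thread(worker_task).detach();) if you never need to wait for it to finish.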