Boost asio async_write: how to avoid interleaving async_write calls?

Here is my implementation:

  • Client A sends a message to client B
  • The server processes the message by async_read-ing the right amount of data, then waits for new data from client A (so as not to block client A)
  • Afterwards the server processes the information (perhaps doing a MySQL query) and then sends the message to client B via async_write.

The problem is that if client A sends messages very quickly, the async_write calls interleave before the previous async_write's handler has been invoked.
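In other words, the problematic pattern looks roughly like this (a hypothetical sketch; Session, _socket_to_b, _outgoing_message, start_read and handle_write are illustrative names). The read handler kicks off an async_write and immediately resumes reading, so nothing stops a second async_write from starting while the first is still outstanding, and two concurrent async_write operations on one socket interleave their bytes:

void Session::handle_read(const boost::system::error_code& ec, std::size_t /*bytes_read*/)
{
    if (ec)
        return;
    // ... process the message, perhaps run the MySQL query ...
    boost::asio::async_write(
            _socket_to_b,                          // hypothetical socket to client B
            boost::asio::buffer(_outgoing_message),
            boost::bind(&Session::handle_write, this,
                boost::asio::placeholders::error)); // may still be in flight
    start_read(); // immediately wait for more data from client A
}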

Is there a simple way to avoid this problem?

EDIT 1: The same problem should occur if client C sends a message to client B just after client A...

EDIT 2: Would this work? It seems to block, and I can't tell where...

 namespace structure {                                                              
  class User {                                                                     
  public:                                                                          
    User(boost::asio::io_service& io_service, boost::asio::ssl::context& context) :
      m_socket(io_service, context), m_strand(io_service), is_writing(false) {}    
    ssl_socket& getSocket() {                                                      
      return m_socket;                                                             
    }                                                                              
    boost::asio::strand getStrand() {                                              
      return m_strand;                                                             
    }                                                                              
    void push(std::string str) {                                                   
      m_strand.post(boost::bind(&structure::User::strand_push, this, str));        
    }                                                                              
    void strand_push(std::string str) {                                            
      std::cout << "pushing: " << boost::this_thread::get_id() << std::endl;       
      m_queue.push(str);                                                           
      if (!is_writing) {                                                           
        write();                                                                   
        std::cout << "going to write" << std::endl;                                
      }                                                                            
      std::cout << "Already writing" << std::endl;                                 
    }                                                                              
    void write() {                                                                 
      std::cout << "writing" << std::endl;                                         
      is_writing = true;                                                           
      std::string str = m_queue.front();                                           
      boost::asio::async_write(m_socket,                                           
                               boost::asio::buffer(str.c_str(), str.size()),       
                               boost::bind(&structure::User::sent, this)           
                               );                                                  
    }                                                                              
    void sent() {                                                                  
      std::cout << "sent" << std::endl;                                            
      m_queue.pop();                                                               
      if (!m_queue.empty()) {                                                      
        write();                                                                   
        return;                                                                    
      }                                                                            
      else                                                                         
        is_writing = false;                                                        
      std::cout << "done sent" << std::endl;                                       
    }                                          
  private:                                     
    ssl_socket          m_socket;              
    boost::asio::strand m_strand;              
    std::queue<std::string>     m_queue;       
    bool                        is_writing;    
  };                                           
}                                              

Is there a simple way to avoid this problem?

Yes, maintain an outgoing queue for each client. Check the queue size in the async_write completion handler; if it is non-zero, start another async_write operation. Here is a sample:

#include <boost/asio.hpp>
#include <boost/bind.hpp>
#include <deque>
#include <iostream>
#include <string>
class Connection
{
public:
    Connection(
            boost::asio::io_service& io_service
            ) :
        _io_service( io_service ),
        _strand( _io_service ),
        _socket( _io_service ),
        _outbox()
    {
    }
    void write( 
            const std::string& message
            )
    {
        _strand.post(
                boost::bind(
                    &Connection::writeImpl,
                    this,
                    message
                    )
                );
    }
private:
    void writeImpl(
            const std::string& message
            )
    {
        _outbox.push_back( message );
        if ( _outbox.size() > 1 ) {
            // outstanding async_write
            return;
        }
        this->write();
    }
    void write()
    {
        const std::string& message = _outbox[0];
        boost::asio::async_write(
                _socket,
                boost::asio::buffer( message.c_str(), message.size() ),
                _strand.wrap(
                    boost::bind(
                        &Connection::writeHandler,
                        this,
                        boost::asio::placeholders::error,
                        boost::asio::placeholders::bytes_transferred
                        )
                    )
                );
    }
    void writeHandler(
            const boost::system::error_code& error,
            const size_t bytesTransferred
            )
    {
        _outbox.pop_front();
        if ( error ) {
            std::cerr << "could not write: " << boost::system::system_error(error).what() << std::endl;
            return;
        }
        if ( !_outbox.empty() ) {
            // more messages to send
            this->write();
        }
    }

private:
    typedef std::deque<std::string> Outbox;
private:
    boost::asio::io_service& _io_service;
    boost::asio::io_service::strand _strand;
    boost::asio::ip::tcp::socket _socket;
    Outbox _outbox;
};
int
main()
{
    boost::asio::io_service io_service;
    Connection foo( io_service );
}

A few key points:

  • the boost::asio::io_service::strand protects access to Connection::_outbox
  • a handler is dispatched from Connection::write() since it is public

It is not obvious to me whether you are doing something similar in the example in your question, since all of its methods are public.
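Regarding the blocking in EDIT 2: two things in the posted User class stand out. write() hands async_write a buffer pointing into the local copy str, which is destroyed as soon as write() returns, before the operation completes; and the sent() handler is not wrapped in the strand, so it can touch m_queue concurrently with strand_push(). A minimal sketch of a corrected write(), under the assumption that ssl_socket is a typedef for boost::asio::ssl::stream<boost::asio::ip::tcp::socket>:

void write() {
  std::cout << "writing" << std::endl;
  is_writing = true;
  // Reference the queue's front element instead of taking a local copy: it
  // stays alive until sent() pops it, so the buffer remains valid for the
  // whole async_write (pushing to the underlying deque does not invalidate
  // references to existing elements).
  const std::string& str = m_queue.front();
  boost::asio::async_write(
      m_socket,
      boost::asio::buffer(str),
      // Wrap the handler so that sent() also runs on the strand.
      m_strand.wrap(boost::bind(&structure::User::sent, this)));
}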

Just to improve on Sam's answer a bit. The points of improvement are:

  • async_write strives to send every byte from the buffer(s) before completing, which means you should supply all the input data you have to a single write operation; otherwise the framing overhead can increase because TCP packets end up smaller than they could have been.

  • asio::streambuf, while very convenient to use, is not zero-copy. The example below demonstrates a zero-copy approach: keep the input data chunks where they are, and use the scatter/gather overload of async_write that accepts a sequence of input buffers (which are just pointers into the actual input data).

Full source code:

#include <boost/asio.hpp>
#include <chrono>
#include <iostream>
#include <memory>
#include <mutex>
#include <string>
#include <thread>
#include <unordered_set>
#include <vector>
using namespace std::chrono_literals;
using boost::asio::ip::tcp;
class Server
{
  class Connection : public std::enable_shared_from_this<Connection>
  {
    friend class Server;
    void ProcessCommand(const std::string& cmd) {
      if (cmd == "stop") {
        server_.Stop();
        return;
      }
      if (cmd == "") {
        Close();
        return;
      }
      std::thread t([this, self = shared_from_this(), cmd] {
        for (int i = 0; i < 30; ++i) {
          Write("Hello, " + cmd + " " + std::to_string(i) + "rn");
        }
        server_.io_service_.post([this, self] {
          DoReadCmd();
        });
      });
      t.detach();
    }
    void DoReadCmd() {
      read_timer_.expires_from_now(server_.read_timeout_);
      read_timer_.async_wait([this](boost::system::error_code ec) {
        if (!ec) {
          std::cout << "Read timeoutn";
          Shutdown();
        }
      });
      boost::asio::async_read_until(socket_, buf_in_, '\n', [this, self = shared_from_this()](boost::system::error_code ec, std::size_t bytes_read) {
        read_timer_.cancel();
        if (!ec) {
          const char* p = boost::asio::buffer_cast<const char*>(buf_in_.data());
          std::string cmd(p, bytes_read - (bytes_read > 1 && p[bytes_read - 2] == '\r' ? 2 : 1));
          buf_in_.consume(bytes_read);
          ProcessCommand(cmd);
        }
        else {
          Close();
        }
      });
    }
    void DoWrite() {
      active_buffer_ ^= 1; // switch buffers
      for (const auto& data : buffers_[active_buffer_]) {
        buffer_seq_.push_back(boost::asio::buffer(data));
      }
      write_timer_.expires_from_now(server_.write_timeout_);
      write_timer_.async_wait([this](boost::system::error_code ec) {
        if (!ec) {
          std::cout << "Write timeoutn";
          Shutdown();
        }
      });
      boost::asio::async_write(socket_, buffer_seq_, [this, self = shared_from_this()](const boost::system::error_code& ec, size_t bytes_transferred) {
        write_timer_.cancel();
        std::lock_guard<std::mutex> lock(buffers_mtx_);
        buffers_[active_buffer_].clear();
        buffer_seq_.clear();
        if (!ec) {
          std::cout << "Wrote " << bytes_transferred << " bytesn";
          if (!buffers_[active_buffer_ ^ 1].empty()) // have more work
            DoWrite();
        }
        else {
          Close();
        }
      });
    }
    bool Writing() const { return !buffer_seq_.empty(); }
    Server& server_;
    boost::asio::streambuf buf_in_;
    std::mutex buffers_mtx_;
    std::vector<std::string> buffers_[2]; // a double buffer
    std::vector<boost::asio::const_buffer> buffer_seq_;
    int active_buffer_ = 0;
    bool closing_ = false;
    bool closed_ = false;
    boost::asio::deadline_timer read_timer_, write_timer_;
    tcp::socket socket_;
  public:
    Connection(Server& server) : server_(server), read_timer_(server.io_service_), write_timer_(server.io_service_), socket_(server.io_service_) {
    }
    void Start() {
      socket_.set_option(tcp::no_delay(true));
      DoReadCmd();
    }
    void Close() {
      closing_ = true;
      if (!Writing())
        Shutdown();
    }
    void Shutdown() {
      if (!closed_) {
        closing_ = closed_ = true;
        boost::system::error_code ec;
        socket_.shutdown(tcp::socket::shutdown_both, ec);
        socket_.close();
        server_.active_connections_.erase(shared_from_this());
      }
    }
    void Write(std::string&& data) {
      std::lock_guard<std::mutex> lock(buffers_mtx_);
      buffers_[active_buffer_ ^ 1].push_back(std::move(data)); // move input data to the inactive buffer
      if (!Writing())
        DoWrite();
    }
  };
  void DoAccept() {
    if (acceptor_.is_open()) {
      auto session = std::make_shared<Connection>(*this);
      acceptor_.async_accept(session->socket_, [this, session](boost::system::error_code ec) {
        if (!ec) {
          active_connections_.insert(session);
          session->Start();
        }
        DoAccept();
      });
    }
  }
  boost::asio::io_service io_service_;
  tcp::acceptor acceptor_;
  std::unordered_set<std::shared_ptr<Connection>> active_connections_;
  const boost::posix_time::time_duration read_timeout_ = boost::posix_time::seconds(30);
  const boost::posix_time::time_duration write_timeout_ = boost::posix_time::seconds(30);
public:
  Server(int port) : acceptor_(io_service_, tcp::endpoint(tcp::v6(), port), false) { }
  void Run() {
    std::cout << "Listening on " << acceptor_.local_endpoint() << "n";
    DoAccept();
    io_service_.run();
  }
  void Stop() {
    acceptor_.close();
    {
      std::vector<std::shared_ptr<Connection>> sessionsToClose;
      copy(active_connections_.begin(), active_connections_.end(), back_inserter(sessionsToClose));
      for (auto& s : sessionsToClose)
        s->Shutdown();
    }
    active_connections_.clear();
    io_service_.stop();
  }
};
int main() {
  try {
    Server srv(8888);
    srv.Run();
  }
  catch (const std::exception& e) {
    std::cerr << "Error: " << e.what() << "n";
  }
}
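
Not part of the original answer, but for completeness: a minimal hypothetical client to exercise the server above, assuming it is running locally on port 8888. The acceptor binds a tcp::v6() endpoint, so the client connects to the IPv6 loopback address. It sends one command terminated by "\r\n" (as the server's parser expects) and prints the 30 reply lines:

#include <boost/asio.hpp>
#include <iostream>
#include <string>
int main() {
  using boost::asio::ip::tcp;
  boost::asio::io_service io_service;
  tcp::socket socket(io_service);
  socket.connect(tcp::endpoint(boost::asio::ip::address::from_string("::1"), 8888));
  std::string cmd = "world\r\n";
  boost::asio::write(socket, boost::asio::buffer(cmd));
  boost::asio::streambuf buf;
  for (int i = 0; i < 30; ++i) {  // the server writes 30 lines per command
    boost::asio::read_until(socket, buf, '\n');
    std::istream is(&buf);
    std::string line;
    std::getline(is, line);       // one line per iteration; may keep the '\r'
    std::cout << line << "\n";
  }
}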