• boost::asio Study (8): Networking Basics: Binary Send and Receive


    http://www.gamedev.net/blog/950/entry-2249317-a-guide-to-getting-started-with-boostasio?pg=9

    8. Networking basics: binary protocol sending and receiving (TCP)

    Now that we understand the boost::asio library and some simple TCP networking, let's wrap the low-level networking
    code a little. By reusing this wrapper we can concentrate on program logic instead of writing the same network
    communication code again and again.
    Important note: this code is purely for educational purposes. Do not use it in a production system.
    Also, the overhead of this wrapper code is worth keeping in mind: for example, the many vector and list
    allocations, and the cost of bind and shared_ptr. That is another reason this code should only be used for
    learning.


    There are several kinds of functions for sending and receiving, and we choose among them based on the protocol.
    This example uses async_write and async_read_some.
    We use async_write because it writes all of the data, so we never have to worry about partial sends. Likewise, we
    use async_read_some as a general-purpose read, since we have no specific protocol to receive.
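
    To make the contract of each call concrete, here is a minimal sketch (not part of the example below; the socket,
    buffer, and handler names are all placeholders of our own): async_write keeps writing until every byte of the
    buffer has gone out, while async_read_some completes as soon as any bytes at all arrive.

    #include <boost/asio.hpp>
    #include <boost/bind.hpp>
    #include <boost/cstdint.hpp>
    #include <vector>
    
    // Called only once ALL bytes have been written, or on error.
    void on_write_done( const boost::system::error_code & ec, size_t bytes_written )
    {
    	// bytes_written equals the full buffer size unless ec is set
    }
    
    // May be called with ANY number of bytes (at least 1), or on error.
    void on_read_some( const boost::system::error_code & ec, size_t bytes_read )
    {
    	// bytes_read can be smaller than the buffer; keep reading until the protocol is satisfied
    }
    
    void start_io( boost::asio::ip::tcp::socket & sock, std::vector< boost::uint8_t > & recv_buffer )
    {
    	// recv_buffer must already be sized by the caller and stay alive until the handler runs
    	static const char msg[] = "Hi";
    	boost::asio::async_write( sock, boost::asio::buffer( msg, 2 ),
    		boost::bind( on_write_done, _1, _2 ) );
    	sock.async_read_some( boost::asio::buffer( recv_buffer ),
    		boost::bind( on_read_some, _1, _2 ) );
    }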


    Now let's walk through a complete example that uses these I/O functions. We extend example 7-C.

    #include <boost/asio.hpp>
    #include <boost/shared_ptr.hpp>
    #include <boost/thread.hpp>
    #include <boost/thread/mutex.hpp>
    #include <boost/bind.hpp>
    #include <boost/lexical_cast.hpp>
    #include <boost/cstdint.hpp>
    #include <boost/enable_shared_from_this.hpp>
    #include <algorithm> // std::copy
    #include <iomanip>   // std::setfill, std::setw
    #include <iostream>
    #include <list>
    #include <string>
    #include <vector>
    
    boost::mutex global_stream_lock;
    
    void WorkerThread( boost::shared_ptr< boost::asio::io_service > io_service )
    {
    	global_stream_lock.lock();
    	std::cout << "[" << boost::this_thread::get_id()
    		<< "] Thread Start" << std::endl;
    	global_stream_lock.unlock();
    
    	while( true )
    	{
    		try
    		{
    			boost::system::error_code ec;
    			io_service->run( ec );
    			if( ec )
    			{
    				global_stream_lock.lock();
    				std::cout << "[" << boost::this_thread::get_id()
    					<< "] Error: " << ec << std::endl;
    				global_stream_lock.unlock();
    			}
    			break;
    		}
    		catch( std::exception & ex )
    		{
    			global_stream_lock.lock();
    			std::cout << "[" << boost::this_thread::get_id()
    				<< "] Exception: " << ex.what() << std::endl;
    			global_stream_lock.unlock();
    		}
    	}
    
    	global_stream_lock.lock();
    	std::cout << "[" << boost::this_thread::get_id()
    		<< "] Thread Finish" << std::endl;
    	global_stream_lock.unlock();
    }
    
    struct ClientContext : public boost::enable_shared_from_this< ClientContext >
    {
    	boost::asio::ip::tcp::socket m_socket;
    	
    	std::vector< boost::uint8_t > m_recv_buffer;   // incoming bytes accumulate here
    	size_t m_recv_buffer_index;                    // how many bytes of m_recv_buffer are in use
    
    	std::list< std::vector< boost::uint8_t > > m_send_buffer; // queue of pending outgoing buffers
    
    	ClientContext( boost::asio::io_service & io_service )
    		: m_socket( io_service ), m_recv_buffer_index( 0 )
    	{
    		m_recv_buffer.resize( 4096 );
    	}
    
    	~ClientContext()
    	{
    	}
    
    	void Close()
    	{
    		boost::system::error_code ec;
    		m_socket.shutdown( boost::asio::ip::tcp::socket::shutdown_both, ec );
    		m_socket.close( ec );
    	}
    
    	void OnSend( const boost::system::error_code & ec, std::list< std::vector< boost::uint8_t > >::iterator itr )
    	{
    		if( ec )
    		{
    			global_stream_lock.lock();
    			std::cout << "[" << boost::this_thread::get_id()
    				<< "] Error: " << ec << std::endl;
    			global_stream_lock.unlock();
    
    			Close();
    		}
    		else
    		{
    			global_stream_lock.lock();
    			std::cout << "[" << boost::this_thread::get_id()
    				<< "] Sent " << (*itr).size() << " bytes." << std::endl;
    			global_stream_lock.unlock();
    		}
    		m_send_buffer.erase( itr );
    
    		// Start the next pending send
    		if( !m_send_buffer.empty() )
    		{
    			boost::asio::async_write( 
    				m_socket, 
    				boost::asio::buffer( m_send_buffer.front() ), 
    				boost::bind( 
    					&ClientContext::OnSend, 
    					shared_from_this(), 
    					boost::asio::placeholders::error, 
    					m_send_buffer.begin()
    				)
    			);
    		}
    	}
    
    	void Send( const void * buffer, size_t length )
    	{
    		bool can_send_now = false;
    
    		std::vector< boost::uint8_t > output;
    		std::copy( (const boost::uint8_t *)buffer, (const boost::uint8_t *)buffer + length, std::back_inserter( output ) );
    
    		// Store if this is the only current send or not
    		can_send_now = m_send_buffer.empty();
    
    		// Save the buffer to be sent
    		m_send_buffer.push_back( output );
    
    		// Only send if there are no more pending buffers waiting!
    		if( can_send_now )
    		{
    			// Start the next pending send
    			boost::asio::async_write( 
    				m_socket, 
    				boost::asio::buffer( m_send_buffer.front() ), 
    				boost::bind( 
    					&ClientContext::OnSend, 
    					shared_from_this(), 
    					boost::asio::placeholders::error, 
    					m_send_buffer.begin()
    				)
    			);
    		}
    	}
    
    	void OnRecv( const boost::system::error_code & ec, size_t bytes_transferred )
    	{
    		if( ec )
    		{
    			global_stream_lock.lock();
    			std::cout << "[" << boost::this_thread::get_id()
    				<< "] Error: " << ec << std::endl;
    			global_stream_lock.unlock();
    
    			Close();
    		}
    		else
    		{
    			// Increase how many bytes we have saved up
    			m_recv_buffer_index += bytes_transferred;
    
    			// Debug information
    			global_stream_lock.lock();
    			std::cout << "[" << boost::this_thread::get_id()
    				<< "] Recv " << bytes_transferred << " bytes." << std::endl;
    			global_stream_lock.unlock();
    
    			// Dump all the data
    			global_stream_lock.lock();
    			for( size_t x = 0; x < m_recv_buffer_index; ++x )
    			{
    				std::cout << std::hex << std::setfill( '0' ) << 
    					std::setw( 2 ) << (int)m_recv_buffer[ x ] << " ";
    				if( ( x + 1 ) % 16 == 0 )
    				{
    					std::cout << std::endl;
    				}
    			}
    			std::cout << std::endl << std::dec;
    			global_stream_lock.unlock();
    
    			// Discard all the data (virtually, not physically!)
    			m_recv_buffer_index = 0;
    
    			// Start the next recv cycle
    			Recv();
    		}
    	}
    
    	void Recv()
    	{
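    		// Append into the unused tail of the buffer; the handler is told how many bytes actually arrived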
    		m_socket.async_read_some( 
    			boost::asio::buffer( 
    				&m_recv_buffer[ m_recv_buffer_index ], 
    				m_recv_buffer.size() - m_recv_buffer_index ), 
    			boost::bind( &ClientContext::OnRecv, shared_from_this(), _1, _2 )
    		);
    	}
    };
    
    void OnAccept( const boost::system::error_code & ec, boost::shared_ptr< ClientContext > client )
    {
    	if( ec )
    	{
    		global_stream_lock.lock();
    		std::cout << "[" << boost::this_thread::get_id()
    			<< "] Error: " << ec << std::endl;
    		global_stream_lock.unlock();
    	}
    	else
    	{
    		global_stream_lock.lock();
    		std::cout << "[" << boost::this_thread::get_id()
    			<< "] Accepted!" << std::endl;
    		global_stream_lock.unlock();
    
    		// 2 bytes message size (little-endian), followed by the 2-byte message "Hi"
    		client->Send( "\x02\x00Hi", 4 );
    		client->Recv();
    	}
    }
    
    int main( int argc, char * argv[] )
    {
    	boost::shared_ptr< boost::asio::io_service > io_service(
    		new boost::asio::io_service
    		);
    	boost::shared_ptr< boost::asio::io_service::work > work(
    		new boost::asio::io_service::work( *io_service )
    		);
    	boost::shared_ptr< boost::asio::io_service::strand > strand(
    		new boost::asio::io_service::strand( *io_service )
    		);
    
    	global_stream_lock.lock();
    	std::cout << "[" << boost::this_thread::get_id()
    		<< "] Press [return] to exit." << std::endl;
    	global_stream_lock.unlock();
    
    	// 1 worker thread so we do not have to deal with thread safety issues
    	boost::thread_group worker_threads;
    	for( int x = 0; x < 1; ++x )
    	{
    		worker_threads.create_thread( boost::bind( &WorkerThread, io_service ) );
    	}
    
    	boost::shared_ptr< boost::asio::ip::tcp::acceptor > acceptor(
    		new boost::asio::ip::tcp::acceptor( *io_service )
    		);
    	boost::shared_ptr< ClientContext > client(
    		new ClientContext( *io_service )
    		);
    
    	try
    	{
    		boost::asio::ip::tcp::resolver resolver( *io_service );
    		boost::asio::ip::tcp::resolver::query query( 
    			"127.0.0.1", 
    			boost::lexical_cast< std::string >( 7777 )
    			);
    		boost::asio::ip::tcp::endpoint endpoint = *resolver.resolve( query );
    		acceptor->open( endpoint.protocol() );
    		acceptor->set_option( boost::asio::ip::tcp::acceptor::reuse_address( false ) );
    		acceptor->bind( endpoint );
    		acceptor->listen( boost::asio::socket_base::max_connections );
    		acceptor->async_accept( client->m_socket, boost::bind( OnAccept, _1, client ) );
    
    		global_stream_lock.lock();
    		std::cout << "Listening on: " << endpoint << std::endl;
    		global_stream_lock.unlock();
    	}
    	catch( std::exception & ex )
    	{
    		global_stream_lock.lock();
    		std::cout << "[" << boost::this_thread::get_id()
    			<< "] Exception: " << ex.what() << std::endl;
    		global_stream_lock.unlock();
    	}
    
    	std::cin.get();
    
    	boost::system::error_code ec;
    	acceptor->close( ec );
    
    	io_service->stop();
    
    	worker_threads.join_all();
    
    	return 0;
    }
    

      

    In this example we add a ClientContext class. It holds all of the per-connection context for an incoming
    connection and wraps the send and receive I/O functions we need. Here the server sends a specific message to each
    incoming connection. We do not need to build a dedicated client to test it; plain telnet is enough. Any data sent
    to the server is dumped to the console.
    Unlike the previous example, each connection needs its own context. The context should hold the socket, the send
    and receive buffers, and any other per-user data. Also, this example is not thread-safe, so we limit ourselves to
    a single worker thread; we will come back to this issue later. For now, choosing the correct API functions for
    socket reads and writes keeps us out of trouble.
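
    Incidentally, the strand created in main() is never actually used in this example. As a preview of the later
    thread-safety discussion, here is a hedged sketch of the usual fix (it assumes the strand is made reachable from
    ClientContext, which the code above does not do): wrap each completion handler in the strand so that no two
    handlers ever run concurrently.

    // Sketch only: strand->wrap() serializes the wrapped handlers, which would
    // let more than one worker thread call io_service::run() safely.
    acceptor->async_accept( client->m_socket,
    	strand->wrap( boost::bind( OnAccept, _1, client ) ) );
    
    m_socket.async_read_some(
    	boost::asio::buffer( &m_recv_buffer[ m_recv_buffer_index ],
    		m_recv_buffer.size() - m_recv_buffer_index ),
    	strand->wrap( boost::bind( &ClientContext::OnRecv, shared_from_this(), _1, _2 ) ) );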


    To use these functions correctly, we must make sure the context and the buffers remain valid for the whole
    lifetime of each asynchronous operation. In this example we use a list of vectors for sending and a single vector
    as the receive buffer. Depending on the protocol we implement, small changes may be needed. For example, if we
    want to handle packets in the stream, we would use async_read to read a header structure and then use async_read
    again to read exactly the body size the header announces.
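
    Here is a minimal sketch of that header-then-body pattern (not part of the example above: m_header, m_body,
    ReadHeader, OnHeader, and OnBody are hypothetical additions to ClientContext, and the header is assumed to be the
    same 2-byte little-endian size prefix as the "Hi" message):

    	boost::uint8_t m_header[ 2 ];          // hypothetical: 2-byte little-endian size prefix
    	std::vector< boost::uint8_t > m_body;  // hypothetical: body of the packet being read
    
    	void ReadHeader()
    	{
    		// async_read completes only once exactly 2 bytes have arrived
    		boost::asio::async_read( m_socket, boost::asio::buffer( m_header, 2 ),
    			boost::bind( &ClientContext::OnHeader, shared_from_this(), _1, _2 ) );
    	}
    
    	void OnHeader( const boost::system::error_code & ec, size_t /*bytes*/ )
    	{
    		if( ec ) { Close(); return; }
    		// Decode the little-endian body size, then read exactly that many bytes
    		size_t body_size = m_header[ 0 ] | ( m_header[ 1 ] << 8 );
    		m_body.resize( body_size );
    		boost::asio::async_read( m_socket, boost::asio::buffer( m_body ),
    			boost::bind( &ClientContext::OnBody, shared_from_this(), _1, _2 ) );
    	}
    
    	void OnBody( const boost::system::error_code & ec, size_t /*bytes*/ )
    	{
    		if( ec ) { Close(); return; }
    		// m_body now holds one complete message; process it, then wait for the next header
    		ReadHeader();
    	}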


    In a larger program, processing only one packet at a time like this is inefficient. Suppose we have 100 three-byte
    messages waiting in the stream: we would run the packet-reading logic 100 times. Using async_read_some instead, we
    can pull everything that is available in a single read and then parse the complete packets out of the buffer,
    which is more efficient.
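
    A minimal sketch of that parsing step (ExtractPackets is a hypothetical helper of our own, again assuming the
    2-byte little-endian size prefix; 'used' plays the role of m_recv_buffer_index above):

    #include <algorithm>
    #include <boost/cstdint.hpp>
    #include <cstddef>
    #include <vector>
    
    // Peel every complete [2-byte size][body] packet out of the accumulated bytes.
    size_t ExtractPackets( std::vector< boost::uint8_t > & buf, size_t & used )
    {
    	size_t offset = 0;
    	size_t packets = 0;
    	while( used - offset >= 2 )
    	{
    		size_t body_size = buf[ offset ] | ( buf[ offset + 1 ] << 8 );
    		if( used - offset - 2 < body_size )
    		{
    			break; // the header arrived, but the body is still incomplete
    		}
    		// ... hand bytes [ offset + 2, offset + 2 + body_size ) to the application here ...
    		offset += 2 + body_size;
    		++packets;
    	}
    	if( offset > 0 )
    	{
    		// Move any partial packet to the front so the next read appends after it
    		std::copy( buf.begin() + offset, buf.begin() + used, buf.begin() );
    		used -= offset;
    	}
    	return packets;
    }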
