1. TCP client connection steps:
①. Connection method
uv_loop_t *loop = uv_default_loop();
uv_tcp_t *client = (uv_tcp_t*)malloc(sizeof(uv_tcp_t));
uv_connect_t *connect_req = (uv_connect_t*)malloc(sizeof(uv_connect_t));
uv_tcp_init(loop, client);
// addr is a struct sockaddr_in from uv_ip4_addr, as in the server example below
uv_tcp_connect(connect_req, client, addr, connect_cb);
uv_run(loop);
getchar(); // the server side does not need this; I still don't understand why
②. Callback functions
static void connect_cb(uv_connect_t* req, int status)
{
    int r;
    uv_buf_t buf = uv_buf_init("just test", 10);
    // The request must be dynamically allocated memory; uv_write assigns to this pointer internally.
    uv_write_t *reqw = (uv_write_t*)malloc(sizeof *reqw);
    // The allocated memory is released in write_cb.
    r = uv_write(reqw, (uv_stream_t*)(req->handle), &buf, 1, write_cb);
}
static void write_cb(uv_write_t* req, int status)
{
}
static void read_cb(uv_stream_t* tcp, ssize_t nread, uv_buf_t buf)
{
}
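A minimal sketch of how these two empty callbacks might be filled in, following the note above that the uv_write_t must be freed in write_cb. The libuv 0.x signatures from these notes are assumed, and the cleanup policy (freeing the buffer in read_cb, closing on error) is an assumption rather than the only possible choice:

static void write_cb(uv_write_t* req, int status)
{
    // uv_write is done with the request once this callback runs,
    // so the memory malloc'ed in connect_cb can be released here
    free(req);
}
static void read_cb(uv_stream_t* tcp, ssize_t nread, uv_buf_t buf)
{
    if (nread < 0) {
        // error or EOF: stop using the stream
        uv_close((uv_handle_t*)tcp, NULL);
    }
    // the buffer was handed out by the alloc callback, so release it here
    free(buf.base);
}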
uv_connect_t is a subclass of uv_req_t
2. TCP server connection steps
①. Connection method
loop = uv_default_loop();
struct sockaddr_in addr = uv_ip4_addr("127.0.0.1", 5432);
int r;
server = (uv_handle_t*)&tcpServer;
r = uv_tcp_init(loop, &tcpServer);
if (r) {
    std::cout << "Socket creation error" << std::endl;
    return;
}
r = uv_tcp_bind(&tcpServer, addr);
if (r) {
    std::cout << "Bind error" << std::endl;
    return;
}
r = uv_listen((uv_stream_t*)&tcpServer, 10, on_connection);
if (r) {
    std::cout << "Listen error" << std::endl;
    return;
}
uv_run(loop);
②. Callback functions
static void on_connection(uv_stream_t* server, int status)
{
    uv_stream_t* stream;
    int r;
    // Initialize a new connection this way.
    stream = (uv_stream_t*)malloc(sizeof(uv_tcp_t));
    ASSERT(stream != NULL);
    r = uv_tcp_init(loop, (uv_tcp_t*)stream);
    stream->data = server;
    r = uv_accept(server, stream);
}
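After uv_accept succeeds, the usual next step (not shown in the snippet above) is to start reading on the new stream. A hedged sketch using the 0.x-style callbacks from the client section; echo_alloc is a hypothetical helper name:

static uv_buf_t echo_alloc(uv_handle_t* handle, size_t suggested_size)
{
    // hand libuv a freshly malloc'ed buffer of the suggested size
    return uv_buf_init((char*)malloc(suggested_size), suggested_size);
}
// inside on_connection, after uv_accept(server, stream):
r = uv_read_start(stream, echo_alloc, read_cb);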
* uv_stream_t is a subclass of uv_handle_t
*
* uv_stream is an abstract class.
*
* uv_stream_t is the parent class of uv_tcp_t, uv_pipe_t, uv_tty_t, and
* soon uv_file_t.
In a client connection:
uv_tcp_connect(connect_req…); connect_req is a uv_connect_t* argument, and the first parameter of the corresponding connect_cb is a uv_connect_t* (uv_connect_t is a subclass of uv_req_t).
In a server connection:
uv_listen((uv_stream_t*)&tcpServer, 10, on_connection); the first parameter of the corresponding on_connection is a uv_stream_t*.
It seems node.js's design is a lot like ACE: request objects and connection objects are wrapped up as conceptually different things.
3. tcp - open
Create a socket yourself and hand it to uv_tcp_open; when uv_tcp_connect is called afterwards, the function no longer creates a socket internally. That is all it does.
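A sketch of that flow, assuming a BSD-style socket created by hand (the socket() call, its headers, and the reuse of port 5432 are assumptions; the old 0.x uv_ip4_addr/uv_tcp_connect signatures from these notes are used):

uv_os_sock_t sock = socket(AF_INET, SOCK_STREAM, 0);
uv_tcp_t handle;
uv_connect_t connect_req;
uv_tcp_init(uv_default_loop(), &handle);
uv_tcp_open(&handle, sock);  // adopt the already-created socket
struct sockaddr_in addr = uv_ip4_addr("127.0.0.1", 5432);
// uv_tcp_connect now connects over sock instead of creating a new socket
uv_tcp_connect(&connect_req, &handle, addr, connect_cb);
uv_run(uv_default_loop());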
4. tcp - read_stop
uv_read_stop((uv_stream_t*)&tcp_handle);
uv_close((uv_handle_t*)&tcp_handle, NULL);
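If the handle had been heap-allocated (like the stream in on_connection above), the second argument to uv_close would instead be a close callback, which is the only safe place to free the handle. A small sketch with a hypothetical close_cb:

static void close_cb(uv_handle_t* handle)
{
    // the handle memory may only be released once this callback has run
    free(handle);
}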
UDP client:
uv_udp_send_t req;
r = uv_udp_init(uv_default_loop(), &client);
ASSERT(r == 0);
buf = uv_buf_init("PING", 4);
r = uv_udp_send(&req, &client, &buf, 1, addr, cl_send_cb);
void cl_send_cb(uv_udp_send_t* req, int status)
{}
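A sketch of what cl_send_cb might do once the send completes; closing the handle here is an assumption about the program's lifetime, not something the API requires:

void cl_send_cb(uv_udp_send_t* req, int status)
{
    // status == 0 means the datagram was handed to the OS;
    // req->handle points back at the uv_udp_t used for the send
    if (status == 0)
        uv_close((uv_handle_t*)req->handle, NULL);
}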
UDP server:
r = uv_udp_init(uv_default_loop(), &server);
ASSERT(r == 0);
r = uv_udp_bind(&server, addr, 0);
ASSERT(r == 0);
r = uv_udp_recv_start(&server, alloc_cb, sv_recv_cb);
ASSERT(r == 0);
static void sv_recv_cb(uv_udp_t* handle,
                       ssize_t nread,
                       uv_buf_t buf,
                       struct sockaddr* addr,
                       unsigned flags)
{}
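A sketch of the two receive-side callbacks using the 0.x signatures shown above; the buffer-handling policy (malloc in alloc_cb, free in sv_recv_cb) is an assumption:

static uv_buf_t alloc_cb(uv_handle_t* handle, size_t suggested_size)
{
    // libuv reads the incoming datagram into this buffer
    return uv_buf_init((char*)malloc(suggested_size), suggested_size);
}
static void sv_recv_cb(uv_udp_t* handle,
                       ssize_t nread,
                       uv_buf_t buf,
                       struct sockaddr* addr,
                       unsigned flags)
{
    if (nread > 0) {
        // nread bytes from addr are in buf.base, e.g. the "PING" sent by the client
    }
    // nread == 0 just means there is nothing more to read right now
    free(buf.base);
}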
Timers:
int64_t start_time = uv_now(uv_default_loop());
void never_cb(uv_timer_t* handle, int status)
{
    std::cout << "never_cb should never be called" << std::endl;
}
static void once_close_cb(uv_handle_t* handle)
{}
static void once_cb(uv_timer_t* handle, int status)
{
    uv_close((uv_handle_t*)handle, once_close_cb);
    uv_update_time(uv_default_loop());
}
r = uv_timer_init(uv_default_loop(), &never);
ASSERT(r == 0);
r = uv_timer_start(&never, never_cb, 100, 100);
ASSERT(r == 0);
r = uv_timer_stop(&never);
ASSERT(r == 0);
uv_unref((uv_handle_t*)&never);
uv_run(uv_default_loop());
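The once_cb/once_close_cb pair above belongs to a second, one-shot timer. A sketch of how it would be started next to the "never" timer; the 500 ms timeout is an arbitrary choice:

uv_timer_t once;
r = uv_timer_init(uv_default_loop(), &once);
ASSERT(r == 0);
// repeat == 0 makes this a one-shot timer; once_cb then closes the handle itself
r = uv_timer_start(&once, once_cb, 500, 0);
ASSERT(r == 0);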
Synchronization objects:
uv_cond_init(&signal_cond);
uv_cond_destroy(&signal_cond);
If libuv has been compiled with debugging enabled, uv_mutex_destroy(), uv_mutex_lock() and uv_mutex_unlock() will abort() on error. Similarly uv_mutex_trylock() will abort if the error is anything other than EAGAIN.
Note:
libuv also has a read/write lock: uv_rwlock_t numlock;
Warning
mutexes and rwlocks DO NOT work inside a signal handler, whereas uv_async_send does.
Use uv_async_t for inter-thread communication.
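A sketch of uv_async_t waking the loop thread from another thread; the names and the 0.x-style status parameter (matching the other callbacks in these notes) are assumptions:

uv_async_t async;
static void async_cb(uv_async_t* handle, int status)
{
    // runs on the loop thread; several uv_async_send() calls may coalesce into one callback
}
static void worker(void* arg)
{
    // safe to call from any thread, and (per the warning above) even from a signal handler
    uv_async_send(&async);
}
// on the loop thread, before starting the worker:
uv_async_init(uv_default_loop(), &async, async_cb);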
Thread objects:
uv_thread_t tid;
int r;
r = uv_thread_create(&tid, thread_entry, (void*)42);
ASSERT(r == 0);
r = uv_thread_join(&tid);
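For reference, a matching thread_entry; uv_thread_create passes its last argument straight through, so the (void*)42 check simply mirrors the call above:

static void thread_entry(void* arg)
{
    // arg is whatever was passed as the third parameter of uv_thread_create
    ASSERT(arg == (void*)42);
}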
Thread pool:
r = uv_queue_work(uv_default_loop(), &work_req, NULL, after_work_cb); // with a NULL work callback (second-to-last parameter) this returns -1 and after_work_cb is never called
ASSERT(r == -1);
libuv work queue:
uv_queue_work() is a convenience function that allows an application to run a task in a separate thread, and have a callback that is triggered when the task is done. A seemingly simple function, what makes uv_queue_work() tempting is that it allows potentially any third-party libraries to be used with the event-loop paradigm. When you use event loops, it is imperative to make sure that no function which runs periodically in the loop thread blocks when performing I/O or is a serious CPU hog, because this means the loop slows down and events are not being dealt with at full capacity.
The point: there should be no blocking operations in the I/O loop thread; libuv's way of handling this is to let the system take care of the blocking work itself. That is basically IOCP.
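A sketch of the normal, non-NULL usage, where the blocking work runs on a thread-pool thread and after_work_cb runs back on the loop thread afterwards; work_cb here is a hypothetical task:

static void work_cb(uv_work_t* req)
{
    // runs on a thread-pool thread: blocking I/O or heavy CPU work belongs here
}
// with a real work callback the call succeeds and after_work_cb does get invoked
r = uv_queue_work(uv_default_loop(), &work_req, work_cb, after_work_cb);
ASSERT(r == 0);
uv_run(uv_default_loop());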