remove receiving timeout for client
Now that we enforce nonblocking behavior everywhere — hopefully properly — this rx timeout stuff shouldn't be needed. Signed-off-by: Andy Green <andy.green@linaro.org>
This commit is contained in:
parent
8e0c98484e
commit
da1fb0b89f
1 changed file with 0 additions and 6 deletions
|
@@ -5,7 +5,6 @@ struct libwebsocket *__libwebsocket_client_connect_2(
|
|||
struct libwebsocket *wsi
|
||||
) {
|
||||
struct pollfd pfd;
|
||||
struct timeval tv;
|
||||
struct hostent *server_hostent;
|
||||
struct sockaddr_in server_addr;
|
||||
int n;
|
||||
|
@@ -72,11 +71,6 @@ struct libwebsocket *__libwebsocket_client_connect_2(
|
|||
&opt, sizeof(opt));
|
||||
#endif
|
||||
|
||||
/* Set receiving timeout */
|
||||
tv.tv_sec = 0;
|
||||
tv.tv_usec = 100 * 1000;
|
||||
setsockopt(wsi->sock, SOL_SOCKET, SO_RCVTIMEO, (char *)&tv, sizeof tv);
|
||||
|
||||
if (connect(wsi->sock, (struct sockaddr *)&server_addr,
|
||||
sizeof(struct sockaddr)) == -1) {
|
||||
lwsl_debug("Connect failed\n");
|
||||
|
|
Loading…
Add table
Reference in a new issue