/*
 * libwebsockets - small server side websockets and web server implementation
 *
 * Copyright (C) 2010-2015 Andy Green <andy@warmcat.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation:
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
 * MA  02110-1301  USA
 */

#include "private-libwebsockets.h"

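/*
 * Deliver the "you may write now" callback appropriate to the connection
 * mode (ws client, ws server, or http).
 */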
static int
lws_calllback_as_writeable(struct lws *wsi)
{
	int n;

	switch (wsi->mode) {
	case LWSCM_WS_CLIENT:
		n = LWS_CALLBACK_CLIENT_WRITEABLE;
		break;
	case LWSCM_WS_SERVING:
		n = LWS_CALLBACK_SERVER_WRITEABLE;
		break;
	default:
		n = LWS_CALLBACK_HTTP_WRITEABLE;
		break;
	}

	lwsl_info("%s: %p (user=%p)\n", __func__, wsi, wsi->user_space);

	return user_callback_handle_rxflow(wsi->protocol->callback,
					   wsi, (enum lws_callback_reasons) n,
					   wsi->user_space, NULL, 0);
}

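/*
 * POLLOUT service is prioritized: a pending truncated send is flushed
 * first, then (for HTTP2) any queued protocol packet, then pending
 * control frames (PING / CLOSE), then anything the extensions still have
 * to spill, and only after all of that does the user code get its
 * writeable callback.
 */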
int
lws_handle_POLLOUT_event(struct lws *wsi, struct lws_pollfd *pollfd)
{
	int write_type = LWS_WRITE_PONG;
	struct lws_tokens eff_buf;
#ifdef LWS_USE_HTTP2
	struct lws *wsi2;
#endif
	int ret, m, n;

	/* pending truncated sends have uber priority */

	if (wsi->trunc_len) {
		if (lws_issue_raw(wsi, wsi->trunc_alloc + wsi->trunc_offset,
				  wsi->trunc_len) < 0) {
			lwsl_info("%s signalling to close\n", __func__);
			return -1;
		}
		/* leave POLLOUT active either way */
		return 0;
	} else
		if (wsi->state == LWSS_FLUSHING_STORED_SEND_BEFORE_CLOSE)
			return -1; /* retry closing now */

#ifdef LWS_USE_HTTP2
	/* protocol packets are next */
	if (wsi->pps) {
		lwsl_info("servicing pps %d\n", wsi->pps);
		switch (wsi->pps) {
		case LWS_PPS_HTTP2_MY_SETTINGS:
		case LWS_PPS_HTTP2_ACK_SETTINGS:
			lws_http2_do_pps_send(lws_get_context(wsi), wsi);
			break;
		default:
			break;
		}
		wsi->pps = LWS_PPS_NONE;
		lws_rx_flow_control(wsi, 1);

		return 0; /* leave POLLOUT active */
	}
#endif
	/* pending control packets have next priority */

	if ((wsi->state == LWSS_ESTABLISHED &&
	     wsi->u.ws.ping_pending_flag) ||
	    (wsi->state == LWSS_RETURNED_CLOSE_ALREADY &&
	     wsi->u.ws.payload_is_close)) {

		if (wsi->u.ws.payload_is_close)
			write_type = LWS_WRITE_CLOSE;

		n = lws_write(wsi, &wsi->u.ws.ping_payload_buf[
				      LWS_SEND_BUFFER_PRE_PADDING],
			      wsi->u.ws.ping_payload_len, write_type);
		if (n < 0)
			return -1;

		/* well he is sent, mark him done */
		wsi->u.ws.ping_pending_flag = 0;
		if (wsi->u.ws.payload_is_close)
			/* oh... a close frame was it... then we are done */
			return -1;

		/* otherwise for PING, leave POLLOUT active either way */
		return 0;
	}

	/* if we are closing, don't confuse the user with writeable cb */

	if (wsi->state == LWSS_RETURNED_CLOSE_ALREADY)
		goto user_service;

	/* if nothing critical, user can get the callback */

	m = lws_ext_cb_wsi_active_exts(wsi, LWS_EXT_CALLBACK_IS_WRITEABLE,
				       NULL, 0);
#ifndef LWS_NO_EXTENSIONS
	if (!wsi->extension_data_pending)
		goto user_service;
#endif
	/*
	 * check in on the active extensions, see if they
	 * had pending stuff to spill... they need to get the
	 * first look-in otherwise sequence will be disordered
	 *
	 * NULL, zero-length eff_buf means just spill pending
	 */

	ret = 1;
	while (ret == 1) {

		/* default to nobody has more to spill */

		ret = 0;
		eff_buf.token = NULL;
		eff_buf.token_len = 0;

		/* give every extension a chance to spill */

		m = lws_ext_cb_wsi_active_exts(wsi,
					LWS_EXT_CALLBACK_PACKET_TX_PRESEND,
					       &eff_buf, 0);
		if (m < 0) {
			lwsl_err("ext reports fatal error\n");
			return -1;
		}
		if (m)
			/*
			 * at least one extension told us he has more
			 * to spill, so we will go around again after
			 */
			ret = 1;

		/* assuming they gave us something to send, send it */

		if (eff_buf.token_len) {
			n = lws_issue_raw(wsi, (unsigned char *)eff_buf.token,
					  eff_buf.token_len);
			if (n < 0) {
				lwsl_info("closing from POLLOUT spill\n");
				return -1;
			}
			/*
			 * Keep amount spilled small to minimize chance of this
			 */
			if (n != eff_buf.token_len) {
				lwsl_err("Unable to spill ext %d vs %d\n",
					 eff_buf.token_len, n);
				return -1;
			}
		} else
			continue;

		/* no extension has more to spill */

		if (!ret)
			continue;

		/*
		 * There's more to spill from an extension, but we just sent
		 * something... did that leave the pipe choked?
		 */

		if (!lws_send_pipe_choked(wsi))
			/* no we could add more */
			continue;

		lwsl_info("choked in POLLOUT service\n");

		/*
		 * Yes, he's choked.  Leave the POLLOUT masked on so we will
		 * come back here when he is unchoked.  Don't call the user
		 * callback to enforce ordering of spilling, he'll get called
		 * when we come back here and there's nothing more to spill.
		 */

		return 0;
	}
#ifndef LWS_NO_EXTENSIONS
	wsi->extension_data_pending = 0;
#endif
user_service:
	/* one shot */

	if (pollfd) {
		if (lws_change_pollfd(wsi, LWS_POLLOUT, 0)) {
			lwsl_info("failed at set pollfd\n");
			return 1;
		}

		lws_libev_io(wsi, LWS_EV_STOP | LWS_EV_WRITE);
	}

#ifdef LWS_USE_HTTP2
	/*
	 * we are the 'network wsi' for potentially many muxed child wsi with
	 * no network connection of their own, who have to use us for all
	 * their network actions.  So we use a round-robin scheme to share out
	 * the POLLOUT notifications to our children.
	 *
	 * But because any child could exhaust the socket's ability to take
	 * writes, we can only let one child get notified each time.
	 *
	 * In addition children may be closed / deleted / added between
	 * POLLOUT notifications, so we can't hold pointers
	 */

	if (wsi->mode != LWSCM_HTTP2_SERVING) {
		lwsl_info("%s: non http2\n", __func__);
		goto notify;
	}

	wsi->u.http2.requested_POLLOUT = 0;
	if (!wsi->u.http2.initialized) {
		lwsl_info("pollout on uninitialized http2 conn\n");
		return 0;
	}

	lwsl_info("%s: doing children\n", __func__);

	wsi2 = wsi;
	do {
		wsi2 = wsi2->u.http2.next_child_wsi;
		lwsl_info("%s: child %p\n", __func__, wsi2);
		if (!wsi2)
			continue;
		if (!wsi2->u.http2.requested_POLLOUT)
			continue;
		wsi2->u.http2.requested_POLLOUT = 0;
		if (lws_calllback_as_writeable(wsi2)) {
			lwsl_debug("Closing POLLOUT child\n");
			lws_close_free_wsi(wsi2, LWS_CLOSE_STATUS_NOSTATUS);
		}
		wsi2 = wsi;
	} while (wsi2 != NULL && !lws_send_pipe_choked(wsi));

	lwsl_info("%s: completed\n", __func__);

	return 0;
notify:
#endif
	return lws_calllback_as_writeable(wsi);
}

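/*
 * Check one wsi against its pending timeout: if it has been waiting
 * longer than allowed, close it "violently" (no protocol cleanup) and
 * return 1, otherwise return 0.
 */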
int
lws_service_timeout_check(struct lws *wsi, unsigned int sec)
{
	/*
	 * if extensions want in on it (eg, we are a mux parent)
	 * give them a chance to service child timeouts
	 */
	if (lws_ext_cb_wsi_active_exts(wsi, LWS_EXT_CALLBACK_1HZ,
				       NULL, sec) < 0)
		return 0;

	if (!wsi->pending_timeout)
		return 0;

	/*
	 * if we went beyond the allowed time, kill the
	 * connection
	 */
	if ((time_t)sec > wsi->pending_timeout_limit) {
		lwsl_info("wsi %p: TIMEDOUT WAITING on %d\n",
			  (void *)wsi, wsi->pending_timeout);
		/*
		 * Since he failed a timeout, he already had a chance to do
		 * something and was unable to... that includes situations
		 * like half closed connections.  So process this "failed
		 * timeout" close as a violent death and don't try to do
		 * protocol cleanup like flush partials.
		 */
		wsi->socket_is_permanently_unusable = 1;
		lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS);

		return 1;
	}

	return 0;
}

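/*
 * Stash the part of an input buffer we could not deliver because the
 * connection became rx flow-controlled.  Returns 1 if we were already
 * buffering (caller should stall), 0 after copying the remainder aside.
 */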
int lws_rxflow_cache(struct lws *wsi, unsigned char *buf, int n, int len)
{
	/* his RX is flowcontrolled, don't send remaining now */
	if (wsi->rxflow_buffer) {
		/* rxflow while we were spilling prev rxflow */
		lwsl_info("stalling in existing rxflow buf\n");
		return 1;
	}

	/* a new rxflow, buffer it and warn caller */
	lwsl_info("new rxflow input buffer len %d\n", len - n);
	wsi->rxflow_buffer = lws_malloc(len - n);
	wsi->rxflow_len = len - n;
	wsi->rxflow_pos = 0;
	memcpy(wsi->rxflow_buffer, buf + n, len - n);

	return 0;
}

/**
 * lws_service_fd() - Service polled socket with something waiting
 * @context:	Websocket context
 * @pollfd:	The pollfd entry describing the socket fd and which events
 *		happened.
 *
 * This function takes a pollfd that has POLLIN or POLLOUT activity and
 * services it according to the state of the associated struct lws.
 *
 * The one call deals with all "service" that might happen on a socket
 * including listen accepts, http files as well as websocket protocol.
 *
 * If a pollfd says it has something, you can just pass it to
 * lws_service_fd() whether it is a socket handled by lws or not.
 * If it sees it is a lws socket, the traffic will be handled and
 * pollfd->revents will be zeroed now.
 *
 * If the socket is foreign to lws, it leaves revents alone.  So you can
 * see if you should service yourself by checking the pollfd revents
 * after letting lws try to service it.
 */

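/*
 * For example, an application running its own poll() loop might use it
 * like this (a sketch only: "pollfds", "count", "i" and
 * "handle_foreign_fd()" are hypothetical application-side names, with
 * pollfds[] typically kept in sync from the LWS_CALLBACK_ADD_POLL_FD
 * family of callbacks):
 *
 *	n = poll(pollfds, count, 50);
 *	for (i = 0; i < count && n > 0; i++) {
 *		if (!pollfds[i].revents)
 *			continue;
 *		n--;
 *		lws_service_fd(context, &pollfds[i]);
 *		if (pollfds[i].revents)
 *			handle_foreign_fd(&pollfds[i]);
 *	}
 */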
LWS_VISIBLE int
lws_service_fd(struct lws_context *context, struct lws_pollfd *pollfd)
{
#if LWS_POSIX
	int idx = 0;
#endif
	lws_sockfd_type our_fd = 0;
	struct lws_tokens eff_buf;
	unsigned int pending = 0;
	char draining_flow = 0;
	lws_sockfd_type mfd;
	int timed_out = 0;
	struct lws *wsi;
	time_t now;
	int n, m;
	int more;

#if LWS_POSIX
	if (context->lserv_fd)
		idx = wsi_from_fd(context, context->lserv_fd)->position_in_fds_table;
#endif
	/*
	 * you can call us with pollfd = NULL to just allow the once-per-second
	 * global timeout checks; if less than a second since the last check
	 * it returns immediately then.
	 */

	time(&now);

	/* TODO: if using libev, we should probably use timeout watchers... */
	if (context->last_timeout_check_s != now) {
		context->last_timeout_check_s = now;

		lws_plat_service_periodic(context);

		/* global timeout check once per second */

		if (pollfd)
			our_fd = pollfd->fd;

		for (n = 0; n < context->fds_count; n++) {
			mfd = context->fds[n].fd;
			wsi = wsi_from_fd(context, mfd);
			if (!wsi)
				continue;

			if (lws_service_timeout_check(wsi, (unsigned int)now))
				/* he did time out... */
				if (mfd == our_fd)
					/* it was the guy we came to service! */
					timed_out = 1;
				/* he's gone, no need to mark as handled */
		}
	}

	/* the socket we came to service timed out, nothing to do */
	if (timed_out)
		return 0;

	/* just here for timeout management? */
	if (!pollfd)
		return 0;

	/* no, here to service a socket descriptor */
	wsi = wsi_from_fd(context, pollfd->fd);
	if (!wsi)
		/* not lws connection ... leave revents alone and return */
		return 0;

	/*
	 * so that caller can tell we handled, past here we need to
	 * zero down pollfd->revents after handling
	 */

#if LWS_POSIX
	/*
	 * deal with listen service piggybacking: after every lserv_mod
	 * services of other fds, we sneak in one service of the listen
	 * socket if there's anything waiting there.
	 *
	 * To handle connection storms, as found in ab, if we previously saw a
	 * pending connection here, it causes us to check again next time.
	 */

	if (context->lserv_fd && pollfd != &context->fds[idx]) {
		context->lserv_count++;
		if (context->lserv_seen ||
		    context->lserv_count == context->lserv_mod) {
			context->lserv_count = 0;
			m = 1;
			if (context->lserv_seen > 5)
				m = 2;
			while (m--) {
				/*
				 * even with extpoll, we prepared this
				 * internal fds for listen
				 */
				n = lws_poll_listen_fd(&context->fds[idx]);
				if (n <= 0) {
					if (context->lserv_seen)
						context->lserv_seen--;
					break;
				}
				/* there's a conn waiting for us */
				lws_service_fd(context, &context->fds[idx]);
				context->lserv_seen++;
			}
		}
	}

	/* handle session socket closed */

	if ((!(pollfd->revents & LWS_POLLIN)) &&
	    (pollfd->revents & LWS_POLLHUP)) {

		lwsl_debug("Session Socket %p (fd=%d) dead\n",
			   (void *)wsi, pollfd->fd);

		goto close_and_handled;
	}

#ifdef _WIN32
	if (pollfd->revents & LWS_POLLOUT)
		wsi->sock_send_blocking = FALSE;
#endif

#endif

	/* okay, what we came here to do... */

	switch (wsi->mode) {
	case LWSCM_HTTP_SERVING:
	case LWSCM_HTTP_SERVING_ACCEPTED:
	case LWSCM_SERVER_LISTENER:
	case LWSCM_SSL_ACK_PENDING:
		n = lws_server_socket_service(context, wsi, pollfd);
		if (n) /* closed by above */
			return 1;
		pending = lws_ssl_pending(wsi);
		if (pending)
			goto handle_pending;
		goto handled;

	case LWSCM_WS_SERVING:
	case LWSCM_WS_CLIENT:
	case LWSCM_HTTP2_SERVING:

		/* the guy requested a callback when it was OK to write */

		if ((pollfd->revents & LWS_POLLOUT) &&
		    (wsi->state == LWSS_ESTABLISHED ||
		     wsi->state == LWSS_HTTP2_ESTABLISHED ||
		     wsi->state == LWSS_HTTP2_ESTABLISHED_PRE_SETTINGS ||
		     wsi->state == LWSS_RETURNED_CLOSE_ALREADY ||
		     wsi->state == LWSS_FLUSHING_STORED_SEND_BEFORE_CLOSE) &&
		    lws_handle_POLLOUT_event(wsi, pollfd)) {
			if (wsi->state == LWSS_RETURNED_CLOSE_ALREADY)
				wsi->state = LWSS_FLUSHING_STORED_SEND_BEFORE_CLOSE;
			lwsl_info("lws_service_fd: closing\n");
			goto close_and_handled;
		}

		if (wsi->rxflow_buffer &&
		    (wsi->rxflow_change_to & LWS_RXFLOW_ALLOW)) {
			lwsl_info("draining rxflow\n");
			/* well, drain it */
			eff_buf.token = (char *)wsi->rxflow_buffer +
						wsi->rxflow_pos;
			eff_buf.token_len = wsi->rxflow_len - wsi->rxflow_pos;
			draining_flow = 1;
			goto drain;
		}

		/* any incoming data ready? */

		if (!(pollfd->revents & LWS_POLLIN))
			break;
read:

		eff_buf.token_len = lws_ssl_capable_read(wsi,
					context->serv_buf, pending ? pending :
					sizeof(context->serv_buf));
		switch (eff_buf.token_len) {
		case 0:
			lwsl_info("service_fd: closing due to 0 length read\n");
			goto close_and_handled;
		case LWS_SSL_CAPABLE_MORE_SERVICE:
			lwsl_info("SSL Capable more service\n");
			n = 0;
			goto handled;
		case LWS_SSL_CAPABLE_ERROR:
			lwsl_info("Closing when error\n");
			goto close_and_handled;
		}

		/*
		 * give any active extensions a chance to munge the buffer
		 * before parse.  We pass in a pointer to an lws_tokens struct
		 * prepared with the default buffer and content length that's
		 * in there.  Rather than rewrite the default buffer,
		 * extensions that expect to grow the buffer can adapt .token
		 * to point to their own per-connection buffer in the
		 * extension user allocation.  By default with no extensions
		 * or no extension callback handling, just the normal input
		 * buffer is used then so it is efficient.
		 */

		eff_buf.token = (char *)context->serv_buf;
drain:

		do {
			more = 0;

			m = lws_ext_cb_wsi_active_exts(wsi,
				LWS_EXT_CALLBACK_PACKET_RX_PREPARSE, &eff_buf, 0);
			if (m < 0)
				goto close_and_handled;
			if (m)
				more = 1;

			/* service incoming data */

			if (eff_buf.token_len) {
				n = lws_read(wsi, (unsigned char *)eff_buf.token,
					     eff_buf.token_len);
				if (n < 0) {
					/* we closed wsi */
					n = 0;
					goto handled;
				}
			}

			eff_buf.token = NULL;
			eff_buf.token_len = 0;
		} while (more);

		pending = lws_ssl_pending(wsi);
		if (pending) {
handle_pending:
			pending = pending > sizeof(context->serv_buf) ?
					sizeof(context->serv_buf) : pending;
			goto read;
		}

		if (draining_flow && wsi->rxflow_buffer &&
		    wsi->rxflow_pos == wsi->rxflow_len) {
			lwsl_info("flow buffer: drained\n");
			lws_free_set_NULL(wsi->rxflow_buffer);
			/* having drained the rxflow buffer, can rearm POLLIN */
#ifdef LWS_NO_SERVER
			n =
#endif
			_lws_rx_flow_control(wsi);
			/* n ignored, needed for NO_SERVER case */
		}

		break;

	default:
#ifdef LWS_NO_CLIENT
		break;
#else
		n = lws_client_socket_service(context, wsi, pollfd);
		goto handled;
#endif
	}

	n = 0;
	goto handled;

close_and_handled:
	lwsl_debug("Close and handled\n");
	lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS);
	/*
	 * pollfd may point to something else after the close
	 * due to pollfd swapping scheme on delete on some platforms
	 * we can't clear revents now because it'd be the wrong guy's revents
	 */
	return 1;

handled:
	pollfd->revents = 0;
	return n;
}

/**
 * lws_service() - Service any pending websocket activity
 * @context:	Websocket context
 * @timeout_ms:	Timeout for poll; 0 means return immediately if nothing needed
 *		service; otherwise block and service immediately, returning
 *		after the timeout if nothing needed service.
 *
 * This function deals with any pending websocket traffic, for three
 * kinds of event.  It handles these events on both server and client
 * types of connection the same.
 *
 * 1) Accept new connections to our context's server
 *
 * 2) Call the receive callback for incoming frame data received by
 *    server or client connections.
 *
 * 3) Call the writeable callback for connections that have asked to be
 *    told when they can next be written to.
 *
 * You need to call this service function periodically for all of the
 * above to happen; if your application is single-threaded you can just
 * call it in your main event loop.
 *
 * Alternatively you can fork a new process that asynchronously handles
 * calling this service in a loop.  In that case you are happy if this
 * call blocks your thread until it needs to take care of something and
 * would call it with a large nonzero timeout.  Your loop then takes no
 * CPU while there is nothing happening.
 *
 * If you are calling it in a single-threaded app, you don't want it to
 * wait around blocking other things in your loop from happening, so you
 * would call it with a timeout_ms of 0, so it returns immediately if
 * nothing is pending, or as soon as it services whatever was pending.
 */

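/*
 * A minimal single-threaded event loop might look like this (a sketch
 * only: "force_exit" is an application flag, not part of lws):
 *
 *	while (!force_exit)
 *		if (lws_service(context, 50) < 0)
 *			break;
 */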
LWS_VISIBLE int
lws_service(struct lws_context *context, int timeout_ms)
{
	return lws_plat_service(context, timeout_ms);
}