/*
 * libwebsockets - small server side websockets and web server implementation
 *
 * Copyright (C) 2010-2015 Andy Green <andy@warmcat.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation:
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
 * MA  02110-1301  USA
 */

#include "private-libwebsockets.h"

static int
lws_callback_as_writeable(struct lws *wsi)
{
	int n;

	switch (wsi->mode) {
	case LWSCM_WS_CLIENT:
		n = LWS_CALLBACK_CLIENT_WRITEABLE;
		break;
	case LWSCM_WS_SERVING:
		n = LWS_CALLBACK_SERVER_WRITEABLE;
		break;
	default:
		n = LWS_CALLBACK_HTTP_WRITEABLE;
		break;
	}
	lwsl_debug("%s: %p (user=%p)\n", __func__, wsi, wsi->user_space);
	return user_callback_handle_rxflow(wsi->protocol->callback,
					   wsi, (enum lws_callback_reasons) n,
					   wsi->user_space, NULL, 0);
}
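
/*
 * Illustrative sketch, not part of the build: roughly what a user protocol
 * callback on the receiving end of these writeable notifications might look
 * like.  The function name and the payload are assumptions made purely for
 * illustration.
 */
#if 0
static int
example_writeable_callback(struct lws *wsi, enum lws_callback_reasons reason,
			   void *user, void *in, size_t len)
{
	unsigned char buf[LWS_PRE + 16];

	switch (reason) {
	case LWS_CALLBACK_SERVER_WRITEABLE:
		/* ws payload must sit after LWS_PRE bytes of headroom */
		memcpy(&buf[LWS_PRE], "hello", 5);
		if (lws_write(wsi, &buf[LWS_PRE], 5, LWS_WRITE_TEXT) < 5)
			return -1;
		break;
	default:
		break;
	}

	return 0;
}
#endif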

int
lws_handle_POLLOUT_event(struct lws *wsi, struct lws_pollfd *pollfd)
{
	int write_type = LWS_WRITE_PONG;
	struct lws_tokens eff_buf;
#ifdef LWS_USE_HTTP2
	struct lws *wsi2;
#endif
	int ret, m, n;

	/*
	 * user callback is lowest priority to get these notifications,
	 * since other pending things cannot be disordered
	 */

	/* Priority 1: pending truncated sends are incomplete ws fragments
	 *	       If anything else were sent first the protocol would be
	 *	       corrupted.
	 */
	if (wsi->trunc_len) {
		if (lws_issue_raw(wsi, wsi->trunc_alloc + wsi->trunc_offset,
				  wsi->trunc_len) < 0) {
			lwsl_info("%s signalling to close\n", __func__);
			return -1;
		}
		/* leave POLLOUT active either way */
		return 0;
	} else
		if (wsi->state == LWSS_FLUSHING_STORED_SEND_BEFORE_CLOSE)
			return -1; /* retry closing now */

#ifdef LWS_USE_HTTP2
	/* Priority 2: protocol packets
	 */
	if (wsi->pps) {
		lwsl_info("servicing pps %d\n", wsi->pps);
		switch (wsi->pps) {
		case LWS_PPS_HTTP2_MY_SETTINGS:
		case LWS_PPS_HTTP2_ACK_SETTINGS:
			lws_http2_do_pps_send(lws_get_context(wsi), wsi);
			break;
		default:
			break;
		}
		wsi->pps = LWS_PPS_NONE;
		lws_rx_flow_control(wsi, 1);

		return 0; /* leave POLLOUT active */
	}
#endif
	/* Priority 3: pending control packets (pong or close)
	 */
	if ((wsi->state == LWSS_ESTABLISHED &&
	     wsi->u.ws.ping_pending_flag) ||
	    (wsi->state == LWSS_RETURNED_CLOSE_ALREADY &&
	     wsi->u.ws.payload_is_close)) {

		if (wsi->u.ws.payload_is_close)
			write_type = LWS_WRITE_CLOSE;

		n = lws_write(wsi, &wsi->u.ws.ping_payload_buf[LWS_PRE],
			      wsi->u.ws.ping_payload_len, write_type);
		if (n < 0)
			return -1;

		/* the control frame has been sent, mark it done */
		wsi->u.ws.ping_pending_flag = 0;
		if (wsi->u.ws.payload_is_close)
			/* if it was a close frame, then we are done */
			return -1;

		/* otherwise for PING, leave POLLOUT active either way */
		return 0;
	}

	/* Priority 4: if we are closing, not allowed to send more data frags
	 *	       which means user callback or tx ext flush banned now
	 */
	if (wsi->state == LWSS_RETURNED_CLOSE_ALREADY)
		goto user_service;

	/* Priority 5: Tx path extension with more to send
	 *
	 *	       These are handled as new fragments each time around.
	 *	       We must block new writeable callbacks to enforce
	 *	       payload ordering, but since they are always complete
	 *	       fragments, control packets can interleave OK.
	 */
	if (wsi->state == LWSS_ESTABLISHED && wsi->u.ws.tx_draining_ext) {
		lwsl_ext("SERVICING TX EXT DRAINING\n");
		if (lws_write(wsi, NULL, 0, LWS_WRITE_CONTINUATION) < 0)
			return -1;
		/* leave POLLOUT active */
		return 0;
	}

	/* Priority 6: user can get the callback
	 */
	m = lws_ext_cb_active(wsi, LWS_EXT_CB_IS_WRITEABLE, NULL, 0);
	if (m)
		return -1;
#ifndef LWS_NO_EXTENSIONS
	if (!wsi->extension_data_pending)
		goto user_service;
#endif
	/*
	 * check in on the active extensions, see if they
	 * had pending stuff to spill... they need to get the
	 * first look-in otherwise sequence will be disordered
	 *
	 * NULL, zero-length eff_buf means just spill pending
	 */

	ret = 1;
	while (ret == 1) {

		/* default to nobody has more to spill */

		ret = 0;
		eff_buf.token = NULL;
		eff_buf.token_len = 0;

		/* give every extension a chance to spill */

		m = lws_ext_cb_active(wsi,
				      LWS_EXT_CB_PACKET_TX_PRESEND,
				      &eff_buf, 0);
		if (m < 0) {
			lwsl_err("ext reports fatal error\n");
			return -1;
		}
		if (m)
			/*
			 * at least one extension told us he has more
			 * to spill, so we will go around again after
			 */
			ret = 1;

		/* assuming they gave us something to send, send it */

		if (eff_buf.token_len) {
			n = lws_issue_raw(wsi, (unsigned char *)eff_buf.token,
					  eff_buf.token_len);
			if (n < 0) {
				lwsl_info("closing from POLLOUT spill\n");
				return -1;
			}
			/*
			 * Keep amount spilled small to minimize chance of this
			 */
			if (n != eff_buf.token_len) {
				lwsl_err("Unable to spill ext %d vs %d\n",
					 eff_buf.token_len, n);
				return -1;
			}
		} else
			continue;

		/* no extension has more to spill */

		if (!ret)
			continue;

		/*
		 * There's more to spill from an extension, but we just sent
		 * something... did that leave the pipe choked?
		 */

		if (!lws_send_pipe_choked(wsi))
			/* no, we could add more */
			continue;

		lwsl_info("choked in POLLOUT service\n");

		/*
		 * Yes, he's choked.  Leave the POLLOUT masked on so we will
		 * come back here when he is unchoked.  Don't call the user
		 * callback to enforce ordering of spilling, he'll get called
		 * when we come back here and there's nothing more to spill.
		 */

		return 0;
	}
#ifndef LWS_NO_EXTENSIONS
	wsi->extension_data_pending = 0;
#endif
user_service:
	/* one shot */

	if (pollfd)
		if (lws_change_pollfd(wsi, LWS_POLLOUT, 0)) {
			lwsl_info("failed at set pollfd\n");
			return 1;
		}

#ifdef LWS_USE_HTTP2
	/*
	 * we are the 'network wsi' for potentially many muxed child wsi with
	 * no network connection of their own, who have to use us for all their
	 * network actions.  So we use a round-robin scheme to share out the
	 * POLLOUT notifications to our children.
	 *
	 * But because any child could exhaust the socket's ability to take
	 * writes, we can only let one child get notified each time.
	 *
	 * In addition children may be closed / deleted / added between POLLOUT
	 * notifications, so we can't hold pointers
	 */

	if (wsi->mode != LWSCM_HTTP2_SERVING) {
		lwsl_info("%s: non http2\n", __func__);
		goto notify;
	}

	wsi->u.http2.requested_POLLOUT = 0;
	if (!wsi->u.http2.initialized) {
		lwsl_info("pollout on uninitialized http2 conn\n");
		return 0;
	}

	lwsl_info("%s: doing children\n", __func__);

	wsi2 = wsi;
	do {
		wsi2 = wsi2->u.http2.next_child_wsi;
		lwsl_info("%s: child %p\n", __func__, wsi2);
		if (!wsi2)
			continue;
		if (!wsi2->u.http2.requested_POLLOUT)
			continue;
		wsi2->u.http2.requested_POLLOUT = 0;
		if (lws_callback_as_writeable(wsi2)) {
			lwsl_debug("Closing POLLOUT child\n");
			lws_close_free_wsi(wsi2, LWS_CLOSE_STATUS_NOSTATUS);
		}
		wsi2 = wsi;
	} while (wsi2 != NULL && !lws_send_pipe_choked(wsi));

	lwsl_info("%s: completed\n", __func__);

	return 0;

notify:
#endif
	return lws_callback_as_writeable(wsi);
}

int
lws_service_timeout_check(struct lws *wsi, unsigned int sec)
{
	struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];

	/*
	 * if extensions want in on it (eg, we are a mux parent)
	 * give them a chance to service child timeouts
	 */
	if (lws_ext_cb_active(wsi, LWS_EXT_CB_1HZ, NULL, sec) < 0)
		return 0;

	if (!wsi->pending_timeout)
		return 0;

	/*
	 * if we went beyond the allowed time, kill the
	 * connection
	 */
	if ((time_t)sec > wsi->pending_timeout_limit) {
#if LWS_POSIX
		lwsl_notice("wsi %p: TIMEDOUT WAITING on %d (did hdr %d, ah %p, wl %d, pfd events %d)\n",
			    (void *)wsi, wsi->pending_timeout,
			    wsi->hdr_parsing_completed, wsi->u.hdr.ah,
			    pt->ah_wait_list_length,
			    pt->fds[wsi->sock].events);
#endif
		/*
		 * Since he failed a timeout, he already had a chance to do
		 * something and was unable to... that includes situations like
		 * half closed connections.  So process this "failed timeout"
		 * close as a violent death and don't try to do protocol
		 * cleanup like flush partials.
		 */
		wsi->socket_is_permanently_unusable = 1;
		if (wsi->mode == LWSCM_WSCL_WAITING_SSL)
			wsi->context->protocols[0].callback(wsi,
				LWS_CALLBACK_CLIENT_CONNECTION_ERROR,
				wsi->user_space, NULL, 0);
		lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS);

		return 1;
	}

	return 0;
}
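
/*
 * Illustrative sketch, not part of the build: how a timeout that the
 * once-per-second check above later enforces is typically armed elsewhere,
 * assuming the usual lws_set_timeout() helper and one of its pending_timeout
 * reasons; the 20 second figure is an arbitrary example.
 */
#if 0
static void
example_arm_ping_timeout(struct lws *wsi)
{
	/* give the peer 20 seconds to answer before the 1Hz sweep above
	 * times the connection out */
	lws_set_timeout(wsi, PENDING_TIMEOUT_AWAITING_PING, 20);
}
#endif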

int lws_rxflow_cache(struct lws *wsi, unsigned char *buf, int n, int len)
{
	/* his RX is flowcontrolled, don't send remaining now */
	if (wsi->rxflow_buffer) {
		/* rxflow while we were spilling prev rxflow */
		lwsl_info("stalling in existing rxflow buf\n");
		return 1;
	}

	/* a new rxflow, buffer it and warn caller */
	lwsl_info("new rxflow input buffer len %d\n", len - n);
	wsi->rxflow_buffer = lws_malloc(len - n);
	if (!wsi->rxflow_buffer)
		return -1;
	wsi->rxflow_len = len - n;
	wsi->rxflow_pos = 0;
	memcpy(wsi->rxflow_buffer, buf + n, len - n);

	return 0;
}
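
/*
 * Illustrative sketch, not part of the build: how a hypothetical caller
 * might consume lws_rxflow_cache()'s return codes after noticing that rx
 * flow control was applied partway through a buffer.  The helper name and
 * its parameters are assumptions for illustration only.
 */
#if 0
static int
example_stash_remaining_rx(struct lws *wsi, unsigned char *buf,
			   int already_used, int len)
{
	switch (lws_rxflow_cache(wsi, buf, already_used, len)) {
	case 1:
		/* already buffering: leave the existing stash alone */
		return 0;
	case 0:
		/* remainder copied aside; it is replayed when the rxflow
		 * buffer is drained in lws_service_fd_tsi() */
		return 0;
	default:
		/* OOM: caller should close the connection */
		return -1;
	}
}
#endif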

/* this is used by the platform service code to stop us waiting for network
 * activity in poll() when we have something that already needs service
 */

int
lws_service_adjust_timeout(struct lws_context *context, int timeout_ms, int tsi)
{
	struct lws_context_per_thread *pt = &context->pt[tsi];
	int n;

	/* Figure out if we really want to wait in poll()
	 * We only need to wait if really nothing already to do and we have
	 * to wait for something from network
	 */

	/* 1) if we know we are draining rx ext, do not wait in poll */
	if (pt->rx_draining_ext_list)
		return 0;

#ifdef LWS_OPENSSL_SUPPORT
	/* 2) if we know we have non-network pending data, do not wait in poll */
	if (lws_ssl_anybody_has_buffered_read_tsi(context, tsi)) {
		lwsl_info("ssl buffered read\n");
		return 0;
	}
#endif

	/* 3) if any ah has pending rx, do not wait in poll */
	for (n = 0; n < context->max_http_header_pool; n++)
		if (pt->ah_pool[n].rxpos != pt->ah_pool[n].rxlen) {
			/* any ah with pending rx must be attached to someone */
			assert(pt->ah_pool[n].wsi);
			return 0;
		}

	return timeout_ms;
}
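
/*
 * Illustrative sketch, not part of the build: the shape of the call the
 * platform service code is assumed to make, so that poll() is skipped
 * whenever buffered work is already waiting.  The helper name and the 50ms
 * default are assumptions for illustration only.
 */
#if 0
static int
example_plat_service_once(struct lws_context *context, int tsi)
{
	struct lws_context_per_thread *pt = &context->pt[tsi];
	int timeout_ms = lws_service_adjust_timeout(context, 50, tsi);

	/* a zero timeout means "something is already pending, don't sleep" */
	return poll(pt->fds, pt->fds_count, timeout_ms);
}
#endif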

/*
 * guys that need POLLIN service again without waiting for network action
 * can force POLLIN here if not flowcontrolled, so they will get service.
 *
 * Return nonzero if anybody got their POLLIN faked
 */
int
lws_service_flag_pending(struct lws_context *context, int tsi)
{
	struct lws_context_per_thread *pt = &context->pt[tsi];
#ifdef LWS_OPENSSL_SUPPORT
	struct lws *wsi_next;
#endif
	struct lws *wsi;
	int forced = 0;
	int n;

	/* POLLIN faking */

	/*
	 * 1) For all guys with already-available ext data to drain, if they are
	 * not flowcontrolled, fake their POLLIN status
	 */
	wsi = pt->rx_draining_ext_list;
	while (wsi) {
		pt->fds[wsi->position_in_fds_table].revents |=
			pt->fds[wsi->position_in_fds_table].events & LWS_POLLIN;
		if (pt->fds[wsi->position_in_fds_table].revents &
		    LWS_POLLIN)
			forced = 1;
		wsi = wsi->u.ws.rx_draining_ext_list;
	}

#ifdef LWS_OPENSSL_SUPPORT
	/*
	 * 2) For all guys with buffered SSL read data already saved up, if they
	 * are not flowcontrolled, fake their POLLIN status so they'll get
	 * service to use up the buffered incoming data, even though their
	 * network socket may have nothing
	 */
	wsi = pt->pending_read_list;
	while (wsi) {
		wsi_next = wsi->pending_read_list_next;
		pt->fds[wsi->position_in_fds_table].revents |=
			pt->fds[wsi->position_in_fds_table].events & LWS_POLLIN;
		if (pt->fds[wsi->position_in_fds_table].revents & LWS_POLLIN) {
			forced = 1;
			/*
			 * he's going to get serviced now, take him off the
			 * list of guys with buffered SSL.  If he still has some
			 * at the end of the service, he'll get put back on the
			 * list then.
			 */
			lws_ssl_remove_wsi_from_buffered_list(wsi);
		}

		wsi = wsi_next;
	}
#endif
	/*
	 * 3) For any wsi who have an ah with pending RX who did not
	 * complete their current headers, and are not flowcontrolled,
	 * fake their POLLIN status so they will be able to drain the
	 * rx buffered in the ah
	 */
	for (n = 0; n < context->max_http_header_pool; n++)
		if (pt->ah_pool[n].rxpos != pt->ah_pool[n].rxlen &&
		    !pt->ah_pool[n].wsi->hdr_parsing_completed) {
			pt->fds[pt->ah_pool[n].wsi->position_in_fds_table].revents |=
				pt->fds[pt->ah_pool[n].wsi->position_in_fds_table].events &
					LWS_POLLIN;
			if (pt->fds[pt->ah_pool[n].wsi->position_in_fds_table].revents &
			    LWS_POLLIN)
				forced = 1;
		}

	return forced;
}

/**
 * lws_service_fd() - Service polled socket with something waiting
 * @context:	Websocket context
 * @pollfd:	The pollfd entry describing the socket fd and which events
 *		happened.
 *
 *	This function takes a pollfd that has POLLIN or POLLOUT activity and
 *	services it according to the state of the associated
 *	struct lws.
 *
 *	The one call deals with all "service" that might happen on a socket
 *	including listen accepts, http files as well as websocket protocol.
 *
 *	If a pollfd says it has something, you can just pass it to
 *	lws_service_fd() whether it is a socket handled by lws or not.
 *	If it sees it is a lws socket, the traffic will be handled and
 *	pollfd->revents will be zeroed now.
 *
 *	If the socket is foreign to lws, it leaves revents alone.  So you can
 *	see if you should service yourself by checking the pollfd revents
 *	after letting lws try to service it.
 */

LWS_VISIBLE int
lws_service_fd_tsi(struct lws_context *context, struct lws_pollfd *pollfd, int tsi)
{
	struct lws_context_per_thread *pt = &context->pt[tsi];
	lws_sockfd_type our_fd = 0, tmp_fd;
	struct lws_tokens eff_buf;
	unsigned int pending = 0;
	struct lws *wsi, *wsi1;
	char draining_flow = 0;
	int timed_out = 0;
	time_t now;
	int n, m;
	int more;

	/*
	 * you can call us with pollfd = NULL to just allow the once-per-second
	 * global timeout checks; if less than a second since the last check
	 * it returns immediately then.
	 */

	time(&now);

	/* TODO: if using libev, we should probably use timeout watchers... */
	if (context->last_timeout_check_s != now) {
		context->last_timeout_check_s = now;

		lws_plat_service_periodic(context);

		/* global timeout check once per second */

		if (pollfd)
			our_fd = pollfd->fd;

		wsi = context->pt[tsi].timeout_list;
		while (wsi) {
			/* we have to take copies, because he may be deleted */
			wsi1 = wsi->timeout_list;
			tmp_fd = wsi->sock;
			if (lws_service_timeout_check(wsi, (unsigned int)now)) {
				/* he did time out... */
				if (tmp_fd == our_fd)
					/* it was the guy we came to service! */
					timed_out = 1;
				/* he's gone, no need to mark as handled */
			}
			wsi = wsi1;
		}
#if 0
		{
			char s[300], *p = s;

			for (n = 0; n < context->count_threads; n++)
				p += sprintf(p, " %7lu (%5d), ",
					     context->pt[n].count_conns,
					     context->pt[n].fds_count);

			lwsl_notice("load: %s\n", s);
		}
#endif
	}

	/* the socket we came to service timed out, nothing to do */
	if (timed_out)
		return 0;

	/* just here for timeout management? */
	if (!pollfd)
		return 0;

	/* no, here to service a socket descriptor */
	wsi = wsi_from_fd(context, pollfd->fd);
	if (!wsi)
		/* not lws connection ... leave revents alone and return */
		return 0;

	/*
	 * so that caller can tell we handled, past here we need to
	 * zero down pollfd->revents after handling
	 */

#if LWS_POSIX

	/* handle session socket closed */

	if ((!(pollfd->revents & pollfd->events & LWS_POLLIN)) &&
	    (pollfd->revents & LWS_POLLHUP)) {
		wsi->socket_is_permanently_unusable = 1;
		lwsl_debug("Session Socket %p (fd=%d) dead\n",
			   (void *)wsi, pollfd->fd);

		goto close_and_handled;
	}

#ifdef _WIN32
	if (pollfd->revents & LWS_POLLOUT)
		wsi->sock_send_blocking = FALSE;
#endif

#endif

	/* okay, what we came here to do... */

	switch (wsi->mode) {
	case LWSCM_HTTP_SERVING:
	case LWSCM_HTTP_SERVING_ACCEPTED:
	case LWSCM_SERVER_LISTENER:
	case LWSCM_SSL_ACK_PENDING:
		n = lws_server_socket_service(context, wsi, pollfd);
		if (n) /* closed by above */
			return 1;
		goto handled;

	case LWSCM_WS_SERVING:
	case LWSCM_WS_CLIENT:
	case LWSCM_HTTP2_SERVING:

		/* 1: something requested a callback when it was OK to write */

		if ((pollfd->revents & LWS_POLLOUT) &&
		    (wsi->state == LWSS_ESTABLISHED ||
		     wsi->state == LWSS_HTTP2_ESTABLISHED ||
		     wsi->state == LWSS_HTTP2_ESTABLISHED_PRE_SETTINGS ||
		     wsi->state == LWSS_RETURNED_CLOSE_ALREADY ||
		     wsi->state == LWSS_FLUSHING_STORED_SEND_BEFORE_CLOSE) &&
		    lws_handle_POLLOUT_event(wsi, pollfd)) {
			if (wsi->state == LWSS_RETURNED_CLOSE_ALREADY)
				wsi->state = LWSS_FLUSHING_STORED_SEND_BEFORE_CLOSE;
			lwsl_info("lws_service_fd: closing\n");
			goto close_and_handled;
		}

		if (wsi->state == LWSS_RETURNED_CLOSE_ALREADY ||
		    wsi->state == LWSS_AWAITING_CLOSE_ACK) {
			/*
			 * we stopped caring about anything except control
			 * packets.  Force flow control off, defeat tx
			 * draining.
			 */
			lws_rx_flow_control(wsi, 1);
			wsi->u.ws.tx_draining_ext = 0;
		}

		if (wsi->u.ws.tx_draining_ext) {
			/* we cannot deal with new RX until the TX ext
			 * path has been drained.  It's because new
			 * rx will, eg, crap on the wsi rx buf that
			 * may be needed to retain state.
			 *
			 * TX ext drain path MUST go through event loop
			 * to avoid blocking.
			 */
			break;
		}

		if (!(wsi->rxflow_change_to & LWS_RXFLOW_ALLOW))
			/* We cannot deal with any kind of new RX
			 * because we are RX-flowcontrolled.
			 */
			break;

		/* 2: RX Extension needs to be drained
		 */

		if (wsi->state == LWSS_ESTABLISHED &&
		    wsi->u.ws.rx_draining_ext) {

			lwsl_ext("%s: RX EXT DRAINING: Service\n", __func__);
#ifndef LWS_NO_CLIENT
			if (wsi->mode == LWSCM_WS_CLIENT) {
				n = lws_client_rx_sm(wsi, 0);
				if (n < 0)
					/* we closed wsi */
					n = 0;
			} else
#endif
				n = lws_rx_sm(wsi, 0);

			goto handled;
		}

		if (wsi->u.ws.rx_draining_ext)
			/*
			 * We have RX EXT content to drain, but can't do it
			 * right now.  That means we cannot do anything lower
			 * priority either.
			 */
			break;

		/* 3: RX Flowcontrol buffer needs to be drained
		 */

		if (wsi->rxflow_buffer) {
			lwsl_info("draining rxflow (len %d)\n",
				  wsi->rxflow_len - wsi->rxflow_pos
			);
			/* well, drain it */
			eff_buf.token = (char *)wsi->rxflow_buffer +
						wsi->rxflow_pos;
			eff_buf.token_len = wsi->rxflow_len - wsi->rxflow_pos;
			draining_flow = 1;
			goto drain;
		}

		/* 4: any incoming (or ah-stashed incoming rx) data ready?
		 * notice if rx flow going off raced poll(), rx flow wins
		 */

		if (!(pollfd->revents & pollfd->events & LWS_POLLIN))
			break;
read:
		/* all the union members start with hdr, so even in ws mode
		 * we can deal with the ah via u.hdr
		 */
		if (wsi->u.hdr.ah) {
			lwsl_err("%s: %p: using inherited ah rx\n", __func__, wsi);
			eff_buf.token_len = wsi->u.hdr.ah->rxlen -
					    wsi->u.hdr.ah->rxpos;
			eff_buf.token = (char *)wsi->u.hdr.ah->rx +
					wsi->u.hdr.ah->rxpos;
		} else {

			eff_buf.token_len = lws_ssl_capable_read(wsi, pt->serv_buf,
					pending ? pending : LWS_MAX_SOCKET_IO_BUF);
			switch (eff_buf.token_len) {
			case 0:
				lwsl_info("service_fd: closing due to 0 length read\n");
				goto close_and_handled;
			case LWS_SSL_CAPABLE_MORE_SERVICE:
				lwsl_info("SSL Capable more service\n");
				n = 0;
				goto handled;
			case LWS_SSL_CAPABLE_ERROR:
				lwsl_info("Closing when error\n");
				goto close_and_handled;
			}

			eff_buf.token = (char *)pt->serv_buf;
		}

		/*
		 * give any active extensions a chance to munge the buffer
		 * before parse.  We pass in a pointer to an lws_tokens struct
		 * prepared with the default buffer and content length that's in
		 * there.  Rather than rewrite the default buffer, extensions
		 * that expect to grow the buffer can adapt .token to
		 * point to their own per-connection buffer in the extension
		 * user allocation.  By default with no extensions or no
		 * extension callback handling, just the normal input buffer is
		 * used then so it is efficient.
		 */
drain:
		do {
			more = 0;

			m = lws_ext_cb_active(wsi, LWS_EXT_CB_PACKET_RX_PREPARSE,
					      &eff_buf, 0);
			if (m < 0)
				goto close_and_handled;
			if (m)
				more = 1;

			/* service incoming data */

			if (eff_buf.token_len) {
				/*
				 * if draining from the rxflow buffer, it's not
				 * critical to track what was used, since using
				 * it bumps wsi->rxflow_pos.  If we come around
				 * again it will pick up from where it left off.
				 */
				n = lws_read(wsi, (unsigned char *)eff_buf.token,
					     eff_buf.token_len);
				if (n < 0) {
					/* we closed wsi */
					n = 0;
					goto handled;
				}
			}

			eff_buf.token = NULL;
			eff_buf.token_len = 0;
		} while (more);

		if (wsi->u.hdr.ah) {
			lwsl_err("%s: %p: detaching inherited used ah\n", __func__, wsi);
			/* show we used all the pending rx up */
			wsi->u.hdr.ah->rxpos = wsi->u.hdr.ah->rxlen;
			/* we can run the normal ah detach flow despite
			 * being in ws union mode, since all union members
			 * start with hdr */
			lws_header_table_detach(wsi);
		}

		pending = lws_ssl_pending(wsi);
		if (pending) {
			pending = pending > LWS_MAX_SOCKET_IO_BUF ?
					LWS_MAX_SOCKET_IO_BUF : pending;
			goto read;
		}

		if (draining_flow && wsi->rxflow_buffer &&
		    wsi->rxflow_pos == wsi->rxflow_len) {
			lwsl_info("flow buffer: drained\n");
			lws_free_set_NULL(wsi->rxflow_buffer);
			/* having drained the rxflow buffer, can rearm POLLIN */
#ifdef LWS_NO_SERVER
			n =
#endif
			_lws_rx_flow_control(wsi);
			/* n ignored, needed for NO_SERVER case */
		}

		break;

	default:
#ifdef LWS_NO_CLIENT
		break;
#else
		n = lws_client_socket_service(context, wsi, pollfd);
		if (n)
			return 1;
		goto handled;
#endif
	}

	n = 0;
	goto handled;

close_and_handled:
	lwsl_debug("Close and handled\n");
	lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS);
	/*
	 * pollfd may point to something else after the close
	 * due to pollfd swapping scheme on delete on some platforms
	 * we can't clear revents now because it'd be the wrong guy's revents
	 */
	return 1;

handled:
	pollfd->revents = 0;
	return n;
}

LWS_VISIBLE int
lws_service_fd(struct lws_context *context, struct lws_pollfd *pollfd)
{
	return lws_service_fd_tsi(context, pollfd, 0);
}
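
/*
 * Illustrative sketch, not part of the build: an external poll() loop that
 * hands every fd with activity to lws_service_fd(), relying on the
 * behaviour documented above (foreign fds are left alone).  The fds array,
 * its count and the 50ms timeout are assumptions managed by the application.
 */
#if 0
static void
example_external_poll_loop(struct lws_context *context,
			   struct lws_pollfd *fds, int count_fds)
{
	int n, i;

	while (1) {
		n = poll(fds, count_fds, 50);
		if (n < 0)
			break;

		for (i = 0; i < count_fds; i++)
			if (fds[i].revents)
				/* revents is zeroed only for fds lws owns */
				lws_service_fd(context, &fds[i]);
	}
}
#endif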

/**
 * lws_service() - Service any pending websocket activity
 * @context:	Websocket context
 * @timeout_ms:	Timeout for poll; 0 means return immediately if nothing needed
 *		service, otherwise block and service immediately, returning
 *		after the timeout if nothing needed service.
 *
 *	This function deals with any pending websocket traffic, for several
 *	kinds of event.  It handles these events on both server and client
 *	types of connection the same.
 *
 *	1) Accept new connections to our context's server
 *
 *	2) Call the receive callback for incoming frame data received by
 *	    server or client connections.
 *
 *	You need to call this service function periodically for all the above
 *	functions to happen; if your application is single-threaded you can
 *	just call it in your main event loop.
 *
 *	Alternatively you can fork a new process that asynchronously handles
 *	calling this service in a loop.  In that case you are happy if this
 *	call blocks your thread until it needs to take care of something and
 *	would call it with a large nonzero timeout.  Your loop then takes no
 *	CPU while there is nothing happening.
 *
 *	If you are calling it in a single-threaded app, you don't want it to
 *	wait around blocking other things in your loop from happening, so you
 *	would call it with a timeout_ms of 0, so it returns immediately if
 *	nothing is pending, or as soon as it services whatever was pending.
 */

LWS_VISIBLE int
lws_service(struct lws_context *context, int timeout_ms)
{
	return lws_plat_service(context, timeout_ms);
}
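
/*
 * Illustrative sketch, not part of the build: the minimal single-threaded
 * service loop the comment above describes.  force_exit is an assumed
 * application flag and 50ms an arbitrary timeout.
 */
#if 0
static volatile int force_exit;

static void
example_service_loop(struct lws_context *context)
{
	while (!force_exit)
		lws_service(context, 50);
}
#endif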

LWS_VISIBLE int
lws_service_tsi(struct lws_context *context, int timeout_ms, int tsi)
{
	return lws_plat_service_tsi(context, timeout_ms, tsi);
}