/*
 * libwebsockets - small server side websockets and web server implementation
 *
 * Copyright (C) 2010-2016 Andy Green <andy@warmcat.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation:
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
 * MA  02110-1301  USA
 */
|
|
|
|
|
2010-11-08 20:20:42 +00:00
|
|
|
#include "private-libwebsockets.h"
|
2010-10-28 22:36:01 +01:00
|
|
|
|
2016-03-01 07:33:56 +08:00
|
|
|
#ifdef LWS_HAVE_SYS_TYPES_H
|
|
|
|
#include <sys/types.h>
|
|
|
|
#endif
|
|
|
|
|
2014-04-03 07:29:50 +08:00
|
|
|
int log_level = LLL_ERR | LLL_WARN | LLL_NOTICE;
|
2013-01-19 11:32:18 +08:00
|
|
|
static void (*lwsl_emit)(int level, const char *line) = lwsl_emit_stderr;
|
|
|
|
|
2013-02-11 17:13:32 +08:00
|
|
|
static const char * const log_level_names[] = {
|
2013-01-10 19:50:35 +08:00
|
|
|
"ERR",
|
|
|
|
"WARN",
|
2013-01-19 12:18:07 +08:00
|
|
|
"NOTICE",
|
2013-01-10 19:50:35 +08:00
|
|
|
"INFO",
|
|
|
|
"DEBUG",
|
|
|
|
"PARSER",
|
|
|
|
"HEADER",
|
|
|
|
"EXTENSION",
|
|
|
|
"CLIENT",
|
2013-01-29 12:36:17 +08:00
|
|
|
"LATENCY",
|
2013-01-10 19:50:35 +08:00
|
|
|
};
|
|
|
|
|
2015-06-25 17:51:07 +02:00
|
|
|
void
|
2015-12-04 11:08:32 +08:00
|
|
|
lws_free_wsi(struct lws *wsi)
|
2015-06-25 17:51:07 +02:00
|
|
|
{
|
|
|
|
if (!wsi)
|
|
|
|
return;
|
|
|
|
|
|
|
|
/* Protocol user data may be allocated either internally by lws
|
2015-12-04 16:54:12 +08:00
|
|
|
* or by specified the user.
|
|
|
|
* We should only free what we allocated. */
|
|
|
|
if (wsi->protocol && wsi->protocol->per_session_data_size &&
|
|
|
|
wsi->user_space && !wsi->user_space_externally_allocated)
|
2015-06-25 17:51:07 +02:00
|
|
|
lws_free(wsi->user_space);
|
|
|
|
|
2015-12-17 17:03:59 +08:00
|
|
|
lws_free_set_NULL(wsi->rxflow_buffer);
|
|
|
|
lws_free_set_NULL(wsi->trunc_alloc);
|
ah owns rxbuf
This is intended to solve a longstanding problem with the
relationship between http/1.1 keep-alive and the service
loop.
Ah now contain an rx buffer which is used during header
processing, and the ah may not be detached from the wsi
until the rx buffer is exhausted.
Having the rx buffer in the ah means we can delay using the
rx until a later service loop.
Ah which have pending rx force POLLIN service on the wsi
they are attached to automatically, so we can interleave
general service / connections with draining each ah rx
buffer.
The possible http/1.1 situations and their dispositions are:
1) exactly one set of http headers come. After processing,
the ah is detached since no pending rx left. If more
headers come later, a fresh ah is aqcuired when available
and the rx flow control blocks the read until then.
2) more that one whole set of headers come and we remain in
http mode (no upgrade). The ah is left attached and
returns to the service loop after the first set of headers.
We will get forced service due to the ah having pending
content (respecting flowcontrol) and process the pending
rx in the ah. If we use it all up, we will detach the
ah.
3) one set of http headers come with ws traffic appended.
We service the headers, do the upgrade, and keep the ah
until the remaining ws content is used. When we
exhausted the ws traffix in the ah rx buffer, we
detach the ah.
Since there can be any amount of http/1.1 pipelining on a
connection, and each may be expensive to service, it's now
enforced there is a return to the service loop after each
header set is serviced on a connection.
When I added the forced service for ah with pending buffering,
I added support for it to the windows plat code. However this
is untested.
Signed-off-by: Andy Green <andy.green@linaro.org>
2016-02-15 12:37:04 +08:00
|
|
|
|
2016-02-25 21:55:06 +08:00
|
|
|
if (wsi->u.hdr.ah)
|
ah owns rxbuf
This is intended to solve a longstanding problem with the
relationship between http/1.1 keep-alive and the service
loop.
Ah now contain an rx buffer which is used during header
processing, and the ah may not be detached from the wsi
until the rx buffer is exhausted.
Having the rx buffer in the ah means we can delay using the
rx until a later service loop.
Ah which have pending rx force POLLIN service on the wsi
they are attached to automatically, so we can interleave
general service / connections with draining each ah rx
buffer.
The possible http/1.1 situations and their dispositions are:
1) exactly one set of http headers come. After processing,
the ah is detached since no pending rx left. If more
headers come later, a fresh ah is aqcuired when available
and the rx flow control blocks the read until then.
2) more that one whole set of headers come and we remain in
http mode (no upgrade). The ah is left attached and
returns to the service loop after the first set of headers.
We will get forced service due to the ah having pending
content (respecting flowcontrol) and process the pending
rx in the ah. If we use it all up, we will detach the
ah.
3) one set of http headers come with ws traffic appended.
We service the headers, do the upgrade, and keep the ah
until the remaining ws content is used. When we
exhausted the ws traffix in the ah rx buffer, we
detach the ah.
Since there can be any amount of http/1.1 pipelining on a
connection, and each may be expensive to service, it's now
enforced there is a return to the service loop after each
header set is serviced on a connection.
When I added the forced service for ah with pending buffering,
I added support for it to the windows plat code. However this
is untested.
Signed-off-by: Andy Green <andy.green@linaro.org>
2016-02-15 12:37:04 +08:00
|
|
|
/* we're closing, losing some rx is OK */
|
|
|
|
wsi->u.hdr.ah->rxpos = wsi->u.hdr.ah->rxlen;
|
2016-02-25 21:55:06 +08:00
|
|
|
|
|
|
|
/* we may not have an ah, but may be on the waiting list... */
|
|
|
|
lws_header_table_detach(wsi);
|
2016-01-26 20:56:56 +08:00
|
|
|
|
2016-02-14 09:27:41 +08:00
|
|
|
wsi->context->count_wsi_allocated--;
|
|
|
|
lwsl_debug("%s: %p, remaining wsi %d\n", __func__, wsi,
|
|
|
|
wsi->context->count_wsi_allocated);
|
|
|
|
|
2015-06-25 17:51:07 +02:00
|
|
|
lws_free(wsi);
|
|
|
|
}
|
|
|
|
|
2016-01-19 04:32:14 +08:00
|
|
|
static void
|
|
|
|
lws_remove_from_timeout_list(struct lws *wsi)
|
|
|
|
{
|
2016-01-26 20:56:56 +08:00
|
|
|
struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
|
|
|
|
|
2016-02-25 21:54:31 +08:00
|
|
|
if (!wsi->timeout_list_prev) /* ie, not part of the list */
|
2016-01-19 04:32:14 +08:00
|
|
|
return;
|
|
|
|
|
2016-01-26 20:56:56 +08:00
|
|
|
lws_pt_lock(pt);
|
2016-02-25 21:54:31 +08:00
|
|
|
/* if we have a next guy, set his prev to our prev */
|
2016-01-19 23:11:39 +08:00
|
|
|
if (wsi->timeout_list)
|
|
|
|
wsi->timeout_list->timeout_list_prev = wsi->timeout_list_prev;
|
2016-02-25 21:54:31 +08:00
|
|
|
/* set our prev guy to our next guy instead of us */
|
2016-01-19 04:32:14 +08:00
|
|
|
*wsi->timeout_list_prev = wsi->timeout_list;
|
2016-01-19 23:11:39 +08:00
|
|
|
|
2016-02-25 21:54:31 +08:00
|
|
|
/* we're out of the list, we should not point anywhere any more */
|
2016-01-19 04:32:14 +08:00
|
|
|
wsi->timeout_list_prev = NULL;
|
|
|
|
wsi->timeout_list = NULL;
|
2016-01-26 20:56:56 +08:00
|
|
|
lws_pt_unlock(pt);
|
2016-01-19 04:32:14 +08:00
|
|
|
}
|
|
|
|
|
2016-01-26 20:56:56 +08:00
|
|
|
/**
|
|
|
|
* lws_set_timeout() - marks the wsi as subject to a timeout
|
|
|
|
*
|
|
|
|
* You will not need this unless you are doing something special
|
|
|
|
*
|
|
|
|
* @wsi: Websocket connection instance
|
|
|
|
* @reason: timeout reason
|
|
|
|
* @secs: how many seconds
|
|
|
|
*/
|
|
|
|
|
|
|
|
LWS_VISIBLE void
|
|
|
|
lws_set_timeout(struct lws *wsi, enum pending_timeout reason, int secs)
|
|
|
|
{
|
|
|
|
struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
|
|
|
|
time_t now;
|
|
|
|
|
|
|
|
lws_pt_lock(pt);
|
|
|
|
|
|
|
|
time(&now);
|
|
|
|
|
2016-02-25 21:54:31 +08:00
|
|
|
if (reason && !wsi->timeout_list_prev) {
|
|
|
|
/* our next guy is current first guy */
|
2016-01-26 20:56:56 +08:00
|
|
|
wsi->timeout_list = pt->timeout_list;
|
2016-02-25 21:54:31 +08:00
|
|
|
/* if there is a next guy, set his prev ptr to our next ptr */
|
2016-01-26 20:56:56 +08:00
|
|
|
if (wsi->timeout_list)
|
|
|
|
wsi->timeout_list->timeout_list_prev = &wsi->timeout_list;
|
2016-02-25 21:54:31 +08:00
|
|
|
/* our prev ptr is first ptr */
|
2016-01-26 20:56:56 +08:00
|
|
|
wsi->timeout_list_prev = &pt->timeout_list;
|
2016-02-25 21:54:31 +08:00
|
|
|
/* set the first guy to be us */
|
2016-01-26 20:56:56 +08:00
|
|
|
*wsi->timeout_list_prev = wsi;
|
|
|
|
}
|
|
|
|
|
|
|
|
wsi->pending_timeout_limit = now + secs;
|
|
|
|
wsi->pending_timeout = reason;
|
|
|
|
|
|
|
|
lws_pt_unlock(pt);
|
|
|
|
|
|
|
|
if (!reason)
|
|
|
|
lws_remove_from_timeout_list(wsi);
|
|
|
|
}
|
2016-01-19 04:32:14 +08:00
|
|
|
|
2010-12-19 22:13:26 +00:00
|
|
|
void
|
2015-12-15 21:15:58 +08:00
|
|
|
lws_close_free_wsi(struct lws *wsi, enum lws_close_status reason)
|
2010-11-03 11:13:06 +00:00
|
|
|
{
|
2016-01-19 21:32:08 +08:00
|
|
|
struct lws_context *context;
|
|
|
|
struct lws_context_per_thread *pt;
|
2016-01-26 20:56:56 +08:00
|
|
|
int n, m, ret;
|
2011-03-07 07:08:18 +00:00
|
|
|
struct lws_tokens eff_buf;
|
2010-12-18 15:13:50 +00:00
|
|
|
|
2011-02-14 08:03:48 +00:00
|
|
|
if (!wsi)
|
2010-12-18 15:13:50 +00:00
|
|
|
return;
|
|
|
|
|
2016-01-19 21:32:08 +08:00
|
|
|
context = wsi->context;
|
|
|
|
pt = &context->pt[(int)wsi->tsi];
|
2010-11-03 11:13:06 +00:00
|
|
|
|
2015-12-17 17:03:59 +08:00
|
|
|
if (wsi->mode == LWSCM_HTTP_SERVING_ACCEPTED &&
|
2015-12-04 16:54:12 +08:00
|
|
|
wsi->u.http.fd != LWS_INVALID_FILE) {
|
|
|
|
lwsl_debug("closing http file\n");
|
2015-12-11 13:12:58 +08:00
|
|
|
lws_plat_file_close(wsi, wsi->u.http.fd);
|
2015-12-04 16:54:12 +08:00
|
|
|
wsi->u.http.fd = LWS_INVALID_FILE;
|
2015-12-17 07:54:44 +08:00
|
|
|
context->protocols[0].callback(wsi, LWS_CALLBACK_CLOSED_HTTP,
|
|
|
|
wsi->user_space, NULL, 0);
|
2015-12-04 16:54:12 +08:00
|
|
|
}
|
2015-04-12 08:17:26 +08:00
|
|
|
if (wsi->socket_is_permanently_unusable ||
|
2016-01-26 20:56:56 +08:00
|
|
|
reason == LWS_CLOSE_STATUS_NOSTATUS_CONTEXT_DESTROY ||
|
|
|
|
wsi->state == LWSS_SHUTDOWN)
|
2014-10-16 08:23:46 +08:00
|
|
|
goto just_kill_connection;
|
|
|
|
|
2016-01-26 20:56:56 +08:00
|
|
|
wsi->state_pre_close = wsi->state;
|
|
|
|
|
|
|
|
switch (wsi->state_pre_close) {
|
2015-12-17 17:03:59 +08:00
|
|
|
case LWSS_DEAD_SOCKET:
|
2011-02-10 09:07:05 +00:00
|
|
|
return;
|
|
|
|
|
2013-02-15 22:48:58 +08:00
|
|
|
/* we tried the polite way... */
|
2015-12-17 17:03:59 +08:00
|
|
|
case LWSS_AWAITING_CLOSE_ACK:
|
2013-02-15 22:48:58 +08:00
|
|
|
goto just_kill_connection;
|
|
|
|
|
2015-12-17 17:03:59 +08:00
|
|
|
case LWSS_FLUSHING_STORED_SEND_BEFORE_CLOSE:
|
|
|
|
if (wsi->trunc_len) {
|
2015-12-16 18:19:08 +08:00
|
|
|
lws_callback_on_writable(wsi);
|
2014-04-10 14:25:24 +08:00
|
|
|
return;
|
|
|
|
}
|
2015-12-17 17:03:59 +08:00
|
|
|
lwsl_info("wsi %p completed LWSS_FLUSHING_STORED_SEND_BEFORE_CLOSE\n", wsi);
|
2014-04-10 14:25:24 +08:00
|
|
|
goto just_kill_connection;
|
|
|
|
default:
|
2015-12-17 17:03:59 +08:00
|
|
|
if (wsi->trunc_len) {
|
|
|
|
lwsl_info("wsi %p entering LWSS_FLUSHING_STORED_SEND_BEFORE_CLOSE\n", wsi);
|
|
|
|
wsi->state = LWSS_FLUSHING_STORED_SEND_BEFORE_CLOSE;
|
2015-12-04 08:43:54 +08:00
|
|
|
lws_set_timeout(wsi, PENDING_FLUSH_STORED_SEND_BEFORE_CLOSE, 5);
|
2014-04-10 14:25:24 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2015-12-17 17:03:59 +08:00
|
|
|
if (wsi->mode == LWSCM_WSCL_WAITING_CONNECT ||
|
|
|
|
wsi->mode == LWSCM_WSCL_ISSUE_HANDSHAKE)
|
2013-09-20 20:26:12 +08:00
|
|
|
goto just_kill_connection;
|
|
|
|
|
2015-12-17 17:03:59 +08:00
|
|
|
if (wsi->mode == LWSCM_HTTP_SERVING)
|
2015-12-17 07:54:44 +08:00
|
|
|
context->protocols[0].callback(wsi, LWS_CALLBACK_CLOSED_HTTP,
|
|
|
|
wsi->user_space, NULL, 0);
|
2015-01-28 04:15:13 +08:00
|
|
|
|
2011-05-25 21:41:57 +01:00
|
|
|
/*
|
|
|
|
* are his extensions okay with him closing? Eg he might be a mux
|
|
|
|
* parent and just his ch1 aspect is closing?
|
|
|
|
*/
|
2015-12-14 08:52:03 +08:00
|
|
|
|
2016-01-11 11:34:01 +08:00
|
|
|
if (lws_ext_cb_active(wsi,
|
|
|
|
LWS_EXT_CB_CHECK_OK_TO_REALLY_CLOSE, NULL, 0) > 0) {
|
2014-04-02 21:02:54 +08:00
|
|
|
lwsl_ext("extension vetoed close\n");
|
|
|
|
return;
|
2011-05-25 21:41:57 +01:00
|
|
|
}
|
|
|
|
|
2011-03-07 07:08:18 +00:00
|
|
|
/*
|
|
|
|
* flush any tx pending from extensions, since we may send close packet
|
|
|
|
* if there are problems with send, just nuke the connection
|
|
|
|
*/
|
|
|
|
|
2014-04-02 21:02:54 +08:00
|
|
|
do {
|
2011-03-07 07:08:18 +00:00
|
|
|
ret = 0;
|
|
|
|
eff_buf.token = NULL;
|
|
|
|
eff_buf.token_len = 0;
|
|
|
|
|
|
|
|
/* show every extension the new incoming data */
|
|
|
|
|
2016-01-11 11:34:01 +08:00
|
|
|
m = lws_ext_cb_active(wsi,
|
|
|
|
LWS_EXT_CB_FLUSH_PENDING_TX, &eff_buf, 0);
|
2014-04-02 21:02:54 +08:00
|
|
|
if (m < 0) {
|
|
|
|
lwsl_ext("Extension reports fatal error\n");
|
|
|
|
goto just_kill_connection;
|
2011-03-07 07:08:18 +00:00
|
|
|
}
|
2014-04-02 21:02:54 +08:00
|
|
|
if (m)
|
|
|
|
/*
|
|
|
|
* at least one extension told us he has more
|
|
|
|
* to spill, so we will go around again after
|
|
|
|
*/
|
|
|
|
ret = 1;
|
2011-03-07 07:08:18 +00:00
|
|
|
|
|
|
|
/* assuming they left us something to send, send it */
|
|
|
|
|
|
|
|
if (eff_buf.token_len)
|
|
|
|
if (lws_issue_raw(wsi, (unsigned char *)eff_buf.token,
|
2015-12-04 16:54:12 +08:00
|
|
|
eff_buf.token_len) !=
|
|
|
|
eff_buf.token_len) {
|
2013-02-11 17:13:32 +08:00
|
|
|
lwsl_debug("close: ext spill failed\n");
|
2011-03-07 07:08:18 +00:00
|
|
|
goto just_kill_connection;
|
2013-01-17 14:46:43 +08:00
|
|
|
}
|
2014-04-02 21:02:54 +08:00
|
|
|
} while (ret);
|
2011-03-07 07:08:18 +00:00
|
|
|
|
2011-03-07 07:08:12 +00:00
|
|
|
/*
|
2015-12-04 08:43:54 +08:00
|
|
|
* signal we are closing, lws_write will
|
2011-03-07 07:08:12 +00:00
|
|
|
* add any necessary version-specific stuff. If the write fails,
|
|
|
|
* no worries we are closing anyway. If we didn't initiate this
|
|
|
|
* close, then our state has been changed to
|
2015-12-17 17:03:59 +08:00
|
|
|
* LWSS_RETURNED_CLOSE_ALREADY and we will skip this.
|
2011-03-07 07:08:12 +00:00
|
|
|
*
|
|
|
|
* Likewise if it's a second call to close this connection after we
|
|
|
|
* sent the close indication to the peer already, we are in state
|
2015-12-17 17:03:59 +08:00
|
|
|
* LWSS_AWAITING_CLOSE_ACK and will skip doing this a second time.
|
2011-03-07 07:08:12 +00:00
|
|
|
*/
|
|
|
|
|
2016-01-26 20:56:56 +08:00
|
|
|
if (wsi->state_pre_close == LWSS_ESTABLISHED &&
|
2015-12-26 17:20:34 +08:00
|
|
|
(wsi->u.ws.close_in_ping_buffer_len || /* already a reason */
|
|
|
|
(reason != LWS_CLOSE_STATUS_NOSTATUS &&
|
|
|
|
(reason != LWS_CLOSE_STATUS_NOSTATUS_CONTEXT_DESTROY)))) {
|
2013-01-10 19:50:35 +08:00
|
|
|
lwsl_debug("sending close indication...\n");
|
2015-12-26 17:20:34 +08:00
|
|
|
|
|
|
|
/* if no prepared close reason, use 1000 and no aux data */
|
|
|
|
if (!wsi->u.ws.close_in_ping_buffer_len) {
|
|
|
|
wsi->u.ws.close_in_ping_buffer_len = 2;
|
2016-01-11 11:34:01 +08:00
|
|
|
wsi->u.ws.ping_payload_buf[LWS_PRE] =
|
2017-02-14 23:26:43 +08:00
|
|
|
(reason >> 8) & 0xff;
|
2016-01-11 11:34:01 +08:00
|
|
|
wsi->u.ws.ping_payload_buf[LWS_PRE + 1] =
|
2015-12-26 17:20:34 +08:00
|
|
|
reason & 0xff;
|
|
|
|
}
|
|
|
|
|
2016-01-26 20:56:56 +08:00
|
|
|
n = lws_write(wsi, &wsi->u.ws.ping_payload_buf[LWS_PRE],
|
2015-12-26 17:20:34 +08:00
|
|
|
wsi->u.ws.close_in_ping_buffer_len,
|
|
|
|
LWS_WRITE_CLOSE);
|
2013-02-23 10:50:10 +08:00
|
|
|
if (n >= 0) {
|
2011-03-07 07:08:12 +00:00
|
|
|
/*
|
|
|
|
* we have sent a nice protocol level indication we
|
|
|
|
* now wish to close, we should not send anything more
|
|
|
|
*/
|
2015-12-17 17:03:59 +08:00
|
|
|
wsi->state = LWSS_AWAITING_CLOSE_ACK;
|
2011-03-07 07:08:12 +00:00
|
|
|
|
2013-02-11 17:13:32 +08:00
|
|
|
/*
|
|
|
|
* ...and we should wait for a reply for a bit
|
|
|
|
* out of politeness
|
|
|
|
*/
|
2015-12-04 16:54:12 +08:00
|
|
|
lws_set_timeout(wsi, PENDING_TIMEOUT_CLOSE_ACK, 1);
|
2013-01-10 19:50:35 +08:00
|
|
|
lwsl_debug("sent close indication, awaiting ack\n");
|
2011-03-07 07:08:12 +00:00
|
|
|
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2013-02-11 17:13:32 +08:00
|
|
|
lwsl_info("close: sending close packet failed, hanging up\n");
|
2013-01-17 14:46:43 +08:00
|
|
|
|
2011-03-07 07:08:12 +00:00
|
|
|
/* else, the send failed and we should just hang up */
|
|
|
|
}
|
|
|
|
|
2011-03-07 07:08:18 +00:00
|
|
|
just_kill_connection:
|
2011-05-24 22:07:45 +01:00
|
|
|
|
2016-01-26 20:56:56 +08:00
|
|
|
#if LWS_POSIX
|
|
|
|
/*
|
|
|
|
* Testing with ab shows that we have to stage the socket close when
|
|
|
|
* the system is under stress... shutdown any further TX, change the
|
|
|
|
* state to one that won't emit anything more, and wait with a timeout
|
|
|
|
* for the POLLIN to show a zero-size rx before coming back and doing
|
|
|
|
* the actual close.
|
|
|
|
*/
|
2016-02-14 09:27:41 +08:00
|
|
|
if (wsi->state != LWSS_SHUTDOWN &&
|
|
|
|
reason != LWS_CLOSE_STATUS_NOSTATUS_CONTEXT_DESTROY &&
|
|
|
|
!wsi->socket_is_permanently_unusable) {
|
2016-01-26 20:56:56 +08:00
|
|
|
lwsl_info("%s: shutting down connection: %p\n", __func__, wsi);
|
|
|
|
n = shutdown(wsi->sock, SHUT_WR);
|
|
|
|
if (n)
|
|
|
|
lwsl_debug("closing: shutdown ret %d\n", LWS_ERRNO);
|
2016-02-24 21:32:31 +08:00
|
|
|
|
2016-03-01 07:33:56 +08:00
|
|
|
// This causes problems with disconnection when the events are half closing connection
|
|
|
|
// FD_READ | FD_CLOSE (33)
|
|
|
|
#ifndef _WIN32_WCE
|
2016-02-29 11:11:48 +08:00
|
|
|
/* libuv: no event available to guarantee completion */
|
|
|
|
if (!LWS_LIBUV_ENABLED(context)) {
|
|
|
|
|
|
|
|
lws_change_pollfd(wsi, LWS_POLLOUT, LWS_POLLIN);
|
|
|
|
wsi->state = LWSS_SHUTDOWN;
|
|
|
|
lws_set_timeout(wsi, PENDING_TIMEOUT_SHUTDOWN_FLUSH,
|
|
|
|
context->timeout_secs);
|
|
|
|
return;
|
|
|
|
}
|
2016-03-01 07:33:56 +08:00
|
|
|
#endif
|
2016-01-26 20:56:56 +08:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
lwsl_info("%s: real just_kill_connection: %p\n", __func__, wsi);
|
2011-05-24 22:07:45 +01:00
|
|
|
|
2011-03-07 07:08:12 +00:00
|
|
|
/*
|
|
|
|
* we won't be servicing or receiving anything further from this guy
|
2013-01-17 12:26:48 +08:00
|
|
|
* delete socket from the internal poll list if still present
|
2011-03-07 07:08:12 +00:00
|
|
|
*/
|
2015-12-15 21:15:58 +08:00
|
|
|
lws_ssl_remove_wsi_from_buffered_list(wsi);
|
2016-01-19 04:32:14 +08:00
|
|
|
lws_remove_from_timeout_list(wsi);
|
2015-01-29 08:36:18 +08:00
|
|
|
|
2015-12-04 16:54:12 +08:00
|
|
|
/* checking return redundant since we anyway close */
|
2015-12-15 21:15:58 +08:00
|
|
|
remove_wsi_socket_from_fds(wsi);
|
2011-02-14 08:03:48 +00:00
|
|
|
|
2015-12-17 17:03:59 +08:00
|
|
|
wsi->state = LWSS_DEAD_SOCKET;
|
2014-12-05 00:09:20 +01:00
|
|
|
|
2015-12-17 17:03:59 +08:00
|
|
|
lws_free_set_NULL(wsi->rxflow_buffer);
|
2014-12-05 00:09:20 +01:00
|
|
|
|
2016-01-26 20:56:56 +08:00
|
|
|
if (wsi->state_pre_close == LWSS_ESTABLISHED ||
|
2015-12-17 17:03:59 +08:00
|
|
|
wsi->mode == LWSCM_WS_SERVING ||
|
|
|
|
wsi->mode == LWSCM_WS_CLIENT) {
|
2013-02-12 12:52:39 +08:00
|
|
|
|
2016-01-11 11:34:01 +08:00
|
|
|
if (wsi->u.ws.rx_draining_ext) {
|
2016-01-19 03:34:24 +08:00
|
|
|
struct lws **w = &pt->rx_draining_ext_list;
|
2016-01-11 11:34:01 +08:00
|
|
|
|
|
|
|
wsi->u.ws.rx_draining_ext = 0;
|
|
|
|
/* remove us from context draining ext list */
|
|
|
|
while (*w) {
|
|
|
|
if (*w == wsi) {
|
|
|
|
*w = wsi->u.ws.rx_draining_ext_list;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
w = &((*w)->u.ws.rx_draining_ext_list);
|
|
|
|
}
|
|
|
|
wsi->u.ws.rx_draining_ext_list = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (wsi->u.ws.tx_draining_ext) {
|
2016-01-19 03:34:24 +08:00
|
|
|
struct lws **w = &pt->tx_draining_ext_list;
|
2016-01-11 11:34:01 +08:00
|
|
|
|
|
|
|
wsi->u.ws.tx_draining_ext = 0;
|
|
|
|
/* remove us from context draining ext list */
|
|
|
|
while (*w) {
|
|
|
|
if (*w == wsi) {
|
|
|
|
*w = wsi->u.ws.tx_draining_ext_list;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
w = &((*w)->u.ws.tx_draining_ext_list);
|
|
|
|
}
|
|
|
|
wsi->u.ws.tx_draining_ext_list = NULL;
|
|
|
|
}
|
|
|
|
lws_free_set_NULL(wsi->u.ws.rx_ubuf);
|
2014-10-08 12:00:53 +08:00
|
|
|
|
2015-12-17 17:03:59 +08:00
|
|
|
if (wsi->trunc_alloc)
|
add explicit error for partial send
This patch adds code to handle the situation that a prepared user buffer could not all be sent on the
socket at once. There are two kinds of situation to handle
1) User code handles it: The connection only has extensions active that do not rewrite the buffer.
In this case, the patch caused libwebsocket_write() to simply return the amount of user buffer that
was consumed (this is specifically the amount of user buffer used in sending what was accepted,
nothing else). So user code can just advance its buffer that much and resume sending when the socket
is writable again. This continues the frame rather than starting a new one or new fragment.
2) The connections has extensions active which actually send something quite different than what the
user buffer contains, for example a compression extension. In this case, libwebsockets will dynamically
malloc a buffer to contain a copy of the remaining unsent data, request notifiction when writeable again,
and automatically spill and free this buffer with the highest priority before passing on the writable
notification to anything else. For this situation, the call to write will return that it used the
whole user buffer, even though part is still rebuffered.
This patch should enable libwebsockets to detect the two cases and take the appropriate action.
There are also two choices for user code to deal with partial sends.
1) Leave the no_buffer_all_partial_tx member in the protocol struct at zero. The library will dyamically
buffer anything you send that did not get completely written to the socket, and automatically spill it next
time the socket is writable. You can use this method if your sent frames are relatvely small and unlikely to get
truncated anyway.
2) Set the no_buffer_all_partial_tx member in the protocol struct. User code now needs to take care of the
return value from libwebsocket_write() and deal with resending the remainder if not all of the requested amount
got sent. You should use this method if you are sending large messages and want to maximize throughput and efficiency.
Since the new member no_buffer_all_partial_tx will be zero by default, this patch will auto-rebuffer any
partial sends by default. That's good for most cases but if you attempt to send large blocks, make sure you
follow option 2) above.
Signed-off-by: Andy Green <andy.green@linaro.org>
2013-10-17 08:09:19 +08:00
|
|
|
/* not going to be completed... nuke it */
|
2015-12-17 17:03:59 +08:00
|
|
|
lws_free_set_NULL(wsi->trunc_alloc);
|
2015-12-04 16:54:12 +08:00
|
|
|
|
2015-12-25 13:14:09 +08:00
|
|
|
wsi->u.ws.ping_payload_len = 0;
|
|
|
|
wsi->u.ws.ping_pending_flag = 0;
|
2013-02-06 21:10:16 +09:00
|
|
|
}
|
|
|
|
|
2011-02-14 08:03:48 +00:00
|
|
|
/* tell the user it's all over for this guy */
|
|
|
|
|
2011-02-28 07:45:29 +00:00
|
|
|
if (wsi->protocol && wsi->protocol->callback &&
|
2016-01-26 20:56:56 +08:00
|
|
|
((wsi->state_pre_close == LWSS_ESTABLISHED) ||
|
|
|
|
(wsi->state_pre_close == LWSS_RETURNED_CLOSE_ALREADY) ||
|
|
|
|
(wsi->state_pre_close == LWSS_AWAITING_CLOSE_ACK) ||
|
2016-02-24 19:18:46 +08:00
|
|
|
(wsi->state_pre_close == LWSS_FLUSHING_STORED_SEND_BEFORE_CLOSE) ||
|
|
|
|
(wsi->mode == LWSCM_WS_CLIENT && wsi->state_pre_close == LWSS_HTTP) ||
|
|
|
|
(wsi->mode == LWSCM_WS_SERVING && wsi->state_pre_close == LWSS_HTTP))) {
|
2013-01-10 19:50:35 +08:00
|
|
|
lwsl_debug("calling back CLOSED\n");
|
2015-12-17 07:54:44 +08:00
|
|
|
wsi->protocol->callback(wsi, LWS_CALLBACK_CLOSED,
|
2015-12-04 16:54:12 +08:00
|
|
|
wsi->user_space, NULL, 0);
|
2015-12-17 17:03:59 +08:00
|
|
|
} else if (wsi->mode == LWSCM_HTTP_SERVING_ACCEPTED) {
|
2013-06-29 10:16:18 +08:00
|
|
|
lwsl_debug("calling back CLOSED_HTTP\n");
|
2015-12-17 07:54:44 +08:00
|
|
|
context->protocols[0].callback(wsi, LWS_CALLBACK_CLOSED_HTTP,
|
|
|
|
wsi->user_space, NULL, 0 );
|
2015-12-17 17:03:59 +08:00
|
|
|
} else if (wsi->mode == LWSCM_WSCL_WAITING_SERVER_REPLY ||
|
|
|
|
wsi->mode == LWSCM_WSCL_WAITING_CONNECT) {
|
2016-06-13 08:43:03 +02:00
|
|
|
char* errorString;
|
|
|
|
|
2015-04-16 19:55:42 +08:00
|
|
|
lwsl_debug("Connection closed before server reply\n");
|
2017-01-04 14:38:39 +08:00
|
|
|
errorString = NULL;
|
|
|
|
if (wsi->u.hdr.ah)
|
|
|
|
errorString = lws_hdr_simple_ptr(wsi, WSI_TOKEN_HTTP);
|
2016-06-13 08:43:03 +02:00
|
|
|
if (errorString) {
|
|
|
|
context->protocols[0].callback(wsi,
|
|
|
|
LWS_CALLBACK_CLIENT_CONNECTION_ERROR,
|
|
|
|
wsi->user_space, errorString,
|
|
|
|
(unsigned int)strlen(errorString));
|
|
|
|
} else {
|
|
|
|
context->protocols[0].callback(wsi,
|
2015-12-04 16:54:12 +08:00
|
|
|
LWS_CALLBACK_CLIENT_CONNECTION_ERROR,
|
|
|
|
wsi->user_space, NULL, 0);
|
2016-06-13 08:43:03 +02:00
|
|
|
}
|
2011-11-07 19:53:23 +08:00
|
|
|
} else
|
2015-04-16 19:55:42 +08:00
|
|
|
lwsl_debug("not calling back closed mode=%d state=%d\n",
|
2016-01-26 20:56:56 +08:00
|
|
|
wsi->mode, wsi->state_pre_close);
|
2010-11-03 11:13:06 +00:00
|
|
|
|
2011-03-06 10:29:38 +00:00
|
|
|
/* deallocate any active extension contexts */
|
2015-12-14 08:52:03 +08:00
|
|
|
|
2016-01-11 11:34:01 +08:00
|
|
|
if (lws_ext_cb_active(wsi, LWS_EXT_CB_DESTROY, NULL, 0) < 0)
|
2014-04-02 21:02:54 +08:00
|
|
|
lwsl_warn("extension destruction failed\n");
|
2011-05-23 10:00:03 +01:00
|
|
|
/*
|
|
|
|
* inform all extensions in case they tracked this guy out of band
|
|
|
|
* even though not active on him specifically
|
|
|
|
*/
|
2015-12-17 17:03:59 +08:00
|
|
|
if (lws_ext_cb_all_exts(context, wsi,
|
2016-01-11 11:34:01 +08:00
|
|
|
LWS_EXT_CB_DESTROY_ANY_WSI_CLOSING, NULL, 0) < 0)
|
2014-04-02 21:02:54 +08:00
|
|
|
lwsl_warn("ext destroy wsi failed\n");
|
2011-05-23 10:00:03 +01:00
|
|
|
|
2016-01-26 20:56:56 +08:00
|
|
|
wsi->socket_is_permanently_unusable = 1;
|
|
|
|
|
2016-02-14 09:27:41 +08:00
|
|
|
#ifdef LWS_USE_LIBUV
|
ah owns rxbuf
This is intended to solve a longstanding problem with the
relationship between http/1.1 keep-alive and the service
loop.
Ah now contain an rx buffer which is used during header
processing, and the ah may not be detached from the wsi
until the rx buffer is exhausted.
Having the rx buffer in the ah means we can delay using the
rx until a later service loop.
Ah which have pending rx force POLLIN service on the wsi
they are attached to automatically, so we can interleave
general service / connections with draining each ah rx
buffer.
The possible http/1.1 situations and their dispositions are:
1) exactly one set of http headers come. After processing,
the ah is detached since no pending rx left. If more
headers come later, a fresh ah is aqcuired when available
and the rx flow control blocks the read until then.
2) more that one whole set of headers come and we remain in
http mode (no upgrade). The ah is left attached and
returns to the service loop after the first set of headers.
We will get forced service due to the ah having pending
content (respecting flowcontrol) and process the pending
rx in the ah. If we use it all up, we will detach the
ah.
3) one set of http headers come with ws traffic appended.
We service the headers, do the upgrade, and keep the ah
until the remaining ws content is used. When we
exhausted the ws traffix in the ah rx buffer, we
detach the ah.
Since there can be any amount of http/1.1 pipelining on a
connection, and each may be expensive to service, it's now
enforced there is a return to the service loop after each
header set is serviced on a connection.
When I added the forced service for ah with pending buffering,
I added support for it to the windows plat code. However this
is untested.
Signed-off-by: Andy Green <andy.green@linaro.org>
2016-02-15 12:37:04 +08:00
|
|
|
if (LWS_LIBUV_ENABLED(context)) {
|
|
|
|
/* libuv has to do his own close handle processing asynchronously */
|
|
|
|
lws_libuv_closehandle(wsi);
|
2016-02-14 09:27:41 +08:00
|
|
|
|
ah owns rxbuf
This is intended to solve a longstanding problem with the
relationship between http/1.1 keep-alive and the service
loop.
Ah now contain an rx buffer which is used during header
processing, and the ah may not be detached from the wsi
until the rx buffer is exhausted.
Having the rx buffer in the ah means we can delay using the
rx until a later service loop.
Ah which have pending rx force POLLIN service on the wsi
they are attached to automatically, so we can interleave
general service / connections with draining each ah rx
buffer.
The possible http/1.1 situations and their dispositions are:
1) exactly one set of http headers come. After processing,
the ah is detached since no pending rx left. If more
headers come later, a fresh ah is aqcuired when available
and the rx flow control blocks the read until then.
2) more that one whole set of headers come and we remain in
http mode (no upgrade). The ah is left attached and
returns to the service loop after the first set of headers.
We will get forced service due to the ah having pending
content (respecting flowcontrol) and process the pending
rx in the ah. If we use it all up, we will detach the
ah.
3) one set of http headers come with ws traffic appended.
We service the headers, do the upgrade, and keep the ah
until the remaining ws content is used. When we
exhausted the ws traffix in the ah rx buffer, we
detach the ah.
Since there can be any amount of http/1.1 pipelining on a
connection, and each may be expensive to service, it's now
enforced there is a return to the service loop after each
header set is serviced on a connection.
When I added the forced service for ah with pending buffering,
I added support for it to the windows plat code. However this
is untested.
Signed-off-by: Andy Green <andy.green@linaro.org>
2016-02-15 12:37:04 +08:00
|
|
|
return;
|
|
|
|
}
|
2016-02-14 09:27:41 +08:00
|
|
|
#endif
|
|
|
|
|
|
|
|
lws_close_free_wsi_final(wsi);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
lws_close_free_wsi_final(struct lws *wsi)
|
|
|
|
{
|
|
|
|
int n;
|
|
|
|
|
2015-11-14 13:48:58 +08:00
|
|
|
if (!lws_ssl_close(wsi) && lws_socket_is_valid(wsi->sock)) {
|
2015-11-02 20:34:12 +08:00
|
|
|
#if LWS_POSIX
|
2014-04-12 10:07:02 +08:00
|
|
|
n = compatible_close(wsi->sock);
|
|
|
|
if (n)
|
|
|
|
lwsl_debug("closing: close ret %d\n", LWS_ERRNO);
|
2015-11-02 20:34:12 +08:00
|
|
|
|
2015-11-08 12:10:26 +08:00
|
|
|
#else
|
|
|
|
compatible_close(wsi->sock);
|
2015-11-02 20:34:12 +08:00
|
|
|
#endif
|
2015-11-08 12:10:26 +08:00
|
|
|
wsi->sock = LWS_SOCK_INVALID;
|
2010-11-08 17:03:03 +00:00
|
|
|
}
|
2014-02-15 19:25:50 +08:00
|
|
|
|
|
|
|
/* outermost destroy notification for wsi (user_space still intact) */
|
2016-02-14 09:27:41 +08:00
|
|
|
wsi->context->protocols[0].callback(wsi, LWS_CALLBACK_WSI_DESTROY,
|
2015-12-04 16:54:12 +08:00
|
|
|
wsi->user_space, NULL, 0);
|
2014-02-15 19:25:50 +08:00
|
|
|
|
2015-06-25 17:51:07 +02:00
|
|
|
lws_free_wsi(wsi);
|
2010-11-03 11:13:06 +00:00
|
|
|
}
|
|
|
|
|
2016-01-16 12:09:38 +08:00
|
|
|
#if LWS_POSIX
/*
 * Resolve a network interface name to a sockaddr, honouring the context's
 * IPv6 setting when LWS_USE_IPV6 is built in.  Thin wrapper over
 * lws_interface_to_sa().
 */
LWS_VISIBLE int
interface_to_sa(struct lws_context *context, const char *ifname,
		struct sockaddr_in *addr, size_t addrlen)
{
	int use_ipv6 = 0;

	(void)context;
#ifdef LWS_USE_IPV6
	use_ipv6 = LWS_IPV6_ENABLED(context);
#endif

	return lws_interface_to_sa(use_ipv6, ifname, addr, addrlen);
}
#endif
|
|
|
|
|
2015-01-28 21:03:49 +08:00
|
|
|
/*
 * lws_get_addresses() - fill name / rip from a raw sockaddr
 *
 * @context:  lws context (used for the IPv6-enabled check)
 * @ads:      pointer to a sockaddr_in or (if IPv6 enabled) sockaddr_in6
 * @name:     buffer for the resolved host name (always written)
 * @name_len: size of @name
 * @rip:      optional buffer for the dotted-quad / IPv6 address, may be NULL
 * @rip_len:  size of @rip
 *
 * Returns 0 on success, -1 on failure.  On the non-POSIX build this is a
 * stub that always fails.
 *
 * Fix vs original: the inet_ntop error log passed strerror() with no %s
 * conversion in the format string, so the reason was silently dropped.
 */
LWS_VISIBLE int
lws_get_addresses(struct lws_context *context, void *ads, char *name,
		  int name_len, char *rip, int rip_len)
{
#if LWS_POSIX
	struct addrinfo ai, *res;
	struct sockaddr_in addr4;

	if (rip)
		rip[0] = '\0';
	name[0] = '\0';
	addr4.sin_family = AF_UNSPEC;

#ifdef LWS_USE_IPV6
	if (LWS_IPV6_ENABLED(context)) {
		if (!lws_plat_inet_ntop(AF_INET6,
					&((struct sockaddr_in6 *)ads)->sin6_addr,
					rip, rip_len)) {
			lwsl_err("inet_ntop: %s\n", strerror(LWS_ERRNO));
			return -1;
		}

		// Strip off the IPv4 to IPv6 header if one exists
		if (strncmp(rip, "::ffff:", 7) == 0)
			memmove(rip, rip + 7, strlen(rip) - 6);

		/* best-effort reverse lookup; name stays "" on failure */
		getnameinfo((struct sockaddr *)ads,
			    sizeof(struct sockaddr_in6), name,
			    name_len, NULL, 0, 0);

		return 0;
	} else
#endif
	{
		struct addrinfo *result;

		memset(&ai, 0, sizeof ai);
		ai.ai_family = PF_UNSPEC;
		ai.ai_socktype = SOCK_STREAM;
		ai.ai_flags = AI_CANONNAME;

		if (getnameinfo((struct sockaddr *)ads,
				sizeof(struct sockaddr_in),
				name, name_len, NULL, 0, 0))
			return -1;

		if (!rip)
			return 0;

		if (getaddrinfo(name, NULL, &ai, &result))
			return -1;

		/* take the first AF_INET result, if any */
		res = result;
		while (addr4.sin_family == AF_UNSPEC && res) {
			switch (res->ai_family) {
			case AF_INET:
				addr4.sin_addr =
				    ((struct sockaddr_in *)res->ai_addr)->sin_addr;
				addr4.sin_family = AF_INET;
				break;
			}

			res = res->ai_next;
		}
		freeaddrinfo(result);
	}

	if (addr4.sin_family == AF_UNSPEC)
		return -1;

	if (lws_plat_inet_ntop(AF_INET, &addr4.sin_addr, rip, rip_len) == NULL)
		return -1;

	return 0;
#else
	(void)context;
	(void)ads;
	(void)name;
	(void)name_len;
	(void)rip;
	(void)rip_len;

	return -1;
#endif
}
|
|
|
|
|
2011-02-13 08:37:12 +00:00
|
|
|
/**
|
2015-12-04 08:43:54 +08:00
|
|
|
* lws_get_peer_addresses() - Get client address information
|
2015-12-04 11:08:32 +08:00
|
|
|
* @wsi: Local struct lws associated with
|
2011-02-13 08:37:12 +00:00
|
|
|
* @fd: Connection socket descriptor
|
|
|
|
* @name: Buffer to take client address name
|
|
|
|
* @name_len: Length of client address name buffer
|
2014-10-23 15:34:26 +01:00
|
|
|
* @rip: Buffer to take client address IP dotted quad
|
2011-02-13 08:37:12 +00:00
|
|
|
* @rip_len: Length of client address IP buffer
|
|
|
|
*
|
|
|
|
* This function fills in @name and @rip with the name and IP of
|
2012-04-09 15:09:01 +08:00
|
|
|
* the client connected with socket descriptor @fd. Names may be
|
|
|
|
* truncated if there is not enough room. If either cannot be
|
|
|
|
* determined, they will be returned as valid zero-length strings.
|
2011-02-13 08:37:12 +00:00
|
|
|
*/
|
|
|
|
|
2013-03-30 09:52:21 +08:00
|
|
|
LWS_VISIBLE void
|
2015-12-16 18:19:08 +08:00
|
|
|
lws_get_peer_addresses(struct lws *wsi, lws_sockfd_type fd, char *name,
|
|
|
|
int name_len, char *rip, int rip_len)
|
2011-02-13 08:37:12 +00:00
|
|
|
{
|
2015-11-02 20:34:12 +08:00
|
|
|
#if LWS_POSIX
|
2013-04-25 09:16:30 +08:00
|
|
|
socklen_t len;
|
2014-03-24 16:09:25 +08:00
|
|
|
#ifdef LWS_USE_IPV6
|
2014-03-24 16:09:25 +08:00
|
|
|
struct sockaddr_in6 sin6;
|
|
|
|
#endif
|
|
|
|
struct sockaddr_in sin4;
|
2015-12-16 18:19:08 +08:00
|
|
|
struct lws_context *context = wsi->context;
|
2014-03-24 16:09:25 +08:00
|
|
|
int ret = -1;
|
2015-01-28 21:03:49 +08:00
|
|
|
void *p;
|
2011-02-13 08:37:12 +00:00
|
|
|
|
|
|
|
rip[0] = '\0';
|
|
|
|
name[0] = '\0';
|
|
|
|
|
2013-01-30 08:12:20 +08:00
|
|
|
lws_latency_pre(context, wsi);
|
|
|
|
|
2014-03-24 16:09:25 +08:00
|
|
|
#ifdef LWS_USE_IPV6
|
2014-03-24 16:09:25 +08:00
|
|
|
if (LWS_IPV6_ENABLED(context)) {
|
|
|
|
len = sizeof(sin6);
|
2015-01-28 21:03:49 +08:00
|
|
|
p = &sin6;
|
2014-03-24 16:09:25 +08:00
|
|
|
} else
|
2011-03-10 18:14:01 +00:00
|
|
|
#endif
|
2014-03-24 16:09:25 +08:00
|
|
|
{
|
|
|
|
len = sizeof(sin4);
|
2015-01-28 21:03:49 +08:00
|
|
|
p = &sin4;
|
|
|
|
}
|
2011-02-13 08:37:12 +00:00
|
|
|
|
2015-01-28 21:03:49 +08:00
|
|
|
if (getpeername(fd, p, &len) < 0) {
|
|
|
|
lwsl_warn("getpeername: %s\n", strerror(LWS_ERRNO));
|
|
|
|
goto bail;
|
2011-02-13 08:37:12 +00:00
|
|
|
}
|
2015-12-14 08:52:03 +08:00
|
|
|
|
2015-12-04 09:23:56 +08:00
|
|
|
ret = lws_get_addresses(context, p, name, name_len, rip, rip_len);
|
2013-01-30 08:12:20 +08:00
|
|
|
|
|
|
|
bail:
|
2015-12-04 08:43:54 +08:00
|
|
|
lws_latency(context, wsi, "lws_get_peer_addresses", ret, 1);
|
2015-11-02 20:34:12 +08:00
|
|
|
#else
|
|
|
|
(void)wsi;
|
|
|
|
(void)fd;
|
|
|
|
(void)name;
|
|
|
|
(void)name_len;
|
|
|
|
(void)rip;
|
|
|
|
(void)rip_len;
|
|
|
|
#endif
|
2011-02-13 08:37:12 +00:00
|
|
|
}
|
2011-02-12 11:57:45 +00:00
|
|
|
|
2013-01-22 12:40:35 +08:00
|
|
|
/**
|
2015-12-04 08:43:54 +08:00
|
|
|
* lws_context_user() - get the user data associated with the context
|
2013-01-22 12:40:35 +08:00
|
|
|
* @context: Websocket context
|
|
|
|
*
|
|
|
|
* This returns the optional user allocation that can be attached to
|
|
|
|
* the context the sockets live in at context_create time. It's a way
|
|
|
|
* to let all sockets serviced in the same context share data without
|
|
|
|
* using globals statics in the user code.
|
|
|
|
*/
|
2012-10-19 11:21:56 +02:00
|
|
|
LWS_EXTERN void *
|
2015-12-04 11:08:32 +08:00
|
|
|
lws_context_user(struct lws_context *context)
|
2012-10-19 11:21:56 +02:00
|
|
|
{
|
2013-02-11 17:13:32 +08:00
|
|
|
return context->user_space;
|
2012-10-19 11:21:56 +02:00
|
|
|
}
|
|
|
|
|
2011-01-19 13:11:55 +00:00
|
|
|
|
2014-02-15 16:36:38 +08:00
|
|
|
/**
|
2015-12-04 08:43:54 +08:00
|
|
|
* lws_callback_all_protocol() - Callback all connections using
|
2014-02-15 16:36:38 +08:00
|
|
|
* the given protocol with the given reason
|
|
|
|
*
|
|
|
|
* @protocol: Protocol whose connections will get callbacks
|
|
|
|
* @reason: Callback reason index
|
|
|
|
*/
|
|
|
|
|
|
|
|
LWS_VISIBLE int
|
2015-12-11 10:45:35 +08:00
|
|
|
lws_callback_all_protocol(struct lws_context *context,
|
|
|
|
const struct lws_protocols *protocol, int reason)
|
2014-02-15 16:36:38 +08:00
|
|
|
{
|
2016-01-19 03:34:24 +08:00
|
|
|
struct lws_context_per_thread *pt = &context->pt[0];
|
2016-01-26 20:56:56 +08:00
|
|
|
unsigned int n, m = context->count_threads;
|
2016-01-29 21:18:54 +08:00
|
|
|
struct lws *wsi;
|
2016-01-19 03:34:24 +08:00
|
|
|
|
|
|
|
while (m--) {
|
|
|
|
for (n = 0; n < pt->fds_count; n++) {
|
|
|
|
wsi = wsi_from_fd(context, pt->fds[n].fd);
|
|
|
|
if (!wsi)
|
|
|
|
continue;
|
|
|
|
if (wsi->protocol == protocol)
|
2016-01-29 21:18:54 +08:00
|
|
|
protocol->callback(wsi, reason, wsi->user_space,
|
|
|
|
NULL, 0);
|
2016-01-19 03:34:24 +08:00
|
|
|
}
|
|
|
|
pt++;
|
2014-02-15 16:36:38 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-11-02 20:34:12 +08:00
|
|
|
#if LWS_POSIX
/**
 * lws_get_socket_fd() - returns the socket file descriptor
 * @wsi: Websocket connection instance
 *
 * You will not need this unless you are doing something special.
 */
LWS_VISIBLE int
lws_get_socket_fd(struct lws *wsi)
{
	return wsi->sock;
}
#endif
|
|
|
|
|
2013-01-29 12:36:17 +08:00
|
|
|
#ifdef LWS_LATENCY
/*
 * lws_latency() - record / report latency instrumentation for a wsi
 *
 * @context:   lws context (holds worst-latency tracking)
 * @wsi:       connection being measured
 * @action:    NULL to stamp the start of an action, else its description
 * @ret:       result code of the completed call, logged verbatim
 * @completed: non-zero when the action has fully finished
 *
 * Fix vs original: the report lines were built with unbounded sprintf()
 * into a 256-byte buffer while @action has arbitrary length; snprintf()
 * now bounds every write.
 */
void
lws_latency(struct lws_context *context, struct lws *wsi, const char *action,
	    int ret, int completed)
{
	unsigned long long u;
	char buf[256];

	u = time_in_microseconds();

	if (!action) {
		/* NULL action == start-of-measurement timestamp */
		wsi->latency_start = u;
		if (!wsi->action_start)
			wsi->action_start = u;
		return;
	}
	if (completed) {
		if (wsi->action_start == wsi->latency_start)
			snprintf(buf, sizeof(buf),
			    "Completion first try lat %lluus: %p: ret %d: %s\n",
					u - wsi->latency_start,
						      (void *)wsi, ret, action);
		else
			snprintf(buf, sizeof(buf),
			    "Completion %lluus: lat %lluus: %p: ret %d: %s\n",
				u - wsi->action_start,
					u - wsi->latency_start,
						      (void *)wsi, ret, action);
		wsi->action_start = 0;
	} else
		snprintf(buf, sizeof(buf), "lat %lluus: %p: ret %d: %s\n",
			      u - wsi->latency_start, (void *)wsi, ret, action);

	if (u - wsi->latency_start > context->worst_latency) {
		context->worst_latency = u - wsi->latency_start;
		/* assumes worst_latency_info >= sizeof(buf) — TODO confirm */
		strcpy(context->worst_latency_info, buf);
	}
	lwsl_latency("%s", buf);
}
#endif
|
|
|
|
|
2013-01-17 16:50:35 +08:00
|
|
|
|
2011-01-27 06:26:52 +00:00
|
|
|
|
2013-01-17 16:50:35 +08:00
|
|
|
/**
|
2015-12-04 08:43:54 +08:00
|
|
|
* lws_rx_flow_control() - Enable and disable socket servicing for
|
2014-10-23 15:34:26 +01:00
|
|
|
* received packets.
|
2013-01-17 16:50:35 +08:00
|
|
|
*
|
|
|
|
* If the output side of a server process becomes choked, this allows flow
|
|
|
|
* control for the input side.
|
|
|
|
*
|
|
|
|
* @wsi: Websocket connection instance to get callback for
|
|
|
|
* @enable: 0 = disable read servicing for this connection, 1 = enable
|
|
|
|
*/
|
|
|
|
|
2013-03-30 09:52:21 +08:00
|
|
|
LWS_VISIBLE int
|
2015-12-04 11:08:32 +08:00
|
|
|
lws_rx_flow_control(struct lws *wsi, int enable)
|
2013-01-17 16:50:35 +08:00
|
|
|
{
|
2014-10-08 12:00:53 +08:00
|
|
|
if (enable == (wsi->rxflow_change_to & LWS_RXFLOW_ALLOW))
|
2013-03-16 11:24:23 +08:00
|
|
|
return 0;
|
|
|
|
|
2015-12-04 16:54:12 +08:00
|
|
|
lwsl_info("%s: (0x%p, %d)\n", __func__, wsi, enable);
|
2014-10-08 12:00:53 +08:00
|
|
|
wsi->rxflow_change_to = LWS_RXFLOW_PENDING_CHANGE | !!enable;
|
2013-01-17 16:50:35 +08:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2013-03-16 12:32:27 +08:00
|
|
|
/**
|
2015-12-04 08:43:54 +08:00
|
|
|
* lws_rx_flow_allow_all_protocol() - Allow all connections with this protocol to receive
|
2013-03-16 12:32:27 +08:00
|
|
|
*
|
|
|
|
* When the user server code realizes it can accept more input, it can
|
|
|
|
* call this to have the RX flow restriction removed from all connections using
|
|
|
|
* the given protocol.
|
|
|
|
*
|
|
|
|
* @protocol: all connections using this protocol will be allowed to receive
|
|
|
|
*/
|
|
|
|
|
2013-03-30 09:52:21 +08:00
|
|
|
LWS_VISIBLE void
|
2015-12-11 10:45:35 +08:00
|
|
|
lws_rx_flow_allow_all_protocol(const struct lws_context *context,
|
|
|
|
const struct lws_protocols *protocol)
|
2013-03-16 12:32:27 +08:00
|
|
|
{
|
2016-01-19 03:34:24 +08:00
|
|
|
const struct lws_context_per_thread *pt = &context->pt[0];
|
2015-12-04 11:08:32 +08:00
|
|
|
struct lws *wsi;
|
2016-01-26 20:56:56 +08:00
|
|
|
unsigned int n, m = context->count_threads;
|
2016-01-19 03:34:24 +08:00
|
|
|
|
|
|
|
while (m--) {
|
|
|
|
for (n = 0; n < pt->fds_count; n++) {
|
|
|
|
wsi = wsi_from_fd(context, pt->fds[n].fd);
|
|
|
|
if (!wsi)
|
|
|
|
continue;
|
|
|
|
if (wsi->protocol == protocol)
|
|
|
|
lws_rx_flow_control(wsi, LWS_RXFLOW_ALLOW);
|
|
|
|
}
|
|
|
|
pt++;
|
2013-03-16 12:32:27 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-01-17 16:50:35 +08:00
|
|
|
|
2011-01-28 10:00:18 +00:00
|
|
|
/**
|
2015-12-04 08:43:54 +08:00
|
|
|
* lws_canonical_hostname() - returns this host's hostname
|
2011-01-28 10:00:18 +00:00
|
|
|
*
|
|
|
|
* This is typically used by client code to fill in the host parameter
|
|
|
|
* when making a client connection. You can only call it after the context
|
|
|
|
* has been created.
|
|
|
|
*
|
2011-03-02 22:03:47 +00:00
|
|
|
* @context: Websocket context
|
2011-01-28 10:00:18 +00:00
|
|
|
*/
|
2013-03-30 09:52:21 +08:00
|
|
|
LWS_VISIBLE extern const char *
|
2015-12-04 11:08:32 +08:00
|
|
|
lws_canonical_hostname(struct lws_context *context)
|
2011-01-28 10:00:18 +00:00
|
|
|
{
|
2011-03-02 22:03:47 +00:00
|
|
|
return (const char *)context->canonical_hostname;
|
2011-01-28 10:00:18 +00:00
|
|
|
}
|
|
|
|
|
2015-12-21 18:06:38 +01:00
|
|
|
/*
 * Run the user protocol callback and, if it succeeded (returned 0), apply
 * any pending rx flow-control change.  A non-zero callback result is
 * passed straight back to the caller.
 */
int user_callback_handle_rxflow(lws_callback_function callback_function,
				struct lws *wsi,
				enum lws_callback_reasons reason, void *user,
				void *in, size_t len)
{
	int rc = callback_function(wsi, reason, user, in, len);

	if (rc)
		return rc;

	return _lws_rx_flow_control(wsi);
}
|
|
|
|
|
2010-12-18 15:13:50 +00:00
|
|
|
|
2013-10-24 22:12:03 +08:00
|
|
|
/**
|
2015-12-04 11:08:32 +08:00
|
|
|
* lws_set_proxy() - Setups proxy to lws_context.
|
|
|
|
* @context: pointer to struct lws_context you want set proxy to
|
2013-10-24 22:12:03 +08:00
|
|
|
* @proxy: pointer to c string containing proxy in format address:port
|
|
|
|
*
|
2015-12-14 08:52:03 +08:00
|
|
|
* Returns 0 if proxy string was parsed and proxy was setup.
|
2013-10-24 22:12:03 +08:00
|
|
|
* Returns -1 if @proxy is NULL or has incorrect format.
|
|
|
|
*
|
|
|
|
* This is only required if your OS does not provide the http_proxy
|
2014-10-23 15:34:26 +01:00
|
|
|
* environment variable (eg, OSX)
|
2013-10-24 22:12:03 +08:00
|
|
|
*
|
|
|
|
* IMPORTANT! You should call this function right after creation of the
|
2015-12-04 11:08:32 +08:00
|
|
|
* lws_context and before call to connect. If you call this
|
2013-10-24 22:12:03 +08:00
|
|
|
* function after connect behavior is undefined.
|
2015-12-04 11:08:32 +08:00
|
|
|
* This function will override proxy settings made on lws_context
|
2013-10-24 22:12:03 +08:00
|
|
|
* creation with genenv() call.
|
|
|
|
*/
|
|
|
|
|
|
|
|
LWS_VISIBLE int
|
2015-12-04 11:08:32 +08:00
|
|
|
lws_set_proxy(struct lws_context *context, const char *proxy)
|
2013-10-24 22:12:03 +08:00
|
|
|
{
|
|
|
|
char *p;
|
2015-11-08 10:15:01 +08:00
|
|
|
char authstring[96];
|
2015-12-14 08:52:03 +08:00
|
|
|
|
2013-10-24 22:12:03 +08:00
|
|
|
if (!proxy)
|
|
|
|
return -1;
|
|
|
|
|
2015-11-08 10:15:01 +08:00
|
|
|
p = strchr(proxy, '@');
|
|
|
|
if (p) { /* auth is around */
|
|
|
|
|
2015-11-08 12:10:26 +08:00
|
|
|
if ((unsigned int)(p - proxy) > sizeof(authstring) - 1)
|
2015-11-08 10:15:01 +08:00
|
|
|
goto auth_too_long;
|
|
|
|
|
2015-11-18 19:32:01 +08:00
|
|
|
strncpy(authstring, proxy, p - proxy);
|
2015-11-08 10:15:01 +08:00
|
|
|
// null termination not needed on input
|
|
|
|
if (lws_b64_encode_string(authstring, (p - proxy),
|
|
|
|
context->proxy_basic_auth_token,
|
|
|
|
sizeof context->proxy_basic_auth_token) < 0)
|
|
|
|
goto auth_too_long;
|
|
|
|
|
|
|
|
lwsl_notice(" Proxy auth in use\n");
|
2015-12-14 08:52:03 +08:00
|
|
|
|
2015-11-08 10:15:01 +08:00
|
|
|
proxy = p + 1;
|
|
|
|
} else
|
|
|
|
context->proxy_basic_auth_token[0] = '\0';
|
|
|
|
|
2013-10-24 22:12:03 +08:00
|
|
|
strncpy(context->http_proxy_address, proxy,
|
|
|
|
sizeof(context->http_proxy_address) - 1);
|
|
|
|
context->http_proxy_address[
|
|
|
|
sizeof(context->http_proxy_address) - 1] = '\0';
|
2015-11-08 10:15:01 +08:00
|
|
|
|
2013-10-24 22:12:03 +08:00
|
|
|
p = strchr(context->http_proxy_address, ':');
|
2015-11-08 10:15:01 +08:00
|
|
|
if (!p && !context->http_proxy_port) {
|
2013-10-24 22:12:03 +08:00
|
|
|
lwsl_err("http_proxy needs to be ads:port\n");
|
|
|
|
|
|
|
|
return -1;
|
2015-11-08 10:15:01 +08:00
|
|
|
} else {
|
2015-12-06 11:04:05 +08:00
|
|
|
if (p) {
|
|
|
|
*p = '\0';
|
|
|
|
context->http_proxy_port = atoi(p + 1);
|
|
|
|
}
|
2013-10-24 22:12:03 +08:00
|
|
|
}
|
2015-11-08 10:15:01 +08:00
|
|
|
|
2013-10-24 22:12:03 +08:00
|
|
|
lwsl_notice(" Proxy %s:%u\n", context->http_proxy_address,
|
|
|
|
context->http_proxy_port);
|
|
|
|
|
|
|
|
return 0;
|
2015-11-08 10:15:01 +08:00
|
|
|
|
|
|
|
auth_too_long:
|
|
|
|
lwsl_err("proxy auth too long\n");
|
|
|
|
|
|
|
|
return -1;
|
2013-10-24 22:12:03 +08:00
|
|
|
}
|
|
|
|
|
2010-12-18 15:13:50 +00:00
|
|
|
/**
|
2015-12-04 08:43:54 +08:00
|
|
|
* lws_get_protocol() - Returns a protocol pointer from a websocket
|
2010-12-19 22:13:26 +00:00
|
|
|
* connection.
|
2010-12-18 15:13:50 +00:00
|
|
|
* @wsi: pointer to struct websocket you want to know the protocol of
|
|
|
|
*
|
2010-12-19 22:13:26 +00:00
|
|
|
*
|
2013-01-29 17:57:39 +08:00
|
|
|
* Some apis can act on all live connections of a given protocol,
|
|
|
|
* this is how you can get a pointer to the active protocol if needed.
|
2010-12-18 15:13:50 +00:00
|
|
|
*/
|
2010-10-28 22:36:01 +01:00
|
|
|
|
2015-12-04 11:08:32 +08:00
|
|
|
LWS_VISIBLE const struct lws_protocols *
|
|
|
|
lws_get_protocol(struct lws *wsi)
|
2010-12-18 15:13:50 +00:00
|
|
|
{
|
|
|
|
return wsi->protocol;
|
|
|
|
}
|
|
|
|
|
2013-03-30 09:52:21 +08:00
|
|
|
LWS_VISIBLE int
|
2015-12-04 11:08:32 +08:00
|
|
|
lws_is_final_fragment(struct lws *wsi)
|
2011-03-07 21:16:31 +00:00
|
|
|
{
|
2016-01-11 11:34:01 +08:00
|
|
|
lwsl_info("%s: final %d, rx pk length %d, draining %d", __func__,
|
2016-01-29 21:18:54 +08:00
|
|
|
wsi->u.ws.final, wsi->u.ws.rx_packet_length,
|
|
|
|
wsi->u.ws.rx_draining_ext);
|
2016-01-11 11:34:01 +08:00
|
|
|
return wsi->u.ws.final && !wsi->u.ws.rx_packet_length && !wsi->u.ws.rx_draining_ext;
|
2011-03-07 21:16:31 +00:00
|
|
|
}
|
2011-11-07 17:19:25 +08:00
|
|
|
|
2013-03-30 09:52:21 +08:00
|
|
|
LWS_VISIBLE unsigned char
|
2015-12-04 11:08:32 +08:00
|
|
|
lws_get_reserved_bits(struct lws *wsi)
|
2013-01-09 18:06:55 +08:00
|
|
|
{
|
2013-01-21 11:04:23 +08:00
|
|
|
return wsi->u.ws.rsv;
|
2013-01-09 18:06:55 +08:00
|
|
|
}
|
|
|
|
|
2013-02-18 16:30:10 +08:00
|
|
|
int
|
2015-12-04 11:08:32 +08:00
|
|
|
lws_ensure_user_space(struct lws *wsi)
|
2011-11-07 17:19:25 +08:00
|
|
|
{
|
2014-10-22 15:37:28 +08:00
|
|
|
lwsl_info("%s: %p protocol %p\n", __func__, wsi, wsi->protocol);
|
2013-02-15 22:31:55 +08:00
|
|
|
if (!wsi->protocol)
|
2013-02-18 16:30:10 +08:00
|
|
|
return 1;
|
2013-02-15 22:31:55 +08:00
|
|
|
|
2011-11-07 17:19:25 +08:00
|
|
|
/* allocate the per-connection user memory (if any) */
|
|
|
|
|
|
|
|
if (wsi->protocol->per_session_data_size && !wsi->user_space) {
|
2014-12-04 23:59:35 +01:00
|
|
|
wsi->user_space = lws_zalloc(wsi->protocol->per_session_data_size);
|
2011-11-07 17:19:25 +08:00
|
|
|
if (wsi->user_space == NULL) {
|
2013-01-10 19:50:35 +08:00
|
|
|
lwsl_err("Out of memory for conn user space\n");
|
2013-02-18 16:30:10 +08:00
|
|
|
return 1;
|
2011-11-07 17:19:25 +08:00
|
|
|
}
|
2014-10-22 15:37:28 +08:00
|
|
|
} else
|
2015-12-04 16:54:12 +08:00
|
|
|
lwsl_info("%s: %p protocol pss %u, user_space=%d\n",
|
|
|
|
__func__, wsi, wsi->protocol->per_session_data_size,
|
|
|
|
wsi->user_space);
|
2013-02-18 16:30:10 +08:00
|
|
|
return 0;
|
2011-11-07 17:19:25 +08:00
|
|
|
}
|
2013-01-10 19:50:35 +08:00
|
|
|
|
2014-04-03 23:34:09 +08:00
|
|
|
LWS_VISIBLE void lwsl_emit_stderr(int level, const char *line)
|
2013-01-12 09:17:42 +08:00
|
|
|
{
|
2016-02-21 07:45:03 +08:00
|
|
|
time_t o_now = time(NULL);
|
2014-02-26 21:37:31 +01:00
|
|
|
unsigned long long now;
|
2016-02-21 21:41:22 +08:00
|
|
|
struct tm *ptm = NULL;
|
2015-12-04 16:54:12 +08:00
|
|
|
char buf[300];
|
2016-02-21 21:41:22 +08:00
|
|
|
#ifndef WIN32
|
|
|
|
struct tm tm;
|
|
|
|
#endif
|
|
|
|
int n;
|
2013-01-10 19:50:35 +08:00
|
|
|
|
2016-03-01 07:33:56 +08:00
|
|
|
#ifndef _WIN32_WCE
|
2016-02-21 21:41:22 +08:00
|
|
|
#ifdef WIN32
|
|
|
|
ptm = localtime(&o_now);
|
|
|
|
#else
|
|
|
|
if (localtime_r(&o_now, &tm))
|
|
|
|
ptm = &tm;
|
2016-03-01 07:33:56 +08:00
|
|
|
#endif
|
2016-02-21 21:41:22 +08:00
|
|
|
#endif
|
2013-01-19 11:17:56 +08:00
|
|
|
buf[0] = '\0';
|
2015-12-04 16:54:12 +08:00
|
|
|
for (n = 0; n < LLL_COUNT; n++) {
|
|
|
|
if (level != (1 << n))
|
|
|
|
continue;
|
|
|
|
now = time_in_microseconds() / 100;
|
2016-02-21 21:41:22 +08:00
|
|
|
if (ptm)
|
2016-02-21 07:45:03 +08:00
|
|
|
sprintf(buf, "[%04d/%02d/%02d %02d:%02d:%02d:%04d] %s: ",
|
2016-02-21 21:41:22 +08:00
|
|
|
ptm->tm_year + 1900,
|
|
|
|
ptm->tm_mon,
|
|
|
|
ptm->tm_mday,
|
|
|
|
ptm->tm_hour,
|
|
|
|
ptm->tm_min,
|
|
|
|
ptm->tm_sec,
|
2016-02-21 07:45:03 +08:00
|
|
|
(int)(now % 10000), log_level_names[n]);
|
|
|
|
else
|
|
|
|
sprintf(buf, "[%llu:%04d] %s: ",
|
|
|
|
(unsigned long long) now / 10000,
|
|
|
|
(int)(now % 10000), log_level_names[n]);
|
2015-12-04 16:54:12 +08:00
|
|
|
break;
|
|
|
|
}
|
2013-02-11 17:13:32 +08:00
|
|
|
|
2013-01-19 11:17:56 +08:00
|
|
|
fprintf(stderr, "%s%s", buf, line);
|
|
|
|
}
|
2013-01-19 11:12:16 +08:00
|
|
|
|
2014-12-10 18:50:28 -06:00
|
|
|
LWS_VISIBLE void _lws_logv(int filter, const char *format, va_list vl)
|
2013-01-19 11:17:56 +08:00
|
|
|
{
|
|
|
|
char buf[256];
|
|
|
|
|
|
|
|
if (!(log_level & filter))
|
|
|
|
return;
|
2013-01-10 19:50:35 +08:00
|
|
|
|
2014-12-10 18:50:28 -06:00
|
|
|
vsnprintf(buf, sizeof(buf), format, vl);
|
2013-02-11 17:13:32 +08:00
|
|
|
buf[sizeof(buf) - 1] = '\0';
|
2013-01-12 09:17:42 +08:00
|
|
|
|
2013-01-19 11:17:56 +08:00
|
|
|
lwsl_emit(filter, buf);
|
2013-01-10 19:50:35 +08:00
|
|
|
}
|
|
|
|
|
2014-12-10 18:50:28 -06:00
|
|
|
/* Varargs front-end for _lws_logv(). */
LWS_VISIBLE void _lws_log(int filter, const char *format, ...)
{
	va_list args;

	va_start(args, format);
	_lws_logv(filter, format, args);
	va_end(args);
}
|
|
|
|
|
2013-01-10 19:50:35 +08:00
|
|
|
/**
|
|
|
|
* lws_set_log_level() - Set the logging bitfield
|
|
|
|
* @level: OR together the LLL_ debug contexts you want output from
|
2013-01-12 09:17:42 +08:00
|
|
|
* @log_emit_function: NULL to leave it as it is, or a user-supplied
|
|
|
|
* function to perform log string emission instead of
|
|
|
|
* the default stderr one.
|
2013-01-10 19:50:35 +08:00
|
|
|
*
|
2013-02-19 10:01:48 +08:00
|
|
|
* log level defaults to "err", "warn" and "notice" contexts enabled and
|
2013-01-12 09:17:42 +08:00
|
|
|
* emission on stderr.
|
2013-01-10 19:50:35 +08:00
|
|
|
*/
|
|
|
|
|
2015-12-04 11:30:53 +08:00
|
|
|
LWS_VISIBLE void lws_set_log_level(int level,
|
2015-12-04 16:54:12 +08:00
|
|
|
void (*func)(int level, const char *line))
|
2013-01-10 19:50:35 +08:00
|
|
|
{
|
|
|
|
log_level = level;
|
2015-12-04 16:54:12 +08:00
|
|
|
if (func)
|
|
|
|
lwsl_emit = func;
|
2014-04-27 13:28:22 +02:00
|
|
|
}
|
2014-08-16 09:54:27 +08:00
|
|
|
|
|
|
|
/**
|
|
|
|
* lws_use_ssl() - Find out if connection is using SSL
|
|
|
|
* @wsi: websocket connection to check
|
|
|
|
*
|
|
|
|
* Returns 0 if the connection is not using SSL, 1 if using SSL and
|
|
|
|
* using verified cert, and 2 if using SSL but the cert was not
|
|
|
|
* checked (appears for client wsi told to skip check on connection)
|
|
|
|
*/
|
|
|
|
LWS_VISIBLE int
|
2015-12-04 11:08:32 +08:00
|
|
|
lws_is_ssl(struct lws *wsi)
|
2014-08-16 09:54:27 +08:00
|
|
|
{
|
2014-08-19 08:41:26 +08:00
|
|
|
#ifdef LWS_OPENSSL_SUPPORT
|
2014-08-16 09:54:27 +08:00
|
|
|
return wsi->use_ssl;
|
2014-08-19 08:41:26 +08:00
|
|
|
#else
|
2015-11-02 13:10:33 +08:00
|
|
|
(void)wsi;
|
2014-08-19 08:41:26 +08:00
|
|
|
return 0;
|
|
|
|
#endif
|
2014-08-16 09:54:27 +08:00
|
|
|
}
|
2014-08-18 22:49:39 +08:00
|
|
|
|
|
|
|
/**
|
|
|
|
* lws_partial_buffered() - find out if lws buffered the last write
|
|
|
|
* @wsi: websocket connection to check
|
|
|
|
*
|
2015-12-04 08:43:54 +08:00
|
|
|
* Returns 1 if you cannot use lws_write because the last
|
2014-08-18 22:49:39 +08:00
|
|
|
* write on this connection is still buffered, and can't be cleared without
|
|
|
|
* returning to the service loop and waiting for the connection to be
|
|
|
|
* writeable again.
|
2015-12-14 08:52:03 +08:00
|
|
|
*
|
2015-12-04 08:43:54 +08:00
|
|
|
* If you will try to do >1 lws_write call inside a single
|
2014-08-18 22:49:39 +08:00
|
|
|
* WRITEABLE callback, you must check this after every write and bail if
|
|
|
|
* set, ask for a new writeable callback and continue writing from there.
|
2015-12-14 08:52:03 +08:00
|
|
|
*
|
2014-08-18 22:49:39 +08:00
|
|
|
* This is never set at the start of a writeable callback, but any write
|
|
|
|
* may set it.
|
|
|
|
*/
|
|
|
|
|
|
|
|
LWS_VISIBLE int
|
2015-12-04 11:08:32 +08:00
|
|
|
lws_partial_buffered(struct lws *wsi)
|
2014-08-18 22:49:39 +08:00
|
|
|
{
|
2015-12-17 17:03:59 +08:00
|
|
|
return !!wsi->trunc_len;
|
2014-08-18 22:49:39 +08:00
|
|
|
}
|
2014-10-08 12:00:53 +08:00
|
|
|
|
2015-12-15 21:15:58 +08:00
|
|
|
void lws_set_protocol_write_pending(struct lws *wsi,
|
2014-10-08 12:00:53 +08:00
|
|
|
enum lws_pending_protocol_send pend)
|
|
|
|
{
|
2014-10-29 09:39:08 +08:00
|
|
|
lwsl_info("setting pps %d\n", pend);
|
2015-12-14 08:52:03 +08:00
|
|
|
|
2014-10-08 12:00:53 +08:00
|
|
|
if (wsi->pps)
|
|
|
|
lwsl_err("pps overwrite\n");
|
|
|
|
wsi->pps = pend;
|
2015-12-04 08:43:54 +08:00
|
|
|
lws_rx_flow_control(wsi, 0);
|
2015-12-16 18:19:08 +08:00
|
|
|
lws_callback_on_writable(wsi);
|
2014-10-18 18:52:39 +08:00
|
|
|
}
|
2014-10-29 09:39:08 +08:00
|
|
|
|
|
|
|
LWS_VISIBLE size_t
|
2015-12-04 11:08:32 +08:00
|
|
|
lws_get_peer_write_allowance(struct lws *wsi)
|
2014-10-29 09:39:08 +08:00
|
|
|
{
|
|
|
|
#ifdef LWS_USE_HTTP2
|
|
|
|
/* only if we are using HTTP2 on this connection */
|
2015-12-17 17:03:59 +08:00
|
|
|
if (wsi->mode != LWSCM_HTTP2_SERVING)
|
2014-10-29 09:39:08 +08:00
|
|
|
return -1;
|
|
|
|
/* user is only interested in how much he can send, or that he can't */
|
|
|
|
if (wsi->u.http2.tx_credit <= 0)
|
|
|
|
return 0;
|
2015-12-14 08:52:03 +08:00
|
|
|
|
2014-10-29 09:39:08 +08:00
|
|
|
return wsi->u.http2.tx_credit;
|
|
|
|
#else
|
2015-11-02 13:10:33 +08:00
|
|
|
(void)wsi;
|
2014-10-29 09:39:08 +08:00
|
|
|
return -1;
|
|
|
|
#endif
|
|
|
|
}
|
2014-11-08 11:18:47 +08:00
|
|
|
|
|
|
|
LWS_VISIBLE void
|
2015-12-04 11:08:32 +08:00
|
|
|
lws_union_transition(struct lws *wsi, enum connection_mode mode)
|
2014-11-08 11:18:47 +08:00
|
|
|
{
|
2015-12-25 12:44:12 +08:00
|
|
|
lwsl_debug("%s: %p: mode %d\n", __func__, wsi, mode);
|
2014-11-08 11:18:47 +08:00
|
|
|
memset(&wsi->u, 0, sizeof(wsi->u));
|
|
|
|
wsi->mode = mode;
|
|
|
|
}
|
2015-12-04 10:39:23 +08:00
|
|
|
|
lws_plat_fd implement platform default handlers
This is a rewrite of the patch from Soapyman here
https://github.com/warmcat/libwebsockets/pull/363
The main changes compared to Soapyman's original patch are
- There's no new stuff in the info struct user code does any overrides
it may want to do explicitly after lws_context_create returns
- User overrides for file ops can call through (subclass) to the original
platform implementation using lws_get_fops_plat()
- A typedef is provided for plat-specific fd type
- Public helpers are provided to allow user code to be platform-independent
about file access, using the lws platform file operations underneath:
static inline lws_filefd_type
lws_plat_file_open(struct lws_plat_file_ops *fops, const char *filename,
unsigned long *filelen, int flags)
static inline int
lws_plat_file_close(struct lws_plat_file_ops *fops, lws_filefd_type fd)
static inline unsigned long
lws_plat_file_seek_cur(struct lws_plat_file_ops *fops, lws_filefd_type fd,
long offset_from_cur_pos)
static inline int
lws_plat_file_read(struct lws_plat_file_ops *fops, lws_filefd_type fd,
unsigned long *amount, unsigned char *buf, unsigned long len)
static inline int
lws_plat_file_write(struct lws_plat_file_ops *fops, lws_filefd_type fd,
unsigned long *amount, unsigned char *buf, unsigned long len)
There's example documentation and implementation in the test server.
Signed-off-by: Andy Green <andy.green@linaro.org>
2015-12-10 07:58:58 +08:00
|
|
|
LWS_VISIBLE struct lws_plat_file_ops *
|
|
|
|
lws_get_fops(struct lws_context *context)
|
|
|
|
{
|
|
|
|
return &context->fops;
|
|
|
|
}
|
2015-12-04 10:39:23 +08:00
|
|
|
|
2015-12-11 09:36:14 +08:00
|
|
|
LWS_VISIBLE LWS_EXTERN struct lws_context *
|
2015-12-17 18:25:25 +08:00
|
|
|
lws_get_context(const struct lws *wsi)
|
2015-12-11 09:36:14 +08:00
|
|
|
{
|
|
|
|
return wsi->context;
|
|
|
|
}
|
|
|
|
|
2016-01-19 03:34:24 +08:00
|
|
|
LWS_VISIBLE LWS_EXTERN int
|
|
|
|
lws_get_count_threads(struct lws_context *context)
|
|
|
|
{
|
|
|
|
return context->count_threads;
|
|
|
|
}
|
|
|
|
|
2015-12-14 07:16:32 +08:00
|
|
|
LWS_VISIBLE LWS_EXTERN void *
|
|
|
|
lws_wsi_user(struct lws *wsi)
|
|
|
|
{
|
|
|
|
return wsi->user_space;
|
|
|
|
}
|
2015-12-26 17:20:34 +08:00
|
|
|
|
|
|
|
LWS_VISIBLE LWS_EXTERN void
|
|
|
|
lws_close_reason(struct lws *wsi, enum lws_close_status status,
|
|
|
|
unsigned char *buf, size_t len)
|
|
|
|
{
|
|
|
|
unsigned char *p, *start;
|
2016-01-19 03:34:24 +08:00
|
|
|
int budget = sizeof(wsi->u.ws.ping_payload_buf) - LWS_PRE;
|
2015-12-26 17:20:34 +08:00
|
|
|
|
|
|
|
assert(wsi->mode == LWSCM_WS_SERVING || wsi->mode == LWSCM_WS_CLIENT);
|
|
|
|
|
2016-01-11 11:34:01 +08:00
|
|
|
start = p = &wsi->u.ws.ping_payload_buf[LWS_PRE];
|
2015-12-26 17:20:34 +08:00
|
|
|
|
|
|
|
*p++ = (((int)status) >> 8) & 0xff;
|
|
|
|
*p++ = ((int)status) & 0xff;
|
|
|
|
|
|
|
|
if (buf)
|
|
|
|
while (len-- && p < start + budget)
|
|
|
|
*p++ = *buf++;
|
|
|
|
|
|
|
|
wsi->u.ws.close_in_ping_buffer_len = p - start;
|
|
|
|
}
|
2015-12-28 14:24:49 +08:00
|
|
|
|
|
|
|
LWS_EXTERN int
|
|
|
|
_lws_rx_flow_control(struct lws *wsi)
|
|
|
|
{
|
|
|
|
/* there is no pending change */
|
2016-01-11 11:34:01 +08:00
|
|
|
if (!(wsi->rxflow_change_to & LWS_RXFLOW_PENDING_CHANGE)) {
|
2016-01-19 03:34:24 +08:00
|
|
|
lwsl_debug("%s: no pending change\n", __func__);
|
2015-12-28 14:24:49 +08:00
|
|
|
return 0;
|
2016-01-11 11:34:01 +08:00
|
|
|
}
|
2015-12-28 14:24:49 +08:00
|
|
|
|
|
|
|
/* stuff is still buffered, not ready to really accept new input */
|
|
|
|
if (wsi->rxflow_buffer) {
|
|
|
|
/* get ourselves called back to deal with stashed buffer */
|
|
|
|
lws_callback_on_writable(wsi);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* pending is cleared, we can change rxflow state */
|
|
|
|
|
|
|
|
wsi->rxflow_change_to &= ~LWS_RXFLOW_PENDING_CHANGE;
|
|
|
|
|
|
|
|
lwsl_info("rxflow: wsi %p change_to %d\n", wsi,
|
|
|
|
wsi->rxflow_change_to & LWS_RXFLOW_ALLOW);
|
|
|
|
|
|
|
|
/* adjust the pollfd for this wsi */
|
|
|
|
|
|
|
|
if (wsi->rxflow_change_to & LWS_RXFLOW_ALLOW) {
|
|
|
|
if (lws_change_pollfd(wsi, 0, LWS_POLLIN)) {
|
|
|
|
lwsl_info("%s: fail\n", __func__);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
} else
|
|
|
|
if (lws_change_pollfd(wsi, LWS_POLLIN, 0))
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
2015-12-30 11:43:36 +08:00
|
|
|
|
|
|
|
/*
 * Incrementally validate len bytes at buf as UTF-8.
 *
 * *state carries the validator state between calls: pass 0 initially; it
 * is updated on return so a message can be checked in fragments (a
 * nonzero *state on return means a multi-byte sequence is still open).
 *
 * Returns 0 if the bytes are valid so far, 1 on an illegal sequence.
 */
LWS_EXTERN int
lws_check_utf8(unsigned char *state, unsigned char *buf, size_t len)
{
	/*
	 * Compact DFA table.  Entries 0..20 describe the continuation byte
	 * expected after lead bytes 0xe0..0xf4; entries 21..23 are the
	 * generic follow-on states.  Each entry appears to pack the low
	 * bound of the allowed continuation range in the high nibble, a
	 * range-width code in bits 2..3 and the next-state index in
	 * bits 0..1 — keep the values exactly as-is.
	 */
	static const unsigned char e0f4[] = {
		0xa0 | ((2 - 1) << 2) | 1, /* e0 */
		0x80 | ((4 - 1) << 2) | 1, /* e1 */
		0x80 | ((4 - 1) << 2) | 1, /* e2 */
		0x80 | ((4 - 1) << 2) | 1, /* e3 */
		0x80 | ((4 - 1) << 2) | 1, /* e4 */
		0x80 | ((4 - 1) << 2) | 1, /* e5 */
		0x80 | ((4 - 1) << 2) | 1, /* e6 */
		0x80 | ((4 - 1) << 2) | 1, /* e7 */
		0x80 | ((4 - 1) << 2) | 1, /* e8 */
		0x80 | ((4 - 1) << 2) | 1, /* e9 */
		0x80 | ((4 - 1) << 2) | 1, /* ea */
		0x80 | ((4 - 1) << 2) | 1, /* eb */
		0x80 | ((4 - 1) << 2) | 1, /* ec */
		0x80 | ((2 - 1) << 2) | 1, /* ed */
		0x80 | ((4 - 1) << 2) | 1, /* ee */
		0x80 | ((4 - 1) << 2) | 1, /* ef */
		0x90 | ((3 - 1) << 2) | 2, /* f0 */
		0x80 | ((4 - 1) << 2) | 2, /* f1 */
		0x80 | ((4 - 1) << 2) | 2, /* f2 */
		0x80 | ((4 - 1) << 2) | 2, /* f3 */
		0x80 | ((1 - 1) << 2) | 2, /* f4 */

		0, /* s0 */
		0x80 | ((4 - 1) << 2) | 0, /* s2 */
		0x80 | ((4 - 1) << 2) | 1, /* s3 */
	};
	unsigned char s = *state;
	size_t i;

	for (i = 0; i < len; i++) {
		unsigned char c = buf[i];

		if (!s) {
			/* expecting ASCII or a lead byte */
			if (c < 0x80)
				continue;
			if (c < 0xc2 || c > 0xf4)
				return 1;	/* illegal lead byte */
			s = (c < 0xe0) ? (0x80 | ((4 - 1) << 2)) :
					 e0f4[c - 0xe0];
			continue;
		}

		/* expecting a continuation byte inside the allowed window */
		if (c < (s & 0xf0) ||
		    c >= (s & 0xf0) + 0x10 + ((s << 2) & 0x30))
			return 1;
		s = e0f4[21 + (s & 3)];
	}

	*state = s;

	return 0;
}
|
2016-01-11 11:34:01 +08:00
|
|
|
|
2016-01-14 11:37:56 +08:00
|
|
|
/**
 * lws_parse_uri: cut up prot://ads:port/path into pieces
 *	    Notice it does so by dropping '\0' into the input string,
 *	    and the leading / on the path is consequently lost
 *
 * @p: incoming uri string.. will get written to
 * @prot: result pointer for protocol part (https://)
 * @ads: result pointer for address part
 * @port: result pointer for port part
 * @path: result pointer for path part
 */
|
|
|
|
|
2016-01-14 13:39:02 +08:00
|
|
|
LWS_VISIBLE LWS_EXTERN int
lws_parse_uri(char *p, const char **prot, const char **ads, int *port,
	      const char **path)
{
	static const char *slash = "/";
	const char *end;

	/* split off the protocol part, if a "xxx://" prefix exists */
	*prot = p;
	while (*p && (*p != ':' || p[1] != '/' || p[2] != '/'))
		p++;
	if (*p) {
		*p = '\0';
		p += 3;
	} else {
		/* no scheme: report an empty protocol string and rewind */
		end = p;
		p = (char *)*prot;
		*prot = end;
	}

	/* the scheme implies a default port, before any explicit :port */
	*ads = p;
	if (!strcmp(*prot, "http") || !strcmp(*prot, "ws"))
		*port = 80;
	else if (!strcmp(*prot, "https") || !strcmp(*prot, "wss"))
		*port = 443;

	/* scan past the address to a :port or /path boundary */
	while (*p && *p != ':' && *p != '/')
		p++;
	if (*p == ':') {
		*p++ = '\0';
		*port = atoi(p);
		while (*p && *p != '/')
			p++;
	}

	/* path defaults to "/"; an explicit path loses its leading '/' */
	*path = slash;
	if (*p) {
		*p++ = '\0';
		if (*p)
			*path = p;
	}

	return 0;
}
|
|
|
|
|
2016-09-15 02:36:22 +08:00
|
|
|
/*
 * lws_snprintf() - snprintf() with a simplified return contract.
 *
 * Returns the number of chars actually placed in str, or size if the
 * output was truncated (or vsnprintf() reported an error).  The result is
 * always NUL-terminated when size is nonzero; a zero size returns 0
 * without touching str.
 */
int
lws_snprintf(char *str, size_t size, const char *format, ...)
{
	va_list ap;
	int n;

	if (!size)
		return 0;

	va_start(ap, format);
	n = vsnprintf(str, size, format, ap);
	va_end(ap);

	/*
	 * vsnprintf() returns the would-have-been length on truncation and
	 * a negative value on error; fold both cases into "size" with
	 * explicit casts, instead of the old (n >= size) test that relied
	 * on the implicit signed -> unsigned conversion (which silently
	 * turned a negative error result into a huge unsigned value) and
	 * implicitly narrowed the size_t return to int.
	 */
	if (n < 0 || (size_t)n >= size)
		return (int)size;

	return n;
}
|
|
|
|
|
2016-01-11 11:34:01 +08:00
|
|
|
#ifdef LWS_NO_EXTENSIONS

/* we need to provide dummy callbacks for internal exts
 * so user code runs when faced with a lib compiled with
 * extensions disabled.
 */

/*
 * No-op stand-in for the permessage-deflate extension callback, compiled
 * only when the library is built with LWS_NO_EXTENSIONS: it ignores every
 * argument and reports success (0), so user code referencing the symbol
 * still links and runs.
 */
int
lws_extension_callback_pm_deflate(struct lws_context *context,
				  const struct lws_extension *ext,
				  struct lws *wsi,
				  enum lws_extension_callback_reasons reason,
				  void *user, void *in, size_t len)
{
	/* reference each arg to silence unused-parameter warnings */
	(void)context;
	(void)ext;
	(void)wsi;
	(void)reason;
	(void)user;
	(void)in;
	(void)len;

	return 0;
}
#endif