2011-03-07 17:54:06 +00:00
|
|
|
/*
|
2010-11-08 17:12:19 +00:00
|
|
|
* libwebsockets - small server side websockets and web server implementation
|
2010-12-19 22:13:26 +00:00
|
|
|
*
|
2017-09-23 12:55:21 +08:00
|
|
|
* Copyright (C) 2010-2017 Andy Green <andy@warmcat.com>
|
2010-11-08 17:12:19 +00:00
|
|
|
*
|
|
|
|
* This library is free software; you can redistribute it and/or
|
|
|
|
* modify it under the terms of the GNU Lesser General Public
|
|
|
|
* License as published by the Free Software Foundation:
|
|
|
|
* version 2.1 of the License.
|
|
|
|
*
|
|
|
|
* This library is distributed in the hope that it will be useful,
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
|
|
* Lesser General Public License for more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU Lesser General Public
|
|
|
|
* License along with this library; if not, write to the Free Software
|
|
|
|
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
|
|
|
|
* MA 02110-1301 USA
|
2010-10-31 17:51:39 +00:00
|
|
|
*/
|
|
|
|
|
2018-05-03 10:49:36 +08:00
|
|
|
#include "core/private.h"
|
2016-02-29 18:48:55 +08:00
|
|
|
|
|
|
|
#ifdef LWS_HAVE_SYS_TYPES_H
|
2016-02-21 21:25:48 +08:00
|
|
|
#include <sys/types.h>
|
2016-02-29 18:48:55 +08:00
|
|
|
#endif
|
|
|
|
|
2017-09-28 11:29:03 +08:00
|
|
|
#ifdef LWS_WITH_IPV6
|
2016-12-15 09:58:20 +08:00
|
|
|
#if defined(WIN32) || defined(_WIN32)
|
2017-12-01 11:36:31 +08:00
|
|
|
#include <wincrypt.h>
|
2018-08-29 13:29:07 +02:00
|
|
|
#include <iphlpapi.h>
|
2016-12-15 09:58:20 +08:00
|
|
|
#else
|
|
|
|
#include <net/if.h>
|
|
|
|
#endif
|
|
|
|
#endif
|
|
|
|
|
2014-04-03 07:29:50 +08:00
|
|
|
/* bitmask of log classes currently enabled; ERR/WARN/NOTICE by default */
int log_level = LLL_ERR | LLL_WARN | LLL_NOTICE;

/*
 * Pluggable log emission function.  Defaults to lwsl_emit_stderr except on
 * OP-TEE, which has no stderr: there the platform layer must install its
 * own emitter.
 */
static void (*lwsl_emit)(int level, const char *line)
#ifndef LWS_PLAT_OPTEE
	= lwsl_emit_stderr
#endif
	;
|
|
|
|
#ifndef LWS_PLAT_OPTEE
/*
 * Textual name for each log class, indexed by the bit position of the
 * corresponding LLL_* flag (LLL_ERR is bit 0 -> "ERR", etc).  The trailing
 * "?" entries pad the table for currently-unassigned bits.
 */
static const char * const log_level_names[] = {
	"ERR",
	"WARN",
	"NOTICE",
	"INFO",
	"DEBUG",
	"PARSER",
	"HEADER",
	"EXTENSION",
	"CLIENT",
	"LATENCY",
	"USER",
	"THREAD",
	"?",
	"?"
};
#endif
|
2013-01-10 19:50:35 +08:00
|
|
|
|
2018-04-02 11:55:17 +08:00
|
|
|
#if defined (_DEBUG)
|
|
|
|
/*
 * Replace only the role bits of the wsi state word, leaving the other
 * state bits untouched (debug build: logs the resulting state).
 */
void lwsi_set_role(struct lws *wsi, lws_wsi_state_t role)
{
	lws_wsi_state_t s = wsi->wsistate;

	/* clear the old role bits, then or in the new role */
	s &= ~LWSI_ROLE_MASK;
	s |= role;
	wsi->wsistate = s;

	lwsl_debug("lwsi_set_role(%p, 0x%x)\n", wsi, wsi->wsistate);
}
|
|
|
|
|
|
|
|
/*
 * Replace only the connection-state (LRS) bits of the wsi state word,
 * leaving the role bits untouched (debug build: logs the resulting state).
 */
void lwsi_set_state(struct lws *wsi, lws_wsi_state_t lrs)
{
	lws_wsi_state_t s = wsi->wsistate;

	/* clear the old LRS bits, then or in the new state */
	s &= ~LRS_MASK;
	s |= lrs;
	wsi->wsistate = s;

	lwsl_debug("lwsi_set_state(%p, 0x%x)\n", wsi, wsi->wsistate);
}
|
|
|
|
#endif
|
|
|
|
|
2018-04-27 19:16:50 +08:00
|
|
|
/*
 * Convert a single hex digit character to its numeric value.
 *
 * Returns 0..15 for '0'-'9', 'a'-'f', 'A'-'F'; -1 for anything else.
 */
signed char char_to_hex(const char c)
{
	signed char v = -1;

	if (c >= '0' && c <= '9')
		v = (signed char)(c - '0');
	else if (c >= 'a' && c <= 'f')
		v = (signed char)(c - 'a' + 0xa);
	else if (c >= 'A' && c <= 'F')
		v = (signed char)(c - 'A' + 0xa);

	return v;
}
|
|
|
|
|
2018-06-23 12:56:21 +08:00
|
|
|
/*
 * open() wrapper that applies FD_CLOEXEC to the new descriptor.
 *
 * Consumes a trailing mode argument only when the flags require one
 * (O_CREAT, or O_TMPFILE where available), exactly like open(2).
 *
 * Returns the new fd, or -1 on error (including failure to set CLOEXEC,
 * in which case the fd is closed again).
 */
int lws_open(const char *__file, int __oflag, ...)
{
	va_list ap;
	int fd;
	int needs_mode = (__oflag & O_CREAT) == O_CREAT;

#if defined(O_TMPFILE)
	needs_mode = needs_mode || ((__oflag & O_TMPFILE) == O_TMPFILE);
#endif

	va_start(ap, __oflag);
	if (needs_mode)
		/* last arg is really a mode_t.  But windows... */
		fd = open(__file, __oflag, va_arg(ap, uint32_t));
	else
		fd = open(__file, __oflag);
	va_end(ap);

	if (fd == -1)
		return fd;

	if (lws_plat_apply_FD_CLOEXEC(fd)) {
		/* could not mark it close-on-exec: fail rather than leak */
		close(fd);

		return -1;
	}

	return fd;
}
|
|
|
|
|
vhost_destroy: use vhost wsi reference counting to trigger destroy
This changes the vhost destroy flow to only hand off the listen
socket if another vhost sharing it, and mark the vhost as
being_destroyed.
Each tsi calls lws_check_deferred_free() once a second, if it sees
any vhost being_destroyed there, it closes all wsi on its tsi on
the same vhost, one time.
As the wsi on the vhost complete close (ie, after libuv async close
if on libuv event loop), they decrement a reference count for all
wsi open on the vhost. The tsi who closes the last one then
completes the destroy flow for the vhost itself... it's random
which tsi completes the vhost destroy but since there are no
wsi left on the vhost, and it holds the context lock, nothing
can conflict.
The advantage of this is that owning tsi do the close for wsi
that are bound to the vhost under destruction, at a time when
they are guaranteed to be idle for service, and they do it with
both vhost and context locks owned, so no other service thread
can conflict for stuff protected by those either.
For the situation the user code may have allocations attached to
the vhost, this adds args to lws_vhost_destroy() to allow destroying
the user allocations just before the vhost is freed.
2018-06-16 09:31:07 +08:00
|
|
|
/*
 * Bind wsi to vhost vh and take a reference on the vhost's count of
 * bound wsi (used to defer final vhost destruction until all its wsi
 * are gone).  No-op if the wsi is already bound to vh.
 *
 * Takes and releases the context lock around the bind.
 */
void
lws_vhost_bind_wsi(struct lws_vhost *vh, struct lws *wsi)
{
	if (wsi->vhost == vh)
		return;

	lws_context_lock(vh->context, __func__); /* ---------- context { */
	wsi->vhost = vh;
	vh->count_bound_wsi++;
	lws_context_unlock(vh->context); /* } context ---------- */

	lwsl_info("%s: vh %s: count_bound_wsi %d\n",
		  __func__, vh->name, vh->count_bound_wsi);
	/* we just incremented it under lock, so it must be positive */
	assert(wsi->vhost->count_bound_wsi > 0);
}
|
|
|
|
|
|
|
|
/*
 * Drop wsi's binding to its vhost, releasing one reference on the vhost's
 * bound-wsi count.  If this was the last bound wsi and the vhost is
 * flagged being_destroyed, finalize the vhost destruction here (any tsi
 * may end up being the one that does this; we hold the context lock so
 * nothing can conflict).  No-op if the wsi has no vhost.
 */
void
lws_vhost_unbind_wsi(struct lws *wsi)
{
	if (!wsi->vhost)
		return;

	lws_context_lock(wsi->context, __func__); /* ---------- context { */

	assert(wsi->vhost->count_bound_wsi > 0);
	wsi->vhost->count_bound_wsi--;
	lwsl_info("%s: vh %s: count_bound_wsi %d\n", __func__,
		  wsi->vhost->name, wsi->vhost->count_bound_wsi);

	if (!wsi->vhost->count_bound_wsi &&
	    wsi->vhost->being_destroyed) {
		/*
		 * We have closed all wsi that were bound to this vhost
		 * by any pt: nothing can be servicing any wsi belonging
		 * to it any more.
		 *
		 * Finalize the vh destruction
		 */
		__lws_vhost_destroy2(wsi->vhost);
	}
	wsi->vhost = NULL;

	lws_context_unlock(wsi->context); /* } context ---------- */
}
|
|
|
|
|
2015-06-25 17:51:07 +02:00
|
|
|
/*
 * Final teardown and free of a wsi.  Caller must hold the relevant locks
 * (double-underscore naming convention in this file) and must not use the
 * wsi afterwards.  NULL is tolerated as a no-op.
 *
 * NOTE(review): the teardown order below (user space, buflists, vhost
 * listen pointer, role, timeout list, event loop, vhost unbind) appears
 * deliberate — preserve it when modifying.
 */
void
__lws_free_wsi(struct lws *wsi)
{
	if (!wsi)
		return;

	/*
	 * Protocol user data may be allocated either internally by lws
	 * or by the user.  We should only free what we allocated.
	 */
	if (wsi->protocol && wsi->protocol->per_session_data_size &&
	    wsi->user_space && !wsi->user_space_externally_allocated)
		lws_free(wsi->user_space);

	/* drop any buffered rx / tx still held on the wsi */
	lws_buflist_destroy_all_segments(&wsi->buflist);
	lws_buflist_destroy_all_segments(&wsi->buflist_out);
	lws_free_set_NULL(wsi->udp);

	/* if we were the vhost's listen wsi, the vhost no longer has one */
	if (wsi->vhost && wsi->vhost->lserv_wsi == wsi)
		wsi->vhost->lserv_wsi = NULL;
#if !defined(LWS_NO_CLIENT)
	lws_dll_lws_remove(&wsi->dll_active_client_conns);
#endif
	wsi->context->count_wsi_allocated--;

#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
	/* give back any attached header table (ah) */
	__lws_header_table_detach(wsi, 0);
#endif
	__lws_same_vh_protocol_remove(wsi);
#if !defined(LWS_NO_CLIENT)
	lws_client_stash_destroy(wsi);
	lws_free_set_NULL(wsi->client_hostname_copy);
#endif

	/* let the role clean up anything role-specific */
	if (wsi->role_ops->destroy_role)
		wsi->role_ops->destroy_role(wsi);

#if defined(LWS_WITH_PEER_LIMITS)
	lws_peer_track_wsi_close(wsi->context, wsi->peer);
	wsi->peer = NULL;
#endif

	/* since we will destroy the wsi, make absolutely sure now */

#if defined(LWS_WITH_OPENSSL)
	__lws_ssl_remove_wsi_from_buffered_list(wsi);
#endif
	__lws_remove_from_timeout_list(wsi);

	/* let the event loop backend drop its per-wsi state */
	if (wsi->context->event_loop_ops->destroy_wsi)
		wsi->context->event_loop_ops->destroy_wsi(wsi);

	/* may finalize vhost destruction if we were its last bound wsi */
	lws_vhost_unbind_wsi(wsi);

	lwsl_debug("%s: %p, remaining wsi %d\n", __func__, wsi,
		   wsi->context->count_wsi_allocated);

	lws_free(wsi);
}
|
|
|
|
|
2018-03-19 16:37:37 +08:00
|
|
|
void
|
|
|
|
lws_dll_add_front(struct lws_dll *d, struct lws_dll *phead)
|
2017-12-05 20:02:29 +08:00
|
|
|
{
|
2018-03-19 16:37:37 +08:00
|
|
|
if (d->prev)
|
|
|
|
return;
|
|
|
|
|
|
|
|
/* our next guy is current first guy */
|
|
|
|
d->next = phead->next;
|
|
|
|
/* if there is a next guy, set his prev ptr to our next ptr */
|
|
|
|
if (d->next)
|
|
|
|
d->next->prev = d;
|
|
|
|
/* our prev ptr is first ptr */
|
|
|
|
d->prev = phead;
|
|
|
|
/* set the first guy to be us */
|
|
|
|
phead->next = d;
|
2017-12-05 20:02:29 +08:00
|
|
|
}
|
|
|
|
|
2018-03-19 16:37:37 +08:00
|
|
|
/* situation is:
|
|
|
|
*
|
|
|
|
* HEAD: struct lws_dll * = &entry1
|
|
|
|
*
|
|
|
|
* Entry 1: struct lws_dll .pprev = &HEAD , .next = Entry 2
|
|
|
|
* Entry 2: struct lws_dll .pprev = &entry1 , .next = &entry3
|
|
|
|
* Entry 3: struct lws_dll .pprev = &entry2 , .next = NULL
|
|
|
|
*
|
|
|
|
* Delete Entry1:
|
|
|
|
*
|
|
|
|
* - HEAD = &entry2
|
|
|
|
* - Entry2: .pprev = &HEAD, .next = &entry3
|
|
|
|
* - Entry3: .pprev = &entry2, .next = NULL
|
|
|
|
*
|
|
|
|
* Delete Entry2:
|
|
|
|
*
|
|
|
|
* - HEAD = &entry1
|
|
|
|
* - Entry1: .pprev = &HEAD, .next = &entry3
|
|
|
|
* - Entry3: .pprev = &entry1, .next = NULL
|
|
|
|
*
|
|
|
|
* Delete Entry3:
|
|
|
|
*
|
|
|
|
* - HEAD = &entry1
|
|
|
|
* - Entry1: .pprev = &HEAD, .next = &entry2
|
|
|
|
* - Entry2: .pprev = &entry1, .next = NULL
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
|
2017-02-21 23:38:40 +08:00
|
|
|
void
|
2018-03-19 16:37:37 +08:00
|
|
|
lws_dll_remove(struct lws_dll *d)
|
2016-01-19 04:32:14 +08:00
|
|
|
{
|
2018-03-19 16:37:37 +08:00
|
|
|
if (!d->prev) /* ie, not part of the list */
|
2016-01-19 04:32:14 +08:00
|
|
|
return;
|
|
|
|
|
2018-03-19 16:37:37 +08:00
|
|
|
/*
|
|
|
|
* remove us
|
|
|
|
*
|
|
|
|
* USp <-> us <-> USn --> USp <-> USn
|
|
|
|
*/
|
|
|
|
|
2016-02-25 21:39:01 +08:00
|
|
|
/* if we have a next guy, set his prev to our prev */
|
2018-03-19 16:37:37 +08:00
|
|
|
if (d->next)
|
|
|
|
d->next->prev = d->prev;
|
|
|
|
|
2016-02-25 21:39:01 +08:00
|
|
|
/* set our prev guy to our next guy instead of us */
|
2018-03-19 16:37:37 +08:00
|
|
|
if (d->prev)
|
|
|
|
d->prev->next = d->next;
|
2016-01-19 23:11:39 +08:00
|
|
|
|
2016-02-25 21:39:01 +08:00
|
|
|
/* we're out of the list, we should not point anywhere any more */
|
2018-03-19 16:37:37 +08:00
|
|
|
d->prev = NULL;
|
|
|
|
d->next = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Unlink wsi from its pt's timeout list.  Caller must hold the pt lock
 * (double-underscore convention); safe if the wsi is not on the list.
 */
void
__lws_remove_from_timeout_list(struct lws *wsi)
{
	lws_dll_lws_remove(&wsi->dll_timeout);
}
|
|
|
|
|
|
|
|
/*
 * Public (locking) wrapper: take the pt lock for wsi's service thread,
 * remove wsi from the timeout list, release the lock.
 */
void
lws_remove_from_timeout_list(struct lws *wsi)
{
	struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];

	lws_pt_lock(pt, __func__);
	__lws_remove_from_timeout_list(wsi);
	lws_pt_unlock(pt);
}
|
|
|
|
|
2018-03-19 16:37:37 +08:00
|
|
|
/*
 * Debug helper: dump a dll list to the NOTICE log.
 *
 * NOTE(review): assumes every entry is a struct lws linked via its
 * dll_hrtimer member (it prints wsi->pending_timer) — only meaningful
 * for the hrtimer list, despite the generic-looking signature.
 */
void
lws_dll_dump(struct lws_dll_lws *head, const char *title)
{
	int n = 0;

	(void)n; /* unused if lwsl_notice compiles out */
	lwsl_notice("%s: %s (head.next %p)\n", __func__, title, head->next);

	lws_start_foreach_dll_safe(struct lws_dll_lws *, d, d1, head->next) {
		struct lws *wsi = lws_container_of(d, struct lws, dll_hrtimer);

		(void)wsi;

		lwsl_notice(" %d: wsi %p: %llu\n", n++, wsi,
			    (unsigned long long)wsi->pending_timer);
	} lws_end_foreach_dll_safe(d, d1);
}
|
|
|
|
|
2018-03-05 16:49:28 +08:00
|
|
|
/*
 * (Re)arm wsi's high-resolution timer to fire in `usecs` microseconds
 * from now, or cancel it if usecs == LWS_SET_TIMER_USEC_CANCEL.
 *
 * The wsi is first unlinked from the pt hrtimer list, then (unless
 * cancelling) re-inserted so the list stays sorted earliest-deadline-
 * first, which __lws_hrtimer_service relies on.
 *
 * Caller must hold the pt lock (double-underscore convention).
 */
void
__lws_set_timer_usecs(struct lws *wsi, lws_usec_t usecs)
{
	struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
	struct lws_dll_lws *dd = &pt->dll_head_hrtimer;
	struct timeval now;
	struct lws *wsi1;
	int bef = 0; /* set if we must insert BEFORE dd rather than after */

	lws_dll_lws_remove(&wsi->dll_hrtimer);

	if (usecs == LWS_SET_TIMER_USEC_CANCEL)
		return;

	/* absolute deadline in us since the epoch */
	gettimeofday(&now, NULL);
	wsi->pending_timer = ((now.tv_sec * 1000000ll) + now.tv_usec) + usecs;

	/*
	 * we sort the hrtimer list with the earliest timeout first
	 */

	lws_start_foreach_dll_safe(struct lws_dll_lws *, d, d1,
				   pt->dll_head_hrtimer.next) {
		dd = d;
		wsi1 = lws_container_of(d, struct lws, dll_hrtimer);

		if (wsi1->pending_timer >= wsi->pending_timer) {
			/* d, dprev's next, is >= our time */
			bef = 1;
			break;
		}
	} lws_end_foreach_dll_safe(d, d1);

	if (bef) {
		/*
		 * we go before dd
		 * DDp <-> DD <-> DDn --> DDp <-> us <-> DD <-> DDn
		 */
		/* we point forward to dd */
		wsi->dll_hrtimer.next = dd;
		/* we point back to what dd used to point back to */
		wsi->dll_hrtimer.prev = dd->prev;
		/* DDp points forward to us now */
		dd->prev->next = &wsi->dll_hrtimer;
		/* DD points back to us now */
		dd->prev = &wsi->dll_hrtimer;
	} else {
		/*
		 * we go after dd (dd is the list head if the list was empty,
		 * or the last entry whose deadline is earlier than ours)
		 * DDp <-> DD <-> DDn --> DDp <-> DD <-> us <-> DDn
		 */
		/* we point forward to what dd used to point forward to */
		wsi->dll_hrtimer.next = dd->next;
		/* we point back to dd */
		wsi->dll_hrtimer.prev = dd;
		/* DDn points back to us */
		if (dd->next)
			dd->next->prev = &wsi->dll_hrtimer;
		/* DD points forward to us */
		dd->next = &wsi->dll_hrtimer;
	}

//	lws_dll_dump(&pt->dll_head_hrtimer, "after set_timer_usec");
}
|
|
|
|
|
2018-09-04 08:06:46 +08:00
|
|
|
LWS_VISIBLE lws_usec_t
|
|
|
|
lws_now_usecs(void)
|
|
|
|
{
|
|
|
|
struct timeval now;
|
|
|
|
|
|
|
|
gettimeofday(&now, NULL);
|
|
|
|
return (now.tv_sec * 1000000ll) + now.tv_usec;
|
|
|
|
}
|
|
|
|
|
2018-03-05 16:49:28 +08:00
|
|
|
/*
 * Public wrapper for __lws_set_timer_usecs.
 *
 * NOTE(review): unlike lws_set_timeout this does not take the pt lock
 * before calling the locked (double-underscore) variant — confirm
 * callers are expected to be on the wsi's own service thread.
 */
LWS_VISIBLE void
lws_set_timer_usecs(struct lws *wsi, lws_usec_t usecs)
{
	__lws_set_timer_usecs(wsi, usecs);
}
|
|
|
|
|
|
|
|
/*
 * Fire every expired hrtimer on this pt: each expired wsi has its timer
 * cancelled and gets an LWS_CALLBACK_TIMER protocol callback; a nonzero
 * return from the callback closes the wsi.
 *
 * Returns the estimated microseconds until the next timer is due,
 * LWS_HRTIMER_NOWAIT if no timers are pending, or 0 if one is already
 * overdue.  Relies on the list being sorted earliest-first by
 * __lws_set_timer_usecs.
 */
lws_usec_t
__lws_hrtimer_service(struct lws_context_per_thread *pt)
{
	struct timeval now;
	struct lws *wsi;
	lws_usec_t t;

	gettimeofday(&now, NULL);
	t = (now.tv_sec * 1000000ll) + now.tv_usec;

	lws_start_foreach_dll_safe(struct lws_dll_lws *, d, d1,
				   pt->dll_head_hrtimer.next) {
		wsi = lws_container_of(d, struct lws, dll_hrtimer);

		/*
		 * if we met one in the future, we are done, because the list
		 * is sorted by time in the future.
		 */
		if (wsi->pending_timer > t)
			break;

		/* one-shot: remove from the list before dispatching */
		lws_set_timer_usecs(wsi, LWS_SET_TIMER_USEC_CANCEL);

		/* it's time for the timer to be serviced */

		if (wsi->protocol &&
		    wsi->protocol->callback(wsi, LWS_CALLBACK_TIMER,
					    wsi->user_space, NULL, 0))
			__lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS,
					     "timer cb errored");
	} lws_end_foreach_dll_safe(d, d1);

	/* return an estimate how many us until next timer hit */

	if (!pt->dll_head_hrtimer.next)
		return LWS_HRTIMER_NOWAIT;

	wsi = lws_container_of(pt->dll_head_hrtimer.next, struct lws,
			       dll_hrtimer);

	/* re-read the clock: the callbacks above may have taken a while */
	gettimeofday(&now, NULL);
	t = (now.tv_sec * 1000000ll) + now.tv_usec;

	if (wsi->pending_timer < t)
		return 0;

	return wsi->pending_timer - t;
}
|
|
|
|
|
2018-03-02 14:22:49 +08:00
|
|
|
/*
 * Set (or clear, when reason == 0 / NO_PENDING_TIMEOUT) a second-
 * granularity timeout on wsi: record the limit, the set-time and the
 * reason, and link/unlink the wsi on the pt timeout list accordingly.
 *
 * Caller must hold the pt lock (double-underscore convention).
 */
void
__lws_set_timeout(struct lws *wsi, enum pending_timeout reason, int secs)
{
	struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
	time_t now;

	time(&now);

	lwsl_debug("%s: %p: %d secs (reason %d)\n", __func__, wsi, secs, reason);
	wsi->pending_timeout_limit = secs;
	wsi->pending_timeout_set = now;
	wsi->pending_timeout = reason;

	if (!reason)
		lws_dll_lws_remove(&wsi->dll_timeout);
	else
		lws_dll_lws_add_front(&wsi->dll_timeout, &pt->dll_head_timeout);
}
|
|
|
|
|
2016-01-26 20:56:56 +08:00
|
|
|
/*
 * Public timeout API.  Besides ordinary second-count timeouts, two magic
 * `secs` values are handled: LWS_TO_KILL_SYNC closes the wsi immediately
 * (the wsi is freed before this returns — do not touch it afterwards),
 * and LWS_TO_KILL_ASYNC arms an immediate (0s) timeout so the wsi is
 * closed from the service loop.
 */
LWS_VISIBLE void
lws_set_timeout(struct lws *wsi, enum pending_timeout reason, int secs)
{
	struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];

	if (secs == LWS_TO_KILL_SYNC) {
		lws_remove_from_timeout_list(wsi);
		lwsl_debug("synchronously killing %p\n", wsi);
		lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS,
				   "to sync kill");
		return;
	}

	if (secs == LWS_TO_KILL_ASYNC)
		secs = 0;

	lws_pt_lock(pt, __func__);
	__lws_set_timeout(wsi, reason, secs);
	lws_pt_unlock(pt);
}
|
2016-01-19 04:32:14 +08:00
|
|
|
|
2018-11-15 16:33:54 +08:00
|
|
|
/* requires context + vh lock */
|
|
|
|
|
2017-12-07 10:16:17 +08:00
|
|
|
/*
 * Unlink timed-protocol entry p from vh's singly-linked list and free it.
 *
 * Returns 0 if p was found and removed, 1 if it was not on the list.
 * Caller must hold the context + vhost locks (see comment above).
 */
int
__lws_timed_callback_remove(struct lws_vhost *vh, struct lws_timed_vh_protocol *p)
{
	lws_start_foreach_llp(struct lws_timed_vh_protocol **, pt,
			      vh->timed_vh_protocol_list) {
		if (*pt == p) {
			/* splice p out of the list, then free it */
			*pt = p->next;
			lws_free(p);

			return 0;
		}
	} lws_end_foreach_llp(pt, next);

	return 1;
}
|
|
|
|
|
2018-11-15 16:33:54 +08:00
|
|
|
/*
 * Map the calling pthread to its lws service thread index (tsi).
 *
 * Returns the tsi of the pt whose thread is the caller, or -1 if the
 * caller is not one of the context's service threads.  In single-
 * threaded builds (LWS_MAX_SMP == 1) there is only tsi 0.
 */
int
lws_pthread_self_to_tsi(struct lws_context *context)
{
#if LWS_MAX_SMP > 1
	pthread_t ps = pthread_self();
	struct lws_context_per_thread *pt = &context->pt[0];
	int n;

	for (n = 0; n < context->count_threads; n++) {
		if (pthread_equal(ps, pt->self))
			return n;
		pt++;
	}

	return -1;
#else
	return 0;
#endif
}
|
|
|
|
|
2017-12-07 10:16:17 +08:00
|
|
|
/*
 * Schedule a one-shot protocol callback `reason` on vhost vh, `secs`
 * seconds from now, to be delivered on the calling service thread's tsi
 * (or tsi 0 if not called from a service thread).
 *
 * Returns 0 on success, 1 on allocation failure.  The entry is freed
 * when it fires or is removed (__lws_timed_callback_remove).
 */
LWS_VISIBLE LWS_EXTERN int
lws_timed_callback_vh_protocol(struct lws_vhost *vh,
			       const struct lws_protocols *prot, int reason,
			       int secs)
{
	struct lws_timed_vh_protocol *p = (struct lws_timed_vh_protocol *)
			lws_malloc(sizeof(*p), "timed_vh");

	if (!p)
		return 1;

	p->tsi_req = lws_pthread_self_to_tsi(vh->context);
	if (p->tsi_req < 0) /* not called from a service thread --> tsi 0 */
		p->tsi_req = 0;

	lws_context_lock(vh->context, __func__); /* context ----------------- */

	p->protocol = prot;
	p->reason = reason;
	p->time = lws_now_secs() + secs;

	lws_vhost_lock(vh); /* vhost ---------------------------------------- */
	p->next = vh->timed_vh_protocol_list;
	vh->timed_vh_protocol_list = p;
	lws_vhost_unlock(vh); /* -------------------------------------- vhost */

	lws_context_unlock(vh->context); /* ------------------------- context */

	return 0;
}
|
|
|
|
|
2018-06-16 09:35:07 +08:00
|
|
|
/*
 * If wsi has a parent, unlink it from the parent's sibling list, telling
 * the parent's protocol via LWS_CALLBACK_CHILD_CLOSING first.  Always
 * clears wsi->parent; logs an error if the wsi was not actually found on
 * the parent's child list.
 */
void
lws_remove_child_from_any_parent(struct lws *wsi)
{
	struct lws **pwsi;
	int seen = 0;

	if (!wsi->parent)
		return;

	/* detach ourselves from parent's child list */
	pwsi = &wsi->parent->child_list;
	while (*pwsi) {
		if (*pwsi == wsi) {
			lwsl_info("%s: detach %p from parent %p\n", __func__,
				  wsi, wsi->parent);

			/* let the parent's protocol see the child going away */
			if (wsi->parent->protocol)
				wsi->parent->protocol->callback(wsi,
						LWS_CALLBACK_CHILD_CLOSING,
					       wsi->parent->user_space, wsi, 0);

			/* splice ourselves out of the sibling list */
			*pwsi = wsi->sibling_list;
			seen = 1;
			break;
		}
		pwsi = &(*pwsi)->sibling_list;
	}
	if (!seen)
		lwsl_err("%s: failed to detach from parent\n", __func__);

	wsi->parent = NULL;
}
|
|
|
|
|
2017-03-07 16:06:05 +08:00
|
|
|
int
|
2018-11-23 08:47:56 +08:00
|
|
|
lws_bind_protocol(struct lws *wsi, const struct lws_protocols *p,
|
|
|
|
const char *reason)
|
2017-03-07 16:06:05 +08:00
|
|
|
{
|
|
|
|
// if (wsi->protocol == p)
|
|
|
|
// return 0;
|
2017-04-06 13:49:17 +08:00
|
|
|
const struct lws_protocols *vp = wsi->vhost->protocols, *vpo;
|
2017-03-07 16:06:05 +08:00
|
|
|
|
2018-04-03 10:37:14 +08:00
|
|
|
if (wsi->protocol && wsi->protocol_bind_balance) {
|
2018-08-18 14:11:29 +08:00
|
|
|
wsi->protocol->callback(wsi,
|
|
|
|
wsi->role_ops->protocol_unbind_cb[!!lwsi_role_server(wsi)],
|
2018-09-02 14:35:37 +08:00
|
|
|
wsi->user_space, (void *)reason, 0);
|
2018-04-03 10:37:14 +08:00
|
|
|
wsi->protocol_bind_balance = 0;
|
|
|
|
}
|
2017-03-07 16:06:05 +08:00
|
|
|
if (!wsi->user_space_externally_allocated)
|
|
|
|
lws_free_set_NULL(wsi->user_space);
|
|
|
|
|
2017-04-06 13:49:17 +08:00
|
|
|
lws_same_vh_protocol_remove(wsi);
|
|
|
|
|
2017-03-07 16:06:05 +08:00
|
|
|
wsi->protocol = p;
|
|
|
|
if (!p)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (lws_ensure_user_space(wsi))
|
|
|
|
return 1;
|
|
|
|
|
2017-04-06 13:49:17 +08:00
|
|
|
if (p > vp && p < &vp[wsi->vhost->count_protocols])
|
2017-10-25 08:00:23 +08:00
|
|
|
lws_same_vh_protocol_insert(wsi, (int)(p - vp));
|
2017-04-06 13:49:17 +08:00
|
|
|
else {
|
|
|
|
int n = wsi->vhost->count_protocols;
|
|
|
|
int hit = 0;
|
|
|
|
|
|
|
|
vpo = vp;
|
|
|
|
|
|
|
|
while (n--) {
|
2017-04-07 18:22:54 +08:00
|
|
|
if (p->name && vp->name && !strcmp(p->name, vp->name)) {
|
2017-04-06 13:49:17 +08:00
|
|
|
hit = 1;
|
2017-10-25 08:00:23 +08:00
|
|
|
lws_same_vh_protocol_insert(wsi, (int)(vp - vpo));
|
2017-04-06 13:49:17 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
vp++;
|
|
|
|
}
|
|
|
|
if (!hit)
|
2017-09-23 12:55:21 +08:00
|
|
|
lwsl_err("%s: %p is not in vhost '%s' protocols list\n",
|
|
|
|
__func__, p, wsi->vhost->name);
|
2017-04-06 13:49:17 +08:00
|
|
|
}
|
|
|
|
|
2018-08-18 14:11:29 +08:00
|
|
|
if (wsi->protocol->callback(wsi, wsi->role_ops->protocol_bind_cb[
|
|
|
|
!!lwsi_role_server(wsi)],
|
2017-03-07 16:06:05 +08:00
|
|
|
wsi->user_space, NULL, 0))
|
|
|
|
return 1;
|
|
|
|
|
2018-04-03 10:37:14 +08:00
|
|
|
wsi->protocol_bind_balance = 1;
|
|
|
|
|
2017-03-07 16:06:05 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2010-12-19 22:13:26 +00:00
|
|
|
/*
 * Internal (lock-already-held) close path for a wsi.
 *
 * Walks a staged shutdown: closes any queued / child connections first,
 * handles role-specific short-circuits (raw file, CGI, raw socket),
 * optionally flushes pending output before closing, performs the protocol
 * unbind and user close callbacks exactly once (told_user_closed /
 * protocol_bind_balance guards), then tears down the fd and hands the wsi
 * to the event loop for final destruction.
 *
 * \param wsi     connection to close (NULL is tolerated)
 * \param reason  close status; internally (int)-1 marks a recursive call
 *                from the transaction-queue walk so vhost locking is skipped
 * \param caller  caller name, for logging only
 *
 * NOTE(review): statement order here is load-bearing (callbacks may
 * recurse into close); treat reordering as behavior-changing.
 */
void
__lws_close_free_wsi(struct lws *wsi, enum lws_close_status reason,
		     const char *caller)
{
	struct lws_context_per_thread *pt;
	struct lws *wsi1, *wsi2;
	struct lws_context *context;
	int n;

	lwsl_info("%s: %p: caller: %s\n", __func__, wsi, caller);

	if (!wsi)
		return;

	lws_access_log(wsi);

	context = wsi->context;
	pt = &context->pt[(int)wsi->tsi];
	lws_stats_atomic_bump(wsi->context, pt, LWSSTATS_C_API_CLOSE, 1);

#if !defined(LWS_NO_CLIENT)

	lws_free_set_NULL(wsi->client_hostname_copy);
	/* we are no longer an active client connection that can piggyback */
	lws_dll_lws_remove(&wsi->dll_active_client_conns);

	/*
	 * if we have wsi in our transaction queue, if we are closing we
	 * must go through and close all those first
	 */
	if (wsi->vhost) {
		/* reason == -1: recursive call, lock is already held */
		if ((int)reason != -1)
			lws_vhost_lock(wsi->vhost);
		lws_start_foreach_dll_safe(struct lws_dll_lws *, d, d1,
			      wsi->dll_client_transaction_queue_head.next) {
			struct lws *w = lws_container_of(d, struct lws,
					      dll_client_transaction_queue);

			__lws_close_free_wsi(w, -1, "trans q leader closing");
		} lws_end_foreach_dll_safe(d, d1);

		/*
		 * !!! If we are closing, but we have pending pipelined
		 * transaction results we already sent headers for, that's going
		 * to destroy sync for HTTP/1 and leave H2 stream with no live
		 * swsi.
		 *
		 * However this is normal if we are being closed because the
		 * transaction queue leader is closing.
		 */
		lws_dll_lws_remove(&wsi->dll_client_transaction_queue);
		if ((int)reason !=-1)
			lws_vhost_unlock(wsi->vhost);
	}
#endif

	/* if we have children, close them first */
	if (wsi->child_list) {
		wsi2 = wsi->child_list;
		while (wsi2) {
			wsi1 = wsi2->sibling_list;
			wsi2->parent = NULL;
			/* stop it doing shutdown processing */
			wsi2->socket_is_permanently_unusable = 1;
			__lws_close_free_wsi(wsi2, reason,
					     "general child recurse");
			wsi2 = wsi1;
		}
		wsi->child_list = NULL;
	}

	/* raw file: no network protocol to negotiate, straight to teardown */
	if (wsi->role_ops == &role_ops_raw_file) {
		lws_remove_child_from_any_parent(wsi);
		__remove_wsi_socket_from_fds(wsi);
		wsi->protocol->callback(wsi, wsi->role_ops->close_cb[0],
					wsi->user_space, NULL, 0);
		goto async_close;
	}

	/* snapshot the state before we start mutating it below */
	wsi->wsistate_pre_close = wsi->wsistate;

#ifdef LWS_WITH_CGI
	if (wsi->role_ops == &role_ops_cgi) {
		/* we are not a network connection, but a handler for CGI io */
		if (wsi->parent && wsi->parent->http.cgi) {

			if (wsi->cgi_channel == LWS_STDOUT)
				lws_cgi_remove_and_kill(wsi->parent);

			/* end the binding between us and master */
			wsi->parent->http.cgi->stdwsi[(int)wsi->cgi_channel] =
					NULL;
		}
		wsi->socket_is_permanently_unusable = 1;

		goto just_kill_connection;
	}

	if (wsi->http.cgi)
		lws_cgi_remove_and_kill(wsi);
#endif

#if !defined(LWS_NO_CLIENT)
	lws_client_stash_destroy(wsi);
#endif

	/* raw socket: no close handshake possible, just kill it */
	if (wsi->role_ops == &role_ops_raw_skt) {
		wsi->socket_is_permanently_unusable = 1;
		goto just_kill_connection;
	}
#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
	if (lwsi_role_http(wsi) && lwsi_role_server(wsi) &&
	    wsi->http.fop_fd != NULL)
		lws_vfs_file_close(&wsi->http.fop_fd);
#endif

	if (lwsi_state(wsi) == LRS_DEAD_SOCKET)
		return;

	if (wsi->socket_is_permanently_unusable ||
	    reason == LWS_CLOSE_STATUS_NOSTATUS_CONTEXT_DESTROY ||
	    lwsi_state(wsi) == LRS_SHUTDOWN)
		goto just_kill_connection;

	switch (lwsi_state_PRE_CLOSE(wsi)) {
	case LRS_DEAD_SOCKET:
		return;

	/* we tried the polite way... */
	case LRS_WAITING_TO_SEND_CLOSE:
	case LRS_AWAITING_CLOSE_ACK:
	case LRS_RETURNED_CLOSE:
		goto just_kill_connection;

	case LRS_FLUSHING_BEFORE_CLOSE:
		/* still flushing: keep waiting for writability */
		if (lws_has_buffered_out(wsi)
#if defined(LWS_WITH_HTTP_STREAM_COMPRESSION)
		    || wsi->http.comp_ctx.buflist_comp ||
		    wsi->http.comp_ctx.may_have_more
#endif
		 ) {
			lws_callback_on_writable(wsi);
			return;
		}
		lwsl_info("%p: end LRS_FLUSHING_BEFORE_CLOSE\n", wsi);
		goto just_kill_connection;
	default:
		/* pending output: defer the close until it has flushed */
		if (lws_has_buffered_out(wsi)
#if defined(LWS_WITH_HTTP_STREAM_COMPRESSION)
		    || wsi->http.comp_ctx.buflist_comp ||
		    wsi->http.comp_ctx.may_have_more
#endif
		) {
			lwsl_info("%p: LRS_FLUSHING_BEFORE_CLOSE\n", wsi);
			lwsi_set_state(wsi, LRS_FLUSHING_BEFORE_CLOSE);
			__lws_set_timeout(wsi,
				PENDING_FLUSH_STORED_SEND_BEFORE_CLOSE, 5);
			return;
		}
		break;
	}

	if (lwsi_state(wsi) == LRS_WAITING_CONNECT ||
	    lwsi_state(wsi) == LRS_H1C_ISSUE_HANDSHAKE)
		goto just_kill_connection;

	/* unbind the protocol on the "polite" close path */
	if (!wsi->told_user_closed && wsi->user_space && wsi->protocol &&
	    wsi->protocol_bind_balance) {
		wsi->protocol->callback(wsi,
					wsi->role_ops->protocol_unbind_cb[
					       !!lwsi_role_server(wsi)],
					wsi->user_space, (void *)__func__, 0);
		wsi->protocol_bind_balance = 0;
	}

	/*
	 * signal we are closing, lws_write will
	 * add any necessary version-specific stuff. If the write fails,
	 * no worries we are closing anyway. If we didn't initiate this
	 * close, then our state has been changed to
	 * LRS_RETURNED_CLOSE and we will skip this.
	 *
	 * Likewise if it's a second call to close this connection after we
	 * sent the close indication to the peer already, we are in state
	 * LRS_AWAITING_CLOSE_ACK and will skip doing this a second time.
	 */

	if (wsi->role_ops->close_via_role_protocol &&
	    wsi->role_ops->close_via_role_protocol(wsi, reason))
		return;

just_kill_connection:

	if (wsi->role_ops->close_kill_connection)
		wsi->role_ops->close_kill_connection(wsi, reason);

	lws_remove_child_from_any_parent(wsi);
	n = 0;

	/* unbind here too, for paths that jumped straight to the kill */
	if (!wsi->told_user_closed && wsi->user_space &&
	    wsi->protocol_bind_balance) {
		lwsl_debug("%s: %p: DROP_PROTOCOL %s\n", __func__, wsi,
			   wsi->protocol->name);
		wsi->protocol->callback(wsi,
					wsi->role_ops->protocol_unbind_cb[
					       !!lwsi_role_server(wsi)],
					wsi->user_space, (void *)__func__, 0);
		wsi->protocol_bind_balance = 0;
	}

	/* client connect failed before completing: tell the user (once) */
	if ((lwsi_state(wsi) == LRS_WAITING_SERVER_REPLY ||
	     lwsi_state(wsi) == LRS_WAITING_CONNECT) && !wsi->already_did_cce)
		wsi->protocol->callback(wsi,
				LWS_CALLBACK_CLIENT_CONNECTION_ERROR,
				wsi->user_space, NULL, 0);

	/*
	 * Testing with ab shows that we have to stage the socket close when
	 * the system is under stress... shutdown any further TX, change the
	 * state to one that won't emit anything more, and wait with a timeout
	 * for the POLLIN to show a zero-size rx before coming back and doing
	 * the actual close.
	 */
	if (wsi->role_ops != &role_ops_raw_skt && !lwsi_role_client(wsi) &&
	    lwsi_state(wsi) != LRS_SHUTDOWN &&
	    lwsi_state(wsi) != LRS_UNCONNECTED &&
	    reason != LWS_CLOSE_STATUS_NOSTATUS_CONTEXT_DESTROY &&
	    !wsi->socket_is_permanently_unusable) {

#if defined(LWS_WITH_TLS)
		if (lws_is_ssl(wsi) && wsi->tls.ssl) {
			n = 0;
			switch (__lws_tls_shutdown(wsi)) {
			case LWS_SSL_CAPABLE_DONE:
			case LWS_SSL_CAPABLE_ERROR:
			case LWS_SSL_CAPABLE_MORE_SERVICE_READ:
			case LWS_SSL_CAPABLE_MORE_SERVICE_WRITE:
			case LWS_SSL_CAPABLE_MORE_SERVICE:
				break;
			}
		} else
#endif
		{
			lwsl_info("%s: shutdown conn: %p (sk %d, state 0x%x)\n",
				  __func__, wsi, (int)(long)wsi->desc.sockfd,
				  lwsi_state(wsi));
			if (!wsi->socket_is_permanently_unusable &&
			    lws_socket_is_valid(wsi->desc.sockfd)) {
				wsi->socket_is_permanently_unusable = 1;
				/* half-close: stop TX, let peer drain RX */
				n = shutdown(wsi->desc.sockfd, SHUT_WR);
			}
		}
		if (n)
			lwsl_debug("closing: shutdown (state 0x%x) ret %d\n",
				   lwsi_state(wsi), LWS_ERRNO);

		/*
		 * This causes problems on WINCE / ESP32 with disconnection
		 * when the events are half closing connection
		 */
#if !defined(_WIN32_WCE) && !defined(LWS_WITH_ESP32)
		/* libuv: no event available to guarantee completion */
		if (!wsi->socket_is_permanently_unusable &&
		    lws_socket_is_valid(wsi->desc.sockfd) &&
		    lwsi_state(wsi) != LRS_SHUTDOWN &&
		    context->event_loop_ops->periodic_events_available) {
			__lws_change_pollfd(wsi, LWS_POLLOUT, LWS_POLLIN);
			lwsi_set_state(wsi, LRS_SHUTDOWN);
			__lws_set_timeout(wsi, PENDING_TIMEOUT_SHUTDOWN_FLUSH,
					  context->timeout_secs);

			return;
		}
#endif
	}

	lwsl_debug("%s: real just_kill_connection: %p (sockfd %d)\n", __func__,
		   wsi, wsi->desc.sockfd);

#ifdef LWS_WITH_HUBBUB
	if (wsi->http.rw) {
		lws_rewrite_destroy(wsi->http.rw);
		wsi->http.rw = NULL;
	}
#endif

	if (wsi->http.pending_return_headers)
		lws_free_set_NULL(wsi->http.pending_return_headers);

	/*
	 * we won't be servicing or receiving anything further from this guy
	 * delete socket from the internal poll list if still present
	 */
	__lws_ssl_remove_wsi_from_buffered_list(wsi);
	__lws_remove_from_timeout_list(wsi);
	lws_dll_lws_remove(&wsi->dll_hrtimer);

	/* don't repeat event loop stuff */
	if (wsi->told_event_loop_closed)
		return;

	/* checking return redundant since we anyway close */
	if (wsi->desc.sockfd != LWS_SOCK_INVALID)
		__remove_wsi_socket_from_fds(wsi);
	else
		__lws_same_vh_protocol_remove(wsi);

	lwsi_set_state(wsi, LRS_DEAD_SOCKET);
	lws_buflist_destroy_all_segments(&wsi->buflist);
	lws_dll_lws_remove(&wsi->dll_buflist);

	if (wsi->role_ops->close_role)
	    wsi->role_ops->close_role(pt, wsi);

	/* tell the user it's all over for this guy */

	if ((lwsi_state_est_PRE_CLOSE(wsi) ||
	     lwsi_state_PRE_CLOSE(wsi) == LRS_WAITING_SERVER_REPLY) &&
	    !wsi->told_user_closed &&
	    wsi->role_ops->close_cb[lwsi_role_server(wsi)]) {
		const struct lws_protocols *pro = wsi->protocol;

		if (!wsi->protocol)
			pro = &wsi->vhost->protocols[0];

		if (!wsi->upgraded_to_http2 || !lwsi_role_client(wsi))
			/*
			 * The network wsi for a client h2 connection shouldn't
			 * call back for its role: the child stream connections
			 * own the role. Otherwise h2 will call back closed
			 * one too many times as the children do it and then
			 * the closing network stream.
			 */
			pro->callback(wsi,
			      wsi->role_ops->close_cb[lwsi_role_server(wsi)],
			      wsi->user_space, NULL, 0);
		wsi->told_user_closed = 1;
	}

async_close:
	wsi->socket_is_permanently_unusable = 1;

	/* event loop may defer the final free (eg, libuv close cb) */
	if (wsi->context->event_loop_ops->wsi_logical_close)
		if (wsi->context->event_loop_ops->wsi_logical_close(wsi))
			return;

	__lws_close_free_wsi_final(wsi);
}
|
|
|
|
|
|
|
|
/*
 * Final stage of wsi destruction: close the fd, deliver the
 * LWS_CALLBACK_WSI_DESTROY notification (user_space still intact at that
 * point), release any CGI pipe fds, and free the wsi itself.
 *
 * Called directly from __lws_close_free_wsi(), or later by the event loop
 * when wsi_logical_close deferred the teardown.
 */
void
__lws_close_free_wsi_final(struct lws *wsi)
{
	int n;

	/* shadow wsis share a real wsi's fd, so must not close it here */
	if (!wsi->shadow &&
	    lws_socket_is_valid(wsi->desc.sockfd) && !lws_ssl_close(wsi)) {
		lwsl_debug("%s: wsi %p: fd %d\n", __func__, wsi, wsi->desc.sockfd);
		n = compatible_close(wsi->desc.sockfd);
		if (n)
			lwsl_debug("closing: close ret %d\n", LWS_ERRNO);

		wsi->desc.sockfd = LWS_SOCK_INVALID;
	}

	/* outermost destroy notification for wsi (user_space still intact) */
	if (wsi->vhost)
		wsi->vhost->protocols[0].callback(wsi, LWS_CALLBACK_WSI_DESTROY,
						  wsi->user_space, NULL, 0);

#ifdef LWS_WITH_CGI
	if (wsi->http.cgi) {

		/*
		 * close our end of each stdin/stdout/stderr pipe;
		 * !!(n == 0) selects the read/write side we own per channel
		 */
		for (n = 0; n < 3; n++) {
			if (wsi->http.cgi->pipe_fds[n][!!(n == 0)] == 0)
				lwsl_err("ZERO FD IN CGI CLOSE");

			if (wsi->http.cgi->pipe_fds[n][!!(n == 0)] >= 0)
				close(wsi->http.cgi->pipe_fds[n][!!(n == 0)]);
		}

		lws_free(wsi->http.cgi);
	}
#endif

	__lws_free_wsi(wsi);
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
 * Public close entry point: acquires the per-thread (service thread) lock
 * around the lock-free internal __lws_close_free_wsi() implementation.
 *
 * \param wsi     connection to close and free
 * \param reason  close status code passed to the close machinery
 * \param caller  caller identification string, used for logging only
 */
void
lws_close_free_wsi(struct lws *wsi, enum lws_close_status reason, const char *caller)
{
	struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];

	lws_pt_lock(pt, __func__);
	__lws_close_free_wsi(wsi, reason, caller);
	lws_pt_unlock(pt);
}
|
|
|
|
|
2018-04-13 16:01:38 +08:00
|
|
|
/* lws_buflist */
|
|
|
|
|
|
|
|
int
|
2018-04-17 15:35:15 +08:00
|
|
|
lws_buflist_append_segment(struct lws_buflist **head, const uint8_t *buf,
|
|
|
|
size_t len)
|
2018-04-13 16:01:38 +08:00
|
|
|
{
|
2018-04-28 07:49:31 +08:00
|
|
|
struct lws_buflist *nbuf;
|
2018-04-13 16:01:38 +08:00
|
|
|
int first = !*head;
|
2018-04-17 11:43:20 +08:00
|
|
|
void *p = *head;
|
2018-04-28 07:49:31 +08:00
|
|
|
int sanity = 1024;
|
2018-04-13 16:01:38 +08:00
|
|
|
|
|
|
|
assert(buf);
|
|
|
|
assert(len);
|
|
|
|
|
|
|
|
/* append at the tail */
|
2018-04-28 07:49:31 +08:00
|
|
|
while (*head) {
|
|
|
|
if (!--sanity || head == &((*head)->next)) {
|
|
|
|
lwsl_err("%s: corrupt list points to self\n", __func__);
|
|
|
|
return -1;
|
|
|
|
}
|
2018-04-13 16:01:38 +08:00
|
|
|
head = &((*head)->next);
|
2018-04-28 07:49:31 +08:00
|
|
|
}
|
2018-04-13 16:01:38 +08:00
|
|
|
|
2018-04-17 11:43:20 +08:00
|
|
|
lwsl_info("%s: len %u first %d %p\n", __func__, (uint32_t)len, first, p);
|
2018-04-13 16:01:38 +08:00
|
|
|
|
2018-11-23 08:47:56 +08:00
|
|
|
nbuf = (struct lws_buflist *)lws_malloc(sizeof(**head) + len, __func__);
|
2018-04-28 07:49:31 +08:00
|
|
|
if (!nbuf) {
|
2018-04-13 16:01:38 +08:00
|
|
|
lwsl_err("%s: OOM\n", __func__);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2018-04-28 07:49:31 +08:00
|
|
|
nbuf->len = len;
|
|
|
|
nbuf->pos = 0;
|
|
|
|
nbuf->next = NULL;
|
2018-04-13 16:01:38 +08:00
|
|
|
|
2018-04-28 07:49:31 +08:00
|
|
|
p = (void *)nbuf->buf;
|
2018-04-13 16:01:38 +08:00
|
|
|
memcpy(p, buf, len);
|
|
|
|
|
2018-04-28 07:49:31 +08:00
|
|
|
*head = nbuf;
|
|
|
|
|
2018-04-13 16:01:38 +08:00
|
|
|
return first; /* returns 1 if first segment just created */
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
lws_buflist_destroy_segment(struct lws_buflist **head)
|
|
|
|
{
|
|
|
|
struct lws_buflist *old = *head;
|
|
|
|
|
|
|
|
assert(*head);
|
|
|
|
*head = (*head)->next;
|
2018-04-28 07:49:31 +08:00
|
|
|
old->next = NULL;
|
2018-04-13 16:01:38 +08:00
|
|
|
lws_free(old);
|
|
|
|
|
|
|
|
return !*head; /* returns 1 if last segment just destroyed */
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
lws_buflist_destroy_all_segments(struct lws_buflist **head)
|
|
|
|
{
|
|
|
|
struct lws_buflist *p = *head, *p1;
|
|
|
|
|
|
|
|
while (p) {
|
|
|
|
p1 = p->next;
|
2018-04-28 07:49:31 +08:00
|
|
|
p->next = NULL;
|
2018-04-13 16:01:38 +08:00
|
|
|
lws_free(p);
|
|
|
|
p = p1;
|
|
|
|
}
|
|
|
|
|
|
|
|
*head = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
size_t
|
|
|
|
lws_buflist_next_segment_len(struct lws_buflist **head, uint8_t **buf)
|
|
|
|
{
|
|
|
|
if (!*head) {
|
|
|
|
if (buf)
|
|
|
|
*buf = NULL;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!(*head)->len && (*head)->next)
|
|
|
|
lws_buflist_destroy_segment(head);
|
|
|
|
|
|
|
|
if (!*head) {
|
|
|
|
if (buf)
|
|
|
|
*buf = NULL;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
assert((*head)->pos < (*head)->len);
|
|
|
|
|
|
|
|
if (buf)
|
|
|
|
*buf = (*head)->buf + (*head)->pos;
|
|
|
|
|
|
|
|
return (*head)->len - (*head)->pos;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
lws_buflist_use_segment(struct lws_buflist **head, size_t len)
|
|
|
|
{
|
|
|
|
assert(*head);
|
|
|
|
assert(len);
|
|
|
|
assert((*head)->pos + len <= (*head)->len);
|
|
|
|
|
|
|
|
(*head)->pos += len;
|
|
|
|
if ((*head)->pos == (*head)->len)
|
|
|
|
lws_buflist_destroy_segment(head);
|
|
|
|
|
|
|
|
if (!*head)
|
|
|
|
return 0;
|
|
|
|
|
2018-04-16 19:52:28 +08:00
|
|
|
return (int)((*head)->len - (*head)->pos);
|
2018-04-13 16:01:38 +08:00
|
|
|
}
|
|
|
|
|
2018-04-17 15:35:15 +08:00
|
|
|
void
|
|
|
|
lws_buflist_describe(struct lws_buflist **head, void *id)
|
|
|
|
{
|
2018-04-30 19:17:32 +08:00
|
|
|
struct lws_buflist *old;
|
2018-04-17 15:35:15 +08:00
|
|
|
int n = 0;
|
|
|
|
|
|
|
|
if (*head == NULL)
|
|
|
|
lwsl_notice("%p: buflist empty\n", id);
|
|
|
|
|
|
|
|
while (*head) {
|
|
|
|
lwsl_notice("%p: %d: %llu / %llu (%llu left)\n", id, n,
|
|
|
|
(unsigned long long)(*head)->pos,
|
|
|
|
(unsigned long long)(*head)->len,
|
|
|
|
(unsigned long long)(*head)->len - (*head)->pos);
|
2018-04-30 19:17:32 +08:00
|
|
|
old = *head;
|
2018-04-17 15:35:15 +08:00
|
|
|
head = &((*head)->next);
|
2018-04-30 19:17:32 +08:00
|
|
|
if (*head == old) {
|
|
|
|
lwsl_err("%s: next points to self\n", __func__);
|
|
|
|
break;
|
|
|
|
}
|
2018-04-17 15:35:15 +08:00
|
|
|
n++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-04-13 16:01:38 +08:00
|
|
|
/* ... */
|
|
|
|
|
2016-06-03 09:04:15 +08:00
|
|
|
LWS_VISIBLE LWS_EXTERN const char *
|
|
|
|
lws_get_urlarg_by_name(struct lws *wsi, const char *name, char *buf, int len)
|
|
|
|
{
|
2017-10-25 08:00:23 +08:00
|
|
|
int n = 0, sl = (int)strlen(name);
|
2016-06-03 09:04:15 +08:00
|
|
|
|
|
|
|
while (lws_hdr_copy_fragment(wsi, buf, len,
|
|
|
|
WSI_TOKEN_HTTP_URI_ARGS, n) >= 0) {
|
|
|
|
|
|
|
|
if (!strncmp(buf, name, sl))
|
|
|
|
return buf + sl;
|
|
|
|
|
|
|
|
n++;
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2018-04-11 13:39:42 +08:00
|
|
|
#if !defined(LWS_WITH_ESP32)
/*
 * Resolve a network interface name into a sockaddr, honouring whether the
 * vhost has ipv6 enabled.  Thin shim over lws_interface_to_sa().
 */
LWS_VISIBLE int
interface_to_sa(struct lws_vhost *vh, const char *ifname,
		struct sockaddr_in *addr, size_t addrlen)
{
	int ipv6 = 0;
#ifdef LWS_WITH_IPV6
	ipv6 = LWS_IPV6_ENABLED(vh);
#endif
	(void)vh; /* unused when IPV6 support is compiled out */

	return lws_interface_to_sa(ipv6, ifname, addr, addrlen);
}
#endif
|
|
|
|
|
2017-01-17 07:01:02 +08:00
|
|
|
#ifndef LWS_PLAT_OPTEE
|
2016-06-03 21:19:40 +08:00
|
|
|
/*
 * Fill name (reverse-resolved hostname) and rip (numeric IP string) for
 * the peer sockaddr in ads.  Returns 0 on success, -1 on any failure.
 *
 * IPv6 path: numeric address via inet_ntop (with any ::ffff: v4-mapped
 * prefix stripped), hostname via getnameinfo.  IPv4 path: reverse-resolve
 * with getnameinfo, then forward-resolve that name with getaddrinfo and
 * take the first AF_INET result as the numeric address.
 */
static int
lws_get_addresses(struct lws_vhost *vh, void *ads, char *name,
		  int name_len, char *rip, int rip_len)
{
	struct addrinfo ai, *res;
	struct sockaddr_in addr4;

	rip[0] = '\0';
	name[0] = '\0';
	/* AF_UNSPEC doubles as "no v4 address found yet" sentinel below */
	addr4.sin_family = AF_UNSPEC;

#ifdef LWS_WITH_IPV6
	if (LWS_IPV6_ENABLED(vh)) {
		if (!lws_plat_inet_ntop(AF_INET6,
					&((struct sockaddr_in6 *)ads)->sin6_addr,
					rip, rip_len)) {
			lwsl_err("inet_ntop: %s", strerror(LWS_ERRNO));
			return -1;
		}

		// Strip off the IPv4 to IPv6 header if one exists
		if (strncmp(rip, "::ffff:", 7) == 0)
			memmove(rip, rip + 7, strlen(rip) - 6);

		/* best-effort: name stays empty if reverse lookup fails */
		getnameinfo((struct sockaddr *)ads, sizeof(struct sockaddr_in6),
			    name, name_len, NULL, 0, 0);

		return 0;
	} else
#endif
	{
		struct addrinfo *result;

		memset(&ai, 0, sizeof ai);
		ai.ai_family = PF_UNSPEC;
		ai.ai_socktype = SOCK_STREAM;
#if !defined(LWS_WITH_ESP32)
		if (getnameinfo((struct sockaddr *)ads,
				sizeof(struct sockaddr_in),
				name, name_len, NULL, 0, 0))
			return -1;
#endif

		if (getaddrinfo(name, NULL, &ai, &result))
			return -1;

		/* take the first AF_INET entry in the result list */
		res = result;
		while (addr4.sin_family == AF_UNSPEC && res) {
			switch (res->ai_family) {
			case AF_INET:
				addr4.sin_addr =
				 ((struct sockaddr_in *)res->ai_addr)->sin_addr;
				addr4.sin_family = AF_INET;
				break;
			}

			res = res->ai_next;
		}
		freeaddrinfo(result);
	}

	if (addr4.sin_family == AF_UNSPEC)
		return -1;

	if (lws_plat_inet_ntop(AF_INET, &addr4.sin_addr, rip, rip_len) == NULL)
		return -1;

	return 0;
}
|
|
|
|
|
2017-01-17 07:01:02 +08:00
|
|
|
|
2016-05-25 08:04:52 +08:00
|
|
|
/*
 * Write the peer's numeric IP address into name and return name, or NULL
 * on failure.  Resolves to the underlying network wsi first so h2 child
 * streams report the parent connection's peer.
 */
LWS_VISIBLE const char *
lws_get_peer_simple(struct lws *wsi, char *name, int namelen)
{
	socklen_t len, olen;
#ifdef LWS_WITH_IPV6
	struct sockaddr_in6 sin6;
#endif
	struct sockaddr_in sin4;
	int af = AF_INET;
	void *p, *q; /* p: sockaddr to fill, q: address field within it */

	wsi = lws_get_network_wsi(wsi);

#ifdef LWS_WITH_IPV6
	if (LWS_IPV6_ENABLED(wsi->vhost)) {
		len = sizeof(sin6);
		p = &sin6;
		af = AF_INET6;
		q = &sin6.sin6_addr;
	} else
#endif
	{
		len = sizeof(sin4);
		p = &sin4;
		q = &sin4.sin_addr;
	}

	olen = len;
	/* len growing past olen would mean the kernel truncated the addr */
	if (getpeername(wsi->desc.sockfd, p, &len) < 0 || len > olen) {
		lwsl_warn("getpeername: %s\n", strerror(LWS_ERRNO));
		return NULL;
	}

	return lws_plat_inet_ntop(af, q, name, namelen);
}
|
2017-01-17 07:01:02 +08:00
|
|
|
#endif
|
2016-04-15 12:00:23 +08:00
|
|
|
|
2013-03-30 09:52:21 +08:00
|
|
|
/*
 * Fill name (hostname) and rip (numeric IP) for the peer of socket fd.
 * On OPTEE (no socket address APIs) this is a no-op; both buffers are
 * still zero-terminated on entry by the caller-visible contract below.
 * Failures leave the buffers empty rather than reporting an error.
 */
LWS_VISIBLE void
lws_get_peer_addresses(struct lws *wsi, lws_sockfd_type fd, char *name,
		       int name_len, char *rip, int rip_len)
{
#ifndef LWS_PLAT_OPTEE
	socklen_t len;
#ifdef LWS_WITH_IPV6
	struct sockaddr_in6 sin6;
#endif
	struct sockaddr_in sin4;
	struct lws_context *context = wsi->context;
	int ret = -1;
	void *p;

	rip[0] = '\0';
	name[0] = '\0';

	lws_latency_pre(context, wsi);

#ifdef LWS_WITH_IPV6
	if (LWS_IPV6_ENABLED(wsi->vhost)) {
		len = sizeof(sin6);
		p = &sin6;
	} else
#endif
	{
		len = sizeof(sin4);
		p = &sin4;
	}

	if (getpeername(fd, p, &len) < 0) {
		lwsl_warn("getpeername: %s\n", strerror(LWS_ERRNO));
		goto bail;
	}

	ret = lws_get_addresses(wsi->vhost, p, name, name_len, rip, rip_len);

bail:
	lws_latency(context, wsi, "lws_get_peer_addresses", ret, 1);
#endif
	/* silence unused-parameter warnings on the OPTEE build */
	(void)wsi;
	(void)fd;
	(void)name;
	(void)name_len;
	(void)rip;
	(void)rip_len;

}
|
2011-02-12 11:57:45 +00:00
|
|
|
|
2017-08-29 15:37:16 +08:00
|
|
|
/* Return the opaque per-vhost user pointer. */
LWS_EXTERN void *
lws_vhost_user(struct lws_vhost *vhost)
{
	return vhost->user;
}
|
|
|
|
|
2012-10-19 11:21:56 +02:00
|
|
|
/* Return the opaque user pointer associated with the whole context. */
LWS_EXTERN void *
lws_context_user(struct lws_context *context)
{
	return context->user_space;
}
|
|
|
|
|
2016-04-06 16:15:40 +08:00
|
|
|
/*
 * Return the vhost the wsi is bound to.
 * NOTE(review): duplicate of lws_get_vhost() below — presumably kept for
 * API compatibility; confirm which name is the deprecated one.
 */
LWS_VISIBLE struct lws_vhost *
lws_vhost_get(struct lws *wsi)
{
	return wsi->vhost;
}
|
|
|
|
|
2016-05-17 13:47:44 +08:00
|
|
|
/* Return the vhost the wsi is bound to. */
LWS_VISIBLE struct lws_vhost *
lws_get_vhost(struct lws *wsi)
{
	return wsi->vhost;
}
|
|
|
|
|
2016-04-06 16:15:40 +08:00
|
|
|
/* Return the protocol currently bound to the wsi. */
LWS_VISIBLE const struct lws_protocols *
lws_protocol_get(struct lws *wsi)
{
	return wsi->protocol;
}
|
|
|
|
|
2018-03-24 08:07:00 +08:00
|
|
|
/* Return the wsi's UDP-specific state, or NULL if not a UDP wsi. */
LWS_VISIBLE const struct lws_udp *
lws_get_udp(const struct lws *wsi)
{
	return wsi->udp;
}
|
|
|
|
|
2017-10-13 10:33:02 +08:00
|
|
|
/*
 * Map a logical wsi to the wsi that owns the actual network socket.
 * For plain connections that is the wsi itself; for h2 substreams
 * (server- or client-side) it walks up the parent chain to the root
 * connection.  NULL in gives NULL back.
 */
LWS_VISIBLE struct lws *
lws_get_network_wsi(struct lws *wsi)
{
	if (!wsi)
		return NULL;

#if defined(LWS_WITH_HTTP2)
	if (!wsi->http2_substream
#if !defined(LWS_NO_CLIENT)
	    && !wsi->client_h2_substream
#endif
	    )
		return wsi;

	/* substream: climb to the root wsi carrying the socket */
	while (wsi->h2.parent_wsi)
		wsi = wsi->h2.parent_wsi;
#endif

	return wsi;
}
|
|
|
|
|
2018-12-04 08:03:58 +08:00
|
|
|
/*
 * Zero len bytes at p through a volatile pointer so the compiler cannot
 * elide the wipe (e.g. clearing key material just before free).
 */
LWS_VISIBLE void
lws_explicit_bzero(void *p, size_t len)
{
	volatile uint8_t *dest = p;
	size_t i;

	for (i = 0; i < len; i++)
		dest[i] = 0;
}
|
|
|
|
|
2018-12-01 06:45:23 +08:00
|
|
|
|
|
|
|
LWS_VISIBLE int LWS_WARN_UNUSED_RESULT
|
|
|
|
lws_raw_transaction_completed(struct lws *wsi)
|
|
|
|
{
|
|
|
|
if (lws_has_buffered_out(wsi)) {
|
|
|
|
/*
|
|
|
|
* ...so he tried to send something large, but it went out
|
|
|
|
* as a partial, but he immediately called us to say he wants
|
|
|
|
* to close the connection.
|
|
|
|
*
|
|
|
|
* Defer the close until the last part of the partial is sent.
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
lwsl_debug("%s: %p: deferring due to partial\n", __func__, wsi);
|
|
|
|
wsi->close_when_buffered_out_drained = 1;
|
|
|
|
lws_callback_on_writable(wsi);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2017-02-15 09:12:39 +08:00
|
|
|
LWS_VISIBLE LWS_EXTERN const struct lws_protocols *
|
|
|
|
lws_vhost_name_to_protocol(struct lws_vhost *vh, const char *name)
|
|
|
|
{
|
|
|
|
int n;
|
|
|
|
|
|
|
|
for (n = 0; n < vh->count_protocols; n++)
|
|
|
|
if (!strcmp(name, vh->protocols[n].name))
|
|
|
|
return &vh->protocols[n];
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2014-02-15 16:36:38 +08:00
|
|
|
LWS_VISIBLE int
|
2015-12-11 10:45:35 +08:00
|
|
|
lws_callback_all_protocol(struct lws_context *context,
|
|
|
|
const struct lws_protocols *protocol, int reason)
|
2014-02-15 16:36:38 +08:00
|
|
|
{
|
2016-01-19 03:34:24 +08:00
|
|
|
struct lws_context_per_thread *pt = &context->pt[0];
|
2016-01-26 20:56:56 +08:00
|
|
|
unsigned int n, m = context->count_threads;
|
2016-01-29 21:18:54 +08:00
|
|
|
struct lws *wsi;
|
2016-01-19 03:34:24 +08:00
|
|
|
|
|
|
|
while (m--) {
|
|
|
|
for (n = 0; n < pt->fds_count; n++) {
|
|
|
|
wsi = wsi_from_fd(context, pt->fds[n].fd);
|
|
|
|
if (!wsi)
|
|
|
|
continue;
|
|
|
|
if (wsi->protocol == protocol)
|
2016-01-29 21:18:54 +08:00
|
|
|
protocol->callback(wsi, reason, wsi->user_space,
|
|
|
|
NULL, 0);
|
2016-01-19 03:34:24 +08:00
|
|
|
}
|
|
|
|
pt++;
|
2014-02-15 16:36:38 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-04-06 16:15:40 +08:00
|
|
|
LWS_VISIBLE int
|
2017-08-05 10:38:59 +08:00
|
|
|
lws_callback_all_protocol_vhost_args(struct lws_vhost *vh,
|
|
|
|
const struct lws_protocols *protocol, int reason,
|
|
|
|
void *argp, size_t len)
|
2016-04-06 16:15:40 +08:00
|
|
|
{
|
|
|
|
struct lws_context *context = vh->context;
|
|
|
|
struct lws_context_per_thread *pt = &context->pt[0];
|
|
|
|
unsigned int n, m = context->count_threads;
|
|
|
|
struct lws *wsi;
|
|
|
|
|
|
|
|
while (m--) {
|
|
|
|
for (n = 0; n < pt->fds_count; n++) {
|
|
|
|
wsi = wsi_from_fd(context, pt->fds[n].fd);
|
|
|
|
if (!wsi)
|
|
|
|
continue;
|
2017-08-05 10:38:59 +08:00
|
|
|
if (wsi->vhost == vh && (wsi->protocol == protocol ||
|
|
|
|
!protocol))
|
|
|
|
wsi->protocol->callback(wsi, reason,
|
|
|
|
wsi->user_space, argp, len);
|
2016-04-06 16:15:40 +08:00
|
|
|
}
|
|
|
|
pt++;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-08-05 10:38:59 +08:00
|
|
|
/* Convenience wrapper: broadcast reason on vh with no payload. */
LWS_VISIBLE int
lws_callback_all_protocol_vhost(struct lws_vhost *vh,
			  const struct lws_protocols *protocol, int reason)
{
	return lws_callback_all_protocol_vhost_args(vh, protocol, reason, NULL, 0);
}
|
|
|
|
|
2016-06-15 10:46:58 +08:00
|
|
|
LWS_VISIBLE LWS_EXTERN int
|
|
|
|
lws_callback_vhost_protocols(struct lws *wsi, int reason, void *in, int len)
|
|
|
|
{
|
|
|
|
int n;
|
|
|
|
|
|
|
|
for (n = 0; n < wsi->vhost->count_protocols; n++)
|
|
|
|
if (wsi->vhost->protocols[n].callback(wsi, reason, NULL, in, len))
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-12-07 10:16:17 +08:00
|
|
|
LWS_VISIBLE LWS_EXTERN int
|
|
|
|
lws_callback_vhost_protocols_vhost(struct lws_vhost *vh, int reason, void *in,
|
|
|
|
size_t len)
|
|
|
|
{
|
|
|
|
int n;
|
|
|
|
struct lws *wsi = lws_zalloc(sizeof(*wsi), "fake wsi");
|
|
|
|
|
|
|
|
wsi->context = vh->context;
|
vhost_destroy: use vhost wsi reference counting to trigger destroy
This changes the vhost destroy flow to only hand off the listen
socket if another vhost sharing it, and mark the vhost as
being_destroyed.
Each tsi calls lws_check_deferred_free() once a second, if it sees
any vhost being_destroyed there, it closes all wsi on its tsi on
the same vhost, one time.
As the wsi on the vhost complete close (ie, after libuv async close
if on libuv event loop), they decrement a reference count for all
wsi open on the vhost. The tsi who closes the last one then
completes the destroy flow for the vhost itself... it's random
which tsi completes the vhost destroy but since there are no
wsi left on the vhost, and it holds the context lock, nothing
can conflict.
The advantage of this is that owning tsi do the close for wsi
that are bound to the vhost under destruction, at a time when
they are guaranteed to be idle for service, and they do it with
both vhost and context locks owned, so no other service thread
can conflict for stuff protected by those either.
For the situation the user code may have allocations attached to
the vhost, this adds args to lws_vhost_destroy() to allow destroying
the user allocations just before the vhost is freed.
2018-06-16 09:31:07 +08:00
|
|
|
lws_vhost_bind_wsi(vh, wsi);
|
2017-12-07 10:16:17 +08:00
|
|
|
|
|
|
|
for (n = 0; n < wsi->vhost->count_protocols; n++) {
|
|
|
|
wsi->protocol = &vh->protocols[n];
|
|
|
|
if (wsi->protocol->callback(wsi, reason, NULL, in, len)) {
|
|
|
|
lws_free(wsi);
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
lws_free(wsi);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-02-18 17:26:40 +08:00
|
|
|
/* Replace the context's platform file-operations table. */
LWS_VISIBLE LWS_EXTERN void
lws_set_fops(struct lws_context *context, const struct lws_plat_file_ops *fops)
{
	context->fops = fops;
}
|
|
|
|
|
|
|
|
/* Return the current file position of an open VFS fd. */
LWS_VISIBLE LWS_EXTERN lws_filepos_t
lws_vfs_tell(lws_fop_fd_t fop_fd)
{
	return fop_fd->pos;
}
|
|
|
|
|
|
|
|
/* Return the total length of the file behind an open VFS fd. */
LWS_VISIBLE LWS_EXTERN lws_filepos_t
lws_vfs_get_length(lws_fop_fd_t fop_fd)
{
	return fop_fd->len;
}
|
|
|
|
|
|
|
|
/* Return the modification time recorded for an open VFS fd. */
LWS_VISIBLE LWS_EXTERN uint32_t
lws_vfs_get_mod_time(lws_fop_fd_t fop_fd)
{
	return fop_fd->mod_time;
}
|
|
|
|
|
|
|
|
LWS_VISIBLE lws_fileofs_t
|
|
|
|
lws_vfs_file_seek_set(lws_fop_fd_t fop_fd, lws_fileofs_t offset)
|
|
|
|
{
|
|
|
|
lws_fileofs_t ofs;
|
2017-09-23 12:55:21 +08:00
|
|
|
|
2017-03-03 12:38:10 +08:00
|
|
|
ofs = fop_fd->fops->LWS_FOP_SEEK_CUR(fop_fd, offset - fop_fd->pos);
|
2017-09-23 12:55:21 +08:00
|
|
|
|
2017-03-03 12:38:10 +08:00
|
|
|
return ofs;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
 * Seek relative to end-of-file via the SEEK_CUR primitive.
 * NOTE(review): the relative delta is computed as len + pos + offset,
 * not len - pos + offset as one would expect for "end + offset from
 * current position" — confirm against the fops seek contract.
 */
LWS_VISIBLE lws_fileofs_t
lws_vfs_file_seek_end(lws_fop_fd_t fop_fd, lws_fileofs_t offset)
{
	return fop_fd->fops->LWS_FOP_SEEK_CUR(fop_fd, fop_fd->len +
					      fop_fd->pos + offset);
}
|
|
|
|
|
2017-03-03 12:38:10 +08:00
|
|
|
|
|
|
|
/*
 * Choose which registered file-ops implementation should serve vfs_path.
 *
 * Scans the path for any non-platform fops whose signature string ends
 * just before a '/' in the path; on a match, *vpath is set to the part
 * of the path after that '/' and the matching fops is returned.  Falls
 * back to the platform fops (the list head) when nothing matches or no
 * alternative fops are registered.
 */
const struct lws_plat_file_ops *
lws_vfs_select_fops(const struct lws_plat_file_ops *fops, const char *vfs_path,
		    const char **vpath)
{
	const struct lws_plat_file_ops *pf;
	const char *p = vfs_path;
	int n;

	*vpath = NULL;

	/* no non-platform fops, just use that */

	if (!fops->next)
		return fops;

	/*
	 * scan the vfs path looking for indications we are to be
	 * handled by a specific fops
	 */

	while (p && *p) {
		if (*p != '/') {
			p++;
			continue;
		}
		/* the first one is always platform fops, so skip */
		pf = fops->next;
		while (pf) {
			n = 0;
			while (n < (int)LWS_ARRAY_SIZE(pf->fi) && pf->fi[n].sig) {
				/* only compare once enough path precedes p */
				if (p >= vfs_path + pf->fi[n].len)
					if (!strncmp(p - (pf->fi[n].len - 1),
						     pf->fi[n].sig,
						     pf->fi[n].len - 1)) {
						*vpath = p + 1;
						return pf;
					}

				n++;
			}
			pf = pf->next;
		}
		p++;
	}

	return fops;
}
|
|
|
|
|
|
|
|
/*
 * Open vfs_path through whichever fops implementation claims it.
 * Note the OPEN op deliberately receives the fops list head (fops),
 * not the selected implementation, plus the stripped vpath.
 */
LWS_VISIBLE LWS_EXTERN lws_fop_fd_t LWS_WARN_UNUSED_RESULT
lws_vfs_file_open(const struct lws_plat_file_ops *fops, const char *vfs_path,
		  lws_fop_flags_t *flags)
{
	const char *vpath = "";
	const struct lws_plat_file_ops *selected;

	selected = lws_vfs_select_fops(fops, vfs_path, &vpath);

	return selected->LWS_FOP_OPEN(fops, vfs_path, vpath, flags);
}
|
|
|
|
|
2017-03-03 12:38:10 +08:00
|
|
|
|
2016-06-17 09:41:22 +08:00
|
|
|
/**
|
|
|
|
* lws_now_secs() - seconds since 1970-1-1
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
LWS_VISIBLE LWS_EXTERN unsigned long
|
|
|
|
lws_now_secs(void)
|
|
|
|
{
|
|
|
|
struct timeval tv;
|
|
|
|
|
|
|
|
gettimeofday(&tv, NULL);
|
|
|
|
|
|
|
|
return tv.tv_sec;
|
|
|
|
}
|
|
|
|
|
2017-11-24 11:00:59 +08:00
|
|
|
LWS_VISIBLE LWS_EXTERN int
|
|
|
|
lws_compare_time_t(struct lws_context *context, time_t t1, time_t t2)
|
|
|
|
{
|
|
|
|
if (t1 < context->time_discontiguity)
|
|
|
|
t1 += context->time_fixup;
|
|
|
|
|
|
|
|
if (t2 < context->time_discontiguity)
|
|
|
|
t2 += context->time_fixup;
|
|
|
|
|
|
|
|
return (int)(t1 - t2);
|
|
|
|
}
|
|
|
|
|
2017-10-25 08:00:23 +08:00
|
|
|
/* Return the wsi's socket descriptor, or -1 for a NULL wsi. */
LWS_VISIBLE lws_sockfd_type
lws_get_socket_fd(struct lws *wsi)
{
	if (!wsi)
		return -1;
	return wsi->desc.sockfd;
}
|
|
|
|
|
2013-01-29 12:36:17 +08:00
|
|
|
#ifdef LWS_LATENCY
|
|
|
|
void
|
2015-12-04 11:30:53 +08:00
|
|
|
lws_latency(struct lws_context *context, struct lws *wsi, const char *action,
|
|
|
|
int ret, int completed)
|
2013-01-29 12:36:17 +08:00
|
|
|
{
|
2014-02-26 21:37:31 +01:00
|
|
|
unsigned long long u;
|
2013-01-29 12:36:17 +08:00
|
|
|
char buf[256];
|
|
|
|
|
2018-10-14 06:15:36 +08:00
|
|
|
u = lws_time_in_microseconds();
|
2013-01-29 12:36:17 +08:00
|
|
|
|
2014-03-31 11:01:32 +08:00
|
|
|
if (!action) {
|
2013-01-29 12:36:17 +08:00
|
|
|
wsi->latency_start = u;
|
|
|
|
if (!wsi->action_start)
|
|
|
|
wsi->action_start = u;
|
2014-03-31 11:01:32 +08:00
|
|
|
return;
|
2013-01-29 12:36:17 +08:00
|
|
|
}
|
2014-03-31 11:01:32 +08:00
|
|
|
if (completed) {
|
|
|
|
if (wsi->action_start == wsi->latency_start)
|
|
|
|
sprintf(buf,
|
2014-07-05 10:50:47 +08:00
|
|
|
"Completion first try lat %lluus: %p: ret %d: %s\n",
|
2014-03-31 11:01:32 +08:00
|
|
|
u - wsi->latency_start,
|
|
|
|
(void *)wsi, ret, action);
|
|
|
|
else
|
|
|
|
sprintf(buf,
|
2014-07-05 10:50:47 +08:00
|
|
|
"Completion %lluus: lat %lluus: %p: ret %d: %s\n",
|
2014-03-31 11:01:32 +08:00
|
|
|
u - wsi->action_start,
|
|
|
|
u - wsi->latency_start,
|
|
|
|
(void *)wsi, ret, action);
|
|
|
|
wsi->action_start = 0;
|
|
|
|
} else
|
2014-07-05 10:50:47 +08:00
|
|
|
sprintf(buf, "lat %lluus: %p: ret %d: %s\n",
|
2014-03-31 11:01:32 +08:00
|
|
|
u - wsi->latency_start, (void *)wsi, ret, action);
|
|
|
|
|
|
|
|
if (u - wsi->latency_start > context->worst_latency) {
|
|
|
|
context->worst_latency = u - wsi->latency_start;
|
|
|
|
strcpy(context->worst_latency_info, buf);
|
|
|
|
}
|
|
|
|
lwsl_latency("%s", buf);
|
2013-01-29 12:36:17 +08:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2013-03-30 09:52:21 +08:00
|
|
|
/*
 * Enable / disable RX flow control on a wsi.
 *
 * _enable is either the legacy boolean form (0 = throttle, 1 = allow) or
 * a bitmap form carrying LWS_RXFLOW_REASON_APPLIES plus a reason byte.
 * Each set bit in wsi->rxflow_bitmap is an independent reason to keep rx
 * throttled; rx is only allowed again when the bitmap is fully clear.
 * The actual fd-event change is applied immediately only when requested
 * (LWS_RXFLOW_REASON_FLAG_PROCESS_NOW) or when no callback is in flight;
 * otherwise it is deferred to the end of the current callback.
 * Returns 0, or the result of applying the change immediately.
 */
LWS_VISIBLE int
lws_rx_flow_control(struct lws *wsi, int _enable)
{
	struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
	int en = _enable;

	// h2 ignores rx flow control atm
	if (lwsi_role_h2(wsi) || wsi->http2_substream ||
	    lwsi_role_h2_ENCAPSULATION(wsi))
		return 0; // !!!

	lwsl_info("%s: %p 0x%x\n", __func__, wsi, _enable);

	if (!(_enable & LWS_RXFLOW_REASON_APPLIES)) {
		/*
		 * convert user bool style to bitmap style... in user simple
		 * bool style _enable = 0 = flow control it, = 1 = allow rx
		 */
		en = LWS_RXFLOW_REASON_APPLIES | LWS_RXFLOW_REASON_USER_BOOL;
		if (_enable & 1)
			en |= LWS_RXFLOW_REASON_APPLIES_ENABLE_BIT;
	}

	lws_pt_lock(pt, __func__);

	/* any bit set in rxflow_bitmap DISABLEs rxflow control */
	if (en & LWS_RXFLOW_REASON_APPLIES_ENABLE_BIT)
		wsi->rxflow_bitmap &= ~(en & 0xff);
	else
		wsi->rxflow_bitmap |= en & 0xff;

	/* no-op if the pending change already reflects this state */
	if ((LWS_RXFLOW_PENDING_CHANGE | (!wsi->rxflow_bitmap)) ==
	    wsi->rxflow_change_to)
		goto skip;

	wsi->rxflow_change_to = LWS_RXFLOW_PENDING_CHANGE |
				(!wsi->rxflow_bitmap);

	lwsl_info("%s: %p: bitmap 0x%x: en 0x%x, ch 0x%x\n", __func__, wsi,
		  wsi->rxflow_bitmap, en, wsi->rxflow_change_to);

	if (_enable & LWS_RXFLOW_REASON_FLAG_PROCESS_NOW ||
	    !wsi->rxflow_will_be_applied) {
		/* apply the fd-event change right now, under the pt lock */
		en = __lws_rx_flow_control(wsi);
		lws_pt_unlock(pt);

		return en;
	}

skip:
	lws_pt_unlock(pt);

	return 0;
}
|
|
|
|
|
2013-03-30 09:52:21 +08:00
|
|
|
LWS_VISIBLE void
|
2015-12-11 10:45:35 +08:00
|
|
|
lws_rx_flow_allow_all_protocol(const struct lws_context *context,
|
|
|
|
const struct lws_protocols *protocol)
|
2013-03-16 12:32:27 +08:00
|
|
|
{
|
2016-01-19 03:34:24 +08:00
|
|
|
const struct lws_context_per_thread *pt = &context->pt[0];
|
2015-12-04 11:08:32 +08:00
|
|
|
struct lws *wsi;
|
2016-01-26 20:56:56 +08:00
|
|
|
unsigned int n, m = context->count_threads;
|
2016-01-19 03:34:24 +08:00
|
|
|
|
|
|
|
while (m--) {
|
|
|
|
for (n = 0; n < pt->fds_count; n++) {
|
|
|
|
wsi = wsi_from_fd(context, pt->fds[n].fd);
|
|
|
|
if (!wsi)
|
|
|
|
continue;
|
|
|
|
if (wsi->protocol == protocol)
|
|
|
|
lws_rx_flow_control(wsi, LWS_RXFLOW_ALLOW);
|
|
|
|
}
|
|
|
|
pt++;
|
2013-03-16 12:32:27 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-10-26 18:53:34 +08:00
|
|
|
/*
 * Deliver reason/in/len to every protocol of every vhost on the context,
 * using a zeroed on-stack wsi (bound loosely to each vhost in turn) so
 * callbacks have something to inspect.  ORs together the callbacks'
 * nonzero returns; result is 0 only if every callback accepted.
 */
int
lws_broadcast(struct lws_context *context, int reason, void *in, size_t len)
{
	struct lws_vhost *v = context->vhost_list;
	struct lws wsi; /* stack "fake" wsi, never registered anywhere */
	int n, ret = 0;

	memset(&wsi, 0, sizeof(wsi));
	wsi.context = context;

	while (v) {
		const struct lws_protocols *p = v->protocols;
		wsi.vhost = v; /* not a real bound wsi */

		for (n = 0; n < v->count_protocols; n++) {
			wsi.protocol = p;
			if (p->callback &&
			    p->callback(&wsi, reason, NULL, in, len))
				ret |= 1;
			p++;
		}
		v = v->vhost_next;
	}

	return ret;
}
|
|
|
|
|
2013-03-30 09:52:21 +08:00
|
|
|
/* Return the canonical hostname recorded on the context at creation. */
LWS_VISIBLE extern const char *
lws_canonical_hostname(struct lws_context *context)
{
	return (const char *)context->canonical_hostname;
}
|
|
|
|
|
2017-10-26 18:53:34 +08:00
|
|
|
/* Return the vhost's configured name. */
LWS_VISIBLE LWS_EXTERN const char *
lws_get_vhost_name(struct lws_vhost *vhost)
{
	return vhost->name;
}
|
|
|
|
|
2017-10-29 16:27:13 +08:00
|
|
|
/* Return the port the vhost is listening on. */
LWS_VISIBLE LWS_EXTERN int
lws_get_vhost_port(struct lws_vhost *vhost)
{
	return vhost->listen_port;
}
|
|
|
|
|
|
|
|
/* Return the vhost's opaque user pointer (same field as lws_vhost_user). */
LWS_VISIBLE LWS_EXTERN void *
lws_get_vhost_user(struct lws_vhost *vhost)
{
	return vhost->user;
}
|
|
|
|
|
|
|
|
/* Return the network interface the vhost was bound to, if any. */
LWS_VISIBLE LWS_EXTERN const char *
lws_get_vhost_iface(struct lws_vhost *vhost)
{
	return vhost->iface;
}
|
|
|
|
|
2015-12-21 18:06:38 +01:00
|
|
|
/*
 * Run a user protocol callback with rxflow changes deferred: any flow
 * control the callback requests is recorded, and actually applied here
 * after it returns.  Propagates a nonzero callback return unchanged;
 * otherwise returns the result of applying the deferred rxflow change.
 */
int user_callback_handle_rxflow(lws_callback_function callback_function,
				struct lws *wsi,
				enum lws_callback_reasons reason, void *user,
				void *in, size_t len)
{
	int ret;

	wsi->rxflow_will_be_applied = 1;
	ret = callback_function(wsi, reason, user, in, len);
	wsi->rxflow_will_be_applied = 0;

	if (ret)
		return ret;

	/* callback accepted: apply any rxflow change it queued */
	return __lws_rx_flow_control(wsi);
}
|
|
|
|
|
2018-05-01 12:41:42 +08:00
|
|
|
#if !defined(LWS_WITHOUT_CLIENT)
/*
 * Configure an HTTP proxy for client connections on this vhost.
 *
 * Accepts "[http://][user:pass@]host[:port]".  Any user:pass section is
 * base64-encoded into the vhost's basic-auth token; host/port are stored
 * on the vhost's http substructure.  Returns 0 on success, -1 on bad or
 * over-long input.
 */
LWS_VISIBLE int
lws_set_proxy(struct lws_vhost *vhost, const char *proxy)
{
	char *p;
	char authstring[96];

	if (!proxy)
		return -1;

	/* we have to deal with a possible redundant leading http:// */
	if (!strncmp(proxy, "http://", 7))
		proxy += 7;

	/* strrchr: passwords may themselves contain '@' */
	p = strrchr(proxy, '@');
	if (p) { /* auth is around */

		if ((unsigned int)(p - proxy) > sizeof(authstring) - 1)
			goto auth_too_long;

		lws_strncpy(authstring, proxy, p - proxy + 1);
		// null termination not needed on input
		if (lws_b64_encode_string(authstring, lws_ptr_diff(p, proxy),
				vhost->proxy_basic_auth_token,
				sizeof vhost->proxy_basic_auth_token) < 0)
			goto auth_too_long;

		lwsl_info(" Proxy auth in use\n");

#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
		/* skip past the credentials to the host part */
		proxy = p + 1;
#endif
	} else
		vhost->proxy_basic_auth_token[0] = '\0';

#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
	lws_strncpy(vhost->http.http_proxy_address, proxy,
		    sizeof(vhost->http.http_proxy_address));

	p = strchr(vhost->http.http_proxy_address, ':');
	if (!p && !vhost->http.http_proxy_port) {
		lwsl_err("http_proxy needs to be ads:port\n");

		return -1;
	} else {
		if (p) {
			/* split host from port in place */
			*p = '\0';
			vhost->http.http_proxy_port = atoi(p + 1);
		}
	}

	lwsl_info(" Proxy %s:%u\n", vhost->http.http_proxy_address,
		  vhost->http.http_proxy_port);
#endif
	return 0;

auth_too_long:
	lwsl_err("proxy auth too long\n");

	return -1;
}
#endif
|
2013-10-24 22:12:03 +08:00
|
|
|
|
2017-05-05 11:38:34 -04:00
|
|
|
#if defined(LWS_WITH_SOCKS5)
/*
 * Parse a SOCKS5 proxy string of the form [user[:password]@]address[:port]
 * into the vhost's socks_user / socks_password / socks_proxy_address /
 * socks_proxy_port fields.
 *
 * Returns 0 on success, -1 on NULL input, over-long auth parts, or a
 * missing port when none was already configured.
 */
LWS_VISIBLE int
lws_set_socks(struct lws_vhost *vhost, const char *socks)
{
	char *p_at, *p_colon;
	char user[96];
	char password[96];

	if (!socks)
		return -1;

	vhost->socks_user[0] = '\0';
	vhost->socks_password[0] = '\0';

	p_at = strrchr(socks, '@');
	if (p_at) { /* auth is around */
		if ((unsigned int)(p_at - socks) > (sizeof(user)
			+ sizeof(password) - 2)) {
			lwsl_err("Socks auth too long\n");
			goto bail;
		}

		p_colon = strchr(socks, ':');
		/*
		 * a ':' beyond the '@' belongs to address:port, not to
		 * user:password; without this check "user@host:1080"
		 * makes (p_at - p_colon) underflow as unsigned and is
		 * misreported as "Socks password too long"
		 */
		if (p_colon && p_colon > p_at)
			p_colon = NULL;
		if (p_colon) {
			if ((unsigned int)(p_colon - socks) > (sizeof(user)
				- 1) ) {
				lwsl_err("Socks user too long\n");
				goto bail;
			}
			if ((unsigned int)(p_at - p_colon) > (sizeof(password)
				- 1) ) {
				lwsl_err("Socks password too long\n");
				goto bail;
			}

			lws_strncpy(vhost->socks_user, socks,
				    p_colon - socks + 1);
			lws_strncpy(vhost->socks_password, p_colon + 1,
				    p_at - (p_colon + 1) + 1);
		}

		/* NOTE(review): logs the password in plaintext at info level */
		lwsl_info(" Socks auth, user: %s, password: %s\n",
			  vhost->socks_user, vhost->socks_password );

		/* continue parsing after the auth part */
		socks = p_at + 1;
	}

	lws_strncpy(vhost->socks_proxy_address, socks,
		    sizeof(vhost->socks_proxy_address));

	p_colon = strchr(vhost->socks_proxy_address, ':');
	if (!p_colon && !vhost->socks_proxy_port) {
		lwsl_err("socks_proxy needs to be address:port\n");
		return -1;
	} else {
		if (p_colon) {
			*p_colon = '\0';
			vhost->socks_proxy_port = atoi(p_colon + 1);
		}
	}

	lwsl_info(" Socks %s:%u\n", vhost->socks_proxy_address,
		  vhost->socks_proxy_port);

	return 0;

bail:
	return -1;
}
#endif
|
|
|
|
|
2015-12-04 11:08:32 +08:00
|
|
|
/* Return the lws_protocols entry this connection is bound to (may be NULL
 * before protocol binding has happened) */
LWS_VISIBLE const struct lws_protocols *
lws_get_protocol(struct lws *wsi)
{
	return wsi->protocol;
}
|
|
|
|
|
2013-01-09 18:06:55 +08:00
|
|
|
|
2013-02-18 16:30:10 +08:00
|
|
|
int
|
2015-12-04 11:08:32 +08:00
|
|
|
lws_ensure_user_space(struct lws *wsi)
|
2011-11-07 17:19:25 +08:00
|
|
|
{
|
2013-02-15 22:31:55 +08:00
|
|
|
if (!wsi->protocol)
|
2017-10-13 10:33:02 +08:00
|
|
|
return 0;
|
2013-02-15 22:31:55 +08:00
|
|
|
|
2011-11-07 17:19:25 +08:00
|
|
|
/* allocate the per-connection user memory (if any) */
|
|
|
|
|
|
|
|
if (wsi->protocol->per_session_data_size && !wsi->user_space) {
|
2017-10-28 07:42:44 +08:00
|
|
|
wsi->user_space = lws_zalloc(
|
|
|
|
wsi->protocol->per_session_data_size, "user space");
|
2017-10-13 10:33:02 +08:00
|
|
|
if (wsi->user_space == NULL) {
|
2017-09-23 12:55:21 +08:00
|
|
|
lwsl_err("%s: OOM\n", __func__);
|
2013-02-18 16:30:10 +08:00
|
|
|
return 1;
|
2011-11-07 17:19:25 +08:00
|
|
|
}
|
2014-10-22 15:37:28 +08:00
|
|
|
} else
|
2017-09-23 12:55:21 +08:00
|
|
|
lwsl_debug("%s: %p protocol pss %lu, user_space=%p\n", __func__,
|
|
|
|
wsi, (long)wsi->protocol->per_session_data_size,
|
|
|
|
wsi->user_space);
|
2013-02-18 16:30:10 +08:00
|
|
|
return 0;
|
2011-11-07 17:19:25 +08:00
|
|
|
}
|
2013-01-10 19:50:35 +08:00
|
|
|
|
2017-10-16 16:59:57 +08:00
|
|
|
LWS_VISIBLE void *
|
|
|
|
lws_adjust_protocol_psds(struct lws *wsi, size_t new_size)
|
|
|
|
{
|
|
|
|
((struct lws_protocols *)lws_get_protocol(wsi))->per_session_data_size =
|
|
|
|
new_size;
|
|
|
|
|
|
|
|
if (lws_ensure_user_space(wsi))
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
return wsi->user_space;
|
|
|
|
}
|
|
|
|
|
2016-02-25 15:01:55 +08:00
|
|
|
/*
 * Write a "[YYYY/MM/DD HH:MM:SS:xxxx] LEVEL: " prefix for log level
 * `level` into p (at most len bytes).  Returns the number of chars
 * written, or 0 if the level matched no known level bit.
 *
 * On OPTEE there is no wallclock; the prefix is empty.
 */
LWS_VISIBLE int
lwsl_timestamp(int level, char *p, int len)
{
#ifndef LWS_PLAT_OPTEE
#ifndef _WIN32_WCE
	time_t o_now = time(NULL);
#endif
	unsigned long long now;
	struct tm *ptm = NULL;
#ifndef WIN32
	struct tm tm;
#endif
	int n;

#ifndef _WIN32_WCE
#ifdef WIN32
	/* win32 localtime() uses thread-local storage, safe enough here */
	ptm = localtime(&o_now);
#else
	if (localtime_r(&o_now, &tm))
		ptm = &tm;
#endif
#endif
	p[0] = '\0';
	for (n = 0; n < LLL_COUNT; n++) {
		/* only an exact single-bit level gets a prefix */
		if (level != (1 << n))
			continue;
		/* hundreds of microseconds since epoch */
		now = lws_time_in_microseconds() / 100;
		/* note: log_level_names[n] is evaluated with the level
		 * index before n is reused for the snprintf result */
		if (ptm)
			n = lws_snprintf(p, len,
				"[%04d/%02d/%02d %02d:%02d:%02d:%04d] %s: ",
				ptm->tm_year + 1900,
				ptm->tm_mon + 1,
				ptm->tm_mday,
				ptm->tm_hour,
				ptm->tm_min,
				ptm->tm_sec,
				(int)(now % 10000), log_level_names[n]);
		else
			n = lws_snprintf(p, len, "[%llu:%04d] %s: ",
					(unsigned long long) now / 10000,
					(int)(now % 10000), log_level_names[n]);
		return n;
	}
#else
	p[0] = '\0';
#endif

	return 0;
}
|
|
|
|
|
2017-12-20 10:44:21 +08:00
|
|
|
#ifndef LWS_PLAT_OPTEE
/* ANSI colour sequence (the part after ESC) per log level bit,
 * ordered LSB (LLL_ERR) first -- must stay in step with the LLL_ bits */
static const char * const colours[] = {
	"[31;1m", /* LLL_ERR */
	"[36;1m", /* LLL_WARN */
	"[35;1m", /* LLL_NOTICE */
	"[32;1m", /* LLL_INFO */
	"[34;1m", /* LLL_DEBUG */
	"[33;1m", /* LLL_PARSER */
	"[33m", /* LLL_HEADER */
	"[33m", /* LLL_EXT */
	"[33m", /* LLL_CLIENT */
	"[33;1m", /* LLL_LATENCY */
	"[30;1m", /* LLL_USER */
	"[31m", /* LLL_THREAD */
};

/* cached isatty(2) probe: 0 = unknown, 2 = not a tty, 3 = tty */
static char tty;

/* Default log emitter: timestamp + line to stderr, coloured if stderr
 * is a terminal */
LWS_VISIBLE void
lwsl_emit_stderr(int level, const char *line)
{
	char buf[50];
	int n, m = LWS_ARRAY_SIZE(colours) - 1;

	/* probe once; OR with 2 keeps the cached value nonzero */
	if (!tty)
		tty = isatty(2) | 2;
	lwsl_timestamp(level, buf, sizeof(buf));

	if (tty == 3) {
		/* walk down from the top level bit to find the colour index */
		n = 1 << (LWS_ARRAY_SIZE(colours) - 1);
		while (n) {
			if (level & n)
				break;
			m--;
			n >>= 1;
		}
		fprintf(stderr, "%c%s%s%s%c[0m", 27, colours[m], buf, line, 27);
	} else
		fprintf(stderr, "%s%s", buf, line);
}
|
2018-11-13 17:03:33 +08:00
|
|
|
|
|
|
|
/* Same as lwsl_emit_stderr() but without prepending the timestamp */
LWS_VISIBLE void
lwsl_emit_stderr_notimestamp(int level, const char *line)
{
	int n, m = LWS_ARRAY_SIZE(colours) - 1;

	/* probe stderr once; OR with 2 keeps the cached value nonzero */
	if (!tty)
		tty = isatty(2) | 2;

	if (tty == 3) {
		/* walk down from the top level bit to find the colour index */
		n = 1 << (LWS_ARRAY_SIZE(colours) - 1);
		while (n) {
			if (level & n)
				break;
			m--;
			n >>= 1;
		}
		fprintf(stderr, "%c%s%s%c[0m", 27, colours[m], line, 27);
	} else
		fprintf(stderr, "%s", line);
}

#endif
|
2013-01-19 11:12:16 +08:00
|
|
|
|
2014-12-10 18:50:28 -06:00
|
|
|
LWS_VISIBLE void _lws_logv(int filter, const char *format, va_list vl)
|
2013-01-19 11:17:56 +08:00
|
|
|
{
|
|
|
|
char buf[256];
|
2016-07-23 14:18:25 +08:00
|
|
|
int n;
|
2013-01-19 11:17:56 +08:00
|
|
|
|
|
|
|
if (!(log_level & filter))
|
|
|
|
return;
|
2013-01-10 19:50:35 +08:00
|
|
|
|
2016-07-23 14:18:25 +08:00
|
|
|
n = vsnprintf(buf, sizeof(buf) - 1, format, vl);
|
|
|
|
(void)n;
|
|
|
|
/* vnsprintf returns what it would have written, even if truncated */
|
2018-09-02 07:13:56 +08:00
|
|
|
if (n > (int)sizeof(buf) - 1) {
|
|
|
|
n = sizeof(buf) - 5;
|
|
|
|
buf[n++] = '.';
|
|
|
|
buf[n++] = '.';
|
|
|
|
buf[n++] = '.';
|
|
|
|
buf[n++] = '\n';
|
|
|
|
buf[n] = '\0';
|
|
|
|
}
|
2016-07-23 14:18:25 +08:00
|
|
|
if (n > 0)
|
|
|
|
buf[n] = '\0';
|
2013-01-12 09:17:42 +08:00
|
|
|
|
2013-01-19 11:17:56 +08:00
|
|
|
lwsl_emit(filter, buf);
|
2013-01-10 19:50:35 +08:00
|
|
|
}
|
|
|
|
|
2014-12-10 18:50:28 -06:00
|
|
|
/* Variadic front-end for _lws_logv() */
LWS_VISIBLE void _lws_log(int filter, const char *format, ...)
{
	va_list args;

	va_start(args, format);
	_lws_logv(filter, format, args);
	va_end(args);
}
|
|
|
|
|
2015-12-04 11:30:53 +08:00
|
|
|
/*
 * Set the bitmap of enabled log levels; optionally replace the emit
 * callback (a NULL func leaves the current emitter in place).
 */
LWS_VISIBLE void lws_set_log_level(int level,
				   void (*func)(int level, const char *line))
{
	log_level = level;
	if (func)
		lwsl_emit = func;
}
|
2014-08-16 09:54:27 +08:00
|
|
|
|
2017-02-09 09:11:17 +08:00
|
|
|
/* Return nonzero if any level bit in `level` is currently enabled */
LWS_VISIBLE int lwsl_visible(int level)
{
	return log_level & level;
}
|
|
|
|
|
2017-10-16 12:52:32 +08:00
|
|
|
/*
 * Emit a classic "offset: hex bytes   ascii" hexdump of vbuf at the
 * given log level, 16 bytes per line, surrounded by blank log lines.
 * Nothing is emitted if the level is disabled, len is 0 or vbuf NULL.
 */
LWS_VISIBLE void
lwsl_hexdump_level(int hexdump_level, const void *vbuf, size_t len)
{
	unsigned char *buf = (unsigned char *)vbuf;
	unsigned int n;

	if (!lwsl_visible(hexdump_level))
		return;

	if (!len)
		return;

	if (!vbuf)
		return;

	_lws_log(hexdump_level, "\n");

	for (n = 0; n < len;) {
		unsigned int start = n, m;
		/* 6 (offset) + 16*3 (hex) + 3 (gap) + 16 (ascii) + 2 < 80 */
		char line[80], *p = line;

		p += sprintf(p, "%04X: ", start);

		/* hex column, padded so the ascii column stays aligned */
		for (m = 0; m < 16 && n < len; m++)
			p += sprintf(p, "%02X ", buf[n++]);
		while (m++ < 16)
			p += sprintf(p, "   ");

		p += sprintf(p, "   ");

		/* printable-ascii column, '.' for non-printables */
		for (m = 0; m < 16 && (start + m) < len; m++) {
			if (buf[start + m] >= ' ' && buf[start + m] < 127)
				*p++ = buf[start + m];
			else
				*p++ = '.';
		}
		while (m++ < 16)
			*p++ = ' ';

		*p++ = '\n';
		*p = '\0';
		_lws_log(hexdump_level, "%s", line);
		(void)line;
	}

	_lws_log(hexdump_level, "\n");
}
|
|
|
|
|
|
|
|
/* Hexdump at DEBUG level; a no-op in non-_DEBUG builds */
LWS_VISIBLE void
lwsl_hexdump(const void *vbuf, size_t len)
{
#if defined(_DEBUG)
	lwsl_hexdump_level(LLL_DEBUG, vbuf, len);
#endif
}
|
|
|
|
|
2014-08-16 09:54:27 +08:00
|
|
|
/* Return nonzero if this connection is using TLS (the raw flag bit is
 * returned, not normalized to 0/1); always 0 when built without TLS */
LWS_VISIBLE int
lws_is_ssl(struct lws *wsi)
{
#if defined(LWS_WITH_TLS)
	return wsi->tls.use_ssl & LCCSCF_USE_SSL;
#else
	(void)wsi;
	return 0;
#endif
}
|
2014-08-18 22:49:39 +08:00
|
|
|
|
2018-04-11 13:39:42 +08:00
|
|
|
#if defined(LWS_WITH_TLS) && !defined(LWS_WITH_MBEDTLS)
|
2017-10-18 09:41:44 +08:00
|
|
|
LWS_VISIBLE lws_tls_conn*
|
2017-01-23 19:34:46 +08:00
|
|
|
lws_get_ssl(struct lws *wsi)
|
|
|
|
{
|
2018-05-01 12:41:42 +08:00
|
|
|
return wsi->tls.ssl;
|
2017-01-23 19:34:46 +08:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2014-08-18 22:49:39 +08:00
|
|
|
/* Return nonzero if the wsi still holds buffered output waiting to be
 * flushed to the socket */
LWS_VISIBLE int
lws_partial_buffered(struct lws *wsi)
{
	return lws_has_buffered_out(wsi);
}
|
2014-10-08 12:00:53 +08:00
|
|
|
|
2018-04-26 08:30:12 +08:00
|
|
|
LWS_VISIBLE lws_fileofs_t
|
2015-12-04 11:08:32 +08:00
|
|
|
lws_get_peer_write_allowance(struct lws *wsi)
|
2014-10-29 09:39:08 +08:00
|
|
|
{
|
2018-07-17 18:28:23 +08:00
|
|
|
if (!wsi->role_ops->tx_credit)
|
|
|
|
return -1;
|
2018-04-26 08:30:12 +08:00
|
|
|
return wsi->role_ops->tx_credit(wsi);
|
2014-10-29 09:39:08 +08:00
|
|
|
}
|
2014-11-08 11:18:47 +08:00
|
|
|
|
|
|
|
LWS_VISIBLE void
|
2018-04-11 13:39:42 +08:00
|
|
|
lws_role_transition(struct lws *wsi, enum lwsi_role role, enum lwsi_state state,
|
2018-11-29 08:47:49 +08:00
|
|
|
const struct lws_role_ops *ops)
|
2014-11-08 11:18:47 +08:00
|
|
|
{
|
2018-04-11 13:39:42 +08:00
|
|
|
#if defined(_DEBUG)
|
|
|
|
const char *name = "(unset)";
|
|
|
|
#endif
|
|
|
|
wsi->wsistate = role | state;
|
|
|
|
if (ops)
|
|
|
|
wsi->role_ops = ops;
|
|
|
|
#if defined(_DEBUG)
|
|
|
|
if (wsi->role_ops)
|
|
|
|
name = wsi->role_ops->name;
|
|
|
|
lwsl_debug("%s: %p: wsistate 0x%x, ops %s\n", __func__, wsi,
|
|
|
|
wsi->wsistate, name);
|
|
|
|
#endif
|
2014-11-08 11:18:47 +08:00
|
|
|
}
|
2015-12-04 10:39:23 +08:00
|
|
|
|
lws_plat_fd implement platform default handlers
This is a rewrite of the patch from Soapyman here
https://github.com/warmcat/libwebsockets/pull/363
The main changes compared to Soapyman's original patch are
- There's no new stuff in the info struct user code does any overrides
it may want to do explicitly after lws_context_create returns
- User overrides for file ops can call through (subclass) to the original
platform implementation using lws_get_fops_plat()
- A typedef is provided for plat-specific fd type
- Public helpers are provided to allow user code to be platform-independent
about file access, using the lws platform file operations underneath:
static inline lws_filefd_type
lws_plat_file_open(struct lws_plat_file_ops *fops, const char *filename,
unsigned long *filelen, int flags)
static inline int
lws_plat_file_close(struct lws_plat_file_ops *fops, lws_filefd_type fd)
static inline unsigned long
lws_plat_file_seek_cur(struct lws_plat_file_ops *fops, lws_filefd_type fd,
long offset_from_cur_pos)
static inline int
lws_plat_file_read(struct lws_plat_file_ops *fops, lws_filefd_type fd,
unsigned long *amount, unsigned char *buf, unsigned long len)
static inline int
lws_plat_file_write(struct lws_plat_file_ops *fops, lws_filefd_type fd,
unsigned long *amount, unsigned char *buf, unsigned long len)
There's example documentation and implementation in the test server.
Signed-off-by: Andy Green <andy.green@linaro.org>
2015-12-10 07:58:58 +08:00
|
|
|
/* Return the context's platform file-operations table (const cast away
 * so user code may subclass / override entries) */
LWS_VISIBLE struct lws_plat_file_ops *
lws_get_fops(struct lws_context *context)
{
	return (struct lws_plat_file_ops *)context->fops;
}
|
2015-12-04 10:39:23 +08:00
|
|
|
|
2015-12-11 09:36:14 +08:00
|
|
|
/* Return the lws_context this connection belongs to */
LWS_VISIBLE LWS_EXTERN struct lws_context *
lws_get_context(const struct lws *wsi)
{
	return wsi->context;
}

/* Return the number of service threads the context was created with */
LWS_VISIBLE LWS_EXTERN int
lws_get_count_threads(struct lws_context *context)
{
	return context->count_threads;
}

/* Return the per-connection user memory pointer (may be NULL) */
LWS_VISIBLE LWS_EXTERN void *
lws_wsi_user(struct lws *wsi)
{
	return wsi->user_space;
}
|
2015-12-26 17:20:34 +08:00
|
|
|
|
2017-02-28 21:17:25 +08:00
|
|
|
LWS_VISIBLE LWS_EXTERN void
|
|
|
|
lws_set_wsi_user(struct lws *wsi, void *data)
|
|
|
|
{
|
|
|
|
if (wsi->user_space_externally_allocated)
|
|
|
|
wsi->user_space = data;
|
|
|
|
else
|
|
|
|
lwsl_err("%s: Cannot set internally-allocated user_space\n",
|
|
|
|
__func__);
|
|
|
|
}
|
|
|
|
|
2016-03-02 09:17:22 +08:00
|
|
|
/* Return the parent wsi, or NULL if this wsi has no parent */
LWS_VISIBLE LWS_EXTERN struct lws *
lws_get_parent(const struct lws *wsi)
{
	return wsi->parent;
}

/* Return the head of this wsi's child list, or NULL if none */
LWS_VISIBLE LWS_EXTERN struct lws *
lws_get_child(const struct lws *wsi)
{
	return wsi->child_list;
}

/* Get the opaque pointer the parent attached to this wsi */
LWS_VISIBLE LWS_EXTERN void *
lws_get_opaque_parent_data(const struct lws *wsi)
{
	return wsi->opaque_parent_data;
}

/* Attach an opaque parent-owned pointer to this wsi */
LWS_VISIBLE LWS_EXTERN void
lws_set_opaque_parent_data(struct lws *wsi, void *data)
{
	wsi->opaque_parent_data = data;
}

/* Get the opaque pointer user code attached to this wsi */
LWS_VISIBLE LWS_EXTERN void *
lws_get_opaque_user_data(const struct lws *wsi)
{
	return wsi->opaque_user_data;
}

/* Attach an opaque user-owned pointer to this wsi */
LWS_VISIBLE LWS_EXTERN void
lws_set_opaque_user_data(struct lws *wsi, void *data)
{
	wsi->opaque_user_data = data;
}

/* Return nonzero if a child asked the parent for a writable callback */
LWS_VISIBLE LWS_EXTERN int
lws_get_child_pending_on_writable(const struct lws *wsi)
{
	return wsi->parent_pending_cb_on_writable;
}

/* Clear the child's pending-writable request flag */
LWS_VISIBLE LWS_EXTERN void
lws_clear_child_pending_on_writable(struct lws *wsi)
{
	wsi->parent_pending_cb_on_writable = 0;
}
|
|
|
|
|
2015-12-28 14:24:49 +08:00
|
|
|
|
|
|
|
/*
 * Apply any pending rx flow-control change on this wsi (and recursively
 * on its children), adjusting POLLIN in the pollfd accordingly.
 *
 * NOTE(review): leading "__" follows the convention that the caller
 * already holds the relevant pt lock -- confirm at call sites.
 *
 * Returns 0 on success / nothing to do, -1 on pollfd change failure.
 */
LWS_EXTERN int
__lws_rx_flow_control(struct lws *wsi)
{
	struct lws *wsic = wsi->child_list;

	// h2 ignores rx flow control atm
	if (lwsi_role_h2(wsi) || wsi->http2_substream ||
	    lwsi_role_h2_ENCAPSULATION(wsi))
		return 0; // !!!

	/* if he has children, do those if they were changed */
	while (wsic) {
		if (wsic->rxflow_change_to & LWS_RXFLOW_PENDING_CHANGE)
			__lws_rx_flow_control(wsic);

		wsic = wsic->sibling_list;
	}

	/* there is no pending change */
	if (!(wsi->rxflow_change_to & LWS_RXFLOW_PENDING_CHANGE))
		return 0;

	/* stuff is still buffered, not ready to really accept new input */
	if (lws_buflist_next_segment_len(&wsi->buflist, NULL)) {
		/* get ourselves called back to deal with stashed buffer */
		lws_callback_on_writable(wsi);

		return 0;
	}

	/* now the pending is cleared, we can change rxflow state */

	wsi->rxflow_change_to &= ~LWS_RXFLOW_PENDING_CHANGE;

	lwsl_info("rxflow: wsi %p change_to %d\n", wsi,
		  wsi->rxflow_change_to & LWS_RXFLOW_ALLOW);

	/* adjust the pollfd for this wsi */

	if (wsi->rxflow_change_to & LWS_RXFLOW_ALLOW) {
		if (__lws_change_pollfd(wsi, 0, LWS_POLLIN)) {
			lwsl_info("%s: fail\n", __func__);
			return -1;
		}
	} else
		if (__lws_change_pollfd(wsi, LWS_POLLIN, 0))
			return -1;

	return 0;
}
|
2015-12-30 11:43:36 +08:00
|
|
|
|
2018-10-09 10:29:42 +08:00
|
|
|
/*
 * UTF-8 validation DFA table.  Each entry packs the rule for the NEXT
 * byte into one octet:
 *
 *   bits 7..4: lowest acceptable value of the next byte (upper nibble)
 *   bits 3..2: (acceptable range width / 0x10) - 1
 *   bits 1..0: continuation bytes remaining after that byte
 *              (index into the trailing s0/s2/s3 entries)
 *
 * Entries 0..20 are selected by lead bytes 0xe0..0xf4, entries 21..23
 * by the remaining-continuation count.  The tighter e0/ed/f0/f4 rows
 * reject overlong encodings, UTF-16 surrogates and values > U+10FFFF.
 */
static const unsigned char e0f4[] = {
	0xa0 | ((2 - 1) << 2) | 1, /* e0: next must be a0..bf, 1 more after */
	0x80 | ((4 - 1) << 2) | 1, /* e1 */
	0x80 | ((4 - 1) << 2) | 1, /* e2 */
	0x80 | ((4 - 1) << 2) | 1, /* e3 */
	0x80 | ((4 - 1) << 2) | 1, /* e4 */
	0x80 | ((4 - 1) << 2) | 1, /* e5 */
	0x80 | ((4 - 1) << 2) | 1, /* e6 */
	0x80 | ((4 - 1) << 2) | 1, /* e7 */
	0x80 | ((4 - 1) << 2) | 1, /* e8 */
	0x80 | ((4 - 1) << 2) | 1, /* e9 */
	0x80 | ((4 - 1) << 2) | 1, /* ea */
	0x80 | ((4 - 1) << 2) | 1, /* eb */
	0x80 | ((4 - 1) << 2) | 1, /* ec */
	0x80 | ((2 - 1) << 2) | 1, /* ed: 80..9f only (no surrogates) */
	0x80 | ((4 - 1) << 2) | 1, /* ee */
	0x80 | ((4 - 1) << 2) | 1, /* ef */
	0x90 | ((3 - 1) << 2) | 2, /* f0: 90..bf (no overlongs), 2 more */
	0x80 | ((4 - 1) << 2) | 2, /* f1 */
	0x80 | ((4 - 1) << 2) | 2, /* f2 */
	0x80 | ((4 - 1) << 2) | 2, /* f3 */
	0x80 | ((1 - 1) << 2) | 2, /* f4: 80..8f only (<= U+10FFFF) */

	0, /* s0 */
	0x80 | ((4 - 1) << 2) | 0, /* s2 */
	0x80 | ((4 - 1) << 2) | 1, /* s3 */
};

/*
 * Feed one byte through the UTF-8 validator.  `state` is 0 initially /
 * between complete sequences, otherwise a previously returned state.
 * Returns the next state (0 = sequence complete) or -1 on invalid input.
 */
LWS_EXTERN int
lws_check_byte_utf8(unsigned char state, unsigned char c)
{
	unsigned char s = state;

	if (!s) {
		/* expecting a lead byte */
		if (c >= 0x80) {
			if (c < 0xc2 || c > 0xf4)
				return -1;
			if (c < 0xe0)
				/* 2-byte seq: one continuation 80..bf */
				return 0x80 | ((4 - 1) << 2);
			else
				return e0f4[c - 0xe0];
		}

		return s;
	}
	/* continuation byte must fall in the range encoded in s */
	if (c < (s & 0xf0) || c >= (s & 0xf0) + 0x10 + ((s << 2) & 0x30))
		return -1;

	return e0f4[21 + (s & 3)];
}

/*
 * Validate a buffer as (a continuation of) UTF-8, updating *state so
 * sequences may span buffer boundaries.  Returns 0 if valid so far,
 * 1 on the first invalid byte.
 */
LWS_EXTERN int
lws_check_utf8(unsigned char *state, unsigned char *buf, size_t len)
{
	unsigned char s = *state;

	while (len--) {
		unsigned char c = *buf++;

		if (!s) {
			/* expecting a lead byte */
			if (c >= 0x80) {
				if (c < 0xc2 || c > 0xf4)
					return 1;
				if (c < 0xe0)
					s = 0x80 | ((4 - 1) << 2);
				else
					s = e0f4[c - 0xe0];
			}
		} else {
			/* continuation byte range check from the state */
			if (c < (s & 0xf0) ||
			    c >= (s & 0xf0) + 0x10 + ((s << 2) & 0x30))
				return 1;
			s = e0f4[21 + (s & 3)];
		}
	}

	*state = s;

	return 0;
}
|
2016-01-11 11:34:01 +08:00
|
|
|
|
2016-01-14 13:39:02 +08:00
|
|
|
/*
 * Cut up "prot://address:port/path" in place (p is mutated: separators
 * are overwritten with NULs) and point *prot / *ads / *path into it.
 * '+' after "://" marks a unix socket path, '[...]' an ipv6 literal.
 *
 * NOTE(review): *port is only assigned for http/https/ws/wss defaults
 * or an explicit ":port"; callers should pre-initialize it.
 *
 * Always returns 0.
 */
LWS_VISIBLE LWS_EXTERN int
lws_parse_uri(char *p, const char **prot, const char **ads, int *port,
	      const char **path)
{
	const char *end;
	char unix_skt = 0;

	/* cut up the location into address, port and path */
	*prot = p;
	while (*p && (*p != ':' || p[1] != '/' || p[2] != '/'))
		p++;
	if (!*p) {
		/* no "://": whole string is the address, proto is empty */
		end = p;
		p = (char *)*prot;
		*prot = end;
	} else {
		*p = '\0';
		p += 3;
	}
	if (*p == '+') /* unix skt */
		unix_skt = 1;

	*ads = p;
	if (!strcmp(*prot, "http") || !strcmp(*prot, "ws"))
		*port = 80;
	else if (!strcmp(*prot, "https") || !strcmp(*prot, "wss"))
		*port = 443;

	if (*p == '[') {
		/* ipv6 literal: address is inside the brackets */
		++(*ads);
		while (*p && *p != ']')
			p++;
		if (*p)
			*p++ = '\0';
	} else
		/* unix skt paths may contain '/' before any ':' */
		while (*p && *p != ':' && (unix_skt || *p != '/'))
			p++;

	if (*p == ':') {
		*p++ = '\0';
		*port = atoi(p);
		while (*p && *p != '/')
			p++;
	}
	*path = "/";
	if (*p) {
		*p++ = '\0';
		if (*p)
			*path = p;
	}

	return 0;
}
|
|
|
|
|
2018-05-10 16:13:26 +08:00
|
|
|
/*
 * Duplicate a NUL-terminated string into lws_malloc'd memory; the
 * caller owns (and must free) the copy.  Returns NULL on OOM.
 */
char *
lws_strdup(const char *s)
{
	size_t sz = strlen(s) + 1;
	char *copy = lws_malloc(sz, "strdup");

	if (copy)
		memcpy(copy, s, sz);

	return copy;
}
|
|
|
|
|
2018-03-19 09:33:55 +08:00
|
|
|
#if defined(LWS_WITHOUT_EXTENSIONS)

/* we need to provide dummy callbacks for internal exts
 * so user code runs when faced with a lib compiled with
 * extensions disabled.
 */

/* No-op stand-in for the permessage-deflate extension callback */
LWS_VISIBLE int
lws_extension_callback_pm_deflate(struct lws_context *context,
				  const struct lws_extension *ext,
				  struct lws *wsi,
				  enum lws_extension_callback_reasons reason,
				  void *user, void *in, size_t len)
{
	(void)context;
	(void)ext;
	(void)wsi;
	(void)reason;
	(void)user;
	(void)in;
	(void)len;

	return 0;
}

/* Extensions are compiled out: setting an option always fails */
LWS_EXTERN int
lws_set_extension_option(struct lws *wsi, const char *ext_name,
			 const char *opt_name, const char *opt_val)
{
	/* silence unused-parameter warnings, as the stub above does */
	(void)wsi;
	(void)ext_name;
	(void)opt_name;
	(void)opt_val;

	return -1;
}
#endif
|
|
|
|
|
2018-11-22 17:58:30 +08:00
|
|
|
/* note: this returns a random port, or one of these <= 0 return codes:
 *
 * LWS_ITOSA_USABLE: the interface is usable, returned if so and sockfd invalid
 * LWS_ITOSA_NOT_EXIST: the requested iface does not even exist
 * LWS_ITOSA_NOT_USABLE: the requested iface exists but is not usable (eg, no IP)
 * LWS_ITOSA_BUSY: the port at the requested iface + port is already in use
 */

LWS_EXTERN int
lws_socket_bind(struct lws_vhost *vhost, lws_sockfd_type sockfd, int port,
		const char *iface)
{
#ifdef LWS_WITH_UNIX_SOCK
	struct sockaddr_un serv_unix;
#endif
#ifdef LWS_WITH_IPV6
	struct sockaddr_in6 serv_addr6;
#endif
	struct sockaddr_in serv_addr4;
#ifndef LWS_PLAT_OPTEE
	socklen_t len = sizeof(struct sockaddr_storage);
#endif
	int n;
#if !defined(LWS_WITH_ESP32)
	int m;
#endif
	struct sockaddr_storage sin;
	struct sockaddr *v;

	memset(&sin, 0, sizeof(sin));

#if defined(LWS_WITH_UNIX_SOCK)
	if (LWS_UNIX_SOCK_ENABLED(vhost)) {
		/* unix-domain listen: iface is the filesystem path */
		v = (struct sockaddr *)&serv_unix;
		n = sizeof(struct sockaddr_un);
		bzero((char *) &serv_unix, sizeof(serv_unix));
		serv_unix.sun_family = AF_UNIX;
		if (!iface)
			return LWS_ITOSA_NOT_EXIST;
		if (sizeof(serv_unix.sun_path) <= strlen(iface)) {
			lwsl_err("\"%s\" too long for UNIX domain socket\n",
				 iface);
			return LWS_ITOSA_NOT_EXIST;
		}
		strcpy(serv_unix.sun_path, iface);
		/* leading '@' requests a Linux abstract socket */
		if (serv_unix.sun_path[0] == '@')
			serv_unix.sun_path[0] = '\0';
		else
			/* remove any stale socket file before binding */
			unlink(serv_unix.sun_path);

	} else
#endif
#if defined(LWS_WITH_IPV6) && !defined(LWS_WITH_ESP32)
	if (LWS_IPV6_ENABLED(vhost)) {
		v = (struct sockaddr *)&serv_addr6;
		n = sizeof(struct sockaddr_in6);
		bzero((char *) &serv_addr6, sizeof(serv_addr6));
		if (iface) {
			/* resolve the netif name to a bindable address */
			m = interface_to_sa(vhost, iface,
				    (struct sockaddr_in *)v, n);
			if (m == LWS_ITOSA_NOT_USABLE) {
				lwsl_info("%s: netif %s: Not usable\n",
					 __func__, iface);
				return m;
			}
			if (m == LWS_ITOSA_NOT_EXIST) {
				lwsl_info("%s: netif %s: Does not exist\n",
					 __func__, iface);
				return m;
			}
			serv_addr6.sin6_scope_id = lws_get_addr_scope(iface);
		}

		serv_addr6.sin6_family = AF_INET6;
		serv_addr6.sin6_port = htons(port);
	} else
#endif
	{
		/* ipv4 fallback; no iface means INADDR_ANY */
		v = (struct sockaddr *)&serv_addr4;
		n = sizeof(serv_addr4);
		bzero((char *) &serv_addr4, sizeof(serv_addr4));
		serv_addr4.sin_addr.s_addr = INADDR_ANY;
		serv_addr4.sin_family = AF_INET;

#if !defined(LWS_WITH_ESP32)
		if (iface) {
			m = interface_to_sa(vhost, iface,
				    (struct sockaddr_in *)v, n);
			if (m == LWS_ITOSA_NOT_USABLE) {
				lwsl_info("%s: netif %s: Not usable\n",
					 __func__, iface);
				return m;
			}
			if (m == LWS_ITOSA_NOT_EXIST) {
				lwsl_info("%s: netif %s: Does not exist\n",
					 __func__, iface);
				return m;
			}
		}
#endif
		serv_addr4.sin_port = htons(port);
	} /* ipv4 */

	/* just checking for the interface extant */
	if (sockfd == LWS_SOCK_INVALID)
		return LWS_ITOSA_USABLE;

	n = bind(sockfd, v, n);
#ifdef LWS_WITH_UNIX_SOCK
	if (n < 0 && LWS_UNIX_SOCK_ENABLED(vhost)) {
		lwsl_err("ERROR on binding fd %d to \"%s\" (%d %d)\n",
			 sockfd, iface, n, LWS_ERRNO);
		return LWS_ITOSA_NOT_EXIST;
	} else
#endif
	if (n < 0) {
		lwsl_err("ERROR on binding fd %d to port %d (%d %d)\n",
			 sockfd, port, n, LWS_ERRNO);

		/* if something already listening, tell caller to fail permanently */

		if (LWS_ERRNO == LWS_EADDRINUSE)
			return LWS_ITOSA_BUSY;

		/* otherwise ask caller to retry later */

		return LWS_ITOSA_NOT_EXIST;
	}

#if defined(LWS_WITH_UNIX_SOCK)
	/* when dropping privileges later, hand the socket file to that uid */
	if (LWS_UNIX_SOCK_ENABLED(vhost) && vhost->context->uid)
		if (chown(serv_unix.sun_path, vhost->context->uid,
			  vhost->context->gid))
			lwsl_notice("%s: chown for unix skt %s failed\n",
				    __func__, serv_unix.sun_path);
#endif

#ifndef LWS_PLAT_OPTEE
	/* discover the port the kernel actually assigned (port 0 case) */
	if (getsockname(sockfd, (struct sockaddr *)&sin, &len) == -1)
		lwsl_warn("getsockname: %s\n", strerror(LWS_ERRNO));
	else
#endif
#if defined(LWS_WITH_IPV6)
	port = (sin.ss_family == AF_INET6) ?
		ntohs(((struct sockaddr_in6 *) &sin)->sin6_port) :
		ntohs(((struct sockaddr_in *) &sin)->sin_port);
#else
	{
		/* copy out to avoid aliasing the storage struct directly */
		struct sockaddr_in sain;
		memcpy(&sain, &sin, sizeof(sain));
		port = ntohs(sain.sin_port);
	}
#endif

	return port;
}
|
|
|
|
|
2018-04-27 09:01:41 +08:00
|
|
|
LWS_VISIBLE LWS_EXTERN int
|
|
|
|
lws_get_vhost_listen_port(struct lws_vhost *vhost)
|
|
|
|
{
|
|
|
|
return vhost->listen_port;
|
|
|
|
}
|
|
|
|
|
2017-09-28 11:29:03 +08:00
|
|
|
#if defined(LWS_WITH_IPV6)
|
2017-06-05 13:59:22 -03:00
|
|
|
LWS_EXTERN unsigned long
|
|
|
|
lws_get_addr_scope(const char *ipaddr)
|
|
|
|
{
|
|
|
|
unsigned long scope = 0;
|
|
|
|
|
|
|
|
#ifndef WIN32
|
|
|
|
struct ifaddrs *addrs, *addr;
|
|
|
|
char ip[NI_MAXHOST];
|
|
|
|
unsigned int i;
|
|
|
|
|
|
|
|
getifaddrs(&addrs);
|
|
|
|
for (addr = addrs; addr; addr = addr->ifa_next) {
|
|
|
|
if (!addr->ifa_addr ||
|
|
|
|
addr->ifa_addr->sa_family != AF_INET6)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
getnameinfo(addr->ifa_addr,
|
|
|
|
sizeof(struct sockaddr_in6),
|
|
|
|
ip, sizeof(ip),
|
|
|
|
NULL, 0, NI_NUMERICHOST);
|
|
|
|
|
|
|
|
i = 0;
|
|
|
|
while (ip[i])
|
|
|
|
if (ip[i++] == '%') {
|
|
|
|
ip[i - 1] = '\0';
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!strcmp(ip, ipaddr)) {
|
|
|
|
scope = if_nametoindex(addr->ifa_name);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
freeifaddrs(addrs);
|
|
|
|
#else
|
|
|
|
PIP_ADAPTER_ADDRESSES adapter, addrs = NULL;
|
|
|
|
PIP_ADAPTER_UNICAST_ADDRESS addr;
|
|
|
|
ULONG size = 0;
|
|
|
|
DWORD ret;
|
|
|
|
struct sockaddr_in6 *sockaddr;
|
|
|
|
char ip[NI_MAXHOST];
|
|
|
|
unsigned int i;
|
|
|
|
int found = 0;
|
|
|
|
|
|
|
|
for (i = 0; i < 5; i++)
|
|
|
|
{
|
|
|
|
ret = GetAdaptersAddresses(AF_INET6, GAA_FLAG_INCLUDE_PREFIX,
|
2018-11-23 08:47:56 +08:00
|
|
|
NULL, addrs, &size);
|
2017-06-05 13:59:22 -03:00
|
|
|
if ((ret == NO_ERROR) || (ret == ERROR_NO_DATA)) {
|
|
|
|
break;
|
|
|
|
} else if (ret == ERROR_BUFFER_OVERFLOW)
|
|
|
|
{
|
|
|
|
if (addrs)
|
|
|
|
free(addrs);
|
2017-10-04 07:10:39 +08:00
|
|
|
addrs = (IP_ADAPTER_ADDRESSES *)malloc(size);
|
2017-06-05 13:59:22 -03:00
|
|
|
} else
|
|
|
|
{
|
|
|
|
if (addrs)
|
|
|
|
{
|
|
|
|
free(addrs);
|
|
|
|
addrs = NULL;
|
|
|
|
}
|
|
|
|
lwsl_err("Failed to get IPv6 address table (%d)", ret);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-09-23 12:55:21 +08:00
|
|
|
if ((ret == NO_ERROR) && (addrs)) {
|
2017-06-05 13:59:22 -03:00
|
|
|
adapter = addrs;
|
2017-09-23 12:55:21 +08:00
|
|
|
while (adapter && !found) {
|
2017-06-05 13:59:22 -03:00
|
|
|
addr = adapter->FirstUnicastAddress;
|
2017-09-23 12:55:21 +08:00
|
|
|
while (addr && !found) {
|
2017-10-28 07:42:44 +08:00
|
|
|
if (addr->Address.lpSockaddr->sa_family ==
|
|
|
|
AF_INET6) {
|
2017-09-23 12:55:21 +08:00
|
|
|
sockaddr = (struct sockaddr_in6 *)
|
|
|
|
(addr->Address.lpSockaddr);
|
2017-06-05 13:59:22 -03:00
|
|
|
|
2017-09-23 12:55:21 +08:00
|
|
|
lws_plat_inet_ntop(sockaddr->sin6_family,
|
|
|
|
&sockaddr->sin6_addr,
|
2017-06-05 13:59:22 -03:00
|
|
|
ip, sizeof(ip));
|
|
|
|
|
|
|
|
if (!strcmp(ip, ipaddr)) {
|
|
|
|
scope = sockaddr->sin6_scope_id;
|
|
|
|
found = 1;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
addr = addr->Next;
|
|
|
|
}
|
|
|
|
adapter = adapter->Next;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (addrs)
|
|
|
|
free(addrs);
|
|
|
|
#endif
|
|
|
|
|
|
|
|
return scope;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2016-06-08 10:07:02 +08:00
|
|
|
/* uppercase hex digit lookup shared by the purify/urlencode helpers below */
static const char *hex = "0123456789ABCDEF";
|
|
|
|
|
|
|
|
/*
 * Copy string into escaped, doubling any single-quote so the result is
 * safe to embed in a single-quoted SQL literal.  len is the size of the
 * escaped buffer; output is always NUL-terminated and may be truncated.
 * Returns escaped.
 */
LWS_VISIBLE LWS_EXTERN const char *
lws_sql_purify(char *escaped, const char *string, int len)
{
	const char *in = string;
	char *out = escaped;

	while (*in && len-- > 2) {
		if (*in != '\'') {
			*out++ = *in++;
			continue;
		}

		/* double-up the quote: costs one extra output byte */
		*out++ = '\'';
		*out++ = '\'';
		len--;
		in++;
	}
	*out = '\0';

	return escaped;
}
|
|
|
|
|
2016-06-17 10:05:23 +08:00
|
|
|
LWS_VISIBLE LWS_EXTERN const char *
|
|
|
|
lws_json_purify(char *escaped, const char *string, int len)
|
|
|
|
{
|
|
|
|
const char *p = string;
|
|
|
|
char *q = escaped;
|
|
|
|
|
2016-06-26 06:29:20 +08:00
|
|
|
if (!p) {
|
|
|
|
escaped[0] = '\0';
|
|
|
|
return escaped;
|
|
|
|
}
|
|
|
|
|
2016-06-17 10:05:23 +08:00
|
|
|
while (*p && len-- > 6) {
|
2018-08-14 08:00:25 +08:00
|
|
|
if (*p == '\t') {
|
|
|
|
p++;
|
|
|
|
*q++ = '\\';
|
|
|
|
*q++ = 't';
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (*p == '\n') {
|
|
|
|
p++;
|
|
|
|
*q++ = '\\';
|
|
|
|
*q++ = 'n';
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (*p == '\r') {
|
|
|
|
p++;
|
|
|
|
*q++ = '\\';
|
|
|
|
*q++ = 'r';
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2016-06-17 10:05:23 +08:00
|
|
|
if (*p == '\"' || *p == '\\' || *p < 0x20) {
|
|
|
|
*q++ = '\\';
|
|
|
|
*q++ = 'u';
|
|
|
|
*q++ = '0';
|
|
|
|
*q++ = '0';
|
|
|
|
*q++ = hex[((*p) >> 4) & 15];
|
|
|
|
*q++ = hex[(*p) & 15];
|
|
|
|
len -= 5;
|
|
|
|
p++;
|
|
|
|
} else
|
|
|
|
*q++ = *p++;
|
|
|
|
}
|
|
|
|
*q = '\0';
|
|
|
|
|
|
|
|
return escaped;
|
|
|
|
}
|
|
|
|
|
2018-03-29 09:28:41 +08:00
|
|
|
/*
 * Sanitize a filename in place: ".." pairs become "__" (directory
 * traversal defusal) and ':', '\', '$', '%' each become '_'.
 */
LWS_VISIBLE LWS_EXTERN void
lws_filename_purify_inplace(char *filename)
{
	char *p = filename;

	while (*p) {

		if (p[0] == '.' && p[1] == '.') {
			p[0] = '_';
			p[1] = '_';
		}

		switch (*p) {
		case ':':
		case '\\':
		case '$':
		case '%':
			*p = '_';
			break;
		}

		p++;
	}
}
|
|
|
|
|
2016-06-08 10:07:02 +08:00
|
|
|
LWS_VISIBLE LWS_EXTERN const char *
|
|
|
|
lws_urlencode(char *escaped, const char *string, int len)
|
|
|
|
{
|
|
|
|
const char *p = string;
|
|
|
|
char *q = escaped;
|
|
|
|
|
|
|
|
while (*p && len-- > 3) {
|
|
|
|
if (*p == ' ') {
|
|
|
|
*q++ = '+';
|
|
|
|
p++;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
if ((*p >= '0' && *p <= '9') ||
|
|
|
|
(*p >= 'A' && *p <= 'Z') ||
|
|
|
|
(*p >= 'a' && *p <= 'z')) {
|
|
|
|
*q++ = *p++;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
*q++ = '%';
|
|
|
|
*q++ = hex[(*p >> 4) & 0xf];
|
|
|
|
*q++ = hex[*p & 0xf];
|
|
|
|
|
|
|
|
len -= 2;
|
|
|
|
p++;
|
|
|
|
}
|
|
|
|
*q = '\0';
|
|
|
|
|
|
|
|
return escaped;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Decode a percent-encoded string into string ('+' -> space,
 * %XX -> byte).  len is the capacity for decoded payload bytes; the
 * NUL terminator is written after them.  Returns 0 on success, -1 on
 * a malformed %XX escape.
 */
LWS_VISIBLE LWS_EXTERN int
lws_urldecode(char *string, const char *escaped, int len)
{
	int phase = 0, nib;
	char acc = 0;

	while (*escaped && len) {
		switch (phase) {
		case 0:
			/* plain char, '+', or the start of a %XX escape */
			if (*escaped == '%') {
				phase = 1;
				escaped++;
				continue;
			}
			if (*escaped == '+') {
				escaped++;
				*string++ = ' ';
				len--;
				continue;
			}
			*string++ = *escaped++;
			len--;
			break;
		case 1:
			/* high nibble of the escape */
			nib = char_to_hex(*escaped);
			if (nib < 0)
				return -1;
			escaped++;
			acc = nib << 4;
			phase = 2;
			break;

		case 2:
			/* low nibble completes the byte */
			nib = char_to_hex(*escaped);
			if (nib < 0)
				return -1;
			escaped++;
			*string++ = acc | nib;
			len--;
			phase = 0;
			break;
		}

	}
	*string = '\0';

	return 0;
}
|
|
|
|
|
2016-03-28 10:10:43 +08:00
|
|
|
LWS_VISIBLE LWS_EXTERN int
|
|
|
|
lws_finalize_startup(struct lws_context *context)
|
|
|
|
{
|
|
|
|
struct lws_context_creation_info info;
|
|
|
|
|
|
|
|
info.uid = context->uid;
|
|
|
|
info.gid = context->gid;
|
|
|
|
|
2017-06-07 06:10:02 +08:00
|
|
|
#if defined(LWS_HAVE_SYS_CAPABILITY_H) && defined(LWS_HAVE_LIBCAP)
|
|
|
|
memcpy(info.caps, context->caps, sizeof(info.caps));
|
|
|
|
info.count_caps = context->count_caps;
|
|
|
|
#endif
|
|
|
|
|
2016-03-28 10:10:43 +08:00
|
|
|
if (lws_check_opt(context->options, LWS_SERVER_OPTION_EXPLICIT_VHOSTS))
|
|
|
|
lws_plat_drop_app_privileges(&info);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-08-02 19:15:19 +08:00
|
|
|
LWS_VISIBLE LWS_EXTERN void
|
|
|
|
lws_get_effective_uid_gid(struct lws_context *context, int *uid, int *gid)
|
|
|
|
{
|
|
|
|
*uid = context->uid;
|
|
|
|
*gid = context->gid;
|
|
|
|
}
|
|
|
|
|
2016-09-15 02:22:57 +08:00
|
|
|
/*
 * snprintf wrapper that clamps the result: returns 0 when size is 0,
 * size (not the would-have-been length) on truncation, otherwise the
 * number of characters written.
 */
int
lws_snprintf(char *str, size_t size, const char *format, ...)
{
	int n = 0;
	va_list ap;

	if (size) {
		va_start(ap, format);
		n = vsnprintf(str, size, format, ap);
		va_end(ap);

		/* on truncation report the buffer size, not the need */
		if (n >= (int)size)
			n = (int)size;
	}

	return n;
}
|
|
|
|
|
2018-03-12 09:28:26 +08:00
|
|
|
/*
 * strncpy that guarantees NUL termination.  size is the full size of
 * dest including the terminator; the copy is truncated as needed.
 * Returns dest.
 */
char *
lws_strncpy(char *dest, const char *src, size_t size)
{
	/* a zero-length buffer would underflow size - 1 below */
	if (!size)
		return dest;

	strncpy(dest, src, size - 1);
	dest[size - 1] = '\0';

	return dest;
}
|
|
|
|
|
2018-10-09 10:29:42 +08:00
|
|
|
|
|
|
|
/* internal scanner states for lws_tokenize() */
typedef enum {
	LWS_TOKZS_LEADING_WHITESPACE,	/* before any token content */
	LWS_TOKZS_QUOTED_STRING,	/* inside "..." */
	LWS_TOKZS_TOKEN,		/* accumulating token chars */
	LWS_TOKZS_TOKEN_POST_TERMINAL	/* token ended; scanning for = or : */
} lws_tokenize_state;
|
|
|
|
|
|
|
|
/*
 * Scan the next element from ts->start / ts->len and classify it.
 *
 * Returns an LWS_TOKZE_* code: token / integer / float / quoted string /
 * delimiter, a name (token followed by '=', or ':' with
 * LWS_TOKENIZE_F_AGG_COLON), LWS_TOKZE_ENDED at end of input, or a
 * negative LWS_TOKZE_ERR_* code.  For content results, ts->token /
 * ts->token_len describe the element in-place in the input (not
 * NUL-terminated) and ts->start / ts->len are left ready for the next
 * call.  Behavior is adjusted by ts->flags (see LWS_TOKENIZE_F_*).
 */
int
lws_tokenize(struct lws_tokenize *ts)
{
	const char *rfc7230_delims = "(),/:;<=>?@[\\]{}";
	lws_tokenize_state state = LWS_TOKZS_LEADING_WHITESPACE;
	/*
	 * d_minus / d_dot: chars acting as delimiters
	 * s_minus / s_dot: chars allowed inside a token
	 * the *_NONTERM flags swap '-' and '.' between those roles
	 */
	char c, flo = 0, d_minus = '-', d_dot = '.', s_minus = '\0',
	     s_dot = '\0';
	/* -1: undecided, 1: all digits so far, 0: not numeric */
	signed char num = -1;
	/* running utf8 sequence state for lws_check_byte_utf8() */
	int utf8 = 0;

	/* for speed, compute the effect of the flags outside the loop */

	if (ts->flags & LWS_TOKENIZE_F_MINUS_NONTERM) {
		d_minus = '\0';
		s_minus = '-';
	}
	if (ts->flags & LWS_TOKENIZE_F_DOT_NONTERM) {
		d_dot = '\0';
		s_dot = '.';
	}

	ts->token = NULL;
	ts->token_len = 0;

	while (ts->len) {
		c = *ts->start++;
		ts->len--;

		utf8 = lws_check_byte_utf8((unsigned char)utf8, c);
		if (utf8 < 0)
			return LWS_TOKZE_ERR_BROKEN_UTF8;

		if (!c)
			break;

		/* whitespace */

		if (c == ' ' || c == '\t' || c == '\n' || c == '\r' ||
		    c == '\f') {
			switch (state) {
			case LWS_TOKZS_LEADING_WHITESPACE:
			case LWS_TOKZS_TOKEN_POST_TERMINAL:
				continue;
			case LWS_TOKZS_QUOTED_STRING:
				/* whitespace is literal inside quotes */
				ts->token_len++;
				continue;
			case LWS_TOKZS_TOKEN:
				/* we want to scan forward to look for = */

				state = LWS_TOKZS_TOKEN_POST_TERMINAL;
				continue;
			}
		}

		/* quoted string */

		if (c == '\"') {
			if (state == LWS_TOKZS_QUOTED_STRING)
				return LWS_TOKZE_QUOTED_STRING;

			/* starting a quoted string */

			if (ts->flags & LWS_TOKENIZE_F_COMMA_SEP_LIST) {
				if (ts->delim == LWSTZ_DT_NEED_DELIM)
					return LWS_TOKZE_ERR_COMMA_LIST;
				ts->delim = LWSTZ_DT_NEED_DELIM;
			}

			state = LWS_TOKZS_QUOTED_STRING;
			ts->token = ts->start;
			ts->token_len = 0;

			continue;
		}

		/* token= aggregation */

		if (c == '=' && (state == LWS_TOKZS_TOKEN_POST_TERMINAL ||
				 state == LWS_TOKZS_TOKEN)) {
			if (num == 1)
				return LWS_TOKZE_ERR_NUM_ON_LHS;
			/* swallow the = */
			return LWS_TOKZE_TOKEN_NAME_EQUALS;
		}

		/* optional token: aggregation */

		if ((ts->flags & LWS_TOKENIZE_F_AGG_COLON) && c == ':' &&
		    (state == LWS_TOKZS_TOKEN_POST_TERMINAL ||
		     state == LWS_TOKZS_TOKEN))
			/* swallow the : */
			return LWS_TOKZE_TOKEN_NAME_COLON;

		/* aggregate . in a number as a float */

		if (c == '.' && !(ts->flags & LWS_TOKENIZE_F_NO_FLOATS) &&
		    state == LWS_TOKZS_TOKEN && num == 1) {
			/* only one . is allowed per float */
			if (flo)
				return LWS_TOKZE_ERR_MALFORMED_FLOAT;
			flo = 1;
			ts->token_len++;
			continue;
		}

		/*
		 * Delimiter... by default anything that:
		 *
		 * - isn't matched earlier, or
		 * - is [A-Z, a-z, 0-9, _], and
		 * - is not a partial utf8 char
		 *
		 * is a "delimiter", it marks the end of a token and is itself
		 * reported as a single LWS_TOKZE_DELIMITER each time.
		 *
		 * However with LWS_TOKENIZE_F_RFC7230_DELIMS flag, tokens may
		 * contain any noncontrol character that isn't defined in
		 * rfc7230_delims, and only characters listed there are treated
		 * as delimiters.
		 */

		if (!utf8 &&
		    ((ts->flags & LWS_TOKENIZE_F_RFC7230_DELIMS &&
		     strchr(rfc7230_delims, c) && c > 32) ||
		    ((!(ts->flags & LWS_TOKENIZE_F_RFC7230_DELIMS) &&
		     (c < '0' || c > '9') && (c < 'A' || c > 'Z') &&
		     (c < 'a' || c > 'z') && c != '_') &&
		     c != s_minus && c != s_dot) ||
		    c == d_minus || c == d_dot
		    )) {
			switch (state) {
			case LWS_TOKZS_LEADING_WHITESPACE:
				if (ts->flags & LWS_TOKENIZE_F_COMMA_SEP_LIST) {
					if (c != ',' ||
					    ts->delim != LWSTZ_DT_NEED_DELIM)
						return LWS_TOKZE_ERR_COMMA_LIST;
					ts->delim = LWSTZ_DT_NEED_NEXT_CONTENT;
				}

				/* a lone delimiter is itself the result */
				ts->token = ts->start - 1;
				ts->token_len = 1;
				return LWS_TOKZE_DELIMITER;

			case LWS_TOKZS_QUOTED_STRING:
				/* delimiters are literal inside quotes */
				ts->token_len++;
				continue;

			case LWS_TOKZS_TOKEN_POST_TERMINAL:
			case LWS_TOKZS_TOKEN:
				/* report the delimiter next time */
				ts->start--;
				ts->len++;
				goto token_or_numeric;
			}
		}

		/* anything that's not whitespace or delimiter is payload */

		switch (state) {
		case LWS_TOKZS_LEADING_WHITESPACE:

			if (ts->flags & LWS_TOKENIZE_F_COMMA_SEP_LIST) {
				if (ts->delim == LWSTZ_DT_NEED_DELIM)
					return LWS_TOKZE_ERR_COMMA_LIST;
				ts->delim = LWSTZ_DT_NEED_DELIM;
			}

			/* first payload char starts a new token */
			state = LWS_TOKZS_TOKEN;
			ts->token = ts->start - 1;
			ts->token_len = 1;
			if (c < '0' || c > '9')
				num = 0;
			else
				if (num < 0)
					num = 1;
			continue;
		case LWS_TOKZS_QUOTED_STRING:
		case LWS_TOKZS_TOKEN:
			/* a single nondigit disqualifies the numeric types */
			if (c < '0' || c > '9')
				num = 0;
			else
				if (num < 0)
					num = 1;
			ts->token_len++;
			continue;
		case LWS_TOKZS_TOKEN_POST_TERMINAL:
			/* report the new token next time */
			ts->start--;
			ts->len++;
			goto token_or_numeric;
		}
	}

	/* we ran out of content */

	if (utf8) /* ended partway through a multibyte char */
		return LWS_TOKZE_ERR_BROKEN_UTF8;

	if (state == LWS_TOKZS_QUOTED_STRING)
		return LWS_TOKZE_ERR_UNTERM_STRING;

	if (state != LWS_TOKZS_TOKEN_POST_TERMINAL &&
	    state != LWS_TOKZS_TOKEN) {
		if ((ts->flags & LWS_TOKENIZE_F_COMMA_SEP_LIST) &&
		    ts->delim == LWSTZ_DT_NEED_NEXT_CONTENT)
			return LWS_TOKZE_ERR_COMMA_LIST;

		return LWS_TOKZE_ENDED;
	}

	/* report the pending token */

token_or_numeric:

	if (num != 1)
		return LWS_TOKZE_TOKEN;
	if (flo)
		return LWS_TOKZE_FLOAT;

	return LWS_TOKZE_INTEGER;
}
|
|
|
|
|
|
|
|
|
|
|
|
LWS_VISIBLE LWS_EXTERN int
|
|
|
|
lws_tokenize_cstr(struct lws_tokenize *ts, char *str, int max)
|
|
|
|
{
|
|
|
|
if (ts->token_len + 1 >= max)
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
memcpy(str, ts->token, ts->token_len);
|
|
|
|
str[ts->token_len] = '\0';
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
LWS_VISIBLE LWS_EXTERN void
|
|
|
|
lws_tokenize_init(struct lws_tokenize *ts, const char *start, int flags)
|
|
|
|
{
|
|
|
|
ts->start = start;
|
|
|
|
ts->len = 0x7fffffff;
|
|
|
|
ts->flags = flags;
|
|
|
|
ts->delim = LWSTZ_DT_NEED_FIRST_CONTENT;
|
|
|
|
}
|
|
|
|
|
2018-06-27 07:15:39 +08:00
|
|
|
#if LWS_MAX_SMP > 1
|
|
|
|
|
|
|
|
void
|
|
|
|
lws_mutex_refcount_init(struct lws_mutex_refcount *mr)
|
|
|
|
{
|
|
|
|
pthread_mutex_init(&mr->lock, NULL);
|
|
|
|
mr->last_lock_reason = NULL;
|
|
|
|
mr->lock_depth = 0;
|
|
|
|
mr->metadata = 0;
|
|
|
|
mr->lock_owner = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* counterpart of lws_mutex_refcount_init(): release the pthread mutex */
void
lws_mutex_refcount_destroy(struct lws_mutex_refcount *mr)
{
	pthread_mutex_destroy(&mr->lock);
}
|
|
|
|
|
|
|
|
/*
 * Recursive lock: if the calling thread already holds mr->lock, just
 * bump the depth count; otherwise block on the mutex and record
 * ownership.  "reason" is retained for debugging only.
 */
void
lws_mutex_refcount_lock(struct lws_mutex_refcount *mr, const char *reason)
{
	/* if true, this sequence is atomic because our thread has the lock
	 *
	 * - if true, only guy who can race to make it untrue is our thread,
	 *   and we are here.
	 *
	 * - if false, only guy who could race to make it true is our thread,
	 *   and we are here
	 *
	 * - it can be false and change to a different tid that is also false
	 */
	if (mr->lock_owner == pthread_self()) {
		/* atomic because we only change it if we own the lock */
		mr->lock_depth++;
		return;
	}

	pthread_mutex_lock(&mr->lock);
	/* atomic because only we can have the lock */
	mr->last_lock_reason = reason;
	mr->lock_owner = pthread_self();
	mr->lock_depth = 1;
	//lwsl_notice("tid %d: lock %s\n", mr->tid, reason);
}
|
|
|
|
|
|
|
|
/*
 * Recursive unlock: decrement the depth taken by
 * lws_mutex_refcount_lock(); only release the underlying mutex when
 * the outermost hold is dropped.
 */
void
lws_mutex_refcount_unlock(struct lws_mutex_refcount *mr)
{
	if (--mr->lock_depth)
		/* atomic because only thread that has the lock can unlock */
		return;

	/* clear ownership before releasing, while we still hold the lock */
	mr->last_lock_reason = "free";
	mr->lock_owner = 0;
	//lwsl_notice("tid %d: unlock %s\n", mr->tid, mr->last_lock_reason);
	pthread_mutex_unlock(&mr->lock);
}
|
|
|
|
|
|
|
|
#endif /* SMP */
|
2016-09-15 02:22:57 +08:00
|
|
|
|
2016-02-21 21:25:48 +08:00
|
|
|
/*
 * Returns nonzero if wsi is currently bound to a CGI process
 * (always 0 when lws is built without LWS_WITH_CGI).
 */
LWS_VISIBLE LWS_EXTERN int
lws_is_cgi(struct lws *wsi) {
#ifdef LWS_WITH_CGI
	return !!wsi->http.cgi;
#else
	return 0;
#endif
}
|
|
|
|
|
2018-03-12 10:45:32 +08:00
|
|
|
const struct lws_protocol_vhost_options *
|
|
|
|
lws_pvo_search(const struct lws_protocol_vhost_options *pvo, const char *name)
|
|
|
|
{
|
|
|
|
while (pvo) {
|
|
|
|
if (!strcmp(pvo->name, name))
|
|
|
|
break;
|
2017-09-19 07:58:04 +08:00
|
|
|
|
2018-03-12 10:45:32 +08:00
|
|
|
pvo = pvo->next;
|
|
|
|
}
|
|
|
|
|
|
|
|
return pvo;
|
|
|
|
}
|
2016-04-08 09:45:49 +08:00
|
|
|
|
2018-08-15 09:23:35 +08:00
|
|
|
int
|
|
|
|
lws_pvo_get_str(void *in, const char *name, const char **result)
|
|
|
|
{
|
|
|
|
const struct lws_protocol_vhost_options *pv =
|
|
|
|
lws_pvo_search((const struct lws_protocol_vhost_options *)in,
|
|
|
|
name);
|
|
|
|
|
|
|
|
if (!pv)
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
*result = (const char *)pv->value;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
context deprecation
1) This makes lwsws run a parent process with the original permissions.
But this process is only able to respond to SIGHUP, it doesn't do anything
else.
2) You can send this parent process a SIGHUP now to cause it to
- close listening sockets in existing lwsws processes
- mark those processes as to exit when the number of active connections
on the falls to zero
- spawn a fresh child process from scratch, using latest configuration
file content, latest plugins, etc. It can now reopen listening sockets
if it chooses to, or open different listen ports or whatever.
Notes:
1) lws_context_destroy() has been split into two pieces... the reason for
the split is the first part closes the per-vhost protocols, but since
they may have created libuv objects in the per-vhost protocol storage,
these cannot be freed until after the loop has been run.
That's the purpose of the second part of the context destruction,
lws_context_destroy2().
For compatibility, if you are not using libuv, the first part calls the
second part. However if you are using libuv, you must now call the
second part from your own main.c after the first part.
2016-12-16 07:37:43 +08:00
|
|
|
void
|
|
|
|
lws_sum_stats(const struct lws_context *ctx, struct lws_conn_stats *cs)
|
|
|
|
{
|
|
|
|
const struct lws_vhost *vh = ctx->vhost_list;
|
|
|
|
|
|
|
|
while (vh) {
|
|
|
|
|
|
|
|
cs->rx += vh->conn_stats.rx;
|
|
|
|
cs->tx += vh->conn_stats.tx;
|
2017-10-13 10:33:02 +08:00
|
|
|
cs->h1_conn += vh->conn_stats.h1_conn;
|
|
|
|
cs->h1_trans += vh->conn_stats.h1_trans;
|
|
|
|
cs->h2_trans += vh->conn_stats.h2_trans;
|
context deprecation
1) This makes lwsws run a parent process with the original permissions.
But this process is only able to respond to SIGHUP, it doesn't do anything
else.
2) You can send this parent process a SIGHUP now to cause it to
- close listening sockets in existing lwsws processes
- mark those processes as to exit when the number of active connections
on the falls to zero
- spawn a fresh child process from scratch, using latest configuration
file content, latest plugins, etc. It can now reopen listening sockets
if it chooses to, or open different listen ports or whatever.
Notes:
1) lws_context_destroy() has been split into two pieces... the reason for
the split is the first part closes the per-vhost protocols, but since
they may have created libuv objects in the per-vhost protocol storage,
these cannot be freed until after the loop has been run.
That's the purpose of the second part of the context destruction,
lws_context_destroy2().
For compatibility, if you are not using libuv, the first part calls the
second part. However if you are using libuv, you must now call the
second part from your own main.c after the first part.
2016-12-16 07:37:43 +08:00
|
|
|
cs->ws_upg += vh->conn_stats.ws_upg;
|
2017-10-13 10:33:02 +08:00
|
|
|
cs->h2_upg += vh->conn_stats.h2_upg;
|
|
|
|
cs->h2_alpn += vh->conn_stats.h2_alpn;
|
|
|
|
cs->h2_subs += vh->conn_stats.h2_subs;
|
context deprecation
1) This makes lwsws run a parent process with the original permissions.
But this process is only able to respond to SIGHUP, it doesn't do anything
else.
2) You can send this parent process a SIGHUP now to cause it to
- close listening sockets in existing lwsws processes
- mark those processes as to exit when the number of active connections
on the falls to zero
- spawn a fresh child process from scratch, using latest configuration
file content, latest plugins, etc. It can now reopen listening sockets
if it chooses to, or open different listen ports or whatever.
Notes:
1) lws_context_destroy() has been split into two pieces... the reason for
the split is the first part closes the per-vhost protocols, but since
they may have created libuv objects in the per-vhost protocol storage,
these cannot be freed until after the loop has been run.
That's the purpose of the second part of the context destruction,
lws_context_destroy2().
For compatibility, if you are not using libuv, the first part calls the
second part. However if you are using libuv, you must now call the
second part from your own main.c after the first part.
2016-12-16 07:37:43 +08:00
|
|
|
cs->rejected += vh->conn_stats.rejected;
|
|
|
|
|
|
|
|
vh = vh->vhost_next;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-04-12 15:56:38 +08:00
|
|
|
/*
 * Find a commandline option by prefix.  Supports both "--opt value"
 * (returns the following argv entry) and glued "-d15" style (returns
 * the suffix after the match, possibly "" for a bare flag).  Returns
 * NULL if the option is absent or its value argument looks bogus.
 */
const char *
lws_cmdline_option(int argc, const char **argv, const char *val)
{
	int n = (int)strlen(val), c;

	/* scan backwards so the last occurrence wins; skip argv[0] */
	for (c = argc - 1; c > 0; c--) {
		if (strncmp(argv[c], val, n))
			continue;

		/* exact match followed by another arg: that's the value */
		if (!*(argv[c] + n) && c < argc - 1) {
			/* coverity treats unchecked argv as "tainted" */
			if (!argv[c + 1] || strlen(argv[c + 1]) > 1024)
				return NULL;

			return argv[c + 1];
		}

		/* glued form like -d15: return the suffix (may be "") */
		return argv[c] + n;
	}

	return NULL;
}
|
|
|
|
|
2016-04-15 14:01:29 +08:00
|
|
|
#ifdef LWS_WITH_SERVER_STATUS
|
|
|
|
|
2016-04-14 15:07:44 +08:00
|
|
|
LWS_EXTERN int
|
|
|
|
lws_json_dump_vhost(const struct lws_vhost *vh, char *buf, int len)
|
|
|
|
{
|
2018-04-27 19:16:50 +08:00
|
|
|
#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
|
2016-04-14 15:07:44 +08:00
|
|
|
static const char * const prots[] = {
|
|
|
|
"http://",
|
|
|
|
"https://",
|
|
|
|
"file://",
|
|
|
|
"cgi://",
|
|
|
|
">http://",
|
|
|
|
">https://",
|
2016-05-09 09:37:01 +08:00
|
|
|
"callback://"
|
2016-04-14 15:07:44 +08:00
|
|
|
};
|
2018-04-27 19:16:50 +08:00
|
|
|
#endif
|
2016-04-14 15:07:44 +08:00
|
|
|
char *orig = buf, *end = buf + len - 1, first = 1;
|
|
|
|
int n = 0;
|
|
|
|
|
|
|
|
if (len < 100)
|
|
|
|
return 0;
|
|
|
|
|
2016-09-15 02:22:57 +08:00
|
|
|
buf += lws_snprintf(buf, end - buf,
|
2016-04-14 15:07:44 +08:00
|
|
|
"{\n \"name\":\"%s\",\n"
|
|
|
|
" \"port\":\"%d\",\n"
|
2016-04-15 14:01:29 +08:00
|
|
|
" \"use_ssl\":\"%d\",\n"
|
2016-04-14 15:07:44 +08:00
|
|
|
" \"sts\":\"%d\",\n"
|
2016-05-04 15:59:27 +08:00
|
|
|
" \"rx\":\"%llu\",\n"
|
|
|
|
" \"tx\":\"%llu\",\n"
|
2017-10-13 10:33:02 +08:00
|
|
|
" \"h1_conn\":\"%lu\",\n"
|
|
|
|
" \"h1_trans\":\"%lu\",\n"
|
|
|
|
" \"h2_trans\":\"%lu\",\n"
|
2016-04-15 14:01:29 +08:00
|
|
|
" \"ws_upg\":\"%lu\",\n"
|
context deprecation
1) This makes lwsws run a parent process with the original permissions.
But this process is only able to respond to SIGHUP, it doesn't do anything
else.
2) You can send this parent process a SIGHUP now to cause it to
- close listening sockets in existing lwsws processes
- mark those processes as to exit when the number of active connections
on the falls to zero
- spawn a fresh child process from scratch, using latest configuration
file content, latest plugins, etc. It can now reopen listening sockets
if it chooses to, or open different listen ports or whatever.
Notes:
1) lws_context_destroy() has been split into two pieces... the reason for
the split is the first part closes the per-vhost protocols, but since
they may have created libuv objects in the per-vhost protocol storage,
these cannot be freed until after the loop has been run.
That's the purpose of the second part of the context destruction,
lws_context_destroy2().
For compatibility, if you are not using libuv, the first part calls the
second part. However if you are using libuv, you must now call the
second part from your own main.c after the first part.
2016-12-16 07:37:43 +08:00
|
|
|
" \"rejected\":\"%lu\",\n"
|
2017-10-13 10:33:02 +08:00
|
|
|
" \"h2_upg\":\"%lu\",\n"
|
|
|
|
" \"h2_alpn\":\"%lu\",\n"
|
|
|
|
" \"h2_subs\":\"%lu\""
|
2016-04-15 14:01:29 +08:00
|
|
|
,
|
2016-04-14 15:07:44 +08:00
|
|
|
vh->name, vh->listen_port,
|
2018-04-11 13:39:42 +08:00
|
|
|
#if defined(LWS_WITH_TLS)
|
2018-05-01 12:41:42 +08:00
|
|
|
vh->tls.use_ssl & LCCSCF_USE_SSL,
|
2016-04-14 15:07:44 +08:00
|
|
|
#else
|
|
|
|
0,
|
|
|
|
#endif
|
|
|
|
!!(vh->options & LWS_SERVER_OPTION_STS),
|
context deprecation
1) This makes lwsws run a parent process with the original permissions.
But this process is only able to respond to SIGHUP, it doesn't do anything
else.
2) You can send this parent process a SIGHUP now to cause it to
- close listening sockets in existing lwsws processes
- mark those processes as to exit when the number of active connections
on the falls to zero
- spawn a fresh child process from scratch, using latest configuration
file content, latest plugins, etc. It can now reopen listening sockets
if it chooses to, or open different listen ports or whatever.
Notes:
1) lws_context_destroy() has been split into two pieces... the reason for
the split is the first part closes the per-vhost protocols, but since
they may have created libuv objects in the per-vhost protocol storage,
these cannot be freed until after the loop has been run.
That's the purpose of the second part of the context destruction,
lws_context_destroy2().
For compatibility, if you are not using libuv, the first part calls the
second part. However if you are using libuv, you must now call the
second part from your own main.c after the first part.
2016-12-16 07:37:43 +08:00
|
|
|
vh->conn_stats.rx, vh->conn_stats.tx,
|
2017-10-13 10:33:02 +08:00
|
|
|
vh->conn_stats.h1_conn,
|
|
|
|
vh->conn_stats.h1_trans,
|
|
|
|
vh->conn_stats.h2_trans,
|
context deprecation
1) This makes lwsws run a parent process with the original permissions.
But this process is only able to respond to SIGHUP, it doesn't do anything
else.
2) You can send this parent process a SIGHUP now to cause it to
- close listening sockets in existing lwsws processes
- mark those processes as to exit when the number of active connections
on the falls to zero
- spawn a fresh child process from scratch, using latest configuration
file content, latest plugins, etc. It can now reopen listening sockets
if it chooses to, or open different listen ports or whatever.
Notes:
1) lws_context_destroy() has been split into two pieces... the reason for
the split is the first part closes the per-vhost protocols, but since
they may have created libuv objects in the per-vhost protocol storage,
these cannot be freed until after the loop has been run.
That's the purpose of the second part of the context destruction,
lws_context_destroy2().
For compatibility, if you are not using libuv, the first part calls the
second part. However if you are using libuv, you must now call the
second part from your own main.c after the first part.
2016-12-16 07:37:43 +08:00
|
|
|
vh->conn_stats.ws_upg,
|
|
|
|
vh->conn_stats.rejected,
|
2017-10-13 10:33:02 +08:00
|
|
|
vh->conn_stats.h2_upg,
|
|
|
|
vh->conn_stats.h2_alpn,
|
|
|
|
vh->conn_stats.h2_subs
|
2016-04-14 15:07:44 +08:00
|
|
|
);
|
2018-04-27 15:20:56 +08:00
|
|
|
#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
|
|
|
|
if (vh->http.mount_list) {
|
|
|
|
const struct lws_http_mount *m = vh->http.mount_list;
|
2016-04-14 15:07:44 +08:00
|
|
|
|
2016-09-15 02:22:57 +08:00
|
|
|
buf += lws_snprintf(buf, end - buf, ",\n \"mounts\":[");
|
2016-04-14 15:07:44 +08:00
|
|
|
while (m) {
|
|
|
|
if (!first)
|
2016-09-15 02:22:57 +08:00
|
|
|
buf += lws_snprintf(buf, end - buf, ",");
|
|
|
|
buf += lws_snprintf(buf, end - buf,
|
2016-04-14 15:07:44 +08:00
|
|
|
"\n {\n \"mountpoint\":\"%s\",\n"
|
2016-04-22 08:53:49 +08:00
|
|
|
" \"origin\":\"%s%s\",\n"
|
|
|
|
" \"cache_max_age\":\"%d\",\n"
|
|
|
|
" \"cache_reuse\":\"%d\",\n"
|
|
|
|
" \"cache_revalidate\":\"%d\",\n"
|
|
|
|
" \"cache_intermediaries\":\"%d\"\n"
|
2016-04-14 15:07:44 +08:00
|
|
|
,
|
|
|
|
m->mountpoint,
|
|
|
|
prots[m->origin_protocol],
|
2016-04-22 08:53:49 +08:00
|
|
|
m->origin,
|
|
|
|
m->cache_max_age,
|
|
|
|
m->cache_reusable,
|
|
|
|
m->cache_revalidate,
|
|
|
|
m->cache_intermediaries);
|
2016-04-14 15:07:44 +08:00
|
|
|
if (m->def)
|
2016-09-15 02:22:57 +08:00
|
|
|
buf += lws_snprintf(buf, end - buf,
|
2016-04-14 15:07:44 +08:00
|
|
|
",\n \"default\":\"%s\"",
|
|
|
|
m->def);
|
2016-09-15 02:22:57 +08:00
|
|
|
buf += lws_snprintf(buf, end - buf, "\n }");
|
2016-04-14 15:07:44 +08:00
|
|
|
first = 0;
|
|
|
|
m = m->mount_next;
|
|
|
|
}
|
2016-09-15 02:22:57 +08:00
|
|
|
buf += lws_snprintf(buf, end - buf, "\n ]");
|
2016-04-14 15:07:44 +08:00
|
|
|
}
|
2018-04-27 15:20:56 +08:00
|
|
|
#endif
|
2016-04-14 15:07:44 +08:00
|
|
|
if (vh->protocols) {
|
|
|
|
n = 0;
|
|
|
|
first = 1;
|
|
|
|
|
2016-09-15 02:22:57 +08:00
|
|
|
buf += lws_snprintf(buf, end - buf, ",\n \"ws-protocols\":[");
|
2016-04-14 15:07:44 +08:00
|
|
|
while (n < vh->count_protocols) {
|
|
|
|
if (!first)
|
2016-09-15 02:22:57 +08:00
|
|
|
buf += lws_snprintf(buf, end - buf, ",");
|
|
|
|
buf += lws_snprintf(buf, end - buf,
|
2016-05-15 08:29:37 +08:00
|
|
|
"\n {\n \"%s\":{\n"
|
2016-04-14 15:07:44 +08:00
|
|
|
" \"status\":\"ok\"\n }\n }"
|
|
|
|
,
|
|
|
|
vh->protocols[n].name);
|
|
|
|
first = 0;
|
|
|
|
n++;
|
|
|
|
}
|
2016-09-15 02:22:57 +08:00
|
|
|
buf += lws_snprintf(buf, end - buf, "\n ]");
|
2016-04-14 15:07:44 +08:00
|
|
|
}
|
|
|
|
|
2016-09-15 02:22:57 +08:00
|
|
|
buf += lws_snprintf(buf, end - buf, "\n}");
|
2016-04-14 15:07:44 +08:00
|
|
|
|
|
|
|
return buf - orig;
|
|
|
|
}
|
2016-04-15 14:01:29 +08:00
|
|
|
|
|
|
|
|
|
|
|
LWS_EXTERN LWS_VISIBLE int
|
context deprecation
1) This makes lwsws run a parent process with the original permissions.
But this process is only able to respond to SIGHUP, it doesn't do anything
else.
2) You can send this parent process a SIGHUP now to cause it to
- close listening sockets in existing lwsws processes
- mark those processes as to exit when the number of active connections
on the falls to zero
- spawn a fresh child process from scratch, using latest configuration
file content, latest plugins, etc. It can now reopen listening sockets
if it chooses to, or open different listen ports or whatever.
Notes:
1) lws_context_destroy() has been split into two pieces... the reason for
the split is the first part closes the per-vhost protocols, but since
they may have created libuv objects in the per-vhost protocol storage,
these cannot be freed until after the loop has been run.
That's the purpose of the second part of the context destruction,
lws_context_destroy2().
For compatibility, if you are not using libuv, the first part calls the
second part. However if you are using libuv, you must now call the
second part from your own main.c after the first part.
2016-12-16 07:37:43 +08:00
|
|
|
lws_json_dump_context(const struct lws_context *context, char *buf, int len,
|
|
|
|
int hide_vhosts)
|
2016-04-15 14:01:29 +08:00
|
|
|
{
|
|
|
|
char *orig = buf, *end = buf + len - 1, first = 1;
|
2016-04-20 06:10:56 +08:00
|
|
|
const struct lws_vhost *vh = context->vhost_list;
|
context deprecation
1) This makes lwsws run a parent process with the original permissions.
But this process is only able to respond to SIGHUP, it doesn't do anything
else.
2) You can send this parent process a SIGHUP now to cause it to
- close listening sockets in existing lwsws processes
- mark those processes as to exit when the number of active connections
on the falls to zero
- spawn a fresh child process from scratch, using latest configuration
file content, latest plugins, etc. It can now reopen listening sockets
if it chooses to, or open different listen ports or whatever.
Notes:
1) lws_context_destroy() has been split into two pieces... the reason for
the split is the first part closes the per-vhost protocols, but since
they may have created libuv objects in the per-vhost protocol storage,
these cannot be freed until after the loop has been run.
That's the purpose of the second part of the context destruction,
lws_context_destroy2().
For compatibility, if you are not using libuv, the first part calls the
second part. However if you are using libuv, you must now call the
second part from your own main.c after the first part.
2016-12-16 07:37:43 +08:00
|
|
|
const struct lws_context_per_thread *pt;
|
|
|
|
time_t t = time(NULL);
|
2017-02-06 10:04:04 +08:00
|
|
|
int n, listening = 0, cgi_count = 0;
|
context deprecation
1) This makes lwsws run a parent process with the original permissions.
But this process is only able to respond to SIGHUP, it doesn't do anything
else.
2) You can send this parent process a SIGHUP now to cause it to
- close listening sockets in existing lwsws processes
- mark those processes as to exit when the number of active connections
on the falls to zero
- spawn a fresh child process from scratch, using latest configuration
file content, latest plugins, etc. It can now reopen listening sockets
if it chooses to, or open different listen ports or whatever.
Notes:
1) lws_context_destroy() has been split into two pieces... the reason for
the split is the first part closes the per-vhost protocols, but since
they may have created libuv objects in the per-vhost protocol storage,
these cannot be freed until after the loop has been run.
That's the purpose of the second part of the context destruction,
lws_context_destroy2().
For compatibility, if you are not using libuv, the first part calls the
second part. However if you are using libuv, you must now call the
second part from your own main.c after the first part.
2016-12-16 07:37:43 +08:00
|
|
|
struct lws_conn_stats cs;
|
|
|
|
double d = 0;
|
2016-04-20 06:10:56 +08:00
|
|
|
#ifdef LWS_WITH_CGI
|
|
|
|
struct lws_cgi * const *pcgi;
|
|
|
|
#endif
|
context deprecation
1) This makes lwsws run a parent process with the original permissions.
But this process is only able to respond to SIGHUP, it doesn't do anything
else.
2) You can send this parent process a SIGHUP now to cause it to
- close listening sockets in existing lwsws processes
- mark those processes as to exit when the number of active connections
on the falls to zero
- spawn a fresh child process from scratch, using latest configuration
file content, latest plugins, etc. It can now reopen listening sockets
if it chooses to, or open different listen ports or whatever.
Notes:
1) lws_context_destroy() has been split into two pieces... the reason for
the split is the first part closes the per-vhost protocols, but since
they may have created libuv objects in the per-vhost protocol storage,
these cannot be freed until after the loop has been run.
That's the purpose of the second part of the context destruction,
lws_context_destroy2().
For compatibility, if you are not using libuv, the first part calls the
second part. However if you are using libuv, you must now call the
second part from your own main.c after the first part.
2016-12-16 07:37:43 +08:00
|
|
|
|
2017-09-28 11:29:03 +08:00
|
|
|
#ifdef LWS_WITH_LIBUV
|
context deprecation
1) This makes lwsws run a parent process with the original permissions.
But this process is only able to respond to SIGHUP, it doesn't do anything
else.
2) You can send this parent process a SIGHUP now to cause it to
- close listening sockets in existing lwsws processes
- mark those processes as to exit when the number of active connections
on the falls to zero
- spawn a fresh child process from scratch, using latest configuration
file content, latest plugins, etc. It can now reopen listening sockets
if it chooses to, or open different listen ports or whatever.
Notes:
1) lws_context_destroy() has been split into two pieces... the reason for
the split is the first part closes the per-vhost protocols, but since
they may have created libuv objects in the per-vhost protocol storage,
these cannot be freed until after the loop has been run.
That's the purpose of the second part of the context destruction,
lws_context_destroy2().
For compatibility, if you are not using libuv, the first part calls the
second part. However if you are using libuv, you must now call the
second part from your own main.c after the first part.
2016-12-16 07:37:43 +08:00
|
|
|
uv_uptime(&d);
|
|
|
|
#endif
|
2016-04-15 14:01:29 +08:00
|
|
|
|
2016-09-15 02:22:57 +08:00
|
|
|
buf += lws_snprintf(buf, end - buf, "{ "
|
context deprecation
1) This makes lwsws run a parent process with the original permissions.
But this process is only able to respond to SIGHUP, it doesn't do anything
else.
2) You can send this parent process a SIGHUP now to cause it to
- close listening sockets in existing lwsws processes
- mark those processes as to exit when the number of active connections
on the falls to zero
- spawn a fresh child process from scratch, using latest configuration
file content, latest plugins, etc. It can now reopen listening sockets
if it chooses to, or open different listen ports or whatever.
Notes:
1) lws_context_destroy() has been split into two pieces... the reason for
the split is the first part closes the per-vhost protocols, but since
they may have created libuv objects in the per-vhost protocol storage,
these cannot be freed until after the loop has been run.
That's the purpose of the second part of the context destruction,
lws_context_destroy2().
For compatibility, if you are not using libuv, the first part calls the
second part. However if you are using libuv, you must now call the
second part from your own main.c after the first part.
2016-12-16 07:37:43 +08:00
|
|
|
"\"version\":\"%s\",\n"
|
|
|
|
"\"uptime\":\"%ld\",\n",
|
|
|
|
lws_get_library_version(),
|
|
|
|
(long)d);
|
|
|
|
|
2016-04-26 07:45:45 +08:00
|
|
|
#ifdef LWS_HAVE_GETLOADAVG
|
|
|
|
{
|
|
|
|
double d[3];
|
|
|
|
int m;
|
|
|
|
|
|
|
|
m = getloadavg(d, 3);
|
|
|
|
for (n = 0; n < m; n++) {
|
2016-09-15 02:22:57 +08:00
|
|
|
buf += lws_snprintf(buf, end - buf,
|
2016-04-26 07:45:45 +08:00
|
|
|
"\"l%d\":\"%.2f\",\n",
|
|
|
|
n + 1, d[n]);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif
|
2016-04-15 14:01:29 +08:00
|
|
|
|
context deprecation
1) This makes lwsws run a parent process with the original permissions.
But this process is only able to respond to SIGHUP, it doesn't do anything
else.
2) You can send this parent process a SIGHUP now to cause it to
- close listening sockets in existing lwsws processes
- mark those processes as to exit when the number of active connections
on the falls to zero
- spawn a fresh child process from scratch, using latest configuration
file content, latest plugins, etc. It can now reopen listening sockets
if it chooses to, or open different listen ports or whatever.
Notes:
1) lws_context_destroy() has been split into two pieces... the reason for
the split is the first part closes the per-vhost protocols, but since
they may have created libuv objects in the per-vhost protocol storage,
these cannot be freed until after the loop has been run.
That's the purpose of the second part of the context destruction,
lws_context_destroy2().
For compatibility, if you are not using libuv, the first part calls the
second part. However if you are using libuv, you must now call the
second part from your own main.c after the first part.
2016-12-16 07:37:43 +08:00
|
|
|
buf += lws_snprintf(buf, end - buf, "\"contexts\":[\n");
|
|
|
|
|
|
|
|
buf += lws_snprintf(buf, end - buf, "{ "
|
|
|
|
"\"context_uptime\":\"%ld\",\n"
|
|
|
|
"\"cgi_spawned\":\"%d\",\n"
|
|
|
|
"\"pt_fd_max\":\"%d\",\n"
|
|
|
|
"\"ah_pool_max\":\"%d\",\n"
|
|
|
|
"\"deprecated\":\"%d\",\n"
|
|
|
|
"\"wsi_alive\":\"%d\",\n",
|
|
|
|
(unsigned long)(t - context->time_up),
|
|
|
|
context->count_cgi_spawned,
|
|
|
|
context->fd_limit_per_thread,
|
|
|
|
context->max_http_header_pool,
|
|
|
|
context->deprecated,
|
|
|
|
context->count_wsi_allocated);
|
|
|
|
|
2016-09-15 02:22:57 +08:00
|
|
|
buf += lws_snprintf(buf, end - buf, "\"pt\":[\n ");
|
2016-04-20 06:10:56 +08:00
|
|
|
for (n = 0; n < context->count_threads; n++) {
|
|
|
|
pt = &context->pt[n];
|
|
|
|
if (n)
|
2016-09-15 02:22:57 +08:00
|
|
|
buf += lws_snprintf(buf, end - buf, ",");
|
|
|
|
buf += lws_snprintf(buf, end - buf,
|
2016-04-20 06:10:56 +08:00
|
|
|
"\n {\n"
|
|
|
|
" \"fds_count\":\"%d\",\n"
|
|
|
|
" \"ah_pool_inuse\":\"%d\",\n"
|
|
|
|
" \"ah_wait_list\":\"%d\"\n"
|
|
|
|
" }",
|
|
|
|
pt->fds_count,
|
2018-04-27 15:20:56 +08:00
|
|
|
pt->http.ah_count_in_use,
|
|
|
|
pt->http.ah_wait_list_length);
|
2016-04-20 06:10:56 +08:00
|
|
|
}
|
|
|
|
|
context deprecation
1) This makes lwsws run a parent process with the original permissions.
But this process is only able to respond to SIGHUP, it doesn't do anything
else.
2) You can send this parent process a SIGHUP now to cause it to
- close listening sockets in existing lwsws processes
- mark those processes as to exit when the number of active connections
on the falls to zero
- spawn a fresh child process from scratch, using latest configuration
file content, latest plugins, etc. It can now reopen listening sockets
if it chooses to, or open different listen ports or whatever.
Notes:
1) lws_context_destroy() has been split into two pieces... the reason for
the split is the first part closes the per-vhost protocols, but since
they may have created libuv objects in the per-vhost protocol storage,
these cannot be freed until after the loop has been run.
That's the purpose of the second part of the context destruction,
lws_context_destroy2().
For compatibility, if you are not using libuv, the first part calls the
second part. However if you are using libuv, you must now call the
second part from your own main.c after the first part.
2016-12-16 07:37:43 +08:00
|
|
|
buf += lws_snprintf(buf, end - buf, "]");
|
2016-04-20 06:10:56 +08:00
|
|
|
|
context deprecation
1) This makes lwsws run a parent process with the original permissions.
But this process is only able to respond to SIGHUP, it doesn't do anything
else.
2) You can send this parent process a SIGHUP now to cause it to
- close listening sockets in existing lwsws processes
- mark those processes as to exit when the number of active connections
on the falls to zero
- spawn a fresh child process from scratch, using latest configuration
file content, latest plugins, etc. It can now reopen listening sockets
if it chooses to, or open different listen ports or whatever.
Notes:
1) lws_context_destroy() has been split into two pieces... the reason for
the split is the first part closes the per-vhost protocols, but since
they may have created libuv objects in the per-vhost protocol storage,
these cannot be freed until after the loop has been run.
That's the purpose of the second part of the context destruction,
lws_context_destroy2().
For compatibility, if you are not using libuv, the first part calls the
second part. However if you are using libuv, you must now call the
second part from your own main.c after the first part.
2016-12-16 07:37:43 +08:00
|
|
|
buf += lws_snprintf(buf, end - buf, ", \"vhosts\":[\n ");
|
|
|
|
|
|
|
|
first = 1;
|
|
|
|
vh = context->vhost_list;
|
|
|
|
listening = 0;
|
|
|
|
cs = context->conn_stats;
|
|
|
|
lws_sum_stats(context, &cs);
|
2016-04-15 14:01:29 +08:00
|
|
|
while (vh) {
|
context deprecation
1) This makes lwsws run a parent process with the original permissions.
But this process is only able to respond to SIGHUP, it doesn't do anything
else.
2) You can send this parent process a SIGHUP now to cause it to
- close listening sockets in existing lwsws processes
- mark those processes as to exit when the number of active connections
on the falls to zero
- spawn a fresh child process from scratch, using latest configuration
file content, latest plugins, etc. It can now reopen listening sockets
if it chooses to, or open different listen ports or whatever.
Notes:
1) lws_context_destroy() has been split into two pieces... the reason for
the split is the first part closes the per-vhost protocols, but since
they may have created libuv objects in the per-vhost protocol storage,
these cannot be freed until after the loop has been run.
That's the purpose of the second part of the context destruction,
lws_context_destroy2().
For compatibility, if you are not using libuv, the first part calls the
second part. However if you are using libuv, you must now call the
second part from your own main.c after the first part.
2016-12-16 07:37:43 +08:00
|
|
|
|
|
|
|
if (!hide_vhosts) {
|
|
|
|
if (!first)
|
|
|
|
if(buf != end)
|
|
|
|
*buf++ = ',';
|
|
|
|
buf += lws_json_dump_vhost(vh, buf, end - buf);
|
|
|
|
first = 0;
|
|
|
|
}
|
2016-04-20 06:10:56 +08:00
|
|
|
if (vh->lserv_wsi)
|
|
|
|
listening++;
|
2016-04-15 14:01:29 +08:00
|
|
|
vh = vh->vhost_next;
|
|
|
|
}
|
|
|
|
|
context deprecation
1) This makes lwsws run a parent process with the original permissions.
But this process is only able to respond to SIGHUP, it doesn't do anything
else.
2) You can send this parent process a SIGHUP now to cause it to
- close listening sockets in existing lwsws processes
- mark those processes as to exit when the number of active connections
on the falls to zero
- spawn a fresh child process from scratch, using latest configuration
file content, latest plugins, etc. It can now reopen listening sockets
if it chooses to, or open different listen ports or whatever.
Notes:
1) lws_context_destroy() has been split into two pieces... the reason for
the split is the first part closes the per-vhost protocols, but since
they may have created libuv objects in the per-vhost protocol storage,
these cannot be freed until after the loop has been run.
That's the purpose of the second part of the context destruction,
lws_context_destroy2().
For compatibility, if you are not using libuv, the first part calls the
second part. However if you are using libuv, you must now call the
second part from your own main.c after the first part.
2016-12-16 07:37:43 +08:00
|
|
|
buf += lws_snprintf(buf, end - buf,
|
|
|
|
"],\n\"listen_wsi\":\"%d\",\n"
|
|
|
|
" \"rx\":\"%llu\",\n"
|
|
|
|
" \"tx\":\"%llu\",\n"
|
2017-10-13 10:33:02 +08:00
|
|
|
" \"h1_conn\":\"%lu\",\n"
|
|
|
|
" \"h1_trans\":\"%lu\",\n"
|
|
|
|
" \"h2_trans\":\"%lu\",\n"
|
context deprecation
1) This makes lwsws run a parent process with the original permissions.
But this process is only able to respond to SIGHUP, it doesn't do anything
else.
2) You can send this parent process a SIGHUP now to cause it to
- close listening sockets in existing lwsws processes
- mark those processes as to exit when the number of active connections
on the falls to zero
- spawn a fresh child process from scratch, using latest configuration
file content, latest plugins, etc. It can now reopen listening sockets
if it chooses to, or open different listen ports or whatever.
Notes:
1) lws_context_destroy() has been split into two pieces... the reason for
the split is the first part closes the per-vhost protocols, but since
they may have created libuv objects in the per-vhost protocol storage,
these cannot be freed until after the loop has been run.
That's the purpose of the second part of the context destruction,
lws_context_destroy2().
For compatibility, if you are not using libuv, the first part calls the
second part. However if you are using libuv, you must now call the
second part from your own main.c after the first part.
2016-12-16 07:37:43 +08:00
|
|
|
" \"ws_upg\":\"%lu\",\n"
|
|
|
|
" \"rejected\":\"%lu\",\n"
|
2017-10-13 10:33:02 +08:00
|
|
|
" \"h2_alpn\":\"%lu\",\n"
|
|
|
|
" \"h2_subs\":\"%lu\",\n"
|
|
|
|
" \"h2_upg\":\"%lu\"",
|
|
|
|
listening, cs.rx, cs.tx,
|
|
|
|
cs.h1_conn,
|
|
|
|
cs.h1_trans,
|
|
|
|
cs.h2_trans,
|
|
|
|
cs.ws_upg,
|
|
|
|
cs.rejected,
|
|
|
|
cs.h2_alpn,
|
|
|
|
cs.h2_subs,
|
|
|
|
cs.h2_upg);
|
2016-04-20 06:10:56 +08:00
|
|
|
|
|
|
|
#ifdef LWS_WITH_CGI
|
|
|
|
for (n = 0; n < context->count_threads; n++) {
|
|
|
|
pt = &context->pt[n];
|
2018-04-27 15:20:56 +08:00
|
|
|
pcgi = &pt->http.cgi_list;
|
2016-04-20 06:10:56 +08:00
|
|
|
|
|
|
|
while (*pcgi) {
|
|
|
|
pcgi = &(*pcgi)->cgi_list;
|
|
|
|
|
|
|
|
cgi_count++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif
|
2016-09-15 02:22:57 +08:00
|
|
|
buf += lws_snprintf(buf, end - buf, ",\n \"cgi_alive\":\"%d\"\n ",
|
2016-04-20 06:10:56 +08:00
|
|
|
cgi_count);
|
|
|
|
|
context deprecation
1) This makes lwsws run a parent process with the original permissions.
But this process is only able to respond to SIGHUP, it doesn't do anything
else.
2) You can send this parent process a SIGHUP now to cause it to
- close listening sockets in existing lwsws processes
- mark those processes as to exit when the number of active connections
on the falls to zero
- spawn a fresh child process from scratch, using latest configuration
file content, latest plugins, etc. It can now reopen listening sockets
if it chooses to, or open different listen ports or whatever.
Notes:
1) lws_context_destroy() has been split into two pieces... the reason for
the split is the first part closes the per-vhost protocols, but since
they may have created libuv objects in the per-vhost protocol storage,
these cannot be freed until after the loop has been run.
That's the purpose of the second part of the context destruction,
lws_context_destroy2().
For compatibility, if you are not using libuv, the first part calls the
second part. However if you are using libuv, you must now call the
second part from your own main.c after the first part.
2016-12-16 07:37:43 +08:00
|
|
|
buf += lws_snprintf(buf, end - buf, "}");
|
|
|
|
|
|
|
|
|
|
|
|
buf += lws_snprintf(buf, end - buf, "]}\n ");
|
2016-04-15 14:01:29 +08:00
|
|
|
|
|
|
|
return buf - orig;
|
|
|
|
}
|
|
|
|
|
|
|
|
#endif
|
2017-05-07 10:02:03 +08:00
|
|
|
|
|
|
|
#if defined(LWS_WITH_STATS)
|
|
|
|
|
|
|
|
LWS_VISIBLE LWS_EXTERN uint64_t
|
|
|
|
lws_stats_get(struct lws_context *context, int index)
|
|
|
|
{
|
|
|
|
if (index >= LWSSTATS_SIZE)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
return context->lws_stats[index];
|
|
|
|
}
|
|
|
|
|
|
|
|
LWS_VISIBLE LWS_EXTERN void
|
|
|
|
lws_stats_log_dump(struct lws_context *context)
|
|
|
|
{
|
2017-05-15 07:30:06 +08:00
|
|
|
struct lws_vhost *v = context->vhost_list;
|
2018-10-10 13:54:43 +08:00
|
|
|
int n;
|
|
|
|
#if defined(LWS_WITH_PEER_LIMITS)
|
|
|
|
int m;
|
|
|
|
#endif
|
2017-05-15 07:30:06 +08:00
|
|
|
|
2017-05-07 10:02:03 +08:00
|
|
|
if (!context->updated)
|
|
|
|
return;
|
|
|
|
|
|
|
|
context->updated = 0;
|
|
|
|
|
|
|
|
lwsl_notice("\n");
|
|
|
|
lwsl_notice("LWS internal statistics dump ----->\n");
|
2017-10-28 07:42:44 +08:00
|
|
|
lwsl_notice("LWSSTATS_C_CONNECTIONS: %8llu\n",
|
|
|
|
(unsigned long long)lws_stats_get(context,
|
|
|
|
LWSSTATS_C_CONNECTIONS));
|
|
|
|
lwsl_notice("LWSSTATS_C_API_CLOSE: %8llu\n",
|
|
|
|
(unsigned long long)lws_stats_get(context,
|
|
|
|
LWSSTATS_C_API_CLOSE));
|
|
|
|
lwsl_notice("LWSSTATS_C_API_READ: %8llu\n",
|
|
|
|
(unsigned long long)lws_stats_get(context,
|
|
|
|
LWSSTATS_C_API_READ));
|
|
|
|
lwsl_notice("LWSSTATS_C_API_LWS_WRITE: %8llu\n",
|
|
|
|
(unsigned long long)lws_stats_get(context,
|
|
|
|
LWSSTATS_C_API_LWS_WRITE));
|
|
|
|
lwsl_notice("LWSSTATS_C_API_WRITE: %8llu\n",
|
|
|
|
(unsigned long long)lws_stats_get(context,
|
|
|
|
LWSSTATS_C_API_WRITE));
|
|
|
|
lwsl_notice("LWSSTATS_C_WRITE_PARTIALS: %8llu\n",
|
|
|
|
(unsigned long long)lws_stats_get(context,
|
|
|
|
LWSSTATS_C_WRITE_PARTIALS));
|
|
|
|
lwsl_notice("LWSSTATS_C_WRITEABLE_CB_REQ: %8llu\n",
|
|
|
|
(unsigned long long)lws_stats_get(context,
|
|
|
|
LWSSTATS_C_WRITEABLE_CB_REQ));
|
|
|
|
lwsl_notice("LWSSTATS_C_WRITEABLE_CB_EFF_REQ: %8llu\n",
|
|
|
|
(unsigned long long)lws_stats_get(context,
|
|
|
|
LWSSTATS_C_WRITEABLE_CB_EFF_REQ));
|
|
|
|
lwsl_notice("LWSSTATS_C_WRITEABLE_CB: %8llu\n",
|
|
|
|
(unsigned long long)lws_stats_get(context,
|
|
|
|
LWSSTATS_C_WRITEABLE_CB));
|
|
|
|
lwsl_notice("LWSSTATS_C_SSL_CONNECTIONS_ACCEPT_SPIN: %8llu\n",
|
|
|
|
(unsigned long long)lws_stats_get(context,
|
|
|
|
LWSSTATS_C_SSL_CONNECTIONS_ACCEPT_SPIN));
|
|
|
|
lwsl_notice("LWSSTATS_C_SSL_CONNECTIONS_FAILED: %8llu\n",
|
|
|
|
(unsigned long long)lws_stats_get(context,
|
|
|
|
LWSSTATS_C_SSL_CONNECTIONS_FAILED));
|
|
|
|
lwsl_notice("LWSSTATS_C_SSL_CONNECTIONS_ACCEPTED: %8llu\n",
|
|
|
|
(unsigned long long)lws_stats_get(context,
|
|
|
|
LWSSTATS_C_SSL_CONNECTIONS_ACCEPTED));
|
|
|
|
lwsl_notice("LWSSTATS_C_SSL_CONNS_HAD_RX: %8llu\n",
|
|
|
|
(unsigned long long)lws_stats_get(context,
|
|
|
|
LWSSTATS_C_SSL_CONNS_HAD_RX));
|
|
|
|
lwsl_notice("LWSSTATS_C_PEER_LIMIT_AH_DENIED: %8llu\n",
|
|
|
|
(unsigned long long)lws_stats_get(context,
|
|
|
|
LWSSTATS_C_PEER_LIMIT_AH_DENIED));
|
|
|
|
lwsl_notice("LWSSTATS_C_PEER_LIMIT_WSI_DENIED: %8llu\n",
|
|
|
|
(unsigned long long)lws_stats_get(context,
|
|
|
|
LWSSTATS_C_PEER_LIMIT_WSI_DENIED));
|
|
|
|
|
|
|
|
lwsl_notice("LWSSTATS_C_TIMEOUTS: %8llu\n",
|
|
|
|
(unsigned long long)lws_stats_get(context,
|
|
|
|
LWSSTATS_C_TIMEOUTS));
|
|
|
|
lwsl_notice("LWSSTATS_C_SERVICE_ENTRY: %8llu\n",
|
|
|
|
(unsigned long long)lws_stats_get(context,
|
|
|
|
LWSSTATS_C_SERVICE_ENTRY));
|
|
|
|
lwsl_notice("LWSSTATS_B_READ: %8llu\n",
|
|
|
|
(unsigned long long)lws_stats_get(context, LWSSTATS_B_READ));
|
|
|
|
lwsl_notice("LWSSTATS_B_WRITE: %8llu\n",
|
|
|
|
(unsigned long long)lws_stats_get(context, LWSSTATS_B_WRITE));
|
|
|
|
lwsl_notice("LWSSTATS_B_PARTIALS_ACCEPTED_PARTS: %8llu\n",
|
|
|
|
(unsigned long long)lws_stats_get(context,
|
|
|
|
LWSSTATS_B_PARTIALS_ACCEPTED_PARTS));
|
|
|
|
lwsl_notice("LWSSTATS_MS_SSL_CONNECTIONS_ACCEPTED_DELAY: %8llums\n",
|
|
|
|
(unsigned long long)lws_stats_get(context,
|
|
|
|
LWSSTATS_MS_SSL_CONNECTIONS_ACCEPTED_DELAY) / 1000);
|
2017-05-07 10:02:03 +08:00
|
|
|
if (lws_stats_get(context, LWSSTATS_C_SSL_CONNECTIONS_ACCEPTED))
|
|
|
|
lwsl_notice(" Avg accept delay: %8llums\n",
|
2017-10-28 07:42:44 +08:00
|
|
|
(unsigned long long)(lws_stats_get(context,
|
|
|
|
LWSSTATS_MS_SSL_CONNECTIONS_ACCEPTED_DELAY) /
|
|
|
|
lws_stats_get(context,
|
|
|
|
LWSSTATS_C_SSL_CONNECTIONS_ACCEPTED)) / 1000);
|
|
|
|
lwsl_notice("LWSSTATS_MS_SSL_RX_DELAY: %8llums\n",
|
|
|
|
(unsigned long long)lws_stats_get(context,
|
|
|
|
LWSSTATS_MS_SSL_RX_DELAY) / 1000);
|
2017-05-13 10:26:59 +08:00
|
|
|
if (lws_stats_get(context, LWSSTATS_C_SSL_CONNS_HAD_RX))
|
|
|
|
lwsl_notice(" Avg accept-rx delay: %8llums\n",
|
2017-10-28 07:42:44 +08:00
|
|
|
(unsigned long long)(lws_stats_get(context,
|
|
|
|
LWSSTATS_MS_SSL_RX_DELAY) /
|
|
|
|
lws_stats_get(context,
|
|
|
|
LWSSTATS_C_SSL_CONNS_HAD_RX)) / 1000);
|
2017-05-13 10:26:59 +08:00
|
|
|
|
2017-05-07 10:02:03 +08:00
|
|
|
lwsl_notice("LWSSTATS_MS_WRITABLE_DELAY: %8lluus\n",
|
2017-10-28 07:42:44 +08:00
|
|
|
(unsigned long long)lws_stats_get(context,
|
|
|
|
LWSSTATS_MS_WRITABLE_DELAY));
|
2017-05-07 10:02:03 +08:00
|
|
|
lwsl_notice("LWSSTATS_MS_WORST_WRITABLE_DELAY: %8lluus\n",
|
2017-10-28 07:42:44 +08:00
|
|
|
(unsigned long long)lws_stats_get(context,
|
|
|
|
LWSSTATS_MS_WORST_WRITABLE_DELAY));
|
2017-05-07 10:02:03 +08:00
|
|
|
if (lws_stats_get(context, LWSSTATS_C_WRITEABLE_CB))
|
|
|
|
lwsl_notice(" Avg writable delay: %8lluus\n",
|
2017-10-28 07:42:44 +08:00
|
|
|
(unsigned long long)(lws_stats_get(context,
|
|
|
|
LWSSTATS_MS_WRITABLE_DELAY) /
|
2017-05-07 10:02:03 +08:00
|
|
|
lws_stats_get(context, LWSSTATS_C_WRITEABLE_CB)));
|
2018-05-01 12:41:42 +08:00
|
|
|
lwsl_notice("Simultaneous SSL restriction: %8d/%d\n",
|
2017-10-28 07:42:44 +08:00
|
|
|
context->simultaneous_ssl,
|
2018-05-01 12:41:42 +08:00
|
|
|
context->simultaneous_ssl_restriction);
|
2017-05-15 07:30:06 +08:00
|
|
|
|
2017-10-28 07:42:44 +08:00
|
|
|
lwsl_notice("Live wsi: %8d\n",
|
|
|
|
context->count_wsi_allocated);
|
2017-05-15 07:30:06 +08:00
|
|
|
|
|
|
|
context->updated = 1;
|
|
|
|
|
|
|
|
while (v) {
|
2018-05-06 07:19:21 +08:00
|
|
|
if (v->lserv_wsi &&
|
|
|
|
v->lserv_wsi->position_in_fds_table != LWS_NO_FDS_POS) {
|
2017-06-28 09:52:22 +08:00
|
|
|
|
2017-10-28 07:42:44 +08:00
|
|
|
struct lws_context_per_thread *pt =
|
|
|
|
&context->pt[(int)v->lserv_wsi->tsi];
|
2017-05-15 07:30:06 +08:00
|
|
|
struct lws_pollfd *pfd;
|
|
|
|
|
|
|
|
pfd = &pt->fds[v->lserv_wsi->position_in_fds_table];
|
|
|
|
|
|
|
|
lwsl_notice(" Listen port %d actual POLLIN: %d\n",
|
2017-10-28 07:42:44 +08:00
|
|
|
v->listen_port,
|
|
|
|
(int)pfd->events & LWS_POLLIN);
|
2017-05-15 07:30:06 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
v = v->vhost_next;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (n = 0; n < context->count_threads; n++) {
|
|
|
|
struct lws_context_per_thread *pt = &context->pt[n];
|
|
|
|
struct lws *wl;
|
|
|
|
int m = 0;
|
|
|
|
|
|
|
|
lwsl_notice("PT %d\n", n + 1);
|
|
|
|
|
2018-03-02 14:22:49 +08:00
|
|
|
lws_pt_lock(pt, __func__);
|
2017-05-15 07:30:06 +08:00
|
|
|
|
|
|
|
lwsl_notice(" AH in use / max: %d / %d\n",
|
2018-04-27 15:20:56 +08:00
|
|
|
pt->http.ah_count_in_use,
|
2017-05-15 07:30:06 +08:00
|
|
|
context->max_http_header_pool);
|
|
|
|
|
2018-04-27 15:20:56 +08:00
|
|
|
wl = pt->http.ah_wait_list;
|
2017-05-15 07:30:06 +08:00
|
|
|
while (wl) {
|
|
|
|
m++;
|
2018-08-25 05:43:31 +08:00
|
|
|
wl = wl->http.ah_wait_list;
|
2017-05-15 07:30:06 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
lwsl_notice(" AH wait list count / actual: %d / %d\n",
|
2018-04-27 15:20:56 +08:00
|
|
|
pt->http.ah_wait_list_length, m);
|
2017-05-15 07:30:06 +08:00
|
|
|
|
|
|
|
lws_pt_unlock(pt);
|
|
|
|
}
|
|
|
|
|
2017-09-14 13:14:11 +08:00
|
|
|
#if defined(LWS_WITH_PEER_LIMITS)
|
|
|
|
m = 0;
|
2017-11-05 06:54:31 +08:00
|
|
|
for (n = 0; n < (int)context->pl_hash_elements; n++) {
|
|
|
|
lws_start_foreach_llp(struct lws_peer **, peer,
|
|
|
|
context->pl_hash_table[n]) {
|
2017-09-14 13:14:11 +08:00
|
|
|
m++;
|
|
|
|
} lws_end_foreach_llp(peer, next);
|
|
|
|
}
|
|
|
|
|
|
|
|
lwsl_notice(" Peers: total active %d\n", m);
|
|
|
|
if (m > 10) {
|
|
|
|
m = 10;
|
|
|
|
lwsl_notice(" (showing 10 peers only)\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
if (m) {
|
2017-11-05 06:54:31 +08:00
|
|
|
for (n = 0; n < (int)context->pl_hash_elements; n++) {
|
2017-09-14 13:14:11 +08:00
|
|
|
char buf[72];
|
|
|
|
|
2017-10-28 07:42:44 +08:00
|
|
|
lws_start_foreach_llp(struct lws_peer **, peer,
|
|
|
|
context->pl_hash_table[n]) {
|
2017-09-14 13:14:11 +08:00
|
|
|
struct lws_peer *df = *peer;
|
|
|
|
|
|
|
|
if (!lws_plat_inet_ntop(df->af, df->addr, buf,
|
|
|
|
sizeof(buf) - 1))
|
|
|
|
strcpy(buf, "unknown");
|
2018-04-27 15:20:56 +08:00
|
|
|
#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
|
2017-09-14 13:14:11 +08:00
|
|
|
lwsl_notice(" peer %s: count wsi: %d, count ah: %d\n",
|
2018-08-25 05:43:31 +08:00
|
|
|
buf, df->count_wsi,
|
|
|
|
df->http.count_ah);
|
2018-04-27 15:20:56 +08:00
|
|
|
#else
|
|
|
|
lwsl_notice(" peer %s: count wsi: %d\n",
|
|
|
|
buf, df->count_wsi);
|
|
|
|
#endif
|
2017-09-14 13:14:11 +08:00
|
|
|
|
|
|
|
if (!--m)
|
|
|
|
break;
|
|
|
|
} lws_end_foreach_llp(peer, next);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2017-05-07 10:02:03 +08:00
|
|
|
lwsl_notice("\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
lws_stats_atomic_bump(struct lws_context * context,
|
|
|
|
struct lws_context_per_thread *pt, int index, uint64_t bump)
|
|
|
|
{
|
2018-03-02 14:22:49 +08:00
|
|
|
lws_pt_stats_lock(pt);
|
2017-05-07 10:02:03 +08:00
|
|
|
context->lws_stats[index] += bump;
|
|
|
|
if (index != LWSSTATS_C_SERVICE_ENTRY)
|
|
|
|
context->updated = 1;
|
2018-03-02 14:22:49 +08:00
|
|
|
lws_pt_stats_unlock(pt);
|
2017-05-07 10:02:03 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
lws_stats_atomic_max(struct lws_context * context,
|
|
|
|
struct lws_context_per_thread *pt, int index, uint64_t val)
|
|
|
|
{
|
2018-03-02 14:22:49 +08:00
|
|
|
lws_pt_stats_lock(pt);
|
2017-05-07 10:02:03 +08:00
|
|
|
if (val > context->lws_stats[index]) {
|
|
|
|
context->lws_stats[index] = val;
|
|
|
|
context->updated = 1;
|
|
|
|
}
|
2018-03-02 14:22:49 +08:00
|
|
|
lws_pt_stats_unlock(pt);
|
2017-05-07 10:02:03 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
#endif
|
2017-09-20 10:37:59 +08:00
|
|
|
|