/*
 * libwebsockets - small server side websockets and web server implementation
 *
 * Copyright (C) 2010 - 2020 Andy Green <andy@warmcat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "private-lib-core.h"

#ifndef LWS_BUILD_HASH
#define LWS_BUILD_HASH "unknown-build-hash"
#endif

static const char *library_version = LWS_LIBRARY_VERSION " " LWS_BUILD_HASH;

#if defined(LWS_WITH_NETWORK)
/* in ms */
static uint32_t default_backoff_table[] = { 1000, 3000, 9000, 17000 };
#endif

/**
 * lws_get_library_version: get the version and git hash the library was
 *			    built from
 *
 * returns a const char * to a string like "1.1 178d78c"
 * representing the library version followed by the git head hash it
 * was built from
 */
const char *
lws_get_library_version(void)
{
	return library_version;
}
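
/*
 * Usage sketch (illustrative snippet, not part of the library): the
 * returned string is static library data and must not be freed.
 *
 *	lwsl_notice("built with lws %s\n", lws_get_library_version());
 */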

#if defined(LWS_WITH_STATS)
static void
lws_sul_stats_cb(lws_sorted_usec_list_t *sul)
{
	struct lws_context_per_thread *pt = lws_container_of(sul,
			struct lws_context_per_thread, sul_stats);

	lws_stats_log_dump(pt->context);

	__lws_sul_insert(&pt->pt_sul_owner, &pt->sul_stats,
			 10 * LWS_US_PER_SEC);
}
#endif

#if defined(LWS_WITH_PEER_LIMITS)
static void
lws_sul_peer_limits_cb(lws_sorted_usec_list_t *sul)
{
	struct lws_context_per_thread *pt = lws_container_of(sul,
			struct lws_context_per_thread, sul_peer_limits);

	lws_peer_cull_peer_wait_list(pt->context);

	__lws_sul_insert(&pt->pt_sul_owner, &pt->sul_peer_limits,
			 10 * LWS_US_PER_SEC);
}
#endif

#if defined(LWS_WITH_NETWORK)

#if defined(_DEBUG)
static const char * system_state_names[] = {
	"undef",
	"CONTEXT_CREATED",
	"INITIALIZED",
	"IFACE_COLDPLUG",
	"DHCP",
	"TIME_VALID",
	"POLICY_VALID",
	"REGISTERED",
	"AUTH1",
	"AUTH2",
	"OPERATIONAL",
	"POLICY_INVALID"
};
#endif

/*
 * Handle provoking protocol init when we pass through the right system state
 */

static int
lws_state_notify_protocol_init(struct lws_state_manager *mgr,
			       struct lws_state_notify_link *link, int current,
			       int target)
{
	struct lws_context *context = lws_container_of(mgr, struct lws_context,
						       mgr_system);
	int n;

	/*
	 * Deal with any attachments that were waiting for the right state
	 * to come along
	 */

	for (n = 0; n < context->count_threads; n++)
		lws_system_do_attach(&context->pt[n]);

#if defined(LWS_WITH_SYS_DHCP_CLIENT)
	if (target == LWS_SYSTATE_DHCP) {
		/*
		 * Don't let it past here until at least one iface has been
		 * configured for operation with DHCP
		 */

		if (!lws_dhcpc_status(context, NULL))
			return 1;
	}
#endif

#if defined(LWS_WITH_SECURE_STREAMS_SYS_AUTH_API_AMAZON_COM)
	/*
	 * Skip this if we are running something without the policy for it.
	 *
	 * If the root token is empty, skip it too.
	 */
	if (target == LWS_SYSTATE_AUTH1 &&
	    context->pss_policies &&
	    !lws_system_blob_get_size(lws_system_get_blob(context,
				      LWS_SYSBLOB_TYPE_AUTH, 0)) &&
	    lws_system_blob_get_size(lws_system_get_blob(context,
				     LWS_SYSBLOB_TYPE_AUTH, 1))) {
		lwsl_info("%s: AUTH1 state triggering api.amazon.com auth\n",
			  __func__);
		/*
		 * Start trying to acquire it if it's not already in progress;
		 * returns nonzero if we determine it's not needed
		 */
		if (!lws_ss_sys_auth_api_amazon_com(context))
			return 1;
	}
#endif

#if defined(LWS_WITH_SECURE_STREAMS)
	/*
	 * Skip this if we are running something without the policy for it
	 */
	if (target == LWS_SYSTATE_POLICY_VALID &&
	    context->pss_policies && !context->policy_updated) {
		/*
		 * Start trying to acquire it if it's not already in progress;
		 * returns nonzero if we determine it's not needed
		 */
		if (!lws_ss_sys_fetch_policy(context))
			return 1;
	}
#endif

	/* protocol part */

	if (context->protocol_init_done)
		return 0;

	if (target != LWS_SYSTATE_POLICY_VALID)
		return 0;

	lwsl_info("%s: doing protocol init on POLICY_VALID\n", __func__);
	lws_protocol_init(context);

	return 0;
}

static void
lws_context_creation_completion_cb(lws_sorted_usec_list_t *sul)
{
	struct lws_context *context = lws_container_of(sul, struct lws_context,
						       sul_system_state);

	/* if nothing is there to intercept anything, go all the way */
	lws_state_transition_steps(&context->mgr_system,
				   LWS_SYSTATE_OPERATIONAL);
}
#endif

struct lws_context *
lws_create_context(const struct lws_context_creation_info *info)
{
	struct lws_context *context = NULL;
#if defined(LWS_WITH_FILE_OPS)
	struct lws_plat_file_ops *prev;
#endif
#ifndef LWS_NO_DAEMONIZE
	pid_t pid_daemon = get_daemonize_pid();
#endif
#if defined(LWS_WITH_NETWORK)
	int n, count_threads = 1;
	uint8_t *u;
#endif
#if defined(__ANDROID__)
	struct rlimit rt;
#endif
	size_t s1 = 4096, size = sizeof(struct lws_context);
	int lpf = info->fd_limit_per_thread;

	if (lpf) {
		lpf += 2;
#if defined(LWS_WITH_SYS_ASYNC_DNS)
		lpf++;
#endif
#if defined(LWS_WITH_SYS_NTPCLIENT)
		lpf++;
#endif
#if defined(LWS_WITH_SYS_DHCP_CLIENT)
		lpf++;
#endif
	}

	lwsl_info("Initial logging level %d\n", log_level);
	lwsl_info("Libwebsockets version: %s\n", library_version);

#ifdef LWS_WITH_IPV6
	if (!lws_check_opt(info->options, LWS_SERVER_OPTION_DISABLE_IPV6))
		lwsl_info("IPV6 compiled in and enabled\n");
	else
		lwsl_info("IPV6 compiled in but disabled\n");
#else
	lwsl_info("IPV6 not compiled in\n");
#endif

	lwsl_info(" LWS_DEF_HEADER_LEN    : %u\n", LWS_DEF_HEADER_LEN);
	lwsl_info(" LWS_MAX_SMP           : %u\n", LWS_MAX_SMP);
	lwsl_info(" sizeof (*info)        : %ld\n", (long)sizeof(*info));
#if defined(LWS_WITH_STATS)
	lwsl_info(" LWS_WITH_STATS        : on\n");
#endif
	lwsl_info(" SYSTEM_RANDOM_FILEPATH: '%s'\n", SYSTEM_RANDOM_FILEPATH);
#if defined(LWS_WITH_HTTP2)
	lwsl_info(" HTTP2 support         : available\n");
#else
	lwsl_info(" HTTP2 support         : not configured\n");
#endif

	if (lws_plat_context_early_init())
		return NULL;

#if defined(LWS_WITH_NETWORK)
	if (info->count_threads)
		count_threads = info->count_threads;

	if (count_threads > LWS_MAX_SMP)
		count_threads = LWS_MAX_SMP;

	if (info->pt_serv_buf_size)
		s1 = info->pt_serv_buf_size;

	/* pt fakewsi and the pt serv buf allocations ride after the context */
	size += count_threads * (s1 + sizeof(struct lws));
#endif

	context = lws_zalloc(size, "context");
	if (!context) {
		lwsl_err("No memory for websocket context\n");
		return NULL;
	}

	context->uid = info->uid;
	context->gid = info->gid;
	context->username = info->username;
	context->groupname = info->groupname;
	context->system_ops = info->system_ops;
	context->pt_serv_buf_size = (unsigned int)s1;
	context->udp_loss_sim_tx_pc = info->udp_loss_sim_tx_pc;
	context->udp_loss_sim_rx_pc = info->udp_loss_sim_rx_pc;

	if (context->udp_loss_sim_tx_pc || context->udp_loss_sim_rx_pc)
		lwsl_warn("%s: simulating udp loss tx: %d%%, rx: %d%%\n",
			  __func__, context->udp_loss_sim_tx_pc,
			  context->udp_loss_sim_rx_pc);

#if defined(LWS_WITH_SECURE_STREAMS_PROXY_API)
	context->ss_proxy_bind = info->ss_proxy_bind;
	context->ss_proxy_port = info->ss_proxy_port;
	context->ss_proxy_address = info->ss_proxy_address;
	lwsl_notice("%s: using ss proxy bind '%s', port %d, ads '%s'\n",
		    __func__, context->ss_proxy_bind, context->ss_proxy_port,
		    context->ss_proxy_address);
#endif

#if defined(LWS_WITH_NETWORK)
	context->count_threads = count_threads;
#if defined(LWS_WITH_DETAILED_LATENCY)
	context->detailed_latency_cb = info->detailed_latency_cb;
	context->detailed_latency_filepath = info->detailed_latency_filepath;
	context->latencies_fd = -1;
#endif
#if defined(LWS_WITHOUT_EXTENSIONS)
	if (info->extensions)
		lwsl_warn("%s: LWS_WITHOUT_EXTENSIONS but extensions ptr set\n",
			  __func__);
#endif
#endif

#if defined(LWS_WITH_SECURE_STREAMS)
	context->pss_policies_json = info->pss_policies_json;
	context->pss_plugins = info->pss_plugins;
#endif

	/* if he gave us names, set the uid / gid */
	if (lws_plat_drop_app_privileges(context, 0))
		goto bail;

	lwsl_info("context created\n");
#if defined(LWS_WITH_TLS) && defined(LWS_WITH_NETWORK)
#if defined(LWS_WITH_MBEDTLS)
	context->tls_ops = &tls_ops_mbedtls;
#else
	context->tls_ops = &tls_ops_openssl;
#endif
#endif

#if LWS_MAX_SMP > 1
	lws_mutex_refcount_init(&context->mr);
#endif

#if defined(LWS_PLAT_FREERTOS)
#if defined(LWS_AMAZON_RTOS)
	context->last_free_heap = xPortGetFreeHeapSize();
#else
	context->last_free_heap = esp_get_free_heap_size();
#endif
#endif

#if defined(LWS_WITH_FILE_OPS)
	/* default to just the platform fops implementation */

	context->fops_platform.LWS_FOP_OPEN	= _lws_plat_file_open;
	context->fops_platform.LWS_FOP_CLOSE	= _lws_plat_file_close;
	context->fops_platform.LWS_FOP_SEEK_CUR	= _lws_plat_file_seek_cur;
	context->fops_platform.LWS_FOP_READ	= _lws_plat_file_read;
	context->fops_platform.LWS_FOP_WRITE	= _lws_plat_file_write;
	context->fops_platform.fi[0].sig	= NULL;

	/*
	 * arrange a linear linked-list of fops starting from context->fops
	 *
	 * platform fops
	 * [ -> fops_zip (copied into context so .next settable) ]
	 * [ -> info->fops ]
	 */

	context->fops = &context->fops_platform;
	prev = (struct lws_plat_file_ops *)context->fops;

#if defined(LWS_WITH_ZIP_FOPS)
	/* make a soft copy so we can set .next */
	context->fops_zip = fops_zip;
	prev->next = &context->fops_zip;
	prev = (struct lws_plat_file_ops *)prev->next;
#endif

	/* if user provided fops, tack them on the end of the list */
	if (info->fops)
		prev->next = info->fops;
#endif
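
	/*
	 * Sketch of chaining in custom fops at creation time (illustrative
	 * only; my_fops / my_open etc are hypothetical user callbacks):
	 *
	 *	static struct lws_plat_file_ops my_fops;
	 *
	 *	my_fops.LWS_FOP_OPEN  = my_open;
	 *	my_fops.LWS_FOP_CLOSE = my_close;
	 *	...
	 *	info.fops = &my_fops;
	 *
	 * lws then walks the list from context->fops (platform -> [zip] ->
	 * user) to find an implementation whose registered signature
	 * matches the vfs path being opened.
	 */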

#if defined(LWS_WITH_SERVER)
	context->reject_service_keywords = info->reject_service_keywords;
#endif

	if (info->external_baggage_free_on_destroy)
		context->external_baggage_free_on_destroy =
				info->external_baggage_free_on_destroy;
#if defined(LWS_WITH_NETWORK)
	context->time_up = lws_now_usecs();
#endif
	context->pcontext_finalize = info->pcontext;

	context->simultaneous_ssl_restriction =
			info->simultaneous_ssl_restriction;

	context->options = info->options;

#ifndef LWS_NO_DAEMONIZE
	if (pid_daemon) {
		context->started_with_parent = pid_daemon;
		lwsl_info(" Started with daemon pid %u\n",
			  (unsigned int)pid_daemon);
	}
#endif
#if defined(__ANDROID__)
	n = getrlimit(RLIMIT_NOFILE, &rt);
	if (n == -1) {
		lwsl_err("Get RLIMIT_NOFILE failed!\n");

		return NULL;
	}
	context->max_fds = rt.rlim_cur;
#else
#if defined(WIN32) || defined(_WIN32) || defined(LWS_AMAZON_RTOS)
	context->max_fds = getdtablesize();
#else
	context->max_fds = sysconf(_SC_OPEN_MAX);
#endif

	if (context->max_fds < 0) {
		lwsl_err("%s: problem getting process max files\n",
			 __func__);

		return NULL;
	}
#endif

	/*
	 * deal with any max_fds override, if it's reducing (setting it to
	 * more than ulimit -n is meaningless).  The platform init will
	 * figure out if this is something it can deal with.
	 */
	if (info->fd_limit_per_thread) {
		int mf = lpf * context->count_threads;

		if (mf < context->max_fds) {
			context->max_fds_unrelated_to_ulimit = 1;
			context->max_fds = mf;
		}
	}
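
	/*
	 * Minimal-footprint sketch (illustrative): a client-only context
	 * that only ever needs a handful of wsi can opt out of the
	 * ulimit-n-sized fd lookup table like this:
	 *
	 *	struct lws_context_creation_info info;
	 *
	 *	memset(&info, 0, sizeof info);
	 *	info.fd_limit_per_thread = 1 + 1 + 1;	// small wsi budget
	 *	...
	 *	context = lws_create_context(&info);
	 *
	 * lws then uses a small, iterated lookup table instead of indexing
	 * by fd, trading lookup speed for allocation size.
	 */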

	context->token_limits = info->token_limits;

#if defined(LWS_WITH_NETWORK)

	/*
	 * set the context event loop ops struct
	 *
	 * after this, all event_loop actions use the generic ops
	 */

#if defined(LWS_WITH_POLL)
	context->event_loop_ops = &event_loop_ops_poll;
#endif

	if (lws_check_opt(context->options, LWS_SERVER_OPTION_LIBUV))
#if defined(LWS_WITH_LIBUV)
		context->event_loop_ops = &event_loop_ops_uv;
#else
		goto fail_event_libs;
#endif

	if (lws_check_opt(context->options, LWS_SERVER_OPTION_LIBEV))
#if defined(LWS_WITH_LIBEV)
		context->event_loop_ops = &event_loop_ops_ev;
#else
		goto fail_event_libs;
#endif

	if (lws_check_opt(context->options, LWS_SERVER_OPTION_LIBEVENT))
#if defined(LWS_WITH_LIBEVENT)
		context->event_loop_ops = &event_loop_ops_event;
#else
		goto fail_event_libs;
#endif

	if (lws_check_opt(context->options, LWS_SERVER_OPTION_GLIB))
#if defined(LWS_WITH_GLIB)
		context->event_loop_ops = &event_loop_ops_glib;
#else
		goto fail_event_libs;
#endif

	if (!context->event_loop_ops)
		goto fail_event_libs;

	lwsl_info("Using event loop: %s\n", context->event_loop_ops->name);
#endif
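
	/*
	 * Selection sketch (illustrative): the backend is picked by option
	 * flags at creation time, eg, for a foreign libuv loop (assuming
	 * lws was built with libuv support; "loop" is a hypothetical
	 * uv_loop_t * owned by the application):
	 *
	 *	void *foreign_loops[1] = { loop };
	 *
	 *	info.options |= LWS_SERVER_OPTION_LIBUV;
	 *	info.foreign_loops = foreign_loops;
	 */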

#if defined(LWS_WITH_TLS) && defined(LWS_WITH_NETWORK)
	time(&context->tls.last_cert_check_s);
	if (info->alpn)
		context->tls.alpn_default = info->alpn;
	else {
		char *p = context->tls.alpn_discovered, first = 1;

		LWS_FOR_EVERY_AVAILABLE_ROLE_START(ar) {
			if (ar->alpn) {
				if (!first)
					*p++ = ',';
				p += lws_snprintf(p,
					context->tls.alpn_discovered +
					sizeof(context->tls.alpn_discovered) -
					2 - p, "%s", ar->alpn);
				first = 0;
			}
		} LWS_FOR_EVERY_AVAILABLE_ROLE_END;

		context->tls.alpn_default = context->tls.alpn_discovered;
	}

	lwsl_info("Default ALPN advertisement: %s\n",
		  context->tls.alpn_default);
#endif

	if (info->timeout_secs)
		context->timeout_secs = info->timeout_secs;
	else
		context->timeout_secs = AWAITING_TIMEOUT;

	context->ws_ping_pong_interval = info->ws_ping_pong_interval;

	lwsl_info(" default timeout (secs): %u\n", context->timeout_secs);

	if (info->max_http_header_data)
		context->max_http_header_data = info->max_http_header_data;
	else
		if (info->max_http_header_data2)
			context->max_http_header_data =
					info->max_http_header_data2;
		else
			context->max_http_header_data = LWS_DEF_HEADER_LEN;

	if (info->max_http_header_pool)
		context->max_http_header_pool = info->max_http_header_pool;
	else
		if (info->max_http_header_pool2)
			context->max_http_header_pool =
					info->max_http_header_pool2;
		else
			context->max_http_header_pool = context->max_fds;

	if (info->fd_limit_per_thread)
		context->fd_limit_per_thread = lpf;
	else
		if (context->count_threads)
			context->fd_limit_per_thread = context->max_fds /
							context->count_threads;

#if defined(LWS_WITH_NETWORK)

	context->default_retry.retry_ms_table = default_backoff_table;
	context->default_retry.conceal_count =
			context->default_retry.retry_ms_table_count =
					LWS_ARRAY_SIZE(default_backoff_table);
	context->default_retry.jitter_percent = 20;
	context->default_retry.secs_since_valid_ping = 300;
	context->default_retry.secs_since_valid_hangup = 310;

	if (info->retry_and_idle_policy &&
	    info->retry_and_idle_policy->secs_since_valid_ping) {
		context->default_retry.secs_since_valid_ping =
			info->retry_and_idle_policy->secs_since_valid_ping;
		context->default_retry.secs_since_valid_hangup =
			info->retry_and_idle_policy->secs_since_valid_hangup;
	}
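
	/*
	 * Validity-policy sketch (illustrative): the defaults above arrange
	 * a validity check after 300s without proof the connection is
	 * alive, and a hangup at 310s if it never proves valid.  A caller
	 * can override just those two via a retry policy (field names as
	 * consumed above; other members left at their defaults):
	 *
	 *	static const lws_retry_bo_t retry = {
	 *		.secs_since_valid_ping   = 30,
	 *		.secs_since_valid_hangup = 35,
	 *	};
	 *
	 *	info.retry_and_idle_policy = &retry;
	 */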

	/*
	 * Allocate the per-thread storage for scratchpad buffers,
	 * and header data pool
	 */
	u = (uint8_t *)&context[1];
	for (n = 0; n < context->count_threads; n++) {
		context->pt[n].serv_buf = u;
		u += context->pt_serv_buf_size;

		context->pt[n].context = context;
		context->pt[n].tid = n;

		/*
		 * We overallocated for a fakewsi (can't compose it in the
		 * pt because size isn't known at that time).  Point to it
		 * and zero it down.  Fakewsis are needed to make callbacks
		 * work when the source of the callback is not actually from
		 * a wsi context.
		 */
		context->pt[n].fake_wsi = (struct lws *)u;
		u += sizeof(struct lws);

		memset(context->pt[n].fake_wsi, 0, sizeof(struct lws));

#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
		context->pt[n].http.ah_list = NULL;
		context->pt[n].http.ah_pool_length = 0;
#endif
		lws_pt_mutex_init(&context->pt[n]);
#if defined(LWS_WITH_SEQUENCER)
		lws_seq_pt_init(&context->pt[n]);
#endif

		LWS_FOR_EVERY_AVAILABLE_ROLE_START(ar) {
			if (ar->pt_init_destroy)
				ar->pt_init_destroy(context, info,
						    &context->pt[n], 0);
		} LWS_FOR_EVERY_AVAILABLE_ROLE_END;

#if defined(LWS_WITH_CGI)
		role_ops_cgi.pt_init_destroy(context, info,
					     &context->pt[n], 0);
#endif
	}
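
	/*
	 * Resulting single-allocation layout (a reading of the size and
	 * pointer arithmetic above, shown for count_threads == 2):
	 *
	 *	[ struct lws_context                                 ]
	 *	[ pt[0] serv_buf (pt_serv_buf_size) ][ pt[0] fake_wsi ]
	 *	[ pt[1] serv_buf (pt_serv_buf_size) ][ pt[1] fake_wsi ]
	 */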

	lwsl_info(" Threads: %d each %d fds\n", context->count_threads,
		  context->fd_limit_per_thread);

	if (!info->ka_interval && info->ka_time > 0) {
		lwsl_err("info->ka_interval can't be 0 if ka_time used\n");
		return NULL;
	}

#if defined(LWS_WITH_PEER_LIMITS)
	/* scale the peer hash table according to the max fds for the process,
	 * so that the max list depth averages 16.  Eg, 1024 fd -> 64,
	 * 102400 fd -> 6400
	 */

	context->pl_hash_elements =
		(context->count_threads * context->fd_limit_per_thread) / 16;
	context->pl_hash_table = lws_zalloc(sizeof(struct lws_peer *) *
			context->pl_hash_elements, "peer limits hash table");

	context->ip_limit_ah = info->ip_limit_ah;
	context->ip_limit_wsi = info->ip_limit_wsi;
#endif

	lwsl_info(" mem: context:         %5lu B (%ld ctx + (%ld thr x %d))\n",
		  (long)sizeof(struct lws_context) +
		  (context->count_threads * context->pt_serv_buf_size),
		  (long)sizeof(struct lws_context),
		  (long)context->count_threads,
		  context->pt_serv_buf_size);
#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
	lwsl_info(" mem: http hdr size:   (%u + %lu), max count %u\n",
		  context->max_http_header_data,
		  (unsigned long)sizeof(struct allocated_headers),
		  context->max_http_header_pool);
#endif

	/*
	 * fds table contains pollfd structs for as many pollfds as we can
	 * handle... spread across as many service threads as we have going
	 */
	n = sizeof(struct lws_pollfd) * context->count_threads *
	    context->fd_limit_per_thread;
	context->pt[0].fds = lws_zalloc(n, "fds table");
	if (context->pt[0].fds == NULL) {
		lwsl_err("OOM allocating %d fds\n", context->max_fds);
		goto bail;
	}
	lwsl_info(" mem: pollfd map:      %5u B\n", n);
#endif
#if defined(LWS_WITH_SERVER)
	if (info->server_string) {
		context->server_string = info->server_string;
		context->server_string_len = (short)
				strlen(context->server_string);
	}
#endif

#if LWS_MAX_SMP > 1
	/* each thread serves his own chunk of fds */
	for (n = 1; n < (int)context->count_threads; n++)
		context->pt[n].fds = context->pt[n - 1].fds +
				     context->fd_limit_per_thread;
#endif

	if (lws_plat_init(context, info))
		goto bail;

#if defined(LWS_WITH_NETWORK)
	if (context->event_loop_ops->init_context)
		if (context->event_loop_ops->init_context(context, info))
			goto bail;

	if (context->event_loop_ops->init_pt)
		for (n = 0; n < context->count_threads; n++) {
			void *lp = NULL;

			if (info->foreign_loops)
				lp = info->foreign_loops[n];

			if (context->event_loop_ops->init_pt(context, lp, n))
				goto bail;
		}

	if (lws_create_event_pipes(context))
		goto bail;
#endif

	lws_context_init_ssl_library(info);

	context->user_space = info->user;

#if defined(LWS_WITH_SERVER)
	strcpy(context->canonical_hostname, "unknown");
#if defined(LWS_WITH_NETWORK)
	lws_server_get_canonical_hostname(context, info);
#endif
#endif

#if defined(LWS_WITH_STATS)
	context->pt[0].sul_stats.cb = lws_sul_stats_cb;
	__lws_sul_insert(&context->pt[0].pt_sul_owner,
			 &context->pt[0].sul_stats, 10 * LWS_US_PER_SEC);
#endif
#if defined(LWS_WITH_PEER_LIMITS)
	context->pt[0].sul_peer_limits.cb = lws_sul_peer_limits_cb;
	__lws_sul_insert(&context->pt[0].pt_sul_owner,
			 &context->pt[0].sul_peer_limits, 10 * LWS_US_PER_SEC);
#endif

#if defined(LWS_HAVE_SYS_CAPABILITY_H) && defined(LWS_HAVE_LIBCAP)
	memcpy(context->caps, info->caps, sizeof(context->caps));
	context->count_caps = info->count_caps;
#endif

#if defined(LWS_WITH_NETWORK)

#if defined(LWS_WITH_SYS_ASYNC_DNS) || defined(LWS_WITH_SYS_NTPCLIENT) || \
	defined(LWS_WITH_SYS_DHCP_CLIENT)
	{
		/*
		 * system vhost
		 */

		struct lws_context_creation_info ii;
		const struct lws_protocols *pp[4];
		struct lws_vhost *vh;
#if defined(LWS_WITH_SYS_ASYNC_DNS)
		extern const struct lws_protocols lws_async_dns_protocol;
#endif
#if defined(LWS_WITH_SYS_NTPCLIENT)
		extern const struct lws_protocols lws_system_protocol_ntpc;
#endif
#if defined(LWS_WITH_SYS_DHCP_CLIENT)
		extern const struct lws_protocols lws_system_protocol_dhcpc;
#endif

		n = 0;
#if defined(LWS_WITH_SYS_ASYNC_DNS)
		pp[n++] = &lws_async_dns_protocol;
#endif
#if defined(LWS_WITH_SYS_NTPCLIENT)
		pp[n++] = &lws_system_protocol_ntpc;
#endif
#if defined(LWS_WITH_SYS_DHCP_CLIENT)
		pp[n++] = &lws_system_protocol_dhcpc;
#endif
		pp[n] = NULL;

		memset(&ii, 0, sizeof(ii));
		ii.vhost_name = "system";
		ii.pprotocols = pp;

		vh = lws_create_vhost(context, &ii);
		if (!vh) {
			lwsl_err("%s: failed to create system vhost\n",
				 __func__);
			goto bail;
		}

		context->vhost_system = vh;

		if (lws_protocol_init_vhost(vh, NULL)) {
			lwsl_err("%s: failed to init system vhost\n",
				 __func__);
			goto bail;
		}
#if defined(LWS_WITH_SYS_ASYNC_DNS)
		if (lws_async_dns_init(context))
			goto bail;
#endif
	}
#endif
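
	/*
	 * (In the block above, pp[] is sized 4: up to three optional system
	 * protocols plus the NULL terminator the code sets via pp[n].)
	 */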

	/*
	 * init the lws_state mgr for the system state
	 */
#if defined(_DEBUG)
	context->mgr_system.state_names = system_state_names;
#endif
	context->mgr_system.name = "system";
	context->mgr_system.state = LWS_SYSTATE_CONTEXT_CREATED;
	context->mgr_system.parent = context;

	context->protocols_notify.name = "prot_init";
	context->protocols_notify.notify_cb = lws_state_notify_protocol_init;

	lws_state_reg_notifier(&context->mgr_system,
			       &context->protocols_notify);

	/*
	 * insert user notifiers here so they can participate with vetoing us
	 * trying to jump straight to operational, or at least observe us
	 * reaching 'operational', before we returned from context creation.
	 */

	lws_state_reg_notifier_list(&context->mgr_system,
				    info->register_notifier_list);
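
	/*
	 * User notifier sketch (illustrative; my_cb / nl are hypothetical).
	 * The callback shape matches lws_state_notify_protocol_init()
	 * above; returning nonzero vetoes the transition to "target" for
	 * now:
	 *
	 *	static int
	 *	my_cb(struct lws_state_manager *mgr,
	 *	      struct lws_state_notify_link *link, int current,
	 *	      int target)
	 *	{
	 *		return 0;	// allow the transition
	 *	}
	 *
	 *	static struct lws_state_notify_link nl = {
	 *		{ NULL, NULL, NULL }, my_cb, "my-notifier"
	 *	};
	 *	static struct lws_state_notify_link *nl_list[] = { &nl, NULL };
	 *
	 *	info.register_notifier_list = nl_list;
	 */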

	/*
	 * if he's not saying he'll make his own vhosts later then act
	 * compatibly and make a default vhost using the data in the info
	 */
	if (!lws_check_opt(info->options, LWS_SERVER_OPTION_EXPLICIT_VHOSTS))
		if (!lws_create_vhost(context, info)) {
			lwsl_err("Failed to create default vhost\n");

#if defined(LWS_WITH_PEER_LIMITS)
			lws_free_set_NULL(context->pl_hash_table);
#endif
			goto fail_clean_pipes;
		}

#if defined(LWS_WITH_SECURE_STREAMS)

	if (context->pss_policies_json) {
		/*
		 * You must create your context with the explicit vhosts flag
		 * in order to use secure streams
		 */
		assert(lws_check_opt(info->options,
				     LWS_SERVER_OPTION_EXPLICIT_VHOSTS));

		if (lws_ss_policy_parse_begin(context))
			goto bail;

		n = lws_ss_policy_parse(context,
					(uint8_t *)context->pss_policies_json,
					strlen(context->pss_policies_json));
		if (n != LEJP_CONTINUE && n < 0)
			goto bail;

		if (lws_ss_policy_set(context, "hardcoded")) {
			lwsl_err("%s: policy set failed\n", __func__);
			goto bail;
		}
	} else
		lws_create_vhost(context, info);
#endif

	lws_context_init_extensions(info, context);

	lwsl_info(" mem: per-conn:        %5lu bytes + protocol rx buf\n",
		  (unsigned long)sizeof(struct lws));

	/*
	 * drop any root privs for this process
	 * to listen on port < 1023 we would have needed root, but now we are
	 * listening, we don't want the power for anything else
	 */
	if (!lws_check_opt(info->options, LWS_SERVER_OPTION_EXPLICIT_VHOSTS))
		if (lws_plat_drop_app_privileges(context, 1))
			goto bail;

	/*
	 * We want to move on the system state as far as it can go towards
	 * OPERATIONAL now.  But we have to return from here first so the user
	 * code that called us can set its copy of the context pointer, which
	 * it may be relying on to perform operations triggered by the state
	 * change.
	 *
	 * We set up a sul to come back immediately and do the state change.
	 */

	lws_sul_schedule(context, 0, &context->sul_system_state,
			 lws_context_creation_completion_cb, 1);

	/* expedite post-context init (eg, protocols) */
	lws_cancel_service(context);
#endif

	return context;

#if defined(LWS_WITH_NETWORK)
fail_clean_pipes:
	for (n = 0; n < context->count_threads; n++)
		lws_destroy_event_pipe(context->pt[n].pipe_wsi);

	lws_free_set_NULL(context->pt[0].fds);
	lws_plat_context_late_destroy(context);
	lws_free_set_NULL(context);

	return NULL;
#endif

bail:
	lws_context_destroy(context);

	return NULL;

#if defined(LWS_WITH_NETWORK)
fail_event_libs:
	lwsl_err("Requested event library support not configured, available:\n");
	{
		extern const struct lws_event_loop_ops *available_event_libs[];
		const struct lws_event_loop_ops **elops = available_event_libs;

		while (*elops) {
			lwsl_err("  - %s\n", (*elops)->name);
			elops++;
		}
	}
#endif
	lws_free(context);

	return NULL;
}
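
/*
 * Minimal creation sketch (illustrative): a default vhost on port 7681
 * with a caller-supplied protocols array ("protocols" is hypothetical):
 *
 *	struct lws_context_creation_info info;
 *	struct lws_context *context;
 *
 *	memset(&info, 0, sizeof info);
 *	info.port = 7681;
 *	info.protocols = protocols;
 *
 *	context = lws_create_context(&info);
 *	if (!context)
 *		return 1;
 *	while (lws_service(context, 0) >= 0)
 *		;
 *	lws_context_destroy(context);
 */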

int
lws_context_is_deprecated(struct lws_context *context)
{
	return context->deprecated;
}

/*
 * When using an event loop, the context destruction is in three separate
 * parts.  This is to cover both internal and foreign event loops cleanly.
 *
 *  - lws_context_destroy() simply starts a soft close of all wsi and
 *    related allocations.  The event loop continues.
 *
 *    As the closes complete in the event loop, reference counting is used
 *    to determine when everything is closed.  It then calls
 *    lws_context_destroy2().
 *
 *  - lws_context_destroy2() cleans up the rest of the higher-level logical
 *    lws pieces like vhosts.  If the loop was foreign, it then proceeds to
 *    lws_context_destroy3().  If the loop is internal, it stops the
 *    internal loops and waits for lws_context_destroy() to be called again
 *    outside the event loop (since we cannot destroy the loop from
 *    within the loop).  That will cause lws_context_destroy3() to run
 *    directly.
 *
 *  - lws_context_destroy3() destroys any internal event loops and then
 *    destroys the context itself, setting what was info.pcontext to NULL.
 */
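
/*
 * Teardown sketch for the internal-loop case (a reading of the comment
 * above, not additional api):
 *
 *	lws_context_destroy(context);	// starts soft-closing all wsi
 *	...				// service loop drains and stops
 *	lws_context_destroy(context);	// second call, outside the loop,
 *					// ends in lws_context_destroy3()
 */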

/*
 * destroy the actual context itself
 */

static void
lws_context_destroy3(struct lws_context *context)
{
	struct lws_context **pcontext_finalize = context->pcontext_finalize;
	int n;

#if defined(LWS_WITH_NETWORK)

	lwsl_debug("%s\n", __func__);

	for (n = 0; n < context->count_threads; n++) {
		struct lws_context_per_thread *pt = &context->pt[n];
		(void)pt;
#if defined(LWS_WITH_SEQUENCER)
		lws_seq_destroy_all_on_pt(pt);
#endif
		LWS_FOR_EVERY_AVAILABLE_ROLE_START(ar) {
			if (ar->pt_init_destroy)
				ar->pt_init_destroy(context, NULL, pt, 1);
		} LWS_FOR_EVERY_AVAILABLE_ROLE_END;

#if defined(LWS_WITH_CGI)
		role_ops_cgi.pt_init_destroy(context, NULL, pt, 1);
#endif

		if (context->event_loop_ops->destroy_pt)
			context->event_loop_ops->destroy_pt(context, n);

#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
		while (pt->http.ah_list)
			_lws_destroy_ah(pt, pt->http.ah_list);
#endif
	}

#if defined(LWS_WITH_SYS_ASYNC_DNS)
	lws_async_dns_deinit(&context->async_dns);
#endif
#if defined(LWS_WITH_SYS_DHCP_CLIENT)
	lws_dhcpc_remove(context, NULL);
#endif

	if (context->pt[0].fds)
		lws_free_set_NULL(context->pt[0].fds);
#endif
	lws_context_deinit_ssl_library(context);

#if defined(LWS_WITH_DETAILED_LATENCY)
	if (context->latencies_fd != -1)
		compatible_close(context->latencies_fd);
#endif

	for (n = 0; n < LWS_SYSBLOB_TYPE_COUNT; n++)
		lws_system_blob_destroy(
				lws_system_get_blob(context, n, 0));

	/* log before freeing, so we don't read the context after free */
	lwsl_info("%s: ctx %p freed\n", __func__, context);
	lws_free(context);

	if (pcontext_finalize)
		*pcontext_finalize = NULL;
}

/*
 * really start destroying things
 */

void
lws_context_destroy2(struct lws_context *context)
{
#if defined(LWS_WITH_NETWORK)
	struct lws_vhost *vh = NULL, *vh1;
	int n;
#endif
#if defined(LWS_WITH_PEER_LIMITS)
	uint32_t nu;
#endif

	lwsl_info("%s: ctx %p\n", __func__, context);

	lws_context_lock(context, "context destroy 2"); /* ------ context { */

	context->being_destroyed2 = 1;
#if defined(LWS_WITH_NETWORK)

	/*
	 * We are going to trash things like the vhost-protocols, so we must
	 * first finish dealing with any wsi close that might make callbacks
	 * into them
	 */
	for (n = 0; n < context->count_threads; n++) {
		struct lws_context_per_thread *pt = &context->pt[n];

		(void)pt;

#if defined(LWS_WITH_SECURE_STREAMS)
		lws_dll2_foreach_safe(&pt->ss_owner, NULL, lws_ss_destroy_dll);
		if (context->ac_policy)
			lwsac_free(&context->ac_policy);
#endif

#if defined(LWS_WITH_SECURE_STREAMS_PROXY_API)
		lws_dll2_foreach_safe(&pt->ss_client_owner, NULL,
				      lws_sspc_destroy_dll);
#endif

#if defined(LWS_WITH_SEQUENCER)
		lws_seq_destroy_all_on_pt(pt);
#endif
		LWS_FOR_EVERY_AVAILABLE_ROLE_START(ar) {
			if (ar->pt_init_destroy)
				ar->pt_init_destroy(context, NULL, pt, 1);
		} LWS_FOR_EVERY_AVAILABLE_ROLE_END;

#if defined(LWS_WITH_CGI)
		role_ops_cgi.pt_init_destroy(context, NULL, pt, 1);
#endif

		if (context->event_loop_ops->destroy_pt)
			context->event_loop_ops->destroy_pt(context, n);

#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
		while (pt->http.ah_list)
			_lws_destroy_ah(pt, pt->http.ah_list);
#endif
	}

	/*
	 * free all the per-vhost allocations
	 */

	vh = context->vhost_list;
	while (vh) {
		vh1 = vh->vhost_next;
		__lws_vhost_destroy2(vh);
		vh = vh1;
	}

	lwsl_debug("%s: post vh list\n", __func__);

	/* remove ourselves from the pending destruction list */

	while (context->vhost_pending_destruction_list)
		/* removes itself from list */
		__lws_vhost_destroy2(context->vhost_pending_destruction_list);
#endif

	lwsl_debug("%s: post pdl\n", __func__);

	lws_stats_log_dump(context);
#if defined(LWS_WITH_NETWORK)
	lws_ssl_context_destroy(context);
#endif
	lws_plat_context_late_destroy(context);

#if defined(LWS_WITH_PEER_LIMITS)
	for (nu = 0; nu < context->pl_hash_elements; nu++) {
		lws_start_foreach_llp(struct lws_peer **, peer,
				      context->pl_hash_table[nu]) {
			struct lws_peer *df = *peer;

			*peer = df->next;
			lws_free(df);
			continue;
		} lws_end_foreach_llp(peer, next);
	}
	lws_free(context->pl_hash_table);
#endif

	lwsl_debug("%s: baggage\n", __func__);

	if (context->external_baggage_free_on_destroy)
		free(context->external_baggage_free_on_destroy);

#if defined(LWS_WITH_NETWORK)
	/*
	 * a vhost marked being_destroyed is finalized when the last wsi
	 * still bound to it completes close; force a check for any such
	 * deferred vhost destruction now
	 */
	lws_check_deferred_free(context, 0, 1);
#endif

#if LWS_MAX_SMP > 1
	lws_mutex_refcount_destroy(&context->mr);
#endif
#if defined(LWS_WITH_NETWORK)
	if (context->event_loop_ops->destroy_context2)
		if (context->event_loop_ops->destroy_context2(context)) {
			lws_context_unlock(context); /* } context ----------- */
			context->finalize_destroy_after_internal_loops_stopped = 1;

			return;
		}

	lwsl_debug("%s: post dc2\n", __func__);

	if (!context->pt[0].event_loop_foreign) {
		for (n = 0; n < context->count_threads; n++)
			if (context->pt[n].inside_service) {
				lwsl_debug("%s: bailing as inside service\n",
					   __func__);
				lws_context_unlock(context); /* } context --- */

				return;
			}
	}
#endif
	lws_context_unlock(context); /* } context ------------------- */

	lws_context_destroy3(context);
}

#if defined(LWS_WITH_NETWORK)
static void
lws_pt_destroy(struct lws_context_per_thread *pt)
{
	volatile struct lws_foreign_thread_pollfd *ftp, *next;
	volatile struct lws_context_per_thread *vpt;

	assert(!pt->is_destroyed);
	pt->destroy_self = 0;

	/* drain any pollfd change requests queued by foreign threads */
	vpt = (volatile struct lws_context_per_thread *)pt;
	ftp = vpt->foreign_pfd_list;
	while (ftp) {
		next = ftp->next;
		lws_free((void *)ftp);
		ftp = next;
	}
	vpt->foreign_pfd_list = NULL;

	if (pt->pipe_wsi)
		lws_destroy_event_pipe(pt->pipe_wsi);
	pt->pipe_wsi = NULL;

	/* close every wsi still open on this pt */
	while (pt->fds_count) {
		struct lws *wsi = wsi_from_fd(pt->context, pt->fds[0].fd);

		if (!wsi)
			break;

		lws_close_free_wsi(wsi,
				   LWS_CLOSE_STATUS_NOSTATUS_CONTEXT_DESTROY,
				   "ctx destroy"
				   /* no protocol close */);
	}
	lws_pt_mutex_destroy(pt);

	pt->is_destroyed = 1;

	lwsl_info("%s: pt destroyed\n", __func__);
}
#endif

/*
 * Begin the context takedown
 */

void
lws_context_destroy(struct lws_context *context)
{
#if defined(LWS_WITH_NETWORK)
	struct lws_vhost *vh = NULL;
	int m, deferred_pt = 0;
#endif

	if (!context || context->inside_context_destroy)
		return;

	context->inside_context_destroy = 1;

#if defined(LWS_WITH_NETWORK)
	if (context->finalize_destroy_after_internal_loops_stopped) {
		if (context->event_loop_ops->destroy_context2)
			context->event_loop_ops->destroy_context2(context);
		lws_context_destroy3(context);
		/* context is invalid, no need to reset inside flag */

		return;
	}
#endif

	if (context->being_destroyed1) {
		if (!context->being_destroyed2) {
			lws_context_destroy2(context);

			return;
		}
		lwsl_info("%s: ctx %p: already being destroyed\n",
			  __func__, context);

		lws_context_destroy3(context);
		/* context is invalid, no need to reset inside flag */

		return;
	}

	lwsl_info("%s: ctx %p\n", __func__, context);

	context->being_destroyed = 1;

#if defined(LWS_WITH_NETWORK)
	lws_state_transition(&context->mgr_system, LWS_SYSTATE_POLICY_INVALID);
	m = context->count_threads;

	while (m--) {
		struct lws_context_per_thread *pt = &context->pt[m];

		if (pt->is_destroyed)
			continue;

		if (pt->inside_lws_service) {
			pt->destroy_self = 1;
			deferred_pt = 1;
			continue;
		}

		lws_pt_destroy(pt);
	}

	if (deferred_pt) {
		lwsl_info("%s: waiting for deferred pt close\n", __func__);
		lws_cancel_service(context);
		goto out;
	}

	context->being_destroyed1 = 1;
	context->requested_kill = 1;

	/*
	 * inform all the protocols that they are done and will have no more
	 * callbacks.
	 *
	 * We can't free things until after the event loop shuts down.
	 */
	if (context->protocol_init_done)
		vh = context->vhost_list;

	while (vh) {
		struct lws_vhost *vhn = vh->vhost_next;

		lws_vhost_destroy1(vh);
		vh = vhn;
	}
#endif

	lws_plat_context_early_destroy(context);

#if defined(LWS_WITH_NETWORK)

	/*
	 * We face two different needs depending on whether the event loop
	 * is foreign or not.
	 *
	 * 1) If foreign loop, we really want to advance the destroy_context()
	 *    past here, and block only for libuv-style async close completion.
	 *
	 * 2a) If poll, and we exited by ourselves and are calling a final
	 *     destroy_context() outside of any service already, we want to
	 *     advance all the way in one step.
	 *
	 * 2b) If poll, and we are reacting to a SIGINT, service thread(s) may
	 *     be in poll wait or servicing.  We can't advance the
	 *     destroy_context() to the point it's freeing things; we have to
	 *     leave that for the final destroy_context() after the service
	 *     thread(s) are finished calling for service.
	 */

	if (context->event_loop_ops->destroy_context1) {
		context->event_loop_ops->destroy_context1(context);

		goto out;
	}
#endif

#if defined(LWS_PLAT_FREERTOS)
#if defined(LWS_AMAZON_RTOS)
	context->last_free_heap = xPortGetFreeHeapSize();
#else
	context->last_free_heap = esp_get_free_heap_size();
#endif
#endif

	context->inside_context_destroy = 0;
	lws_context_destroy2(context);

	return;

#if defined(LWS_WITH_NETWORK)
out:
	context->inside_context_destroy = 0;
#endif
}
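
/*
 * A minimal sketch, not part of the library, of path 2a) described in
 * lws_context_destroy() above: with the default poll loop, an application
 * that has already returned from lws_service() can take the whole context
 * down with a single lws_context_destroy() call.  The "interrupted" flag
 * is an assumed application exit condition, eg, set from a SIGINT handler.
 */
#if 0
static volatile int interrupted;

int
main(void)
{
	struct lws_context_creation_info info;
	struct lws_context *context;

	memset(&info, 0, sizeof info);
	info.port = CONTEXT_PORT_NO_LISTEN;

	context = lws_create_context(&info);
	if (!context)
		return 1;

	/* no thread is inside lws_service() once we fall out of here */
	while (!interrupted && lws_service(context, 0) >= 0)
		;

	/* advances in one step through destroy2() and destroy3() */
	lws_context_destroy(context);

	return 0;
}
#endif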

struct lws_context *
lws_system_context_from_system_mgr(lws_state_manager_t *mgr)
{
#if defined(LWS_WITH_NETWORK)
	return lws_container_of(mgr, struct lws_context, mgr_system);
#else
	return NULL;
#endif
}
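
/*
 * A hedged sketch, not part of the library, showing why the helper above
 * exists: a system state notifier callback receives only the state
 * manager, and can recover the owning context from it.  The callback
 * name and its registration are illustrative assumptions.
 */
#if 0
static int
app_system_notify_cb(lws_state_manager_t *mgr,
		     lws_state_notify_link_t *link, int current, int target)
{
	struct lws_context *cx = lws_system_context_from_system_mgr(mgr);

	if (target == LWS_SYSTATE_OPERATIONAL)
		lwsl_notice("%s: ctx %p reached OPERATIONAL\n", __func__,
			    (void *)cx);

	return 0; /* returning 0 allows the state transition */
}
#endif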