/*
 * libwebsockets - small server side websockets and web server implementation
 *
 * Copyright (C) 2019 - 2021 Andy Green <andy@warmcat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <private-lib-core.h>

static const struct ss_pcols *ss_pcols[] = {
#if defined(LWS_ROLE_H1)
	&ss_pcol_h1,		/* LWSSSP_H1 */
#else
	NULL,
#endif
#if defined(LWS_ROLE_H2)
	&ss_pcol_h2,		/* LWSSSP_H2 */
#else
	NULL,
#endif
#if defined(LWS_ROLE_WS)
	&ss_pcol_ws,		/* LWSSSP_WS */
#else
	NULL,
#endif
#if defined(LWS_ROLE_MQTT)
	&ss_pcol_mqtt,		/* LWSSSP_MQTT */
#else
	NULL,
#endif
	&ss_pcol_raw,		/* LWSSSP_RAW */
	NULL,
};

static const char *state_names[] = {
	"(unset)",
	"LWSSSCS_CREATING",
	"LWSSSCS_DISCONNECTED",
	"LWSSSCS_UNREACHABLE",
	"LWSSSCS_AUTH_FAILED",
	"LWSSSCS_CONNECTED",
	"LWSSSCS_CONNECTING",
	"LWSSSCS_DESTROYING",
	"LWSSSCS_POLL",
	"LWSSSCS_ALL_RETRIES_FAILED",
	"LWSSSCS_QOS_ACK_REMOTE",
	"LWSSSCS_QOS_NACK_REMOTE",
	"LWSSSCS_QOS_ACK_LOCAL",
	"LWSSSCS_QOS_NACK_LOCAL",
	"LWSSSCS_TIMEOUT",
	"LWSSSCS_SERVER_TXN",
	"LWSSSCS_SERVER_UPGRADE",
};

/*
 * For each "current state", set bit offsets for valid "next states".
 *
 * Since there are complicated ways to arrive at state transitions, like
 * proxying and asynchronous destruction etc, we monitor the state transitions
 * we are giving the ss user code, to ensure we never deliver illegal state
 * transitions (we will assert if we have bugs that do it).
 */

static const uint32_t ss_state_txn_validity[] = {

	/* if we were last in this state... we can legally go to these states */

	[0]				= (1 << LWSSSCS_CREATING) |
					  (1 << LWSSSCS_DESTROYING),

	[LWSSSCS_CREATING]		= (1 << LWSSSCS_CONNECTING) |
					  (1 << LWSSSCS_POLL) |
					  (1 << LWSSSCS_SERVER_UPGRADE) |
					  (1 << LWSSSCS_DESTROYING),

	[LWSSSCS_DISCONNECTED]		= (1 << LWSSSCS_CONNECTING) |
					  (1 << LWSSSCS_TIMEOUT) |
					  (1 << LWSSSCS_POLL) |
					  (1 << LWSSSCS_DESTROYING),

	[LWSSSCS_UNREACHABLE]		= (1 << LWSSSCS_ALL_RETRIES_FAILED) |
					  (1 << LWSSSCS_TIMEOUT) |
					  (1 << LWSSSCS_POLL) |
					  (1 << LWSSSCS_CONNECTING) |
					  /* win conn failure > retry > succ */
					  (1 << LWSSSCS_CONNECTED) |
					  (1 << LWSSSCS_DESTROYING),

	[LWSSSCS_AUTH_FAILED]		= (1 << LWSSSCS_ALL_RETRIES_FAILED) |
					  (1 << LWSSSCS_TIMEOUT) |
					  (1 << LWSSSCS_CONNECTING) |
					  (1 << LWSSSCS_DESTROYING),

	[LWSSSCS_CONNECTED]		= (1 << LWSSSCS_SERVER_UPGRADE) |
					  (1 << LWSSSCS_SERVER_TXN) |
					  (1 << LWSSSCS_AUTH_FAILED) |
					  (1 << LWSSSCS_QOS_ACK_REMOTE) |
					  (1 << LWSSSCS_QOS_NACK_REMOTE) |
					  (1 << LWSSSCS_QOS_ACK_LOCAL) |
					  (1 << LWSSSCS_QOS_NACK_LOCAL) |
					  (1 << LWSSSCS_DISCONNECTED) |
					  (1 << LWSSSCS_TIMEOUT) |
					  (1 << LWSSSCS_POLL) | /* proxy retry */
					  (1 << LWSSSCS_DESTROYING),

	[LWSSSCS_CONNECTING]		= (1 << LWSSSCS_UNREACHABLE) |
					  (1 << LWSSSCS_AUTH_FAILED) |
					  (1 << LWSSSCS_CONNECTING) |
					  (1 << LWSSSCS_CONNECTED) |
					  (1 << LWSSSCS_TIMEOUT) |
					  (1 << LWSSSCS_DISCONNECTED) | /* proxy retry */
					  (1 << LWSSSCS_DESTROYING),

	[LWSSSCS_DESTROYING]		= 0,

	[LWSSSCS_POLL]			= (1 << LWSSSCS_CONNECTING) |
					  (1 << LWSSSCS_TIMEOUT) |
					  (1 << LWSSSCS_DESTROYING),

	[LWSSSCS_ALL_RETRIES_FAILED]	= (1 << LWSSSCS_CONNECTING) |
					  (1 << LWSSSCS_TIMEOUT) |
					  (1 << LWSSSCS_DESTROYING),

	[LWSSSCS_QOS_ACK_REMOTE]	= (1 << LWSSSCS_DISCONNECTED) |
					  (1 << LWSSSCS_TIMEOUT) |
#if defined(LWS_ROLE_MQTT)
					  (1 << LWSSSCS_QOS_ACK_REMOTE) |
#endif
					  (1 << LWSSSCS_DESTROYING),

	[LWSSSCS_QOS_NACK_REMOTE]	= (1 << LWSSSCS_DISCONNECTED) |
					  (1 << LWSSSCS_TIMEOUT) |
					  (1 << LWSSSCS_DESTROYING),

	[LWSSSCS_QOS_ACK_LOCAL]		= (1 << LWSSSCS_DISCONNECTED) |
					  (1 << LWSSSCS_TIMEOUT) |
					  (1 << LWSSSCS_DESTROYING),

	[LWSSSCS_QOS_NACK_LOCAL]	= (1 << LWSSSCS_DESTROYING) |
					  (1 << LWSSSCS_TIMEOUT),

	/* he can get the timeout at any point and take no action... */

	[LWSSSCS_TIMEOUT]		= (1 << LWSSSCS_CONNECTING) |
					  (1 << LWSSSCS_CONNECTED) |
					  (1 << LWSSSCS_QOS_ACK_REMOTE) |
					  (1 << LWSSSCS_QOS_NACK_REMOTE) |
					  (1 << LWSSSCS_POLL) |
					  (1 << LWSSSCS_TIMEOUT) |
					  (1 << LWSSSCS_DISCONNECTED) |
					  (1 << LWSSSCS_UNREACHABLE) |
					  (1 << LWSSSCS_DESTROYING),

	[LWSSSCS_SERVER_TXN]		= (1 << LWSSSCS_DISCONNECTED) |
					  (1 << LWSSSCS_TIMEOUT) |
					  (1 << LWSSSCS_DESTROYING),

	[LWSSSCS_SERVER_UPGRADE]	= (1 << LWSSSCS_SERVER_TXN) |
					  (1 << LWSSSCS_TIMEOUT) |
					  (1 << LWSSSCS_DISCONNECTED) |
					  (1 << LWSSSCS_DESTROYING),
};
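
/*
 * Worked example of how the table is consumed in lws_ss_check_next_state()
 * below: if the last state was LWSSSCS_CREATING, a transition to
 * LWSSSCS_CONNECTING is legal because ss_state_txn_validity[LWSSSCS_CREATING]
 * has the bit (1 << LWSSSCS_CONNECTING) set; a direct CREATING -> CONNECTED
 * transition would fail the bitmask test and trip the assert.
 */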

#if defined(LWS_WITH_CONMON)

/*
 * Convert any conmon data to JSON and attach to the ss handle.
 */
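
/*
 * The emitted JSON looks like, e.g. (illustrative values, with the
 * txn_resp_us key quoting corrected as in the format string below):
 *
 * {"peer":"46.105.127.147","dns_us":596,"sockconn_us":31382,"tls_us":28180,
 *  "txn_resp_us":23015,"dns":["2001:41d0:2:ee93::1","46.105.127.147"]}
 */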

lws_ss_state_return_t
lws_conmon_ss_json(lws_ss_handle_t *h)
{
	char ads[48], *end, *buf, *obuf;
	const struct addrinfo *ai;
	lws_ss_state_return_t ret = LWSSSSRET_OK;
	struct lws_conmon cm;
	size_t len = 500;

	if (!h->policy || !(h->policy->flags & LWSSSPOLF_PERF) || !h->wsi ||
	    h->wsi->perf_done)
		return LWSSSSRET_OK;

	if (h->conmon_json)
		lws_free_set_NULL(h->conmon_json);

	h->conmon_json = lws_malloc(len, __func__);
	if (!h->conmon_json)
		return LWSSSSRET_OK;

	obuf = buf = h->conmon_json;
	end = buf + len - 1;

	lws_conmon_wsi_take(h->wsi, &cm);

	lws_sa46_write_numeric_address(&cm.peer46, ads, sizeof(ads));
	buf += lws_snprintf(buf, lws_ptr_diff_size_t(end, buf),
			    "{\"peer\":\"%s\","
			    "\"dns_us\":%u,"
			    "\"sockconn_us\":%u,"
			    "\"tls_us\":%u,"
			    "\"txn_resp_us\":%u,"
			    "\"dns\":[",
			    ads,
			    (unsigned int)cm.ciu_dns,
			    (unsigned int)cm.ciu_sockconn,
			    (unsigned int)cm.ciu_tls,
			    (unsigned int)cm.ciu_txn_resp);

	ai = cm.dns_results_copy;
	while (ai) {
		lws_sa46_write_numeric_address((lws_sockaddr46 *)ai->ai_addr,
					       ads, sizeof(ads));
		buf += lws_snprintf(buf, lws_ptr_diff_size_t(end, buf),
				    "\"%s\"", ads);
		if (ai->ai_next && buf < end - 2)
			*buf++ = ',';
		ai = ai->ai_next;
	}

	buf += lws_snprintf(buf, lws_ptr_diff_size_t(end, buf), "]}");

	/*
	 * This destroys the DNS list in the lws_conmon that we took
	 * responsibility for when we used lws_conmon_wsi_take()
	 */

	lws_conmon_release(&cm);

	h->conmon_len = (uint16_t)lws_ptr_diff(buf, obuf);

#if defined(LWS_WITH_SECURE_STREAMS_PROXY_API)
	if (h->proxy_onward) {

		/*
		 * ask to forward it on the proxy link
		 */

		ss_proxy_onward_link_req_writeable(h);
		return LWSSSSRET_OK;
	}
#endif

	/*
	 * We can deliver it directly
	 */

	if (h->info.rx)
		ret = h->info.rx(ss_to_userobj(h), (uint8_t *)h->conmon_json,
				 (unsigned int)h->conmon_len,
				 (int)(LWSSS_FLAG_SOM | LWSSS_FLAG_EOM |
				       LWSSS_FLAG_PERF_JSON));

	lws_free_set_NULL(h->conmon_json);

	return ret;
}
#endif

int
lws_ss_check_next_state(lws_lifecycle_t *lc, uint8_t *prevstate,
			lws_ss_constate_t cs)
{
	if (cs >= LWSSSCS_USER_BASE)
		/*
		 * we can't judge user states, leave the old state and
		 * just wave them through
		 */
		return 0;

	if (cs >= LWS_ARRAY_SIZE(ss_state_txn_validity)) {
		/* we don't recognize this state as usable */
		lwsl_err("%s: %s: bad new state %u\n", __func__, lc->gutag, cs);
		assert(0);
		return 1;
	}

	if (*prevstate >= LWS_ARRAY_SIZE(ss_state_txn_validity)) {
		/* existing state is broken */
		lwsl_err("%s: %s: bad existing state %u\n", __func__,
			 lc->gutag, (unsigned int)*prevstate);
		assert(0);
		return 1;
	}

	if (ss_state_txn_validity[*prevstate] & (1u << cs)) {

		lwsl_notice("%s: %s: %s -> %s\n", __func__, lc->gutag,
			    lws_ss_state_name((int)*prevstate),
			    lws_ss_state_name((int)cs));

		/* this is explicitly allowed, update old state to new */
		*prevstate = (uint8_t)cs;

		return 0;
	}

	lwsl_err("%s: %s: transition from %s -> %s is illegal\n", __func__,
		 lc->gutag, lws_ss_state_name((int)*prevstate),
		 lws_ss_state_name((int)cs));

	assert(0);

	return 1;
}

const char *
lws_ss_state_name(int state)
{
	if (state >= LWSSSCS_USER_BASE)
		return "user state";

	if (state >= (int)LWS_ARRAY_SIZE(state_names))
		return "unknown";

	return state_names[state];
}

lws_ss_state_return_t
lws_ss_event_helper(lws_ss_handle_t *h, lws_ss_constate_t cs)
{
	lws_ss_state_return_t r;

	if (!h)
		return LWSSSSRET_OK;

	if (lws_ss_check_next_state(&h->lc, &h->prev_ss_state, cs))
		return LWSSSSRET_DESTROY_ME;

	if (cs == LWSSSCS_CONNECTED)
		h->ss_dangling_connected = 1;
	if (cs == LWSSSCS_DISCONNECTED)
		h->ss_dangling_connected = 0;

#if defined(LWS_WITH_SEQUENCER)
	/*
	 * A parent sequencer for the ss is optional, if we have one, keep it
	 * informed of state changes on the ss connection
	 */
	if (h->seq && cs != LWSSSCS_DESTROYING)
		lws_seq_queue_event(h->seq, LWSSEQ_SS_STATE_BASE + cs,
				    (void *)h, NULL);
#endif

	if (h->info.state) {
		r = h->info.state(ss_to_userobj(h), NULL, cs,
				  cs == LWSSSCS_UNREACHABLE &&
				  h->wsi && h->wsi->dns_reachability);
#if defined(LWS_WITH_SERVER)
		if ((h->info.flags & LWSSSINFLAGS_ACCEPTED) &&
		    cs == LWSSSCS_DISCONNECTED)
			r = LWSSSSRET_DESTROY_ME;
#endif
		return r;
	}

	return LWSSSSRET_OK;
}

int
_lws_ss_handle_state_ret_CAN_DESTROY_HANDLE(lws_ss_state_return_t r,
					    struct lws *wsi,
					    lws_ss_handle_t **ph)
{
	if (r == LWSSSSRET_DESTROY_ME) {
		lwsl_info("%s: DESTROY ME: %s, %s\n", __func__,
			  lws_wsi_tag(wsi), lws_ss_tag(*ph));
		if (wsi) {
			lws_set_opaque_user_data(wsi, NULL);
			lws_set_timeout(wsi, 1, LWS_TO_KILL_ASYNC);
		} else {
			if ((*ph)->wsi) {
				lws_set_opaque_user_data((*ph)->wsi, NULL);
				lws_set_timeout((*ph)->wsi, 1,
						LWS_TO_KILL_ASYNC);
			}
		}
		(*ph)->wsi = NULL;
		lws_ss_destroy(ph);
	}

	return -1; /* close connection */
}

static void
lws_ss_timeout_sul_check_cb(lws_sorted_usec_list_t *sul)
{
	lws_ss_state_return_t r;
	lws_ss_handle_t *h = lws_container_of(sul, lws_ss_handle_t, sul);

	lwsl_info("%s: retrying %s after backoff\n", __func__, lws_ss_tag(h));
	/* we want to retry... */
	h->seqstate = SSSEQ_DO_RETRY;

	r = _lws_ss_request_tx(h);
	_lws_ss_handle_state_ret_CAN_DESTROY_HANDLE(r, NULL, &h);
}

int
lws_ss_exp_cb_metadata(void *priv, const char *name, char *out, size_t *pos,
		       size_t olen, size_t *exp_ofs)
{
	lws_ss_handle_t *h = (lws_ss_handle_t *)priv;
	const char *replace = NULL;
	size_t total, budget;
	lws_ss_metadata_t *md = lws_ss_policy_metadata(h->policy, name),
			  *hmd = lws_ss_get_handle_metadata(h, name);

	if (!md) {
		lwsl_err("%s: Unknown metadata %s\n", __func__, name);

		return LSTRX_FATAL_NAME_UNKNOWN;
	}

	if (!hmd)
		return LSTRX_FILLED_OUT;

	replace = hmd->value__may_own_heap;

	if (!replace)
		return LSTRX_DONE;

	total = hmd->length;

	budget = olen - *pos;
	total -= *exp_ofs;
	if (total < budget)
		budget = total;

	if (out)
		memcpy(out + *pos, replace + (*exp_ofs), budget);
	*exp_ofs += budget;
	*pos += budget;

	if (budget == total)
		return LSTRX_DONE;

	return LSTRX_FILLED_OUT;
}
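
/*
 * Note on the expansion callback above: lws_strexp may call it repeatedly
 * for one ${name} when the output budget (olen - *pos) is smaller than the
 * metadata value; returning LSTRX_FILLED_OUT asks to be called again with
 * *exp_ofs preserved, while LSTRX_DONE means the value was emitted
 * completely.
 */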

int
lws_ss_set_timeout_us(lws_ss_handle_t *h, lws_usec_t us)
{
	struct lws_context_per_thread *pt = &h->context->pt[h->tsi];

	h->sul.cb = lws_ss_timeout_sul_check_cb;
	__lws_sul_insert_us(&pt->pt_sul_owner[
		!!(h->policy->flags & LWSSSPOLF_WAKE_SUSPEND__VALIDITY)],
		&h->sul, us);

	return 0;
}
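
/*
 * Hypothetical backoff flow for illustration: with a policy retry table of,
 * say, { 1000, 2000, 4000, 8000 } ms and a conceal count of 4, successive
 * calls here schedule reconnects after 1s, 2s, 4s and 8s; on the next
 * failure lws_retry_get_delay_ms() clears "conceal" and we stop retrying,
 * issuing LWSSSCS_ALL_RETRIES_FAILED instead.
 */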
lws_ss_state_return_t
_lws_ss_backoff(lws_ss_handle_t *h, lws_usec_t us_override)
{
	uint64_t ms;
	char conceal;

	if (h->seqstate == SSSEQ_RECONNECT_WAIT)
		return LWSSSSRET_OK;

	/* figure out what we should do about another retry */

	lwsl_info("%s: %s: retry backoff after failure\n", __func__,
		  lws_ss_tag(h));
	ms = lws_retry_get_delay_ms(h->context, h->policy->retry_bo,
				    &h->retry, &conceal);
	if (!conceal) {
		lwsl_info("%s: %s: abandon conn attempt\n", __func__,
			  lws_ss_tag(h));

		if (h->seqstate == SSSEQ_IDLE) /* been here? */
			return LWSSSSRET_OK;

		h->seqstate = SSSEQ_IDLE;

		return lws_ss_event_helper(h, LWSSSCS_ALL_RETRIES_FAILED);
	}

	/* Only increase our planned backoff, or go with it */

	if (us_override < (lws_usec_t)ms * LWS_US_PER_MS)
		us_override = (lws_usec_t)(ms * LWS_US_PER_MS);

	h->seqstate = SSSEQ_RECONNECT_WAIT;
	lws_ss_set_timeout_us(h, us_override);

	lwsl_info("%s: %s: retry wait %dms\n", __func__, lws_ss_tag(h),
		  (int)(us_override / 1000));

	return LWSSSSRET_OK;
}

lws_ss_state_return_t
lws_ss_backoff(lws_ss_handle_t *h)
{
	return _lws_ss_backoff(h, 0);
}

#if defined(LWS_WITH_SYS_SMD)

/*
 * Local SMD <-> SS
 *
 * We pass received messages through to the SS handler synchronously, using the
 * lws service thread context.
 *
 * After the SS is created and registered, still nothing is going to come here
 * until the peer sends us his rx_class_mask and we update his registration with
 * it, because from SS creation his rx_class_mask defaults to 0.
 */

static int
lws_smd_ss_cb(void *opaque, lws_smd_class_t _class,
	      lws_usec_t timestamp, void *buf, size_t len)
{
	lws_ss_handle_t *h = (lws_ss_handle_t *)opaque;
	uint8_t *p = (uint8_t *)buf - LWS_SMD_SS_RX_HEADER_LEN;

	/*
	 * When configured with SS enabled, lws over-allocates
	 * LWS_SMD_SS_RX_HEADER_LEN bytes behind the payload of the queued
	 * message, for prepending serialized class and timestamp data in-band
	 * with the payload.
	 */
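
	/*
	 * Illustrative layout of the in-band header written below (assuming
	 * LWS_SMD_SS_RX_HEADER_LEN is 16):
	 *
	 *   p[0..7]    class     (u64, big-endian)
	 *   p[8..15]   timestamp (u64, big-endian)
	 *   p[16...]   original message payload (len bytes)
	 */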

	lws_ser_wu64be(p, _class);
	lws_ser_wu64be(p + 8, (uint64_t)timestamp);

	if (h->info.rx)
		h->info.rx((void *)&h[1], p, len + LWS_SMD_SS_RX_HEADER_LEN,
			   LWSSS_FLAG_SOM | LWSSS_FLAG_EOM);

	return 0;
}

static void
lws_ss_smd_tx_cb(lws_sorted_usec_list_t *sul)
{
	lws_ss_handle_t *h = lws_container_of(sul, lws_ss_handle_t,
					      u.smd.sul_write);
	uint8_t buf[LWS_SMD_SS_RX_HEADER_LEN + LWS_SMD_MAX_PAYLOAD], *p;
	size_t len = sizeof(buf);
	lws_smd_class_t _class;
	int flags = 0, n;

	if (!h->info.tx)
		return;

	n = h->info.tx(&h[1], h->txord++, buf, &len, &flags);
	if (n)
		/* nonzero return means don't want to send anything */
		return;

	// lwsl_notice("%s: (SS %p bound to _lws_smd creates message) tx len %d\n", __func__, h, (int)len);
	// lwsl_hexdump_notice(buf, len);

	assert(len >= LWS_SMD_SS_RX_HEADER_LEN);
	_class = (lws_smd_class_t)lws_ser_ru64be(buf);
	p = lws_smd_msg_alloc(h->context, _class,
			      len - LWS_SMD_SS_RX_HEADER_LEN);
	if (!p) {
		// this can be rejected if nobody listening for this class
		//lwsl_notice("%s: failed to alloc\n", __func__);
		return;
	}

	memcpy(p, buf + LWS_SMD_SS_RX_HEADER_LEN,
	       len - LWS_SMD_SS_RX_HEADER_LEN);
	if (lws_smd_msg_send(h->context, p)) {
		lwsl_notice("%s: failed to queue\n", __func__);
		return;
	}
}

#endif

lws_ss_state_return_t
_lws_ss_client_connect(lws_ss_handle_t *h, int is_retry, void *conn_if_sspc_onw)
{
	const char *prot, *_prot, *ipath, *_ipath, *ads, *_ads;
	struct lws_client_connect_info i;
	const struct ss_pcols *ssp;
	size_t used_in, used_out;
	union lws_ss_contemp ct;
	lws_ss_state_return_t r;
	int port, _port, tls;
	char *path, ep[96];
	lws_strexp_t exp;
	struct lws *wsi;

	if (!h->policy) {
		lwsl_err("%s: ss with no policy\n", __func__);

		return LWSSSSRET_OK;
	}

	/*
	 * We are already bound to a sink?
	 */

//	if (h->h_sink)
//		return 0;

	if (!is_retry)
		h->retry = 0;

#if defined(LWS_WITH_SYS_SMD)
	if (h->policy == &pol_smd) {

		if (h->u.smd.smd_peer)
			return LWSSSSRET_OK;

		// lwsl_notice("%s: received connect for _lws_smd, registering for class mask 0x%x\n",
		//	       __func__, h->info.manual_initial_tx_credit);

		h->u.smd.smd_peer = lws_smd_register(h->context, h,
				(h->info.flags & LWSSSINFLAGS_PROXIED) ?
					LWSSMDREG_FLAG_PROXIED_SS : 0,
				(lws_smd_class_t)h->info.manual_initial_tx_credit,
				lws_smd_ss_cb);
		if (!h->u.smd.smd_peer)
			return LWSSSSRET_TX_DONT_SEND;

		if (lws_ss_event_helper(h, LWSSSCS_CONNECTING))
			return LWSSSSRET_TX_DONT_SEND;

		if (lws_ss_event_helper(h, LWSSSCS_CONNECTED))
			return LWSSSSRET_TX_DONT_SEND;

		return LWSSSSRET_OK;
	}
#endif

	/*
	 * We're going to substitute ${metadata} in the endpoint at connection-
	 * time, so this can be set dynamically...
	 */
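
	/*
	 * E.g., with a hypothetical policy endpoint of "${region}.example.com"
	 * and stream metadata "region" set to "eu", ep[] below ends up
	 * holding "eu.example.com" before uri parsing.
	 */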

	lws_strexp_init(&exp, (void *)h, lws_ss_exp_cb_metadata, ep, sizeof(ep));

	if (lws_strexp_expand(&exp, h->policy->endpoint,
			      strlen(h->policy->endpoint),
			      &used_in, &used_out) != LSTRX_DONE) {
		lwsl_err("%s: address strexp failed\n", __func__);

		return LWSSSSRET_TX_DONT_SEND;
	}

	/*
	 * ... in some cases, we might want the user to be able to override
	 * some policy settings by what he provided in there.  For example,
	 * if he set the endpoint to "https://myendpoint.com:4443/mypath", it
	 * might be quite convenient to override the policy to follow the info
	 * that was given for at least server, port and the url path.
	 */

	_port = port = h->policy->port;
	_prot = prot = NULL;
	_ipath = ipath = "";
	_ads = ads = ep;

	if (strchr(ep, ':') &&
	    !lws_parse_uri(ep, &_prot, &_ads, &_port, &_ipath)) {
		lwsl_debug("%s: using uri parse results '%s' '%s' %d '%s'\n",
			   __func__, _prot, _ads, _port, _ipath);
		prot = _prot;
		ads = _ads;
		port = _port;
		ipath = _ipath;
	}

	memset(&i, 0, sizeof i); /* otherwise uninitialized garbage */
	i.context = h->context;
	tls = !!(h->policy->flags & LWSSSPOLF_TLS);

	if (prot && (!strcmp(prot, "http") || !strcmp(prot, "ws") ||
		     !strcmp(prot, "mqtt")))
		tls = 0;

	if (tls) {
		lwsl_info("%s: using tls\n", __func__);
		i.ssl_connection = LCCSCF_USE_SSL;

		if (!h->policy->trust.store)
			lwsl_info("%s: using platform trust store\n", __func__);
		else {

			i.vhost = lws_get_vhost_by_name(h->context,
					h->policy->trust.store->name);
			if (!i.vhost) {
				lwsl_err("%s: missing vh for policy %s\n",
					 __func__,
					 h->policy->trust.store->name);

				return -1;
			}
		}
	}

	if (h->policy->flags & LWSSSPOLF_WAKE_SUSPEND__VALIDITY)
		i.ssl_connection |= LCCSCF_WAKE_SUSPEND__VALIDITY;

	/* translate policy attributes to IP ToS flags */

	if (h->policy->flags & LWSSSPOLF_ATTR_LOW_LATENCY)
		i.ssl_connection |= LCCSCF_IP_LOW_LATENCY;
	if (h->policy->flags & LWSSSPOLF_ATTR_HIGH_THROUGHPUT)
		i.ssl_connection |= LCCSCF_IP_HIGH_THROUGHPUT;
	if (h->policy->flags & LWSSSPOLF_ATTR_HIGH_RELIABILITY)
		i.ssl_connection |= LCCSCF_IP_HIGH_RELIABILITY;
	if (h->policy->flags & LWSSSPOLF_ATTR_LOW_COST)
		i.ssl_connection |= LCCSCF_IP_LOW_COST;
	if (h->policy->flags & LWSSSPOLF_PERF) /* collect conmon stats on this */
		i.ssl_connection |= LCCSCF_CONMON;

	/* mark the connection with the streamtype priority from the policy */

	i.priority = h->policy->priority;

	i.ssl_connection |= LCCSCF_SECSTREAM_CLIENT;

	if (conn_if_sspc_onw) {
		i.ssl_connection |= LCCSCF_SECSTREAM_PROXY_ONWARD;
		h->conn_if_sspc_onw = conn_if_sspc_onw;
	}

	i.address = ads;
	i.port = port;
	i.host = i.address;
	i.origin = i.address;
	i.opaque_user_data = h;
	i.seq = h->seq;
	i.retry_and_idle_policy = h->policy->retry_bo;
	i.sys_tls_client_cert = h->policy->client_cert;

	i.path = ipath;
		/* if this is not "", munge should use it instead of policy
		 * url path
		 */

	ssp = ss_pcols[(int)h->policy->protocol];
	if (!ssp) {
		lwsl_err("%s: unsupported protocol\n", __func__);

		return LWSSSSRET_TX_DONT_SEND;
	}
	i.alpn = ssp->alpn;

	/*
	 * For http, we can get the method from the http object, override in
	 * the protocol-specific munge callback below if not http
	 */
	i.method = h->policy->u.http.method;
	i.protocol = ssp->protocol->name; /* lws protocol name */
	i.local_protocol_name = i.protocol;

	path = lws_malloc(h->context->max_http_header_data, __func__);
	if (!path) {
		lwsl_warn("%s: OOM on path prealloc\n", __func__);
		return LWSSSSRET_TX_DONT_SEND;
	}

	if (ssp->munge) /* eg, raw doesn't use; endpoint strexp already done */
		ssp->munge(h, path, h->context->max_http_header_data, &i, &ct);

	i.pwsi = &h->wsi;

#if defined(LWS_WITH_SSPLUGINS)
	if (h->policy->plugins[0] && h->policy->plugins[0]->munge)
		h->policy->plugins[0]->munge(h, path,
					     h->context->max_http_header_data);
#endif

	lwsl_info("%s: connecting %s, '%s' '%s' %s\n", __func__, i.method,
		  i.alpn, i.address, i.path);

#if defined(LWS_WITH_SYS_METRICS)
	/* possibly already hanging connect retry... */
	if (!h->cal_txn.mt)
		lws_metrics_caliper_bind(h->cal_txn, h->context->mth_ss_conn);

	lws_metrics_tag_add(&h->cal_txn.mtags_owner, "ss",
			    h->policy->streamtype);
#endif

	h->txn_ok = 0;
	r = lws_ss_event_helper(h, LWSSSCS_CONNECTING);
	if (r) {
		lws_free(path);
		return r;
	}

	h->inside_connect = 1;
	h->pending_ret = LWSSSSRET_OK;
	wsi = lws_client_connect_via_info(&i);
	h->inside_connect = 0;
	lws_free(path);
	if (!wsi) {
		/*
		 * We already found that we could not connect, without even
		 * having to go around the event loop
		 */

		if (h->pending_ret)
			return h->pending_ret;

		if (h->prev_ss_state != LWSSSCS_UNREACHABLE &&
		    h->prev_ss_state != LWSSSCS_ALL_RETRIES_FAILED) {
			/*
			 * blocking DNS failure can get to unreachable via
			 * CCE, and unreachable can get to ALL_RETRIES_FAILED
			 */
			r = lws_ss_event_helper(h, LWSSSCS_UNREACHABLE);
			if (r)
				return r;

			r = lws_ss_backoff(h);
			if (r)
				return r;
		}

		return LWSSSSRET_TX_DONT_SEND;
	}

	return LWSSSSRET_OK;
}

lws_ss_state_return_t
lws_ss_client_connect(lws_ss_handle_t *h)
{
	lws_ss_state_return_t r;

	r = _lws_ss_client_connect(h, 0, 0);
	_lws_ss_handle_state_ret_CAN_DESTROY_HANDLE(r, h->wsi, &h);
	return r;
}

/*
 * Public API
 */

/*
 * Create either a stream or a sink
 */

int
lws_ss_create(struct lws_context *context, int tsi, const lws_ss_info_t *ssi,
	      void *opaque_user_data, lws_ss_handle_t **ppss,
	      struct lws_sequencer *seq_owner, const char **ppayload_fmt)
{
	struct lws_context_per_thread *pt = &context->pt[tsi];
	const lws_ss_policy_t *pol;
	lws_ss_state_return_t r;
	lws_ss_metadata_t *smd;
	lws_ss_handle_t *h;
	size_t size;
	void **v;
	char *p;
	int n;

#if defined(LWS_WITH_SECURE_STREAMS_CPP)
	pol = ssi->policy;
	if (!pol) {
#endif

#if defined(LWS_WITH_SYS_FAULT_INJECTION)
	lws_fi_ctx_t temp_fic;

	/*
	 * We have to do a temp inherit from context to find out
	 * early if we are supposed to inject a fault concealing
	 * the policy
	 */

	memset(&temp_fic, 0, sizeof(temp_fic));
	lws_xos_init(&temp_fic.xos, lws_xos(&context->fic.xos));
	lws_fi_inherit_copy(&temp_fic, &context->fic, "ss", ssi->streamtype);

	if (lws_fi(&temp_fic, "ss_no_streamtype_policy"))
		pol = NULL;
	else
		pol = lws_ss_policy_lookup(context, ssi->streamtype);

	lws_fi_destroy(&temp_fic);
#else
	pol = lws_ss_policy_lookup(context, ssi->streamtype);
#endif
	if (!pol) {
		lwsl_info("%s: unknown stream type %s\n", __func__,
			  ssi->streamtype);
		return 1;
	}
#if defined(LWS_WITH_SECURE_STREAMS_CPP)
	}
#endif

#if 0
	if (ssi->flags & LWSSSINFLAGS_REGISTER_SINK) {
		/*
		 * This can register a secure streams sink as well as normal
		 * secure streams connections.  If that's what's happening,
		 * confirm the policy agrees that this streamtype should be
		 * directed to a sink.
		 */
		if (!(pol->flags & LWSSSPOLF_LOCAL_SINK)) {
			/*
			 * Caller wanted to create a sink for this streamtype,
			 * but the policy does not agree the streamtype should
			 * be routed to a local sink.
			 */
			lwsl_err("%s: %s policy does not allow local sink\n",
				 __func__, ssi->streamtype);

			return 1;
		}
	} else {

		if (!(pol->flags & LWSSSPOLF_LOCAL_SINK)) {

		}
//		lws_dll2_foreach_safe(&pt->ss_owner, NULL, lws_ss_destroy_dll);
	}
#endif

	/*
	 * We overallocate and point to things in the overallocation...
	 *
	 * 1) the user_alloc from the stream info
	 * 2) network auth plugin instantiation data
	 * 3) stream auth plugin instantiation data
	 * 4) as many metadata pointer structs as the policy tells
	 * 5) the streamtype name (length is not aligned)
	 *
	 * ... when we come to destroy it, just one free to do.
	 */
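
	/*
	 * Illustrative layout of the single allocation (sizes depend on the
	 * policy and caller):
	 *
	 *   [ lws_ss_handle_t ]
	 *   [ ssi->user_alloc bytes (user object) ]
	 *   [ plugin instantiation data, if any ]
	 *   [ pol->metadata_count x lws_ss_metadata_t ]
	 *   [ streamtype name, NUL-terminated ]
	 */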
|
|
|
|
|
2020-09-28 10:13:39 +01:00
|
|
|
size = sizeof(*h) + ssi->user_alloc +
|
|
|
|
(ssi->streamtype ? strlen(ssi->streamtype): 0) + 1;
|
2020-07-27 10:03:12 +01:00
|
|
|
#if defined(LWS_WITH_SSPLUGINS)
|
2020-02-29 12:37:24 +00:00
|
|
|
if (pol->plugins[0])
|
|
|
|
size += pol->plugins[0]->alloc;
|
|
|
|
if (pol->plugins[1])
|
|
|
|
size += pol->plugins[1]->alloc;
|
2020-07-27 10:03:12 +01:00
|
|
|
#endif
|
2020-02-29 12:37:24 +00:00
|
|
|
size += pol->metadata_count * sizeof(lws_ss_metadata_t);
|
|
|
|
|
|
|
|
h = lws_zalloc(size, __func__);
|
|
|
|
if (!h)
|
|
|
|
return 2;
|
|
|
|
|
2020-12-31 14:07:13 +00:00
|
|
|
if (ssi->sss_protocol_version)
|
2020-12-31 14:56:43 +00:00
|
|
|
__lws_lc_tag(&context->lcg[LWSLCG_WSI_SS_CLIENT], &h->lc, "%s|v%u|%u",
|
2020-12-31 14:07:13 +00:00
|
|
|
ssi->streamtype ? ssi->streamtype : "nostreamtype",
|
2020-12-31 14:56:43 +00:00
|
|
|
(unsigned int)ssi->sss_protocol_version,
|
|
|
|
(unsigned int)ssi->client_pid);
|
2020-12-31 14:07:13 +00:00
|
|
|
else
|
|
|
|
__lws_lc_tag(&context->lcg[LWSLCG_WSI_SS_CLIENT], &h->lc, "%s",
|
|
|
|
ssi->streamtype ? ssi->streamtype : "nostreamtype");
|
2020-12-25 05:54:19 +00:00
|
|
|
|
2021-02-17 10:31:22 +00:00
|
|
|
#if defined(LWS_WITH_SYS_FAULT_INJECTION)
|
2021-03-16 13:32:05 +00:00
|
|
|
h->fic.name = "ss";
|
|
|
|
lws_xos_init(&h->fic.xos, lws_xos(&context->fic.xos));
|
|
|
|
if (ssi->fic.fi_owner.count)
|
|
|
|
lws_fi_import(&h->fic, &ssi->fic);
|
|
|
|
|
|
|
|
lws_fi_inherit_copy(&h->fic, &context->fic, "ss", ssi->streamtype);
|
2021-02-17 10:31:22 +00:00
|
|
|
#endif
|
|
|
|
|
2020-02-29 12:37:24 +00:00
|
|
|
h->info = *ssi;
|
|
|
|
h->policy = pol;
|
|
|
|
h->context = context;
|
2020-12-12 06:21:40 +00:00
|
|
|
h->tsi = (uint8_t)tsi;
|
2020-02-29 12:37:24 +00:00
|
|
|
h->seq = seq_owner;
|
|
|
|
|
2020-12-27 19:34:30 +00:00
|
|
|
if (h->info.flags & LWSSSINFLAGS_PROXIED)
|
|
|
|
h->proxy_onward = 1;
|
|
|
|
|
2020-02-29 12:37:24 +00:00
|
|
|
/* start of overallocated area */
|
|
|
|
p = (char *)&h[1];
|
|
|
|
|
|
|
|
/* set the handle pointer in the user data struct */
|
|
|
|
v = (void **)(p + ssi->handle_offset);
|
|
|
|
*v = h;
|
|
|
|
|
|
|
|
/* set the opaque user data in the user data struct */
|
|
|
|
v = (void **)(p + ssi->opaque_user_data_offset);
|
|
|
|
*v = opaque_user_data;
|
|
|
|
|
|
|
|
p += ssi->user_alloc;
|
|
|
|
|
2020-07-27 10:03:12 +01:00
|
|
|
#if defined(LWS_WITH_SSPLUGINS)
|
2020-02-29 12:37:24 +00:00
|
|
|
if (pol->plugins[0]) {
|
|
|
|
h->nauthi = p;
|
|
|
|
p += pol->plugins[0]->alloc;
|
|
|
|
}
|
|
|
|
if (pol->plugins[1]) {
|
|
|
|
h->sauthi = p;
|
|
|
|
p += pol->plugins[1]->alloc;
|
|
|
|
}
|
2020-07-27 10:03:12 +01:00
|
|
|
#endif
|
2020-02-29 12:37:24 +00:00
|
|
|
|
|
|
|
if (pol->metadata_count) {
|
|
|
|
h->metadata = (lws_ss_metadata_t *)p;
|
|
|
|
p += pol->metadata_count * sizeof(lws_ss_metadata_t);
|
|
|
|
|
|
|
|
lwsl_info("%s: %s metadata count %d\n", __func__,
|
|
|
|
pol->streamtype, pol->metadata_count);
|
|
|
|
}
|
|
|
|
|
|
|
|
smd = pol->metadata;
|
|
|
|
for (n = 0; n < pol->metadata_count; n++) {
|
|
|
|
h->metadata[n].name = smd->name;
|
|
|
|
if (n + 1 == pol->metadata_count)
|
|
|
|
h->metadata[n].next = NULL;
|
|
|
|
else
|
|
|
|
h->metadata[n].next = &h->metadata[n + 1];
|
|
|
|
smd = smd->next;
|
|
|
|
}
|
|
|
|
|
2020-09-28 10:13:39 +01:00
|
|
|
if (ssi->streamtype)
|
|
|
|
memcpy(p, ssi->streamtype, strlen(ssi->streamtype) + 1);
|
2020-07-27 10:03:12 +01:00
|
|
|
/* don't mark accepted ss as being the server */
|
|
|
|
if (ssi->flags & LWSSSINFLAGS_SERVER)
|
2020-12-12 06:21:40 +00:00
|
|
|
h->info.flags &= (uint8_t)~LWSSSINFLAGS_SERVER;
|
2020-02-29 12:37:24 +00:00
|
|
|
h->info.streamtype = p;
|
|
|
|
|
|
|
|
lws_pt_lock(pt, __func__);
|
|
|
|
lws_dll2_add_head(&h->list, &pt->ss_owner);
|
|
|
|
lws_pt_unlock(pt);
|
|
|
|
|
|
|
|
if (ppss)
|
|
|
|
*ppss = h;
|
|
|
|
|
|
|
|
if (ppayload_fmt)
|
|
|
|
*ppayload_fmt = pol->payload_fmt;
|
|
|
|
|
2020-07-27 10:03:12 +01:00
|
|
|
if (ssi->flags & LWSSSINFLAGS_SERVER)
|
|
|
|
/*
|
|
|
|
* return early for accepted connection flow
|
|
|
|
*/
|
|
|
|
return 0;
|
|
|
|
|
2020-06-24 20:15:46 +01:00
|
|
|
#if defined(LWS_WITH_SYS_SMD)
|
|
|
|
/*
|
|
|
|
* For a local Secure Streams connection
|
|
|
|
*/
|
|
|
|
if (!(ssi->flags & LWSSSINFLAGS_PROXIED) &&
|
|
|
|
pol == &pol_smd) {
|
2021-01-02 10:49:43 +00:00
|
|
|
|
2020-06-24 20:15:46 +01:00
|
|
|
/*
|
|
|
|
* So he has asked to be wired up to SMD over a SS link.
|
|
|
|
* Register him as an smd participant in his own right.
|
|
|
|
*
|
|
|
|
* Just for this case, ssi->manual_initial_tx_credit is used
|
|
|
|
* to set the rx class mask (this is part of the SS serialization
|
|
|
|
* format as well)
|
|
|
|
*/
|
|
|
|
h->u.smd.smd_peer = lws_smd_register(context, h, 0,
|
2020-12-12 06:21:40 +00:00
|
|
|
(lws_smd_class_t)ssi->manual_initial_tx_credit,
|
2020-06-24 20:15:46 +01:00
|
|
|
lws_smd_ss_cb);
|
|
|
|
if (!h->u.smd.smd_peer)
|
2021-08-12 06:32:34 +01:00
|
|
|
goto fail_creation;
|
|
|
|
|
2020-06-24 20:15:46 +01:00
|
|
|
lwsl_info("%s: registered SS SMD\n", __func__);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2020-07-27 10:03:12 +01:00
|
|
|
#if defined(LWS_WITH_SERVER)
|
|
|
|
if (h->policy->flags & LWSSSPOLF_SERVER) {
|
|
|
|
const struct lws_protocols *pprot[3], **ppp = &pprot[0];
|
|
|
|
struct lws_context_creation_info i;
|
2021-04-03 19:29:02 +01:00
|
|
|
struct lws_vhost *vho = NULL;
|
2020-07-27 10:03:12 +01:00
|
|
|
|
|
|
|
lwsl_info("%s: creating server\n", __func__);
|
|
|
|
|
2021-04-03 19:29:02 +01:00
|
|
|
if (h->policy->endpoint &&
|
|
|
|
h->policy->endpoint[0] == '!') {
|
|
|
|
/*
|
|
|
|
* There's already a vhost existing that we want to
|
|
|
|
* bind to, we don't have to specify and create one.
|
|
|
|
*
|
|
|
|
* The vhost must enable any protocols that we want.
|
|
|
|
*/
|
|
|
|
|
|
|
|
vho = lws_get_vhost_by_name(context,
|
|
|
|
&h->policy->endpoint[1]);
|
|
|
|
if (!vho) {
|
|
|
|
lwsl_err("%s: no vhost %s\n", __func__,
|
|
|
|
&h->policy->endpoint[1]);
|
2021-08-12 06:32:34 +01:00
|
|
|
goto fail_creation;
|
2021-04-03 19:29:02 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
goto extant;
|
|
|
|
}
|
|
|
|
|
2020-02-29 12:37:24 +00:00
|
|
|
/*
|
2020-07-27 10:03:12 +01:00
|
|
|
* This streamtype represents a server, we're being asked to
|
|
|
|
* instantiate a corresponding vhost for it
|
|
|
|
*/
|
|
|
|
|
|
|
|
memset(&i, 0, sizeof i);
|
|
|
|
|
|
|
|
i.iface = h->policy->endpoint;
|
|
|
|
i.vhost_name = h->policy->streamtype;
|
|
|
|
i.port = h->policy->port;
|
|
|
|
|
2021-01-11 08:29:02 +00:00
|
|
|
if (i.iface && i.iface[0] == '+') {
|
2020-11-06 20:47:20 +00:00
|
|
|
i.iface++;
|
|
|
|
i.options |= LWS_SERVER_OPTION_UNIX_SOCK;
|
|
|
|
}
|
|
|
|
|
2020-07-27 10:03:12 +01:00
|
|
|
if (!ss_pcols[h->policy->protocol]) {
|
|
|
|
lwsl_err("%s: unsupp protocol", __func__);
|
2021-08-12 06:32:34 +01:00
|
|
|
goto fail_creation;
|
2020-07-27 10:03:12 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
*ppp++ = ss_pcols[h->policy->protocol]->protocol;
|
|
|
|
#if defined(LWS_ROLE_WS)
|
|
|
|
if (h->policy->u.http.u.ws.subprotocol)
|
|
|
|
/*
|
|
|
|
* He names a ws subprotocol, ie, we want to support
|
|
|
|
* ss-ws protocol in this vhost
|
|
|
|
*/
|
|
|
|
*ppp++ = &protocol_secstream_ws;
|
|
|
|
#endif
|
|
|
|
*ppp = NULL;
|
|
|
|
i.pprotocols = pprot;
|
|
|
|
|
2020-08-20 07:03:02 +01:00
|
|
|
#if defined(LWS_WITH_TLS)
|
2020-07-27 10:03:12 +01:00
|
|
|
if (h->policy->flags & LWSSSPOLF_TLS) {
|
|
|
|
i.options |= LWS_SERVER_OPTION_DO_SSL_GLOBAL_INIT;
|
|
|
|
i.server_ssl_cert_mem =
|
|
|
|
h->policy->trust.server.cert->ca_der;
|
|
|
|
i.server_ssl_cert_mem_len = (unsigned int)
|
|
|
|
h->policy->trust.server.cert->ca_der_len;
|
|
|
|
i.server_ssl_private_key_mem =
|
|
|
|
h->policy->trust.server.key->ca_der;
|
|
|
|
i.server_ssl_private_key_mem_len = (unsigned int)
|
|
|
|
h->policy->trust.server.key->ca_der_len;
|
|
|
|
}
|
2020-08-20 07:03:02 +01:00
|
|
|
#endif
|
2020-07-27 10:03:12 +01:00
|
|
|
|
2021-04-03 19:29:02 +01:00
|
|
|
|
|
|
|
if (!lws_fi(&ssi->fic, "ss_srv_vh_fail"))
|
2021-03-16 13:32:05 +00:00
|
|
|
vho = lws_create_vhost(context, &i);
|
2020-07-27 10:03:12 +01:00
|
|
|
if (!vho) {
|
|
|
|
lwsl_err("%s: failed to create vh", __func__);
|
2021-08-12 06:32:34 +01:00
|
|
|
goto fail_creation;
|
2020-07-27 10:03:12 +01:00
|
|
|
}
|
|
|
|
|
2021-04-03 19:29:02 +01:00
|
|
|
extant:
|
|
|
|
|
2020-07-27 10:03:12 +01:00
|
|
|
/*
|
|
|
|
* Mark this vhost as having to apply ss server semantics to
|
|
|
|
* any incoming accepted connection
|
2020-02-29 12:37:24 +00:00
|
|
|
*/
|
2020-07-27 10:03:12 +01:00
|
|
|
vho->ss_handle = h;
|
|
|
|
|
2021-01-02 10:49:43 +00:00
|
|
|
r = lws_ss_event_helper(h, LWSSSCS_CREATING);
|
|
|
|
lwsl_info("%s: CREATING returned status %d\n", __func__, (int)r);
|
|
|
|
if (r == LWSSSSRET_DESTROY_ME)
|
2021-08-12 06:32:34 +01:00
|
|
|
goto fail_creation;
|
2021-01-02 10:49:43 +00:00
|
|
|
|
2020-07-27 10:03:12 +01:00
|
|
|
lwsl_notice("%s: created server %s\n", __func__,
|
|
|
|
h->policy->streamtype);
|
|
|
|
|
|
|
|
return 0;
|
2020-02-29 12:37:24 +00:00
|
|
|
}
|
2020-07-27 10:03:12 +01:00
|
|
|
#endif
#if defined(LWS_WITH_SECURE_STREAMS_STATIC_POLICY_ONLY)
	/*
	 * For the static policy case, dynamically ref / instantiate the
	 * related trust store and vhost.  We do it by logical ss rather than
	 * by connection, because we don't want to expose the latency of
	 * creating the x.509 trust store at the first connection.
	 *
	 * But given the tls linkup takes time anyway, it could move to the
	 * ss connect code instead.
	 */

	if (!lws_ss_policy_ref_trust_store(context, h->policy,
					   1 /* do the ref */)) {
		lwsl_err("%s: unable to get vhost / trust store\n", __func__);

		goto fail_creation;
	}
#endif

	r = lws_ss_event_helper(h, LWSSSCS_CREATING);
	lwsl_info("%s: CREATING returned status %d\n", __func__, (int)r);
	if (r == LWSSSSRET_DESTROY_ME)
		goto fail_creation;

#if defined(LWS_WITH_SYS_SMD)
	if (!(ssi->flags & LWSSSINFLAGS_PROXIED) &&
	    pol == &pol_smd) {
		lws_ss_state_return_t r;

		r = lws_ss_event_helper(h, LWSSSCS_CONNECTING);
		if (r)
			return r;

		r = lws_ss_event_helper(h, LWSSSCS_CONNECTED);
		if (r)
			return r;
	}
#endif

	if (!(ssi->flags & LWSSSINFLAGS_REGISTER_SINK) &&
	    ((h->policy->flags & LWSSSPOLF_NAILED_UP)
#if defined(LWS_WITH_SYS_SMD)
	     || ((h->policy == &pol_smd) //&&
		//(ssi->flags & LWSSSINFLAGS_PROXIED))
				)
#endif
	    ))
		switch (_lws_ss_client_connect(h, 0, 0)) {
		case LWSSSSRET_OK:
			break;
		case LWSSSSRET_TX_DONT_SEND:
		case LWSSSSRET_DISCONNECT_ME:
			if (lws_ss_backoff(h) == LWSSSSRET_DESTROY_ME)
				goto fail_creation;
			break;
		case LWSSSSRET_DESTROY_ME:
			goto fail_creation;
		}

	return 0;

fail_creation:

	if (ppss)
		*ppss = NULL;

	lws_ss_destroy(&h);

	return 1;
}
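
/*
 * Usage sketch, not part of the build: minimal creation of a stream of
 * hypothetical type "mystream", with the user object pattern the offsets
 * describe.  On failure, the fail_creation path above has already
 * destroyed the handle and NULLed *ppss for us.
 */
#if 0
typedef struct myss {
	struct lws_ss_handle	*ss;		/* set via handle_offset */
	void			*opaque_data;	/* set via opaque offset */
} myss_t;

static int
create_my_stream(struct lws_context *cx)
{
	lws_ss_info_t ssi;
	struct lws_ss_handle *h;

	memset(&ssi, 0, sizeof(ssi));
	ssi.handle_offset	    = offsetof(myss_t, ss);
	ssi.opaque_user_data_offset = offsetof(myss_t, opaque_data);
	ssi.user_alloc		    = sizeof(myss_t);
	ssi.streamtype		    = "mystream";

	return lws_ss_create(cx, 0, &ssi, NULL, &h, NULL, NULL);
}
#endif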

void *
lws_ss_to_user_object(struct lws_ss_handle *h)
{
	return (void *)&h[1];
}
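
/*
 * The handle and user object are a single allocation,
 *
 *   [ struct lws_ss_handle ][ user object (info.user_alloc bytes) ]
 *
 * so the user object starts at &h[1]; the rx / tx / state callbacks all
 * receive that user object pointer rather than the handle itself.
 */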

void
lws_ss_destroy(lws_ss_handle_t **ppss)
{
	struct lws_context_per_thread *pt;
#if defined(LWS_WITH_SERVER)
	struct lws_vhost *v = NULL;
#endif
	lws_ss_handle_t *h = *ppss;
	lws_ss_metadata_t *pmd;

	if (!h)
		return;

	if (h->destroying) {
		lwsl_info("%s: reentrant destroy\n", __func__);
		return;
	}
	h->destroying = 1;

#if defined(LWS_WITH_CONMON)
	if (h->conmon_json)
		lws_free_set_NULL(h->conmon_json);
#endif

	if (h->wsi) {
		/*
		 * Don't let the wsi point to us any more,
		 * we (the ss object bound to the wsi) are going away now
		 */
		lws_set_opaque_user_data(h->wsi, NULL);
		lws_set_timeout(h->wsi, 1, LWS_TO_KILL_SYNC);
	}

	/*
	 * if we bound an smd registration to the SS, unregister it
	 */

#if defined(LWS_WITH_SYS_SMD)
	if (h->policy == &pol_smd) {
		lws_sul_cancel(&h->u.smd.sul_write);

		if (h->u.smd.smd_peer) {
			lws_smd_unregister(h->u.smd.smd_peer);
			h->u.smd.smd_peer = NULL;
		}
	}
#endif

	pt = &h->context->pt[h->tsi];

	lws_pt_lock(pt, __func__);
	*ppss = NULL;
	lws_dll2_remove(&h->list);
#if defined(LWS_WITH_SERVER)
	lws_dll2_remove(&h->cli_list);
#endif
	lws_dll2_remove(&h->to_list);
	lws_sul_cancel(&h->sul_timeout);

	/*
	 * for lss, DESTROYING deletes the C++ lss object, making the
	 * self-defined h->policy radioactive
	 */

#if defined(LWS_WITH_SERVER)
	if (h->policy && (h->policy->flags & LWSSSPOLF_SERVER))
		v = lws_get_vhost_by_name(h->context, h->policy->streamtype);
#endif

	/*
	 * Since we also come here to unpick create, it's possible we failed
	 * the creation before issuing any states, even CREATING.  We should
	 * only issue cleanup states on destroy if we previously got as far
	 * as issuing CREATING.
	 */

	if (h->prev_ss_state) {
		if (h->ss_dangling_connected)
			(void)lws_ss_event_helper(h, LWSSSCS_DISCONNECTED);

		(void)lws_ss_event_helper(h, LWSSSCS_DESTROYING);
	}

	lws_pt_unlock(pt);

	/* in proxy case, metadata value on heap may need cleaning up */

	pmd = h->metadata;
	while (pmd) {
		lwsl_info("%s: pmd %p\n", __func__, pmd);
		if (pmd->value_on_lws_heap)
			lws_free_set_NULL(pmd->value__may_own_heap);

		pmd = pmd->next;
	}

	lws_sul_cancel(&h->sul);

#if defined(LWS_WITH_SECURE_STREAMS_STATIC_POLICY_ONLY)
	/*
	 * For the static policy case, trust stores and their vhosts are
	 * refcounted by the logical secure streams using them... drop our
	 * ref here; the last one to go away takes the vhost and its x.509
	 * tls context down with it.
	 */

	if (h->policy)
		lws_ss_policy_unref_trust_store(h->context, h->policy);
#endif

#if defined(LWS_WITH_SERVER)
	if (v)
		/*
		 * For server, the policy describes a vhost that implements
		 * the server, when we take down the ss, we take down the
		 * related vhost (if it got that far)
		 */
		lws_vhost_destroy(v);
#endif

#if defined(LWS_WITH_SYS_FAULT_INJECTION)
	lws_fi_destroy(&h->fic);
#endif

#if defined(LWS_WITH_SYS_METRICS)
	/*
	 * If any hanging caliper measurement, dump it, and free any tags
	 */
	lws_metrics_caliper_report_hist(h->cal_txn, (struct lws *)NULL);
#endif

	lws_sul_cancel(&h->sul_timeout);

	/* confirm no sul left scheduled in handle or user allocation object */
	lws_sul_debug_zombies(h->context, h, sizeof(*h) + h->info.user_alloc,
			      __func__);

	__lws_lc_untag(&h->lc);

	lws_explicit_bzero((void *)h, sizeof(*h) + h->info.user_alloc);

	lws_free_set_NULL(h);
}
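
/*
 * Sketch of the destroy-from-callback pattern (illustrative; the failure
 * condition is hypothetical): because the handle is freed on the way out
 * of lws_ss_destroy(), user callbacks shouldn't call it directly on
 * their own stream... they return LWSSSSRET_DESTROY_ME instead, and the
 * caller applies it via _lws_ss_handle_state_ret_CAN_DESTROY_HANDLE()
 * as elsewhere in this file.
 */
#if 0
static lws_ss_state_return_t
myss_rx(void *userobj, const uint8_t *buf, size_t len, int flags)
{
	if (fatal_parse_error(buf, len))	/* hypothetical */
		return LWSSSSRET_DESTROY_ME;

	return LWSSSSRET_OK;
}
#endif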

#if defined(LWS_WITH_SERVER)
void
lws_ss_server_ack(struct lws_ss_handle *h, int nack)
{
	h->txn_resp = nack;
	h->txn_resp_set = 1;
}
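
/*
 * Sketch following the lws server examples (m / myss_t hypothetical):
 * the server's state callback acks or nacks the transaction when it
 * sees LWSSSCS_SERVER_TXN, then asks for tx to emit the response.
 */
#if 0
	case LWSSSCS_SERVER_TXN:
		lws_ss_server_ack(m->ss, 0);	/* 0 = ack / 200 path */

		return lws_ss_request_tx(m->ss);
#endif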

void
lws_ss_server_foreach_client(struct lws_ss_handle *h, lws_sssfec_cb cb,
			     void *arg)
{
	lws_start_foreach_dll_safe(struct lws_dll2 *, d, d1, h->src_list.head) {
		struct lws_ss_handle *hc =
			lws_container_of(d, struct lws_ss_handle, cli_list);

		cb(hc, arg);

	} lws_end_foreach_dll_safe(d, d1);
}
#endif
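
/*
 * Sketch: broadcasting to every accepted client stream of a server ss by
 * asking each for tx (each client's tx handler produces the payload).
 */
#if 0
static void
request_tx_cb(struct lws_ss_handle *ch, void *arg)
{
	(void)arg;

	(void)lws_ss_request_tx(ch);
}

	/* ... from the server handle h somewhere ... */
	lws_ss_server_foreach_client(h, request_tx_cb, NULL);
#endif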

lws_ss_state_return_t
lws_ss_request_tx(lws_ss_handle_t *h)
{
	lws_ss_state_return_t r;

	r = _lws_ss_request_tx(h);
	_lws_ss_handle_state_ret_CAN_DESTROY_HANDLE(r, NULL, &h);

	return r;
}

lws_ss_state_return_t
_lws_ss_request_tx(lws_ss_handle_t *h)
{
	lws_ss_state_return_t r;

	// lwsl_notice("%s: h %p, wsi %p\n", __func__, h, h->wsi);

	if (h->wsi) {
		lws_callback_on_writable(h->wsi);

		return LWSSSSRET_OK;
	}

	if (!h->policy) {
		/* avoid crash */
		lwsl_err("%s: null policy\n", __func__);
		return LWSSSSRET_OK;
	}

	if (h->policy->flags & LWSSSPOLF_SERVER)
		return LWSSSSRET_OK;

	/*
	 * there's currently no wsi / connection associated with the ss handle
	 */

#if defined(LWS_WITH_SYS_SMD)
	if (h->policy == &pol_smd) {
		/*
		 * He's an _lws_smd... and no wsi... since we're just going
		 * to queue it, we could call his tx() right here, but rather
		 * than surprise him let's set a sul to do it next time
		 * around the event loop
		 */

		lws_sul_schedule(h->context, 0, &h->u.smd.sul_write,
				 lws_ss_smd_tx_cb, 1);

		return LWSSSSRET_OK;
	}
#endif

	if (h->seqstate != SSSEQ_IDLE &&
	    h->seqstate != SSSEQ_DO_RETRY)
		return LWSSSSRET_OK;

	h->seqstate = SSSEQ_TRY_CONNECT;
	r = lws_ss_event_helper(h, LWSSSCS_POLL);
	if (r)
		return r;

	/*
	 * Retries operate via lws_ss_request_tx(), explicitly ask for a
	 * reconnection to clear the retry limit
	 */
	r = _lws_ss_client_connect(h, 1, 0);
	if (r == LWSSSSRET_DESTROY_ME)
		return r;

	if (r)
		return lws_ss_backoff(h);

	return LWSSSSRET_OK;
}
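
/*
 * Sketch of the tx handler this pairs with (illustrative): requesting tx
 * only schedules a writeable event / connection attempt, the payload
 * itself comes from the stream's tx callback when the role is ready.
 */
#if 0
static lws_ss_state_return_t
myss_tx(void *userobj, lws_ss_tx_ordinal_t ord, uint8_t *buf, size_t *len,
	int *flags)
{
	const char msg[] = "hello";

	if (sizeof(msg) - 1 > *len)		/* respect buffer space */
		return LWSSSSRET_TX_DONT_SEND;

	memcpy(buf, msg, sizeof(msg) - 1);
	*len = sizeof(msg) - 1;
	*flags = LWSSS_FLAG_SOM | LWSSS_FLAG_EOM; /* one whole message */

	return LWSSSSRET_OK;
}
#endif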

lws_ss_state_return_t
lws_ss_request_tx_len(lws_ss_handle_t *h, unsigned long len)
{
	if (h->wsi && h->policy &&
	    (h->policy->protocol == LWSSSP_H1 ||
	     h->policy->protocol == LWSSSP_H2 ||
	     h->policy->protocol == LWSSSP_WS))
		h->wsi->http.writeable_len = len;
	else
		h->writeable_len = len;

	return lws_ss_request_tx(h);
}
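
/*
 * Sketch: for http POST-type transactions where the payload size is
 * known up front, announcing it before tx lets the h1 / h2 roles use
 * content-length framing (m and payload_len hypothetical).
 */
#if 0
	lws_ss_state_return_t r;

	r = lws_ss_request_tx_len(m->ss, (unsigned long)payload_len);
	if (r)
		return r;	/* propagate any disconnect / destroy ask */
#endif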

/*
 * private helpers
 */

/* used on context destroy when iterating listed lws_ss on a pt */

int
lws_ss_destroy_dll(struct lws_dll2 *d, void *user)
{
	lws_ss_handle_t *h = lws_container_of(d, lws_ss_handle_t, list);

	lws_ss_destroy(&h);

	return 0;
}

struct lws_sequencer *
lws_ss_get_sequencer(lws_ss_handle_t *h)
{
	return h->seq;
}

struct lws_context *
lws_ss_get_context(struct lws_ss_handle *h)
{
	return h->context;
}

const char *
lws_ss_rideshare(struct lws_ss_handle *h)
{
	if (!h->rideshare)
		return h->policy->streamtype;

	return h->rideshare->streamtype;
}

int
lws_ss_add_peer_tx_credit(struct lws_ss_handle *h, int32_t bump)
{
	const struct ss_pcols *ssp;

	ssp = ss_pcols[(int)h->policy->protocol];

	if (h->wsi && ssp && ssp->tx_cr_add)
		return ssp->tx_cr_add(h, bump);

	return 0;
}

int
lws_ss_get_est_peer_tx_credit(struct lws_ss_handle *h)
{
	const struct ss_pcols *ssp;

	ssp = ss_pcols[(int)h->policy->protocol];

	if (h->wsi && ssp && ssp->tx_cr_est)
		return ssp->tx_cr_est(h);

	return 0;
}
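
/*
 * Sketch of rx flow control (assumes a protocol with tx_cr hooks, like
 * h2): returning the consumed amount as peer tx credit from the rx
 * handler keeps the remote peer's transmit window open (myss_t and
 * consume() hypothetical).
 */
#if 0
static lws_ss_state_return_t
myss_rx(void *userobj, const uint8_t *buf, size_t len, int flags)
{
	myss_t *m = (myss_t *)userobj;

	consume(buf, len);			/* hypothetical */
	lws_ss_add_peer_tx_credit(m->ss, (int32_t)len);

	return LWSSSSRET_OK;
}
#endif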

/*
 * protocol-independent handler for ss timeout
 */

static void
lws_ss_to_cb(lws_sorted_usec_list_t *sul)
{
	lws_ss_handle_t *h = lws_container_of(sul, lws_ss_handle_t,
					      sul_timeout);
	lws_ss_state_return_t r;

	lwsl_info("%s: %s timeout fired\n", __func__, lws_ss_tag(h));

	r = lws_ss_event_helper(h, LWSSSCS_TIMEOUT);
	if (r != LWSSSSRET_DISCONNECT_ME && r != LWSSSSRET_DESTROY_ME)
		return;

	if (!h->wsi)
		return;

	lws_set_timeout(h->wsi, 1, LWS_TO_KILL_ASYNC);

	_lws_ss_handle_state_ret_CAN_DESTROY_HANDLE(r, h->wsi, &h);
}

void
lws_ss_start_timeout(struct lws_ss_handle *h, unsigned int timeout_ms)
{
	if (!timeout_ms && !h->policy->timeout_ms)
		return;

	lws_sul_schedule(h->context, 0, &h->sul_timeout, lws_ss_to_cb,
			 (timeout_ms ? timeout_ms : h->policy->timeout_ms) *
			 LWS_US_PER_MS);
}

void
lws_ss_cancel_timeout(struct lws_ss_handle *h)
{
	lws_sul_cancel(&h->sul_timeout);
}
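
/*
 * Sketch: a per-transaction timeout... start it when issuing the request
 * (a nonzero ms arg overrides the policy timeout), cancel it on
 * completion; if it fires, the state callback sees LWSSSCS_TIMEOUT as
 * handled by lws_ss_to_cb() above (m hypothetical).
 */
#if 0
	lws_ss_start_timeout(m->ss, 5000);	/* 5s */

	/* ... later, when the transaction completed ... */

	lws_ss_cancel_timeout(m->ss);
#endif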

void
lws_ss_change_handlers(struct lws_ss_handle *h,
	lws_ss_state_return_t (*rx)(void *userobj, const uint8_t *buf,
				    size_t len, int flags),
	lws_ss_state_return_t (*tx)(void *userobj, lws_ss_tx_ordinal_t ord,
				    uint8_t *buf, size_t *len, int *flags),
	lws_ss_state_return_t (*state)(void *userobj,
				       void *h_src /* ss handle type */,
				       lws_ss_constate_t state,
				       lws_ss_tx_ordinal_t ack))
{
	if (rx)
		h->info.rx = rx;
	if (tx)
		h->info.tx = tx;
	if (state)
		h->info.state = state;
}
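
/*
 * Sketch (handler names hypothetical): a server stream might swap in
 * ws-specific rx / tx after the connection upgraded, leaving the state
 * handler as it was by passing NULL for it.
 */
#if 0
	case LWSSSCS_SERVER_UPGRADE:
		lws_ss_change_handlers(m->ss, myss_ws_rx, myss_ws_tx, NULL);
		break;
#endif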

const char *
lws_ss_tag(struct lws_ss_handle *h)
{
	if (!h)
		return "[null ss]";

	return lws_lc_tag(&h->lc);
}