/*
 * libwebsockets - small server side websockets and web server implementation
 *
 * Copyright (C) 2019 - 2020 Andy Green <andy@warmcat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <private-lib-core.h>

static const struct ss_pcols *ss_pcols[] = {
#if defined(LWS_ROLE_H1)
	&ss_pcol_h1,		/* LWSSSP_H1 */
#else
	NULL,
#endif
#if defined(LWS_ROLE_H2)
	&ss_pcol_h2,		/* LWSSSP_H2 */
#else
	NULL,
#endif
#if defined(LWS_ROLE_WS)
	&ss_pcol_ws,		/* LWSSSP_WS */
#else
	NULL,
#endif
#if defined(LWS_ROLE_MQTT)
	&ss_pcol_mqtt,		/* LWSSSP_MQTT */
#else
	NULL,
#endif
	&ss_pcol_raw,		/* LWSSSP_RAW */
	NULL,
};

static const char *state_names[] = {
	"LWSSSCS_CREATING",
	"LWSSSCS_DISCONNECTED",
	"LWSSSCS_UNREACHABLE",
	"LWSSSCS_AUTH_FAILED",
	"LWSSSCS_CONNECTED",
	"LWSSSCS_CONNECTING",
	"LWSSSCS_DESTROYING",
	"LWSSSCS_POLL",
	"LWSSSCS_ALL_RETRIES_FAILED",
	"LWSSSCS_QOS_ACK_REMOTE",
	"LWSSSCS_QOS_NACK_REMOTE",
	"LWSSSCS_QOS_ACK_LOCAL",
	"LWSSSCS_QOS_NACK_LOCAL",
	"LWSSSCS_TIMEOUT",
	"LWSSSCS_SERVER_TXN",
	"LWSSSCS_SERVER_UPGRADE",
};

const char *
lws_ss_state_name(int state)
{
	if (state >= LWSSSCS_USER_BASE)
		return "user state";

	if (state >= (int)LWS_ARRAY_SIZE(state_names))
		return "unknown";

	return state_names[state];
}
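
/*
 * States at or above LWSSSCS_USER_BASE (1000) are user-defined.  The policy
 * can emit them via its per-streamtype "http_resp_map" table, which maps
 * transport-layer http response codes to custom SS state notifications, eg:
 *
 *	"http_resp_map": [ { "530": 1530 }, { "531": 1531 } ],
 *
 * Overloading the transport-layer response code with application-layer
 * meaning is not recommended (better to return a 200 and explain what
 * happened inside the application protocol), but this lets you interoperate
 * with existing systems that do it.
 */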

lws_ss_state_return_t
lws_ss_event_helper(lws_ss_handle_t *h, lws_ss_constate_t cs)
{
	lws_ss_state_return_t r;

	if (!h)
		return LWSSSSRET_OK;

#if defined(LWS_WITH_SEQUENCER)
	/*
	 * A parent sequencer for the ss is optional, if we have one, keep it
	 * informed of state changes on the ss connection
	 */
	if (h->seq && cs != LWSSSCS_DESTROYING)
		lws_seq_queue_event(h->seq, LWSSEQ_SS_STATE_BASE + cs,
				    (void *)h, NULL);
#endif

	if (h->info.state) {
		r = h->info.state(ss_to_userobj(h), NULL, cs, 0);
#if defined(LWS_WITH_SERVER)
		if ((h->info.flags & LWSSSINFLAGS_ACCEPTED) &&
		    cs == LWSSSCS_DISCONNECTED)
			r = LWSSSSRET_DESTROY_ME;
#endif
		return r;
	}

	return LWSSSSRET_OK;
}
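
/*
 * Sketch of a user state callback on the receiving end of these
 * notifications ("myss_t" and the callback itself are hypothetical user
 * code; the return value is the lws_ss_state_return_t disposition that
 * lws_ss_event_helper() propagates back to its caller):
 *
 *	static lws_ss_state_return_t
 *	myss_state(void *userobj, void *sh, lws_ss_constate_t state,
 *		   lws_ss_tx_ordinal_t ack)
 *	{
 *		myss_t *m = (myss_t *)userobj;
 *
 *		switch (state) {
 *		case LWSSSCS_CONNECTED:
 *			return lws_ss_request_tx(m->ss);
 *		case LWSSSCS_ALL_RETRIES_FAILED:
 *			return LWSSSSRET_DESTROY_ME;
 *		default:
 *			break;
 *		}
 *
 *		return LWSSSSRET_OK;
 *	}
 */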

int
_lws_ss_handle_state_ret(lws_ss_state_return_t r, struct lws *wsi,
			 lws_ss_handle_t **ph)
{
	if (r == LWSSSSRET_DESTROY_ME) {
		if (wsi)
			lws_set_opaque_user_data(wsi, NULL);
		(*ph)->wsi = NULL;
		lws_ss_destroy(ph);
	}

	return -1; /* close connection */
}

static void
lws_ss_timeout_sul_check_cb(lws_sorted_usec_list_t *sul)
{
	lws_ss_handle_t *h = lws_container_of(sul, lws_ss_handle_t, sul);

	lwsl_notice("%s: retrying ss h %p (%s) after backoff\n", __func__, h,
		    h->policy->streamtype);
	/* we want to retry... */
	h->seqstate = SSSEQ_DO_RETRY;

	lws_ss_request_tx(h);
}

int
lws_ss_exp_cb_metadata(void *priv, const char *name, char *out, size_t *pos,
		       size_t olen, size_t *exp_ofs)
{
	lws_ss_handle_t *h = (lws_ss_handle_t *)priv;
	const char *replace = NULL;
	size_t total, budget;
	lws_ss_metadata_t *md = lws_ss_policy_metadata(h->policy, name),
			  *hmd = lws_ss_get_handle_metadata(h, name);

	if (!md) {
		lwsl_err("%s: Unknown metadata %s\n", __func__, name);

		return LSTRX_FATAL_NAME_UNKNOWN;
	}

	replace = hmd->value__may_own_heap;
	total = hmd->length;

	budget = olen - *pos;
	total -= *exp_ofs;
	if (total < budget)
		budget = total;

	if (out)
		memcpy(out + *pos, replace + (*exp_ofs), budget);
	*exp_ofs += budget;
	*pos += budget;

	if (budget == total)
		return LSTRX_DONE;

	return LSTRX_FILLED_OUT;
}
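
/*
 * Expansion sketch (hypothetical policy and metadata names): with a policy
 * endpoint of "${server}.example.com" and a metadata entry named "server",
 * setting the value before connecting...
 *
 *	if (lws_ss_set_metadata(h, "server", "warmcat", 7))
 *		// handle failure
 *
 * ...makes the callback above substitute it at connection time, so the
 * connection is attempted to "warmcat.example.com".
 */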

int
lws_ss_set_timeout_us(lws_ss_handle_t *h, lws_usec_t us)
{
	struct lws_context_per_thread *pt = &h->context->pt[h->tsi];

	h->sul.cb = lws_ss_timeout_sul_check_cb;
	__lws_sul_insert_us(&pt->pt_sul_owner[
		!!(h->policy->flags & LWSSSPOLF_WAKE_SUSPEND__VALIDITY)],
			    &h->sul, us);

	return 0;
}

lws_ss_state_return_t
_lws_ss_backoff(lws_ss_handle_t *h, lws_usec_t us_override)
{
	uint64_t ms;
	char conceal;

	if (h->seqstate == SSSEQ_RECONNECT_WAIT)
		return LWSSSSRET_OK;

	/* figure out what we should do about another retry */

	lwsl_info("%s: ss %p: retry backoff after failure\n", __func__, h);
	ms = lws_retry_get_delay_ms(h->context, h->policy->retry_bo,
				    &h->retry, &conceal);
	if (!conceal) {
		lwsl_info("%s: ss %p: abandon conn attempt\n", __func__, h);
		h->seqstate = SSSEQ_IDLE;

		return lws_ss_event_helper(h, LWSSSCS_ALL_RETRIES_FAILED);
	}

	/* Only increase our planned backoff, or go with it */

	if (us_override < (lws_usec_t)ms * LWS_US_PER_MS)
		us_override = ms * LWS_US_PER_MS;

	h->seqstate = SSSEQ_RECONNECT_WAIT;
	lws_ss_set_timeout_us(h, us_override);

	lwsl_info("%s: ss %p: retry wait %dms\n", __func__, h,
		  (int)(us_override / 1000));

	return LWSSSSRET_OK;
}

lws_ss_state_return_t
lws_ss_backoff(lws_ss_handle_t *h)
{
	return _lws_ss_backoff(h, 0);
}
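
/*
 * The schedule consumed by lws_retry_get_delay_ms() above comes from the
 * policy's retry table; an illustrative fragment (names as used in the lws
 * example policies, shown as a sketch only):
 *
 *	"retry": [{"default": {
 *		"backoff":  [ 1000, 2000, 3000, 5000, 10000 ],
 *		"conceal":  5,
 *		"jitterpc": 20
 *	}}],
 *
 * "conceal" bounds how many consecutive failures are concealed (retried)
 * before the helper reports LWSSSCS_ALL_RETRIES_FAILED instead.
 */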

#if defined(LWS_WITH_SYS_SMD)

/*
 * Local SMD <-> SS
 *
 * We pass received messages through to the SS handler synchronously, using the
 * lws service thread context.
 *
 * After the SS is created and registered, still nothing is going to come here
 * until the peer sends us his rx_class_mask and we update his registration with
 * it, because from SS creation his rx_class_mask defaults to 0.
 */

static int
lws_smd_ss_cb(void *opaque, lws_smd_class_t _class,
	      lws_usec_t timestamp, void *buf, size_t len)
{
	lws_ss_handle_t *h = (lws_ss_handle_t *)opaque;
	uint8_t *p = (uint8_t *)buf - LWS_SMD_SS_RX_HEADER_LEN;

	/*
	 * When configured with SS enabled, lws over-allocates
	 * LWS_SMD_SS_RX_HEADER_LEN bytes behind the payload of the queued
	 * message, for prepending serialized class and timestamp data in-band
	 * with the payload.
	 */

	lws_ser_wu64be(p, _class);
	lws_ser_wu64be(p + 8, timestamp);

	if (h->info.rx)
		h->info.rx((void *)&h[1], p, len + LWS_SMD_SS_RX_HEADER_LEN,
			   LWSSS_FLAG_SOM | LWSSS_FLAG_EOM);

	return 0;
}
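
/*
 * A sketch of recovering the in-band header from the SS side rx callback
 * (illustrative user code; the header is the two 64-bit big-endian words
 * written above, class then timestamp, LWS_SMD_SS_RX_HEADER_LEN bytes in
 * total):
 *
 *	static lws_ss_state_return_t
 *	myss_smd_rx(void *userobj, const uint8_t *buf, size_t len, int flags)
 *	{
 *		lws_smd_class_t _class = (lws_smd_class_t)lws_ser_ru64be(buf);
 *		lws_usec_t ts = (lws_usec_t)lws_ser_ru64be(buf + 8);
 *		const uint8_t *payload = buf + LWS_SMD_SS_RX_HEADER_LEN;
 *		size_t payload_len = len - LWS_SMD_SS_RX_HEADER_LEN;
 *
 *		// ... consume the payload ...
 *
 *		return LWSSSSRET_OK;
 *	}
 */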

static void
lws_ss_smd_tx_cb(lws_sorted_usec_list_t *sul)
{
	lws_ss_handle_t *h = lws_container_of(sul, lws_ss_handle_t, u.smd.sul_write);
	uint8_t buf[LWS_SMD_SS_RX_HEADER_LEN + LWS_SMD_MAX_PAYLOAD], *p;
	size_t len = sizeof(buf);
	lws_smd_class_t _class;
	int flags = 0, n;

	if (!h->info.tx)
		return;

	n = h->info.tx(&h[1], h->txord++, buf, &len, &flags);
	if (n)
		/* nonzero return means don't want to send anything */
		return;

	// lwsl_notice("%s: (SS %p bound to _lws_smd creates message) tx len %d\n", __func__, h, (int)len);
	// lwsl_hexdump_notice(buf, len);

	assert(len >= LWS_SMD_SS_RX_HEADER_LEN);
	_class = (lws_smd_class_t)lws_ser_ru64be(buf);
	p = lws_smd_msg_alloc(h->context, _class, len - LWS_SMD_SS_RX_HEADER_LEN);
	if (!p) {
		// this can be rejected if nobody listening for this class
		//lwsl_notice("%s: failed to alloc\n", __func__);
		return;
	}

	memcpy(p, buf + LWS_SMD_SS_RX_HEADER_LEN, len - LWS_SMD_SS_RX_HEADER_LEN);
	if (lws_smd_msg_send(h->context, p)) {
		lwsl_notice("%s: failed to queue\n", __func__);
		return;
	}
}

#endif

int
_lws_ss_client_connect(lws_ss_handle_t *h, int is_retry)
{
	const char *prot, *_prot, *ipath, *_ipath, *ads, *_ads;
	struct lws_client_connect_info i;
	const struct ss_pcols *ssp;
	size_t used_in, used_out;
	union lws_ss_contemp ct;
	char path[1024], ep[96];
	lws_ss_state_return_t r;
	int port, _port, tls;
	lws_strexp_t exp;

	if (!h->policy) {
		lwsl_err("%s: ss with no policy\n", __func__);

		return -1;
	}

	/*
	 * We are already bound to a sink?
	 */

//	if (h->h_sink)
//		return 0;

	if (!is_retry)
		h->retry = 0;

#if defined(LWS_WITH_SYS_SMD)
	if (h->policy == &pol_smd) {

		if (h->u.smd.smd_peer)
			return 0;

		// lwsl_notice("%s: received connect for _lws_smd, registering for class mask 0x%x\n",
		//	     __func__, h->info.manual_initial_tx_credit);

		h->u.smd.smd_peer = lws_smd_register(h->context, h,
				(h->info.flags & LWSSSINFLAGS_PROXIED) ?
					LWSSMDREG_FLAG_PROXIED_SS : 0,
				h->info.manual_initial_tx_credit,
				lws_smd_ss_cb);
		if (!h->u.smd.smd_peer)
			return -1;

		if (lws_ss_event_helper(h, LWSSSCS_CONNECTING))
			return -1;
		// lwsl_err("%s: registered SS SMD\n", __func__);
		if (lws_ss_event_helper(h, LWSSSCS_CONNECTED))
			return -1;
		return 0;
	}
#endif

	/*
	 * We're going to substitute ${metadata} in the endpoint at connection-
	 * time, so this can be set dynamically...
	 */

	lws_strexp_init(&exp, (void *)h, lws_ss_exp_cb_metadata, ep, sizeof(ep));

	if (lws_strexp_expand(&exp, h->policy->endpoint,
			      strlen(h->policy->endpoint),
			      &used_in, &used_out) != LSTRX_DONE) {
		lwsl_err("%s: address strexp failed\n", __func__);

		return -1;
	}

	/*
	 * ... in some cases, we might want the user to be able to override
	 * some policy settings by what he provided in there.  For example,
	 * if he set the endpoint to "https://myendpoint.com:4443/mypath" it
	 * might be quite convenient to override the policy to follow the info
	 * that was given for at least server, port and the url path.
	 */

	_port = port = h->policy->port;
	_prot = prot = NULL;
	_ipath = ipath = "";
	_ads = ads = ep;

	if (strchr(ep, ':') &&
	    !lws_parse_uri(ep, &_prot, &_ads, &_port, &_ipath)) {
		lwsl_debug("%s: using uri parse results '%s' '%s' %d '%s'\n",
			   __func__, _prot, _ads, _port, _ipath);
		prot = _prot;
		ads = _ads;
		port = _port;
		ipath = _ipath;
	}

	memset(&i, 0, sizeof i); /* otherwise uninitialized garbage */
	i.context = h->context;
	tls = !!(h->policy->flags & LWSSSPOLF_TLS);

	if (prot && (!strcmp(prot, "http") || !strcmp(prot, "ws") ||
		     !strcmp(prot, "mqtt")))
		tls = 0;

	if (tls) {
		lwsl_info("%s: using tls\n", __func__);
		i.ssl_connection = LCCSCF_USE_SSL;

		if (!h->policy->trust.store)
			lwsl_info("%s: using platform trust store\n", __func__);
		else {

			i.vhost = lws_get_vhost_by_name(h->context,
					h->policy->trust.store->name);
			if (!i.vhost) {
				lwsl_err("%s: missing vh for policy %s\n",
					 __func__,
					 h->policy->trust.store->name);

				return -1;
			}
		}
	}

	if (h->policy->flags & LWSSSPOLF_WAKE_SUSPEND__VALIDITY)
		i.ssl_connection |= LCCSCF_WAKE_SUSPEND__VALIDITY;

	i.address		= ads;
	i.port			= port;
	i.host			= i.address;
	i.origin		= i.address;
	i.opaque_user_data	= h;
	i.seq			= h->seq;
	i.retry_and_idle_policy	= h->policy->retry_bo;
	i.sys_tls_client_cert	= h->policy->client_cert;

	i.path			= ipath;
		/* if this is not "", munge should use it instead of policy
		 * url path
		 */

	ssp = ss_pcols[(int)h->policy->protocol];
	if (!ssp) {
		lwsl_err("%s: unsupported protocol\n", __func__);

		return -1;
	}
	i.alpn = ssp->alpn;

	/*
	 * For http, we can get the method from the http object, override in
	 * the protocol-specific munge callback below if not http
	 */
	i.method = h->policy->u.http.method;
	i.protocol = ssp->protocol->name; /* lws protocol name */
	i.local_protocol_name = i.protocol;

	if (ssp->munge) /* eg, raw doesn't use; endpoint strexp already done */
		ssp->munge(h, path, sizeof(path), &i, &ct);

	i.pwsi = &h->wsi;

#if defined(LWS_WITH_SSPLUGINS)
	if (h->policy->plugins[0] && h->policy->plugins[0]->munge)
		h->policy->plugins[0]->munge(h, path, sizeof(path));
#endif

	lwsl_info("%s: connecting %s, '%s' '%s' %s\n", __func__, i.method,
		  i.alpn, i.address, i.path);

	h->txn_ok = 0;
	r = lws_ss_event_helper(h, LWSSSCS_CONNECTING);
	if (r)
		return r;

	if (!lws_client_connect_via_info(&i)) {
		r = lws_ss_event_helper(h, LWSSSCS_UNREACHABLE);
		if (r)
			return r;

		r = lws_ss_backoff(h);
		if (r)
			return r;

		return 1;
	}

	return 0;
}

int
lws_ss_client_connect(lws_ss_handle_t *h)
{
	return _lws_ss_client_connect(h, 0);
}

/*
 * Public API
 */

/*
 * Create either a stream or a sink
 */

int
lws_ss_create(struct lws_context *context, int tsi, const lws_ss_info_t *ssi,
	      void *opaque_user_data, lws_ss_handle_t **ppss,
	      struct lws_sequencer *seq_owner, const char **ppayload_fmt)
{
	struct lws_context_per_thread *pt = &context->pt[tsi];
	const lws_ss_policy_t *pol;
	lws_ss_metadata_t *smd;
	lws_ss_handle_t *h;
	size_t size;
	void **v;
	char *p;
	int n;

	pol = lws_ss_policy_lookup(context, ssi->streamtype);
	if (!pol) {
		lwsl_info("%s: unknown stream type %s\n", __func__,
			  ssi->streamtype);
		return 1;
	}

	if (ssi->flags & LWSSSINFLAGS_REGISTER_SINK) {
		/*
		 * This can register a secure streams sink as well as normal
		 * secure streams connections.  If that's what's happening,
		 * confirm the policy agrees that this streamtype should be
		 * directed to a sink.
		 */
		if (!(pol->flags & LWSSSPOLF_LOCAL_SINK)) {
			/*
			 * Caller wanted to create a sink for this streamtype,
			 * but the policy does not agree the streamtype should
			 * be routed to a local sink.
			 */
			lwsl_err("%s: %s policy does not allow local sink\n",
				 __func__, ssi->streamtype);

			return 1;
		}
	} else {

		if (!(pol->flags & LWSSSPOLF_LOCAL_SINK)) {

		}
		// lws_dll2_foreach_safe(&pt->ss_owner, NULL, lws_ss_destroy_dll);
	}

	/*
	 * We overallocate and point to things in the overallocation...
	 *
	 * 1) the user_alloc from the stream info
	 * 2) network auth plugin instantiation data
	 * 3) stream auth plugin instantiation data
	 * 4) as many metadata pointer structs as the policy tells
	 * 5) the streamtype name (length is not aligned)
	 *
	 * ... when we come to destroy it, just one free to do.
	 */

	size = sizeof(*h) + ssi->user_alloc + strlen(ssi->streamtype) + 1;
#if defined(LWS_WITH_SSPLUGINS)
	if (pol->plugins[0])
		size += pol->plugins[0]->alloc;
	if (pol->plugins[1])
		size += pol->plugins[1]->alloc;
#endif
	size += pol->metadata_count * sizeof(lws_ss_metadata_t);

	h = lws_zalloc(size, __func__);
	if (!h)
		return 2;

	h->info = *ssi;
	h->policy = pol;
	h->context = context;
	h->tsi = tsi;
	h->seq = seq_owner;

	/* start of overallocated area */
	p = (char *)&h[1];

	/* set the handle pointer in the user data struct */
	v = (void **)(p + ssi->handle_offset);
	*v = h;

	/* set the opaque user data in the user data struct */
	v = (void **)(p + ssi->opaque_user_data_offset);
	*v = opaque_user_data;

	p += ssi->user_alloc;

#if defined(LWS_WITH_SSPLUGINS)
	if (pol->plugins[0]) {
		h->nauthi = p;
		p += pol->plugins[0]->alloc;
	}
	if (pol->plugins[1]) {
		h->sauthi = p;
		p += pol->plugins[1]->alloc;
	}
#endif

	if (pol->metadata_count) {
		h->metadata = (lws_ss_metadata_t *)p;
		p += pol->metadata_count * sizeof(lws_ss_metadata_t);

		lwsl_info("%s: %s metadata count %d\n", __func__,
			  pol->streamtype, pol->metadata_count);
	}

	smd = pol->metadata;
	for (n = 0; n < pol->metadata_count; n++) {
		h->metadata[n].name = smd->name;
		if (n + 1 == pol->metadata_count)
			h->metadata[n].next = NULL;
		else
			h->metadata[n].next = &h->metadata[n + 1];
		smd = smd->next;
	}

	memcpy(p, ssi->streamtype, strlen(ssi->streamtype) + 1);
	/* don't mark accepted ss as being the server */
	if (ssi->flags & LWSSSINFLAGS_SERVER)
		h->info.flags &= ~LWSSSINFLAGS_SERVER;
	h->info.streamtype = p;

	lws_pt_lock(pt, __func__);
	lws_dll2_add_head(&h->list, &pt->ss_owner);
	lws_pt_unlock(pt);

	if (ppss)
		*ppss = h;

	if (ppayload_fmt)
		*ppayload_fmt = pol->payload_fmt;

	if (ssi->flags & LWSSSINFLAGS_SERVER)
		/*
		 * return early for accepted connection flow
		 */
		return 0;

#if defined(LWS_WITH_SYS_SMD)
	/*
	 * For a local Secure Streams connection
	 */
	if (!(ssi->flags & LWSSSINFLAGS_PROXIED) &&
	    pol == &pol_smd) {
		lws_ss_state_return_t r;

		/*
		 * So he has asked to be wired up to SMD over a SS link.
		 * Register him as an smd participant in his own right.
		 *
		 * Just for this case, ssi->manual_initial_tx_credit is used
		 * to set the rx class mask (this is part of the SS
		 * serialization format as well)
		 */
		h->u.smd.smd_peer = lws_smd_register(context, h, 0,
					ssi->manual_initial_tx_credit,
					lws_smd_ss_cb);
		if (!h->u.smd.smd_peer)
			goto late_bail;
		lwsl_info("%s: registered SS SMD\n", __func__);
		r = lws_ss_event_helper(h, LWSSSCS_CONNECTING);
		if (r)
			return r;
		r = lws_ss_event_helper(h, LWSSSCS_CONNECTED);
		if (r)
			return r;
	}
#endif

#if defined(LWS_WITH_SERVER)
	if (h->policy->flags & LWSSSPOLF_SERVER) {
		const struct lws_protocols *pprot[3], **ppp = &pprot[0];
		struct lws_context_creation_info i;
		struct lws_vhost *vho;

		lwsl_info("%s: creating server\n", __func__);

		/*
		 * This streamtype represents a server, we're being asked to
		 * instantiate a corresponding vhost for it
		 */

		memset(&i, 0, sizeof i);

		i.iface		= h->policy->endpoint;
		i.vhost_name	= h->policy->streamtype;
		i.port		= h->policy->port;

		if (i.iface[0] == '+') {
			i.iface++;
			i.options |= LWS_SERVER_OPTION_UNIX_SOCK;
		}

		if (!ss_pcols[h->policy->protocol]) {
			lwsl_err("%s: unsupp protocol", __func__);
			goto late_bail;
		}

		*ppp++ = ss_pcols[h->policy->protocol]->protocol;
#if defined(LWS_ROLE_WS)
		if (h->policy->u.http.u.ws.subprotocol)
			/*
			 * He names a ws subprotocol, ie, we want to support
			 * ss-ws protocol in this vhost
			 */
			*ppp++ = &protocol_secstream_ws;
#endif
		*ppp = NULL;
		i.pprotocols = pprot;

#if defined(LWS_WITH_TLS)
		if (h->policy->flags & LWSSSPOLF_TLS) {
			i.options |= LWS_SERVER_OPTION_DO_SSL_GLOBAL_INIT;
			i.server_ssl_cert_mem =
				h->policy->trust.server.cert->ca_der;
			i.server_ssl_cert_mem_len = (unsigned int)
				h->policy->trust.server.cert->ca_der_len;
			i.server_ssl_private_key_mem =
				h->policy->trust.server.key->ca_der;
			i.server_ssl_private_key_mem_len = (unsigned int)
				h->policy->trust.server.key->ca_der_len;
		}
#endif

		vho = lws_create_vhost(context, &i);
		if (!vho) {
			lwsl_err("%s: failed to create vh", __func__);
			goto late_bail;
		}

		/*
		 * Mark this vhost as having to apply ss server semantics to
		 * any incoming accepted connection
		 */
		vho->ss_handle = h;

		lwsl_notice("%s: created server %s\n", __func__,
			    h->policy->streamtype);

		return 0;
	}
#endif

#if defined(LWS_WITH_SECURE_STREAMS_STATIC_POLICY_ONLY)

	/*
	 * For static policy case, dynamically ref / instantiate the related
	 * trust store and vhost.  We do it by logical ss rather than
	 * connection because we don't want to expose the latency of creating
	 * the x.509 trust store at the first connection.
	 *
	 * But it might be given the tls linkup takes time anyway, it can move
	 * to the ss connect code instead.
	 */

	if (!lws_ss_policy_ref_trust_store(context, h->policy, 1 /* do the ref */)) {
		lwsl_err("%s: unable to get vhost / trust store\n", __func__);
		goto late_bail;
	}
#endif

	if (lws_ss_event_helper(h, LWSSSCS_CREATING)) {

late_bail:

		lws_pt_lock(pt, __func__);
		lws_dll2_remove(&h->list);
		lws_pt_unlock(pt);
		lws_free(h);

		return 1;
	}

	if (!(ssi->flags & LWSSSINFLAGS_REGISTER_SINK) &&
	    ((h->policy->flags & LWSSSPOLF_NAILED_UP)
#if defined(LWS_WITH_SYS_SMD)
	     || ((h->policy == &pol_smd) //&&
		 //(ssi->flags & LWSSSINFLAGS_PROXIED))
	     )
#endif
	    ))
		if (_lws_ss_client_connect(h, 0)) {
			if (lws_ss_backoff(h))
				/* has been destroyed */
				return 1;
		}

	return 0;
}
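
/*
 * Minimal creation sketch (the "myss_t" struct and myss_* callbacks are
 * hypothetical user code, and "mintest" an example streamtype name from a
 * policy; illustrative only):
 *
 *	typedef struct myss {
 *		struct lws_ss_handle	*ss;	 // filled via handle_offset
 *		void			*opaque; // via opaque_user_data_offset
 *	} myss_t;
 *
 *	lws_ss_info_t ssi;
 *	struct lws_ss_handle *h;
 *
 *	memset(&ssi, 0, sizeof ssi);
 *	ssi.handle_offset	    = offsetof(myss_t, ss);
 *	ssi.opaque_user_data_offset = offsetof(myss_t, opaque);
 *	ssi.rx			    = myss_rx;
 *	ssi.tx			    = myss_tx;
 *	ssi.state		    = myss_state;
 *	ssi.user_alloc		    = sizeof(myss_t);
 *	ssi.streamtype		    = "mintest";
 *
 *	if (lws_ss_create(context, 0, &ssi, NULL, &h, NULL, NULL))
 *		lwsl_err("failed to create secure stream\n");
 */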

void *
lws_ss_to_user_object(struct lws_ss_handle *h)
{
	return (void *)&h[1];
}

void
lws_ss_destroy(lws_ss_handle_t **ppss)
{
	struct lws_context_per_thread *pt;
	lws_ss_handle_t *h = *ppss;
	lws_ss_metadata_t *pmd;

	if (!h)
		return;

	if (h->destroying) {
		lwsl_info("%s: reentrant destroy\n", __func__);
		return;
	}
	h->destroying = 1;

	if (h->wsi) {
		/*
		 * Don't let the wsi point to us any more,
		 * we (the ss object bound to the wsi) are going away now
		 */
		lws_set_opaque_user_data(h->wsi, NULL);
		lws_set_timeout(h->wsi, 1, LWS_TO_KILL_SYNC);
	}

	/*
	 * if we bound an smd registration to the SS, unregister it
	 */

#if defined(LWS_WITH_SYS_SMD)
	if (h->policy == &pol_smd && h->u.smd.smd_peer) {
		lws_smd_unregister(h->u.smd.smd_peer);
		h->u.smd.smd_peer = NULL;
	}
#endif

	pt = &h->context->pt[h->tsi];

	lws_pt_lock(pt, __func__);
	*ppss = NULL;
	lws_dll2_remove(&h->list);
#if defined(LWS_WITH_SERVER)
	lws_dll2_remove(&h->cli_list);
#endif
	lws_dll2_remove(&h->to_list);
	lws_sul_cancel(&h->sul_timeout);

	(void)lws_ss_event_helper(h, LWSSSCS_DESTROYING);
	lws_pt_unlock(pt);

	/* in proxy case, metadata value on heap may need cleaning up */

	pmd = h->metadata;
	while (pmd) {
		lwsl_info("%s: pmd %p\n", __func__, pmd);
		if (pmd->value_on_lws_heap)
			lws_free_set_NULL(pmd->value__may_own_heap);
		pmd = pmd->next;
	}

	lws_sul_cancel(&h->sul);

#if defined(LWS_WITH_SECURE_STREAMS_STATIC_POLICY_ONLY)

	/*
	 * For static policy case, release the ref we took on the related
	 * trust store / vhost at creation time; the last user of it takes
	 * the vhost and its x.509 tls context down with it
	 */

	lws_ss_policy_unref_trust_store(h->context, h->policy);
#endif

#if defined(LWS_WITH_SERVER)
	if (h->policy->flags & LWSSSPOLF_SERVER) {
		struct lws_vhost *v = lws_get_vhost_by_name(h->context,
						h->policy->streamtype);

		/*
		 * For server, the policy describes a vhost that implements the
		 * server, when we take down the ss, we take down the related
		 * vhost (if it got that far)
		 */
		if (v)
			lws_vhost_destroy(v);
	}
#endif

	/* confirm no sul left scheduled in handle or user allocation object */
	lws_sul_debug_zombies(h->context, h, sizeof(*h) + h->info.user_alloc,
			      __func__);

	lws_free_set_NULL(h);
}

#if defined(LWS_WITH_SERVER)
void
lws_ss_server_ack(struct lws_ss_handle *h, int nack)
{
	h->txn_resp = nack;
	h->txn_resp_set = 1;
}

void
lws_ss_server_foreach_client(struct lws_ss_handle *h, lws_sssfec_cb cb,
			     void *arg)
{
	lws_start_foreach_dll_safe(struct lws_dll2 *, d, d1, h->src_list.head) {
		struct lws_ss_handle *h =
			lws_container_of(d, struct lws_ss_handle, cli_list);

		cb(h, arg);

	} lws_end_foreach_dll_safe(d, d1);
}
#endif

lws_ss_state_return_t
lws_ss_request_tx(lws_ss_handle_t *h)
{
	lws_ss_state_return_t r;

	// lwsl_notice("%s: h %p, wsi %p\n", __func__, h, h->wsi);

	if (h->wsi) {
		lws_callback_on_writable(h->wsi);

		return LWSSSSRET_OK;
	}

	if (h->policy->flags & LWSSSPOLF_SERVER)
		return LWSSSSRET_OK;

	/*
	 * there's currently no wsi / connection associated with the ss handle
	 */

#if defined(LWS_WITH_SYS_SMD)
	if (h->policy == &pol_smd) {
		/*
		 * He's an _lws_smd... and no wsi... since we're just going
		 * to queue it, we could call his tx() right here, but rather
		 * than surprise him let's set a sul to do it next time around
		 * the event loop
		 */

		lws_sul_schedule(h->context, 0, &h->u.smd.sul_write,
				 lws_ss_smd_tx_cb, 1);

		return LWSSSSRET_OK;
	}
#endif

	if (h->seqstate != SSSEQ_IDLE &&
	    h->seqstate != SSSEQ_DO_RETRY)
		return LWSSSSRET_OK;

	h->seqstate = SSSEQ_TRY_CONNECT;
	r = lws_ss_event_helper(h, LWSSSCS_POLL);
	if (r)
		return r;

	/*
	 * Retries operate via lws_ss_request_tx(), explicitly ask for a
	 * reconnection to clear the retry limit
	 */
	r = _lws_ss_client_connect(h, 1);
	if (r == LWSSSSRET_DESTROY_ME)
		return r;

	if (r)
		return lws_ss_backoff(h);

	return LWSSSSRET_OK;
}

lws_ss_state_return_t
lws_ss_request_tx_len(lws_ss_handle_t *h, unsigned long len)
{
	if (h->wsi &&
	    (h->policy->protocol == LWSSSP_H1 ||
	     h->policy->protocol == LWSSSP_H2 ||
	     h->policy->protocol == LWSSSP_WS))
		h->wsi->http.writeable_len = len;
	else
		h->writeable_len = len;

	return lws_ss_request_tx(h);
}

/*
 * private helpers
 */

/* used on context destroy when iterating listed lws_ss on a pt */

int
lws_ss_destroy_dll(struct lws_dll2 *d, void *user)
{
	lws_ss_handle_t *h = lws_container_of(d, lws_ss_handle_t, list);

	lws_ss_destroy(&h);

	return 0;
}

struct lws_sequencer *
lws_ss_get_sequencer(lws_ss_handle_t *h)
{
	return h->seq;
}

struct lws_context *
lws_ss_get_context(struct lws_ss_handle *h)
{
	return h->context;
}

const char *
lws_ss_rideshare(struct lws_ss_handle *h)
{
	if (!h->rideshare)
		return h->policy->streamtype;

	return h->rideshare->streamtype;
}

int
lws_ss_add_peer_tx_credit(struct lws_ss_handle *h, int32_t bump)
{
	const struct ss_pcols *ssp;

	ssp = ss_pcols[(int)h->policy->protocol];

	if (h->wsi && ssp && ssp->tx_cr_add)
		return ssp->tx_cr_add(h, bump);

	return 0;
}

int
lws_ss_get_est_peer_tx_credit(struct lws_ss_handle *h)
{
	const struct ss_pcols *ssp;

	ssp = ss_pcols[(int)h->policy->protocol];

	if (h->wsi && ssp && ssp->tx_cr_est)
		return ssp->tx_cr_est(h);

	return 0;
}

/*
 * protocol-independent handler for ss timeout
 */

static void
lws_ss_to_cb(lws_sorted_usec_list_t *sul)
{
	lws_ss_handle_t *h = lws_container_of(sul, lws_ss_handle_t, sul_timeout);
	lws_ss_state_return_t r;

	lwsl_info("%s: ss %p timeout fired\n", __func__, h);

	r = lws_ss_event_helper(h, LWSSSCS_TIMEOUT);
	if (r != LWSSSSRET_DISCONNECT_ME && r != LWSSSSRET_DESTROY_ME)
		return;

	if (h->wsi)
		lws_set_timeout(h->wsi, 1, LWS_TO_KILL_ASYNC);

	_lws_ss_handle_state_ret(r, NULL, &h);
}

void
lws_ss_start_timeout(struct lws_ss_handle *h, unsigned int timeout_ms)
{
	if (!timeout_ms && !h->policy->timeout_ms)
		return;

	lws_sul_schedule(h->context, 0, &h->sul_timeout, lws_ss_to_cb,
			 (timeout_ms ? timeout_ms : h->policy->timeout_ms) *
			 LWS_US_PER_MS);
}
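
/*
 * Usage sketch: arm a 5s timeout on a stream; passing 0 falls back to the
 * policy's own "timeout_ms", if any:
 *
 *	lws_ss_start_timeout(h, 5000);
 */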

void
lws_ss_cancel_timeout(struct lws_ss_handle *h)
{
	lws_sul_cancel(&h->sul_timeout);
}

void
lws_ss_change_handlers(struct lws_ss_handle *h,
	lws_ss_state_return_t (*rx)(void *userobj, const uint8_t *buf,
				    size_t len, int flags),
	lws_ss_state_return_t (*tx)(void *userobj, lws_ss_tx_ordinal_t ord,
				    uint8_t *buf, size_t *len, int *flags),
	lws_ss_state_return_t (*state)(void *userobj, void *h_src /* ss handle type */,
				       lws_ss_constate_t state,
				       lws_ss_tx_ordinal_t ack))
{
	if (rx)
		h->info.rx = rx;
	if (tx)
		h->info.tx = tx;
	if (state)
		h->info.state = state;
}