/*
 * libwebsockets - small server side websockets and web server implementation
 *
 * Copyright (C) 2019 - 2021 Andy Green <andy@warmcat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 *
 * When the user code is in a different process, a non-tls unix domain socket
 * proxy is used to asynchronously transfer buffers in each direction via the
 * network stack, without explicit IPC
 *
 * user_process{ [user code] | shim | socket-}------ lws_process{ lws }
 *
 * Lws exposes a listening unix domain socket in this case, the user processes
 * connect to it and pass just info.streamtype in an initial tx packet. All
 * packets are prepended by a 1-byte type field when used in this mode. See
 * lws-secure-streams.h for documentation and definitions.
 *
 * Proxying in either direction can face the situation that it cannot send the
 * onward packet immediately and is subject to separating the write request
 * from the write action. To make the best use of memory, a single preallocated
 * buffer stashes pending packets in all four directions (c->p, p->c, p->ss,
 * ss->p). This allows it to adapt to different traffic patterns without wasted
 * areas dedicated to traffic that isn't coming in a particular application.
 *
 * A shim is provided to monitor the process' unix domain socket and regenerate
 * the secure streams api there with callbacks happening in the process thread
 * context.
 *
 * This file implements the listening unix domain socket proxy... this code is
 * only going to run on a Linux-class device with its implications about memory
 * availability.
 */
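
/*
 * Illustrative framing sketch (an assumption added for orientation only; the
 * authoritative type bytes and layouts live in lws-secure-streams.h): each
 * packet on the proxy link is a 1-byte type, usually followed by a 16-bit
 * big-endian length and then the payload, eg
 *
 *   [ type ] [ len MSB ] [ len LSB ] [ payload ... ]
 *
 * The LWSSS_SER_RXPRE_... constants used below name the proxy -> client type
 * bytes; their LWSSS_SER_TXPRE_... counterparts cover client -> proxy.
 */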

#include <private-lib-core.h>

struct raw_pss {
	struct conn		*conn;
};

/*
 * Proxy - onward secure-stream handler
 */

typedef struct ss_proxy_onward {
	lws_ss_handle_t		*ss;
	struct conn		*conn;
} ss_proxy_t;

void
lws_proxy_clean_conn_ss(struct lws *wsi)
{
#if 0
	lws_ss_handle_t *h = (lws_ss_handle_t *)wsi->a.opaque_user_data;
	struct conn *conn = h->conn_if_sspc_onw;

	if (!wsi)
		return;

	if (conn && conn->ss)
		conn->ss->wsi = NULL;
#endif
}

void
ss_proxy_onward_link_req_writeable(lws_ss_handle_t *h_onward)
{
	ss_proxy_t *m = (ss_proxy_t *)&h_onward[1];

	if (m->conn->wsi) /* if possible, request client conn write */
		lws_callback_on_writable(m->conn->wsi);
}

int
__lws_ss_proxy_bind_ss_to_conn_wsi(void *parconn, size_t dsh_size)
{
	struct conn *conn = (struct conn *)parconn;
	struct lws_context_per_thread *pt;

	if (!conn || !conn->wsi || !conn->ss)
		return -1;

	pt = &conn->wsi->a.context->pt[(int)conn->wsi->tsi];

	if (lws_fi(&conn->ss->fic, "ssproxy_dsh_create_oom"))
		return -1;
	conn->dsh = lws_dsh_create(&pt->ss_dsh_owner, dsh_size, 2);
	if (!conn->dsh)
		return -1;

	__lws_lc_tag_append(&conn->wsi->lc, lws_ss_tag(conn->ss));

	return 0;
}

/* Onward secure streams payload interface */

static lws_ss_state_return_t
ss_proxy_onward_rx(void *userobj, const uint8_t *buf, size_t len, int flags)
{
	ss_proxy_t *m = (ss_proxy_t *)userobj;
	const char *rsp = NULL;
	int n;

	// lwsl_notice("%s: len %d\n", __func__, (int)len);

	/*
	 * The onward secure stream connection has received something.
	 */

	if (m->ss->rideshare != m->ss->policy && m->ss->rideshare) {
		rsp = m->ss->rideshare->streamtype;
		flags |= LWSSS_FLAG_RIDESHARE;
	}

	/*
	 * Apply SSS framing around this chunk of RX and stash it in the dsh
	 * in ss -> proxy [ -> client] direction. This can fail...
	 */

	if (lws_fi(&m->ss->fic, "ssproxy_dsh_rx_queue_oom"))
		n = 1;
	else
		n = lws_ss_serialize_rx_payload(m->conn->dsh, buf, len,
						flags, rsp);
	if (n)
		/*
		 * We couldn't buffer this rx, eg due to OOM, let's escalate it
		 * to be a "loss of connection", which it basically is...
		 */
		return LWSSSSRET_DISCONNECT_ME;

	/*
	 * Manage rx flow on the SS (onward) side according to our situation
	 * in the dsh holding proxy->client serialized forwarding rx
	 */

	if (!m->conn->onward_in_flow_control && m->ss->wsi &&
	    m->ss->policy->proxy_buflen_rxflow_on_above &&
	    lws_dsh_get_size(m->conn->dsh, KIND_SS_TO_P) >=
				m->ss->policy->proxy_buflen_rxflow_on_above) {
		lwsl_info("%s: %s: rxflow disabling rx (%lu / %lu, hwm %lu)\n", __func__,
			  lws_wsi_tag(m->ss->wsi),
			  (unsigned long)lws_dsh_get_size(m->conn->dsh, KIND_SS_TO_P),
			  (unsigned long)m->ss->policy->proxy_buflen,
			  (unsigned long)m->ss->policy->proxy_buflen_rxflow_on_above);
		/*
		 * stop taking in rx once the onward wsi rx is above the
		 * high water mark
		 */
		lws_rx_flow_control(m->ss->wsi, 0);
		m->conn->onward_in_flow_control = 1;
	}

	if (m->conn->wsi) /* if possible, request client conn write */
		lws_callback_on_writable(m->conn->wsi);

	return LWSSSSRET_OK;
}
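
/*
 * Illustrative policy fragment (a sketch; the field names are the ones
 * consumed above, the values are only an example) showing the rx flow
 * hysteresis on the ss -> proxy dsh: rx on the onward wsi is throttled once
 * dsh occupancy reaches "proxy_buflen_rxflow_on_above" and resumed when the
 * client side drains it back below "proxy_buflen_rxflow_off_below":
 *
 *   "proxy_buflen":			32768,
 *   "proxy_buflen_rxflow_on_above":	24576,
 *   "proxy_buflen_rxflow_off_below":	 8192,
 */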

/*
 * we are transmitting buffered payload originally from the client on to the ss
 */

static lws_ss_state_return_t
ss_proxy_onward_tx(void *userobj, lws_ss_tx_ordinal_t ord, uint8_t *buf,
		   size_t *len, int *flags)
{
	ss_proxy_t *m = (ss_proxy_t *)userobj;
	void *p;
	size_t si;

	if (!m->conn->ss || m->conn->state != LPCSPROX_OPERATIONAL) {
		lwsl_notice("%s: ss not ready\n", __func__);
		*len = 0;

		return LWSSSSRET_TX_DONT_SEND;
	}

	/*
	 * The onward secure stream says that we could send something to it
	 * (by putting it in buf, and setting *len and *flags)... dredge the
	 * next thing out of the dsh
	 */

	if (lws_ss_deserialize_tx_payload(m->conn->dsh, m->ss->wsi,
					  ord, buf, len, flags))
		return LWSSSSRET_TX_DONT_SEND;

	/* ... there's more we want to send? */
	if (!lws_dsh_get_head(m->conn->dsh, KIND_C_TO_P, (void **)&p, &si))
		_lws_ss_request_tx(m->conn->ss);

	if (!*len && !*flags)
		/* we don't actually want to send anything */
		return LWSSSSRET_TX_DONT_SEND;

	lwsl_info("%s: onward tx %d fl 0x%x\n", __func__, (int)*len, *flags);

#if 0
	{
		int ff = open("/tmp/z", O_RDWR | O_CREAT | O_APPEND, 0666);
		if (ff == -1)
			lwsl_err("%s: errno %d\n", __func__, errno);
		write(ff, buf, *len);
		close(ff);
	}
#endif

	return LWSSSSRET_OK;
}

static lws_ss_state_return_t
ss_proxy_onward_state(void *userobj, void *sh,
		      lws_ss_constate_t state, lws_ss_tx_ordinal_t ack)
{
	ss_proxy_t *m = (ss_proxy_t *)userobj;
	size_t dsh_size;

	switch (state) {
	case LWSSSCS_CREATING:

		/*
		 * conn is private to -process.c, call thru to a) adjust
		 * the accepted incoming proxy link wsi tag name to be
		 * appended with the onward ss tag information now we
		 * have it, and b) allocate the dsh buffer now we
		 * can find out the policy about it for the streamtype.
		 */

		dsh_size = m->ss->policy->proxy_buflen ?
				m->ss->policy->proxy_buflen : 32768;

		lwsl_notice("%s: %s: initializing dsh max len %lu\n",
				__func__, lws_ss_tag(m->ss),
				(unsigned long)dsh_size);

		/* this includes ssproxy_dsh_create_oom fault generation */

		if (__lws_ss_proxy_bind_ss_to_conn_wsi(m->conn, dsh_size)) {

			/* failed to allocate the dsh */

			lwsl_notice("%s: dsh init failed\n", __func__);

			return LWSSSSRET_DESTROY_ME;
		}
		break;

	case LWSSSCS_DESTROYING:
		if (!m->conn)
			break;
		if (!m->conn->wsi) {
			/*
			 * Our onward secure stream is closing and our client
			 * connection has already gone away... destroy the conn.
			 */
			lwsl_info("%s: Destroying conn\n", __func__);
			lws_dsh_destroy(&m->conn->dsh);
			free(m->conn);
			m->conn = NULL;
			return 0;
		} else
			lwsl_info("%s: ss DESTROYING, wsi up\n", __func__);
		break;

	default:
		break;
	}
	if (!m->conn) {
		lwsl_warn("%s: dropping state due to conn not up\n", __func__);

		return LWSSSSRET_OK;
	}

	if (lws_ss_serialize_state(m->conn->wsi, m->conn->dsh, state, ack))
		/*
		 * Failed to alloc state packet that we want to send in dsh,
		 * we will lose coherence and have to disconnect the link
		 */
		return LWSSSSRET_DISCONNECT_ME;

	if (m->conn->wsi) /* if possible, request client conn write */
		lws_callback_on_writable(m->conn->wsi);

	return LWSSSSRET_OK;
}

void
ss_proxy_onward_txcr(void *userobj, int bump)
{
	ss_proxy_t *m = (ss_proxy_t *)userobj;

	if (!m->conn)
		return;

	lws_ss_serialize_txcr(m->conn->dsh, bump);

	if (m->conn->wsi) /* if possible, request client conn write */
		lws_callback_on_writable(m->conn->wsi);
}

/*
 * Client <-> Proxy connection, usually on Unix Domain Socket
 */

static int
callback_ss_proxy(struct lws *wsi, enum lws_callback_reasons reason,
		  void *user, void *in, size_t len)
{
	struct raw_pss *pss = (struct raw_pss *)user;
	const lws_ss_policy_t *rsp;
	struct conn *conn = NULL;
	lws_ss_metadata_t *md;
	lws_ss_info_t ssi;
	const uint8_t *cp;
	char s[512];
	uint8_t *p;
	size_t si;
	char pay;
	int n;

	if (pss)
		conn = pss->conn;

	switch (reason) {
	case LWS_CALLBACK_PROTOCOL_INIT:
		break;

	case LWS_CALLBACK_PROTOCOL_DESTROY:
		break;

	/* callbacks related to raw socket descriptor "accepted side" */

	case LWS_CALLBACK_RAW_ADOPT:
		lwsl_info("LWS_CALLBACK_RAW_ADOPT\n");
		if (!pss)
			return -1;

		if (lws_fi(&wsi->fic, "ssproxy_client_adopt_oom"))
			pss->conn = NULL;
		else
			pss->conn = malloc(sizeof(struct conn));
		if (!pss->conn)
			return -1;

		memset(pss->conn, 0, sizeof(*pss->conn));

		/* dsh is allocated when the onward ss is done */

		pss->conn->wsi = wsi;
		wsi->bound_ss_proxy_conn = 1; /* opaque is conn */

		pss->conn->state = LPCSPROX_WAIT_INITIAL_TX;

		/*
		 * Client is expected to follow the unix domain socket
		 * acceptance up rapidly with an initial tx containing the
		 * streamtype name. We can't create the stream until then.
		 */
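
		/*
		 * Sketch of that initial packet (illustrative assumption
		 * only; the serialization definitions in lws-secure-streams.h
		 * are authoritative): a 1-byte streamtype-registration type,
		 * a length, then the streamtype name from the client's info,
		 * eg "mintest".
		 */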

		lws_set_timeout(wsi, PENDING_TIMEOUT_AWAITING_CLIENT_HS_SEND, 3);
		break;

	case LWS_CALLBACK_RAW_CLOSE:
		lwsl_info("LWS_CALLBACK_RAW_CLOSE:\n");

		if (!conn)
			break;

		/*
		 * the client unix domain socket connection (wsi / conn->wsi)
		 * has closed... eg, client has exited or otherwise has
		 * definitively finished with the proxying and onward connection
		 *
		 * But right now, the SS and possibly the SS onward wsi are
		 * still live...
		 */

		assert(conn->wsi == wsi);
		conn->wsi = NULL;

		lwsl_notice("%s: cli->prox link %s closing\n", __func__,
				lws_wsi_tag(wsi));

		/* sever relationship with conn */
		lws_set_opaque_user_data(wsi, NULL);

		/*
		 * The current wsi is decoupled from the pss / conn and
		 * the conn no longer has a pointer on it.
		 *
		 * If there's an outgoing, proxied SS conn on our behalf, we
		 * have to destroy those
		 */

		if (conn->ss) {
			struct lws *cw = conn->ss->wsi;
			/*
			 * conn->ss is the onward connection SS
			 */

			lwsl_info("%s: destroying %s, wsi %s\n",
					__func__, lws_ss_tag(conn->ss),
					lws_wsi_tag(conn->ss->wsi));

			/* sever conn relationship with ss about to be deleted */

			conn->ss->wsi = NULL;

			if (cw && wsi != cw) {

				/* disconnect onward SS from its wsi */

				lws_set_opaque_user_data(cw, NULL);

				/*
				 * The wsi doing the onward connection can no
				 * longer relate to the conn... otherwise when
				 * he gets callbacks he wants to bind to
				 * the ss we are about to delete
				 */
				lws_wsi_close(cw, LWS_TO_KILL_ASYNC);
			}

			lws_ss_destroy(&conn->ss);
			/*
			 * Conn may have gone, at ss destroy handler in
			 * ssi.state for proxied ss
			 */
			break;
		}

		if (conn->state == LPCSPROX_DESTROYED || !conn->ss) {
			/*
			 * There's no onward secure stream and our client
			 * connection is closing. Destroy the conn.
			 */
			lws_dsh_destroy(&conn->dsh);
			free(conn);
			pss->conn = NULL;
		} else
			lwsl_debug("%s: CLOSE; %s\n", __func__, lws_ss_tag(conn->ss));

		break;

	case LWS_CALLBACK_RAW_RX:
		/*
		 * ie, the proxy is receiving something from a client
		 */
		lwsl_info("%s: RX: rx %d\n", __func__, (int)len);

		if (!conn || !conn->wsi) {
			lwsl_err("%s: rx with bad conn state\n", __func__);

			return -1;
		}

		// lwsl_hexdump_info(in, len);

		if (conn->state == LPCSPROX_WAIT_INITIAL_TX) {
			memset(&ssi, 0, sizeof(ssi));
			ssi.user_alloc = sizeof(ss_proxy_t);
			ssi.handle_offset = offsetof(ss_proxy_t, ss);
			ssi.opaque_user_data_offset =
					offsetof(ss_proxy_t, conn);
			ssi.rx = ss_proxy_onward_rx;
			ssi.tx = ss_proxy_onward_tx;
		}
		ssi.state = ss_proxy_onward_state;
		ssi.flags = 0;

		n = lws_ss_deserialize_parse(&conn->parser,
				lws_get_context(wsi), conn->dsh, in, len,
				&conn->state, conn, &conn->ss, &ssi, 0);
		switch (n) {
		case LWSSSSRET_OK:
			break;
		case LWSSSSRET_DISCONNECT_ME:
			return -1;
		case LWSSSSRET_DESTROY_ME:
			if (conn->ss)
				lws_ss_destroy(&conn->ss);
			return -1;
		}

		if (conn->state == LPCSPROX_REPORTING_FAIL ||
		    conn->state == LPCSPROX_REPORTING_OK)
			lws_callback_on_writable(conn->wsi);

		break;

	case LWS_CALLBACK_RAW_WRITEABLE:

		lwsl_debug("%s: %s: LWS_CALLBACK_RAW_WRITEABLE, state 0x%x\n",
				__func__, lws_wsi_tag(wsi), lwsi_state(wsi));

		/*
		 * We can transmit something back to the client from the dsh
		 * of stuff we received on its behalf from the ss
		 */

		if (!conn || !conn->wsi)
			break;

		n = 0;
		pay = 0;

		s[3] = 0;
		cp = (const uint8_t *)s;
		switch (conn->state) {
		case LPCSPROX_REPORTING_FAIL:
			s[3] = 1;
			/* fallthru */
		case LPCSPROX_REPORTING_OK:
			s[0] = LWSSS_SER_RXPRE_CREATE_RESULT;
			s[1] = 0;
			s[2] = 1;

			n = 8;

			lws_ser_wu32be((uint8_t *)&s[4], conn->ss &&
						conn->ss->policy ?
					conn->ss->policy->client_buflen : 0);

			/*
			 * If there's rideshare sequencing, it's added after the
			 * first 4 bytes of the create result, comma-separated
			 */

			if (conn->ss) {
				rsp = conn->ss->policy;

				while (rsp) {
					if (n != 4 && n < (int)sizeof(s) - 2)
						s[n++] = ',';
					n += lws_snprintf(&s[n], sizeof(s) - (unsigned int)n,
							"%s", rsp->streamtype);
					rsp = lws_ss_policy_lookup(wsi->a.context,
						rsp->rideshare_streamtype);
				}
			}
			s[2] = (char)(n - 3);
			conn->state = LPCSPROX_OPERATIONAL;
			lws_set_timeout(wsi, 0, 0);
			break;
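
			/*
			 * As a sketch, the serialized create result built
			 * above lays out as:
			 *
			 *   s[0]	LWSSS_SER_RXPRE_CREATE_RESULT
			 *   s[1..2]	big-endian length of what follows
			 *		(filled in as n - 3)
			 *   s[3]	0 = created OK, 1 = failed
			 *   s[4..7]	u32be client_buflen from the policy
			 *   s[8...]	comma-separated rideshare streamtypes,
			 *		if any
			 */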

		case LPCSPROX_OPERATIONAL:

			/*
			 * returning [onward ->] proxy -> client...
			 * rx metadata has priority 1
			 */

			md = conn->ss->metadata;
			while (md) {
				// lwsl_notice("%s: check %s: %d\n", __func__,
				//		md->name, md->pending_onward);
				if (md->pending_onward) {
					size_t naml = strlen(md->name);

					// lwsl_notice("%s: proxy issuing rxmd\n", __func__);

					if (4 + naml + md->length > sizeof(s)) {
						lwsl_err("%s: rxmdata too big\n",
							 __func__);
						goto hangup;
					}
					md->pending_onward = 0;
					p = (uint8_t *)s;
					p[0] = LWSSS_SER_RXPRE_METADATA;
					lws_ser_wu16be(&p[1], (uint16_t)(1 + naml +
								 md->length));
					p[3] = (uint8_t)naml;
					memcpy(&p[4], md->name, naml);
					p += 4 + naml;
					memcpy(p, md->value__may_own_heap,
					       md->length);
					p += md->length;

					n = lws_ptr_diff(p, cp);
					goto again;
				}

				md = md->next;
			}

			/*
			 * If we have performance data, render it in JSON
			 * and send that as LWSSS_SER_RXPRE_PERF, which has
			 * priority 2
			 */

#if defined(LWS_WITH_CONMON)
			if (conn->ss->conmon_json) {
				unsigned int xlen = conn->ss->conmon_len;

				if (xlen > sizeof(s) - 3)
					xlen = sizeof(s) - 3;
				cp = (uint8_t *)s;
				p = (uint8_t *)s;
				p[0] = LWSSS_SER_RXPRE_PERF;
				lws_ser_wu16be(&p[1], (uint16_t)xlen);
				memcpy(&p[3], conn->ss->conmon_json, xlen);

				lws_free_set_NULL(conn->ss->conmon_json);
				n = (int)(xlen + 3);

				pay = 0;
				goto again;
			}
#endif
			/*
			 * if no fresh rx metadata, just pass through incoming
			 * dsh
			 */

			if (lws_dsh_get_head(conn->dsh, KIND_SS_TO_P,
					     (void **)&p, &si))
				break;

			cp = p;

#if 0
			if (cp[0] == LWSSS_SER_RXPRE_RX_PAYLOAD &&
			    wsi->a.context->detailed_latency_cb) {

				/*
				 * we're fulfilling rx that came in on ss
				 * by sending it back out to the client on
				 * the Unix Domain Socket
				 *
				 * + 7 u32 write will compute latency here...
				 * + 11 u32 ust we received from ss
				 *
				 * lws_write will report it and fill in
				 * LAT_DUR_PROXY_CLIENT_REQ_TO_WRITE
				 */

				us = lws_now_usecs();
				lws_ser_wu32be(&p[7], us -
						lws_ser_ru64be(&p[11]));
				lws_ser_wu64be(&p[11], us);

				wsi->detlat.acc_size =
					wsi->detlat.req_size = si - 19;
				/* time proxy held it */
				wsi->detlat.latencies[
					LAT_DUR_PROXY_RX_TO_ONWARD_TX] =
						lws_ser_ru32be(&p[7]);
			}
#endif
			pay = 1;
			n = (int)si;
			break;
		default:
			break;
		}
again:
		if (!n)
			break;

		if (lws_fi(&wsi->fic, "ssproxy_client_write_fail"))
			n = -1;
		else
			n = lws_write(wsi, (uint8_t *)cp, (unsigned int)n, LWS_WRITE_RAW);
		if (n < 0) {
			lwsl_info("%s: WRITEABLE: %d\n", __func__, n);

			goto hangup;
		}

		switch (conn->state) {
		case LPCSPROX_REPORTING_FAIL:
			goto hangup;
		case LPCSPROX_OPERATIONAL:
			if (!conn)
				break;
			if (pay) {
				lws_dsh_free((void **)&p);

				/*
				 * Did we go below the rx flow threshold for
				 * this dsh?
				 */

				if (conn->onward_in_flow_control &&
				    conn->ss->policy->proxy_buflen_rxflow_on_above &&
				    conn->ss->wsi &&
				    lws_dsh_get_size(conn->dsh, KIND_SS_TO_P) <
				      conn->ss->policy->proxy_buflen_rxflow_off_below) {
					lwsl_info("%s: %s: rxflow enabling rx (%lu / %lu, lwm %lu)\n", __func__,
						  lws_wsi_tag(conn->ss->wsi),
						  (unsigned long)lws_dsh_get_size(conn->dsh, KIND_SS_TO_P),
						  (unsigned long)conn->ss->policy->proxy_buflen,
						  (unsigned long)conn->ss->policy->proxy_buflen_rxflow_off_below);
					/*
					 * Resume taking in rx once we are
					 * below the low threshold
					 */
					lws_rx_flow_control(conn->ss->wsi,
							    LWS_RXFLOW_ALLOW);
					conn->onward_in_flow_control = 0;
				}
			}
			if (!lws_dsh_get_head(conn->dsh, KIND_SS_TO_P,
					      (void **)&p, &si)) {
				if (!lws_send_pipe_choked(wsi)) {
					cp = p;
					pay = 1;
					n = (int)si;
					goto again;
				}
				lws_callback_on_writable(wsi);
			}
			break;
		default:
			break;
		}
		break;

	default:
		break;
	}

	return lws_callback_http_dummy(wsi, reason, user, in, len);

hangup:
	/* hang up on him */

	return -1;
}
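
/*
 * Reading aid (not from this file): the positional initializers below map to
 * struct lws_protocols as (name, callback, per_session_data_size,
 * rx_buffer_size, id, user, tx_packet_size); the public struct definition is
 * authoritative.
 */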

static const struct lws_protocols protocols[] = {
	{
		"ssproxy-protocol",
		callback_ss_proxy,
		sizeof(struct raw_pss),
		2048, 2048, NULL, 0
	},
	{ NULL, NULL, 0, 0, 0, NULL, 0 }
};

/*
 * called from create_context()
 */

int
lws_ss_proxy_create(struct lws_context *context, const char *bind, int port)
{
	struct lws_context_creation_info info;

	memset(&info, 0, sizeof(info));

	info.vhost_name = "ssproxy";
	info.options = LWS_SERVER_OPTION_ADOPT_APPLY_LISTEN_ACCEPT_CONFIG |
			LWS_SERVER_OPTION_SS_PROXY;
	info.port = port;
	if (!port) {
		if (!bind)
#if defined(__linux__)
			bind = "@proxy.ss.lws";
#else
			bind = "/tmp/proxy.ss.lws";
#endif
		info.options |= LWS_SERVER_OPTION_UNIX_SOCK;
	}
	info.iface = bind;
#if defined(__linux__)
	info.unix_socket_perms = "root:root";
#else
#endif
	info.listen_accept_role = "raw-skt";
	info.listen_accept_protocol = "ssproxy-protocol";
	info.protocols = protocols;

	if (!lws_create_vhost(context, &info)) {
		lwsl_err("%s: Failed to create ss proxy vhost\n", __func__);

		return 1;
	}

	return 0;
}
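
/*
 * Illustrative usage sketch (an assumption, not part of this file): the
 * proxy-side process typically calls this once after lws_create_context(), eg
 *
 *	if (lws_ss_proxy_create(context, NULL, 0)) {
 *		lwsl_err("ss proxy create failed\n");
 *		return 1;
 *	}
 *
 * With bind == NULL and port == 0 it listens on the default Unix Domain
 * Socket ("@proxy.ss.lws" in the Linux abstract namespace); a nonzero port
 * makes it listen on TCP instead, for client processes that cannot reach
 * the UDS.
 */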