/*
 * libwebsockets - small server side websockets and web server implementation
 *
 * Copyright (C) 2019 - 2021 Andy Green <andy@warmcat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
|
|
|
|
|
2020-12-31 14:07:13 +00:00
|
|
|
/* current SS Serialization protocol version */
#define LWS_SSS_CLIENT_PROTOCOL_VERSION 1
|
|
|
|
|
2020-02-29 12:37:24 +00:00
|
|
|
/*
 * Secure Stream state
 *
 * Private connection-sequencing state for a secure stream; distinct from the
 * public lws_ss_constate_t reported to user code.
 */

typedef enum {
	SSSEQ_IDLE,
	SSSEQ_TRY_CONNECT,
	SSSEQ_TRY_CONNECT_NAUTH,	/* presumably: connect via nauth step — confirm */
	SSSEQ_TRY_CONNECT_SAUTH,	/* presumably: connect via sauth step — confirm */
	SSSEQ_RECONNECT_WAIT,
	SSSEQ_DO_RETRY,
	SSSEQ_CONNECTED,
} lws_ss_seq_state_t;
|
|
|
|
|
2021-04-21 09:15:58 +01:00
|
|
|
/* opaque proxy-side connection object; defined in the SS proxy sources */
struct conn;
|
2020-02-29 12:37:24 +00:00
|
|
|
|
|
|
|
/**
 * lws_ss_handle_t: publicly-opaque secure stream object implementation
 */

typedef struct lws_ss_handle {
	lws_ss_info_t		info;	  /**< copy of stream creation info */

	lws_lifecycle_t		lc;	  /* lifecycle tracking for this object */

#if defined(LWS_WITH_SYS_METRICS)
	lws_metrics_caliper_compose(cal_txn)
#endif

	struct lws_dll2		list;	  /**< pt lists active ss */
	struct lws_dll2		to_list;  /**< pt lists ss with pending to-s */
#if defined(LWS_WITH_SERVER)
	struct lws_dll2		cli_list;  /**< same server clients list */
#endif
#if defined(LWS_WITH_SYS_FAULT_INJECTION)
	lws_fi_ctx_t		fic;	/**< Fault Injection context */
#endif

	struct lws_dll2_owner	src_list; /**< sink's list of bound sources */

	struct lws_context	*context; /**< lws context we are created on */
	const lws_ss_policy_t	*policy;  /**< system policy for stream */

	struct lws_sequencer	*seq;	  /**< owning sequencer if any */
	struct lws		*wsi;	  /**< the stream wsi if any */

	/* when fulfilled via sspc proxying, the proxy-side conn, else NULL
	 * — presumed; confirm against proxy sources */
	struct conn		*conn_if_sspc_onw;

#if defined(LWS_WITH_SSPLUGINS)
	void			*nauthi;  /**< the nauth plugin instance data */
	void			*sauthi;  /**< the sauth plugin instance data */
#endif

	lws_ss_metadata_t	*metadata;
	const lws_ss_policy_t	*rideshare;

#if defined(LWS_WITH_CONMON)
	/* heap JSON describing onward connection performance ("perf" policy) */
	char			*conmon_json;
#endif

	//struct lws_ss_handle	*h_sink; /**< sink we are bound to, or NULL */
	//void 			*sink_obj;/**< sink's private object representing us */

	lws_sorted_usec_list_t	sul_timeout;
	lws_sorted_usec_list_t	sul;
	lws_ss_tx_ordinal_t	txord;

	/* protocol-specific connection helpers */

	union {

		/* ...for http-related protocols... */

		struct {

			/* common to all http-related protocols */

			/* incoming multipart parsing */

			char boundary[24];	/* --boundary from headers */
			uint8_t boundary_len;	/* length of --boundary */
			uint8_t boundary_seq;	/* current match amount */
			uint8_t boundary_dashes; /* check for -- after */
			uint8_t boundary_post;	/* swallow post CRLF */

			uint8_t som:1;	/* SOM has been sent */
			uint8_t eom:1;  /* EOM has been sent */
			uint8_t any:1;	/* any content has been sent */

			uint8_t good_respcode:1; /* 200 type response code */

			union {
				struct { /* LWSSSP_H1 */
#if defined(WIN32)
					uint8_t dummy; /* C needs a member; MSVC rejects empty structs */
#endif
				} h1;
				struct { /* LWSSSP_H2 */
#if defined(WIN32)
					uint8_t dummy;
#endif
				} h2;
				struct { /* LWSSSP_WS */
#if defined(WIN32)
					uint8_t dummy;
#endif
				} ws;
			} u;
		} http;

		/* details for non-http related protocols... */
#if defined(LWS_ROLE_MQTT)
		struct {
			lws_mqtt_topic_elem_t		topic_qos;
			lws_mqtt_topic_elem_t		sub_top;
			lws_mqtt_subscribe_param_t 	sub_info;
			/* allocation that must be destroyed with conn */
			void				*heap_baggage;
			const char			*subscribe_to;
			size_t				subscribe_to_len;
		} mqtt;
#endif
#if defined(LWS_WITH_SYS_SMD)
		struct {
			struct lws_smd_peer		*smd_peer;
			lws_sorted_usec_list_t		sul_write;
		} smd;
#endif
	} u;

	unsigned long		writeable_len;

	lws_ss_constate_t	connstate;/**< public connection state */
	lws_ss_seq_state_t	seqstate; /**< private connection state */
	lws_ss_state_return_t	pending_ret; /**< holds desired disposition
					      * for ss during CCE */

#if defined(LWS_WITH_SERVER)
	int			txn_resp;
#endif

	uint16_t		retry;	  /**< retry / backoff tracking */
#if defined(LWS_WITH_CONMON)
	uint16_t		conmon_len; /* length of conmon_json */
#endif

	int16_t			temp16;

	uint8_t			tsi;	  /**< service thread idx, usually 0 */
	uint8_t			subseq;	  /**< emulate SOM tracking */
	uint8_t			txn_ok;	  /**< 1 = transaction was OK */
	uint8_t			prev_ss_state;

	uint8_t			txn_resp_set:1; /**< user code set one */
	uint8_t			txn_resp_pending:1; /**< we have yet to send */
	uint8_t			hanging_som:1;
	uint8_t			inside_msg:1;
	uint8_t			being_serialized:1; /* we are not the consumer */
	uint8_t			destroying:1;
	uint8_t			ss_dangling_connected:1;
	uint8_t			proxy_onward:1; /* opaque is conn */
	uint8_t			inside_connect:1; /* set if we are currently
						   * creating the onward
						   * connect */
} lws_ss_handle_t;
|
|
|
|
|
|
|
|
/* connection helper that doesn't need to hang around after connection starts */

union lws_ss_contemp {
#if defined(LWS_ROLE_MQTT)
	lws_mqtt_client_connect_param_t ccp;
#else
#if defined(WIN32)
	uint8_t	dummy;	/* avoid an empty union when MQTT role is disabled */
#endif
#endif
};
|
|
|
|
|
|
|
|
/*
 * When allocating the opaque handle, we overallocate for:
 *
 * 1) policy->nauth_plugin->alloc (.nauthi) if any
 * 2) policy->sauth_plugin->alloc (.sauthi) if any
 * 3) copy of creation info stream type pointed to by info.streamtype... this
 * may be arbitrarily long and since it may be coming from socket ipc and be
 * temporary at creation time, we need a place for the copy to stay in scope
 * 4) copy of info->streamtype contents
 */


/* the user object allocation is immediately after the ss object allocation */
#define ss_to_userobj(ss) ((void *)&(ss)[1])
|
|
|
|
|
|
|
|
/*
 * serialization parser state
 */

enum {
	KIND_C_TO_P,	/* client -> proxy direction */
	KIND_SS_TO_P,	/* ss -> proxy direction — presumed from name; confirm */
};
|
|
|
|
|
|
|
|
/* incremental parser state for the serialized SS wire protocol */
struct lws_ss_serialization_parser {
	char			streamtype[32];
	char			rideshare[32];
	char			metadata_name[32];

	uint64_t		ust_pwait;

	lws_ss_metadata_t	*ssmd;
	uint8_t			*rxmetaval;	/* heap copy of rx metadata value */

	int			ps;	/* parser state */
	int			ctr;	/* byte counter within current state */

	uint32_t		usd_phandling;
	uint32_t		flags;
	uint32_t		client_pid;
	int32_t			temp32;

	int32_t			txcr_out;
	int32_t			txcr_in;
	uint16_t		rem;	/* remaining bytes — presumed; confirm */

	uint8_t			type;
	uint8_t			frag1;
	uint8_t			slen;
	uint8_t			rsl_pos;
	uint8_t			rsl_idx;
	uint8_t			protocol_version;
};
|
|
|
|
|
|
|
|
/*
 * Unlike locally-fulfilled SS, SSS doesn't have to hold metadata on client side
 * but pass it through to the proxy.  The client side doesn't know the real
 * metadata names that are available in the policy (since it's hardcoded in code
 * no point passing them back to the client from the policy).  Because of that,
 * it doesn't know how many to allocate when we create the sspc_handle either.
 *
 * So we use a linked-list of changed-but-not-yet-proxied metadata allocated
 * on the heap and items removed as they are proxied out.  Anything on the list
 * is sent to the proxy before any requested tx is handled.
 *
 * This is also used to queue tx credit changes
 */

typedef struct lws_sspc_metadata {
	lws_dll2_t	list;
	char		name[32];  /* empty string, then actually TCXR */
	size_t		len;
	int		tx_cr_adjust;

	/* the value of length .len is overallocated after this */
} lws_sspc_metadata_t;
|
|
|
|
|
2020-08-03 15:56:38 +01:00
|
|
|
/* state of the upstream proxy onward connection */

enum {
	LWSSSPC_ONW_NONE,
	LWSSSPC_ONW_REQ,
	LWSSSPC_ONW_ONGOING,
	LWSSSPC_ONW_CONN,
};
|
2020-02-29 12:37:24 +00:00
|
|
|
|
|
|
|
/* client-side handle for a secure stream fulfilled via the SS proxy */
typedef struct lws_sspc_handle {
	char			rideshare_list[128];

	lws_lifecycle_t		lc;	/* lifecycle tracking for this object */

	lws_ss_info_t		ssi;
	lws_sorted_usec_list_t	sul_retry;

	struct lws_ss_serialization_parser parser;

#if defined(LWS_WITH_SYS_FAULT_INJECTION)
	lws_fi_ctx_t		fic;	/**< Fault Injection context */
#endif

	lws_dll2_owner_t	metadata_owner;
	lws_dll2_owner_t	metadata_owner_rx;

	struct lws_dll2		client_list;
	struct lws_tx_credit	txc;

#if defined(LWS_WITH_SYS_METRICS)
	lws_metrics_caliper_compose(cal_txn)
#endif

	struct lws		*cwsi;	/* wsi of the link to the proxy — presumed; confirm */

	struct lws_dsh		*dsh;
	struct lws_context	*context;

	lws_usec_t		us_earliest_write_req;

	unsigned long		writeable_len;

	lws_ss_conn_states_t	state;

	uint32_t		timeout_ms;
	uint32_t		ord;

	int16_t			temp16;

	uint8_t			rideshare_ofs[4];
	uint8_t			rsidx;

	uint8_t			prev_ss_state;

	uint8_t			conn_req_state:2; /* one of LWSSSPC_ONW_* */
	uint8_t			destroying:1;
	uint8_t			non_wsi:1;
	uint8_t			ignore_txc:1;
	uint8_t			pending_timeout_update:1;
	uint8_t			pending_writeable_len:1;
	uint8_t			creating_cb_done:1;
	uint8_t			ss_dangling_connected:1;
} lws_sspc_handle_t;
|
|
|
|
|
2020-03-26 06:48:34 +00:00
|
|
|
/* singly-linked list of named retry / backoff policies */
typedef struct backoffs {
	struct backoffs	*next;
	const char	*name;
	lws_retry_bo_t	r;
} backoff_t;
|
|
|
|
|
|
|
|
/* generic pointer to any of the policy-parser list element types (see LTY_*) */
union u {
	backoff_t		*b;
	lws_ss_x509_t		*x;
	lws_ss_trust_store_t	*t;
	lws_ss_policy_t		*p;
	lws_ss_auth_t		*a;
	lws_metric_policy_t	*m;
};
|
|
|
|
|
|
|
|
/* indices into policy_cb_args.heads[] / .curr[], one per union u member */
enum {
	LTY_BACKOFF,
	LTY_X509,
	LTY_TRUSTSTORE,
	LTY_POLICY,
	LTY_AUTH,
	LTY_METRICS,

	_LTY_COUNT /* always last */
};
|
|
|
|
|
|
|
|
|
|
|
|
/* per-parse state passed through the LEJP policy JSON parser callbacks */
struct policy_cb_args {
	struct lejp_ctx		jctx;
	struct lws_context	*context;
	struct lwsac		*ac;	/* allocations for the parsed policy live here */

	const char		*socks5_proxy;

	struct lws_b64state	b64;

	/* per-streamtype mapping of http response codes to SS states */
	lws_ss_http_respmap_t	respmap[16];

	union u			heads[_LTY_COUNT];	/* list heads, per type */
	union u			curr[_LTY_COUNT];	/* current item, per type */

	uint8_t			*p;

	int			count;

	char			pending_respmap;
};
|
|
|
|
|
2020-06-24 20:15:46 +01:00
|
|
|
#if defined(LWS_WITH_SYS_SMD)
|
|
|
|
extern const lws_ss_policy_t pol_smd;
|
|
|
|
#endif
|
|
|
|
|
2020-08-16 05:27:40 +01:00
|
|
|
|
|
|
|
/*
 * returns one of
 *
 * LWSSSSRET_OK
 * LWSSSSRET_DISCONNECT_ME
 * LWSSSSRET_DESTROY_ME
 */
int
lws_ss_deserialize_parse(struct lws_ss_serialization_parser *par,
			 struct lws_context *context,
			 struct lws_dsh *dsh, const uint8_t *cp, size_t len,
			 lws_ss_conn_states_t *state, void *parconn,
			 lws_ss_handle_t **pss, lws_ss_info_t *ssi, char client);
int
lws_ss_serialize_rx_payload(struct lws_dsh *dsh, const uint8_t *buf,
			    size_t len, int flags, const char *rsp);
int
lws_ss_deserialize_tx_payload(struct lws_dsh *dsh, struct lws *wsi,
			      lws_ss_tx_ordinal_t ord, uint8_t *buf,
			      size_t *len, int *flags);
int
lws_ss_serialize_state(struct lws *wsi, struct lws_dsh *dsh, lws_ss_constate_t state,
		       lws_ss_tx_ordinal_t ack);

void
lws_ss_serialize_state_transition(lws_ss_conn_states_t *state, int new_state);
|
|
|
|
|
|
|
|
/* find the policy entry for a streamtype name, or NULL if not in policy */
const lws_ss_policy_t *
lws_ss_policy_lookup(const struct lws_context *context, const char *streamtype);

/* can be used as a cb from lws_dll2_foreach_safe() to destroy ss */
int
lws_ss_destroy_dll(struct lws_dll2 *d, void *user);

int
lws_sspc_destroy_dll(struct lws_dll2 *d, void *user);
|
|
|
|
|
ss: rx metadata
At the moment you can define and set per-stream metadata at the client,
which will be string-substituted and if configured in the policy, set in
related outgoing protocol specific content like h1 headers.
This patch extends the metadata concept to also check incoming protocol-
specific content like h1 headers and where it matches the binding in the
streamtype's metadata entry, make it available to the client by name, via
a new lws_ss_get_metadata() api.
Currently warmcat.com has additional headers for
server: lwsws (well-known header name)
test-custom-header: hello (custom header name)
minimal-secure-streams test is updated to try to recover these both
in direct and -client (via proxy) versions. The corresponding metadata
part of the "mintest" stream policy from warmcat.com is
{
"srv": "server:"
}, {
"test": "test-custom-header:"
},
If built direct, or at the proxy, the stream has access to the static
policy metadata definitions and can store the rx metadata in the stream
metadata allocation, with heap-allocated a value. For client side that
talks to a proxy, only the proxy knows the policy, and it returns rx
metadata inside the serialized link to the client, which stores it on
the heap attached to the stream.
In addition an optimization for mapping static policy metadata definitions
to individual stream handle metadata is changed to match by name.
2020-09-10 06:43:43 +01:00
|
|
|
/* free any heap-attached rx metadata held on the sspc handle */
void
lws_sspc_rxmetadata_destroy(lws_sspc_handle_t *h);

int
lws_ss_policy_set(struct lws_context *context, const char *name);

int
lws_ss_sys_fetch_policy(struct lws_context *context);
|
|
|
|
|
2020-08-26 11:05:41 +01:00
|
|
|
/* deliver a state change to the stream's user state() callback */
lws_ss_state_return_t
lws_ss_event_helper(lws_ss_handle_t *h, lws_ss_constate_t cs);

lws_ss_state_return_t
_lws_ss_backoff(lws_ss_handle_t *h, lws_usec_t us_override);

lws_ss_state_return_t
lws_ss_backoff(lws_ss_handle_t *h);

int
_lws_ss_handle_state_ret_CAN_DESTROY_HANDLE(lws_ss_state_return_t r, struct lws *wsi,
					    lws_ss_handle_t **ph);
|
|
|
|
|
2020-02-29 12:37:24 +00:00
|
|
|
int
lws_ss_set_timeout_us(lws_ss_handle_t *h, lws_usec_t us);

void
ss_proxy_onward_txcr(void *userobj, int bump);

int
lws_ss_serialize_txcr(struct lws_dsh *dsh, int txcr);

int
lws_ss_sys_auth_api_amazon_com(struct lws_context *context);

/* metadata lookup by name on a live handle */
lws_ss_metadata_t *
lws_ss_get_handle_metadata(struct lws_ss_handle *h, const char *name);
/* metadata lookup by index / by name in the static policy */
lws_ss_metadata_t *
lws_ss_policy_metadata_index(const lws_ss_policy_t *p, size_t index);

lws_ss_metadata_t *
lws_ss_policy_metadata(const lws_ss_policy_t *p, const char *name);

int
lws_ss_exp_cb_metadata(void *priv, const char *name, char *out, size_t *pos,
		       size_t olen, size_t *exp_ofs);
|
|
|
|
|
2020-10-07 17:51:58 +01:00
|
|
|
int
_lws_ss_set_metadata(lws_ss_metadata_t *omd, const char *name,
		     const void *value, size_t len);

int
_lws_ss_alloc_set_metadata(lws_ss_metadata_t *omd, const char *name,
			   const void *value, size_t len);

lws_ss_state_return_t
_lws_ss_client_connect(lws_ss_handle_t *h, int is_retry, void *conn_if_sspc_onw);

lws_ss_state_return_t
_lws_ss_request_tx(lws_ss_handle_t *h);

int
__lws_ss_proxy_bind_ss_to_conn_wsi(void *parconn, size_t dsh_size);
|
2020-12-25 05:54:19 +00:00
|
|
|
|
ss: static policy: dynamic vhost instantiation
Presently a vh is allocated per trust store at policy parsing-time, this
is no problem on a linux-class device or if you decide you need a dynamic
policy for functionality reasons.
However if you're in a constrained enough situation that the static policy
makes sense, in the case your trust stores do not have 100% duty cycle, ie,
are anyway always in use, the currently-unused vhosts and their x.509 stack
are sitting there taking up heap for no immediate benefit.
This patch modifies behaviour in ..._STATIC_POLICY_ONLY so that vhosts and
associated x.509 tls contexts are not instantiated until a secure stream using
them is created; they are refcounted, and when the last logical secure
stream using a vhost is destroyed, the vhost and its tls context is also
destroyed.
If another ss connection is created that wants to use the trust store, the
vhost and x.509 context is regenerated again as needed.
Currently the refcounting is by ss, it's also possible to move the refcounting
to be by connection. The choice is between the delay to generate the vh
being visisble at logical ss creation-time, or at connection-time. It's anyway
not preferable to have ss instantiated and taking up space with no associated
connection or connection attempt underway.
NB you will need to reprocess any static policies after this patch so they
conform to the trust_store changes.
2020-07-20 07:28:28 +01:00
|
|
|
/* get (and optionally take a ref on) the vhost for a policy's trust store */
struct lws_vhost *
lws_ss_policy_ref_trust_store(struct lws_context *context,
			      const lws_ss_policy_t *pol, char doref);

lws_ss_state_return_t
lws_sspc_event_helper(lws_sspc_handle_t *h, lws_ss_constate_t cs,
		      lws_ss_tx_ordinal_t flags);

int
lws_ss_check_next_state(lws_lifecycle_t *lc, uint8_t *prevstate,
			lws_ss_constate_t cs);

void
lws_proxy_clean_conn_ss(struct lws *wsi);
|
|
|
|
|
ss: static policy: dynamic vhost instantiation
Presently a vh is allocated per trust store at policy parsing-time, this
is no problem on a linux-class device or if you decide you need a dynamic
policy for functionality reasons.
However if you're in a constrained enough situation that the static policy
makes sense, in the case your trust stores do not have 100% duty cycle, ie,
are anyway always in use, the currently-unused vhosts and their x.509 stack
are sitting there taking up heap for no immediate benefit.
This patch modifies behaviour in ..._STATIC_POLICY_ONLY so that vhosts and
associated x.509 tls contexts are not instantiated until a secure stream using
them is created; they are refcounted, and when the last logical secure
stream using a vhost is destroyed, the vhost and its tls context is also
destroyed.
If another ss connection is created that wants to use the trust store, the
vhost and x.509 context is regenerated again as needed.
Currently the refcounting is by ss, it's also possible to move the refcounting
to be by connection. The choice is between the delay to generate the vh
being visisble at logical ss creation-time, or at connection-time. It's anyway
not preferable to have ss instantiated and taking up space with no associated
connection or connection attempt underway.
NB you will need to reprocess any static policies after this patch so they
conform to the trust_store changes.
2020-07-20 07:28:28 +01:00
|
|
|
#if defined(LWS_WITH_SECURE_STREAMS_STATIC_POLICY_ONLY)
int
lws_ss_policy_unref_trust_store(struct lws_context *context,
				const lws_ss_policy_t *pol);
#endif

int
lws_ss_sys_cpd(struct lws_context *cx);

#if defined(LWS_WITH_SECURE_STREAMS_AUTH_SIGV4)
int lws_ss_apply_sigv4(struct lws *wsi, struct lws_ss_handle *h,
		       unsigned char **p, unsigned char *end);
#endif
|
|
|
|
|
2020-02-29 12:37:24 +00:00
|
|
|
/*
 * Per-transport-protocol operations plugged into the generic Secure Streams
 * client code via struct ss_pcols.
 */

/* Adjust the generic client connect info in *i (using buf / len and the
 * scratch union *ct as needed) so the connection is made with this
 * protocol's specifics */
typedef int (* const secstream_protocol_connect_munge_t)(lws_ss_handle_t *h,
		char *buf, size_t len, struct lws_client_connect_info *i,
		union lws_ss_contemp *ct);

/* Add \p add to the stream's tx credit, for protocols with flow control */
typedef int (* const secstream_protocol_add_txcr_t)(lws_ss_handle_t *h, int add);

/* Report the stream's current tx credit estimate */
typedef int (* const secstream_protocol_get_txcr_t)(lws_ss_handle_t *h);
|
|
|
|
|
|
|
|
/*
 * Descriptor binding one Secure Streams transport protocol (h1, h2, ws,
 * mqtt, raw) to its lws protocol handler and per-protocol operations.
 */
struct ss_pcols {
	const char			*name;	   /* protocol name, eg, "h1" */
	const char			*alpn;	   /* tls ALPN token, if any --
						    * presumably; confirm */
	const struct lws_protocols	*protocol; /* lws protocol handler */
	secstream_protocol_connect_munge_t munge;  /* specialize connect info */
	secstream_protocol_add_txcr_t	tx_cr_add; /* add tx credit */
	secstream_protocol_get_txcr_t	tx_cr_est; /* current tx credit est */
};
|
|
|
|
|
2021-01-06 15:08:22 +00:00
|
|
|
/*
|
|
|
|
* Because both sides of the connection share the conn, we allocate it
|
|
|
|
* during accepted adoption, and both sides point to it.
|
|
|
|
*
|
|
|
|
* When .ss or .wsi close, they must NULL their entry here so no dangling
|
|
|
|
 * references.
|
|
|
|
*
|
|
|
|
* The last one of the accepted side and the onward side to close frees it.
|
|
|
|
*/
|
|
|
|
|
ss: sspc: add conmon performance telemetry
This provides a way to get ahold of LWS_WITH_CONMON telemetry from Secure
Streams, it works the same with direct onward connections or via the proxy.
You can mark streamtypes with a "perf": true policy attribute... this
causes the onward connections on those streamtypes to collect information
about the connection performance, and the unsorted DNS results.
Streams with that policy attribute receive extra data in their rx callback,
with the LWSSS_FLAG_PERF_JSON flag set on it, containing JSON describing the
performance of the onward connection taken from CONMON data, in a JSON
representation. Streams without the "perf" attribute set never receive
this extra rx.
The received JSON is based on the CONMON struct info and looks like
{"peer":"46.105.127.147","dns_us":596,"sockconn_us":31382,"tls_us":28180,"txn_resp_us":23015,"dns":["2001:41d0:2:ee93::1","46.105.127.147"]}
A new minimal example minimal-secure-streams-perf is added that collects
this data on an HTTP GET from warmcat.com, and is built with a -client
version as well if LWS_WITH_SECURE_STREAMS_PROXY_API is set, that operates
via the ss proxy and produces the same result at the client.
2021-03-31 13:20:34 +01:00
|
|
|
/*
 * Render the CONMON telemetry for \p h's onward connection (dns / sockconn /
 * tls / transaction timings and unsorted DNS results) as JSON, delivered to
 * the stream's rx with LWSSS_FLAG_PERF_JSON set.  Only streams whose policy
 * has "perf": true receive this.
 */
lws_ss_state_return_t
lws_conmon_ss_json(lws_ss_handle_t *h);
|
2021-01-06 15:08:22 +00:00
|
|
|
|
ss: sspc: add conmon performance telemetry
This provides a way to get ahold of LWS_WITH_CONMON telemetry from Secure
Streams, it works the same with direct onward connections or via the proxy.
You can mark streamtypes with a "perf": true policy attribute... this
causes the onward connections on those streamtypes to collect information
about the connection performance, and the unsorted DNS results.
Streams with that policy attribute receive extra data in their rx callback,
with the LWSSS_FLAG_PERF_JSON flag set on it, containing JSON describing the
performance of the onward connection taken from CONMON data, in a JSON
representation. Streams without the "perf" attribute set never receive
this extra rx.
The received JSON is based on the CONMON struct info and looks like
{"peer":"46.105.127.147","dns_us":596,"sockconn_us":31382,"tls_us":28180,"txn_resp_us":23015,"dns":["2001:41d0:2:ee93::1","46.105.127.147"]}
A new minimal example minimal-secure-streams-perf is added that collects
this data on an HTTP GET from warmcat.com, and is built with a -client
version as well if LWS_WITH_SECURE_STREAMS_PROXY_API is set, that operates
via the ss proxy and produces the same result at the client.
2021-03-31 13:20:34 +01:00
|
|
|
/*
 * Request a writeable callback on the proxy link associated with the onward
 * ss handle \p h_onward.
 *
 * NOTE(review): purpose inferred from the name -- confirm which wsi the
 * writeable request lands on in the proxy implementation.
 */
void
ss_proxy_onward_link_req_writeable(lws_ss_handle_t *h_onward);
|
2021-01-06 15:08:22 +00:00
|
|
|
|
|
|
|
/*
 * Proxy-side object shared between the accepted (client-facing) wsi and the
 * onward ss; allocated at accepted adoption, pointed to by both sides, and
 * freed by whichever of the two closes last (each NULLs its own pointer
 * here on close so there are no dangling references).
 */
struct conn {
	struct lws_ss_serialization_parser parser; /* parses the serialized
						    * SS byte stream */

	lws_dsh_t		*dsh;	/* unified buffer for both sides */
	struct lws		*wsi;	/* the proxy's client side */
	lws_ss_handle_t		*ss;	/* the onward, ss side */

	lws_ss_conn_states_t	state;	/* serialization link state */

	char			onward_in_flow_control; /* nonzero while we
					 * have rx flow control asserted on
					 * the onward connection because the
					 * dsh filled past the policy's
					 * rxflow_on threshold */
};
|
|
|
|
|
2020-02-29 12:37:24 +00:00
|
|
|
/* transport protocol descriptors, each defined in its own source file */
extern const struct ss_pcols ss_pcol_h1;
extern const struct ss_pcols ss_pcol_h2;
extern const struct ss_pcols ss_pcol_ws;
extern const struct ss_pcols ss_pcol_mqtt;
extern const struct ss_pcols ss_pcol_raw;
|
2020-02-29 12:37:24 +00:00
|
|
|
|
|
|
|
/* the lws protocol handlers backing the descriptors above */
extern const struct lws_protocols protocol_secstream_h1;
extern const struct lws_protocols protocol_secstream_h2;
extern const struct lws_protocols protocol_secstream_ws;
extern const struct lws_protocols protocol_secstream_mqtt;
extern const struct lws_protocols protocol_secstream_raw;
|
2020-02-29 12:37:24 +00:00
|
|
|
|