1
0
Fork 0
mirror of https://github.com/warmcat/libwebsockets.git synced 2025-03-09 00:00:04 +01:00

ss: proxy_buflen in policy sets proxy max dsh alloc per streamtype

This commit is contained in:
Andy Green 2020-12-28 10:06:16 +00:00
parent 93f54c61c5
commit 905e5373c6
6 changed files with 57 additions and 18 deletions

View file

@ -294,6 +294,7 @@ typedef struct lws_ss_policy {
const lws_retry_bo_t *retry_bo; /**< retry policy to use */
uint32_t proxy_buflen; /**< max dsh alloc for proxy */
uint32_t timeout_ms; /**< default message response
* timeout in ms */
uint32_t flags; /**< stream attribute flags */

View file

@ -326,6 +326,13 @@ interval described in the associated retry / backoff selection, are important
enough to wake the whole system from low power suspend so they happen on
schedule.
### `proxy_buflen`
Only used when the streamtype is proxied; this sets the maximum size of the
payload buffering (in bytes) the proxy will hold for this type of stream. If
the endpoint dumps a lot of data without any flow control, this may need to
be correspondingly large. Default is 32KB (32768 bytes).
### `metadata`
This allows declaring basically dynamic symbol names to be used by the streamtype,

View file

@ -57,6 +57,7 @@ static const char * const lejp_tokens_policy[] = {
"s[].*.retry",
"s[].*.timeout_ms",
"s[].*.tls_trust_store",
"s[].*.proxy_buflen",
"s[].*.metadata",
"s[].*.metadata[].*",
"s[].*.http_resp_map",
@ -132,6 +133,7 @@ typedef enum {
LSSPPT_RETRYPTR,
LSSPPT_DEFAULT_TIMEOUT_MS,
LSSPPT_TRUST,
LSSPPT_PROXY_BUFLEN,
LSSPPT_METADATA,
LSSPPT_METADATA_ITEM,
LSSPPT_HTTPRESPMAP,
@ -519,6 +521,10 @@ lws_ss_policy_parser_cb(struct lejp_ctx *ctx, char reason)
a->curr[LTY_POLICY].p->port = atoi(ctx->buf);
break;
case LSSPPT_PROXY_BUFLEN:
a->curr[LTY_POLICY].p->proxy_buflen = (uint32_t)atol(ctx->buf);
break;
case LSSPPT_HTTP_METHOD:
pp = (char **)&a->curr[LTY_POLICY].p->u.http.method;
goto string2;

View file

@ -442,8 +442,8 @@ _lws_ss_set_metadata(lws_ss_metadata_t *omd, const char *name,
lws_ss_state_return_t
_lws_ss_client_connect(lws_ss_handle_t *h, int is_retry);
void
__lws_ss_proxy_bind_ss_to_conn_wsi(void * parconn);
int
__lws_ss_proxy_bind_ss_to_conn_wsi(void *parconn, size_t dsh_size);
struct lws_vhost *
lws_ss_policy_ref_trust_store(struct lws_context *context,

View file

@ -85,15 +85,24 @@ typedef struct ss_proxy_onward {
} ss_proxy_t;
void
__lws_ss_proxy_bind_ss_to_conn_wsi(void * parconn)
int
__lws_ss_proxy_bind_ss_to_conn_wsi(void *parconn, size_t dsh_size)
{
struct conn *conn = (struct conn *)parconn;
struct lws_context_per_thread *pt;
if (!conn || !conn->wsi || !conn->ss)
return;
return -1;
pt = &conn->wsi->a.context->pt[(int)conn->wsi->tsi];
conn->dsh = lws_dsh_create(&pt->ss_dsh_owner, dsh_size, 2);
if (!conn->dsh)
return -1;
__lws_lc_tag_append(&conn->wsi->lc, lws_ss_tag(conn->ss));
return 0;
}
/* secure streams payload interface */
@ -180,9 +189,34 @@ ss_proxy_onward_state(void *userobj, void *sh,
lws_ss_constate_t state, lws_ss_tx_ordinal_t ack)
{
ss_proxy_t *m = (ss_proxy_t *)userobj;
size_t dsh_size;
switch (state) {
case LWSSSCS_CREATING:
/*
* conn is private to -process.c, call thru to a) adjust
* the accepted incoming proxy link wsi tag name to be
* appended with the onward ss tag information now we
* have it, and b) allocate the dsh buffer now we
* can find out the policy about it for the streamtype.
*/
dsh_size = m->ss->policy->proxy_buflen ?
m->ss->policy->proxy_buflen : 32768;
lwsl_notice("%s: %s: initializing dsh max len %lu\n",
__func__, lws_ss_tag(m->ss),
(unsigned long)dsh_size);
if (__lws_ss_proxy_bind_ss_to_conn_wsi(m->conn, dsh_size)) {
/* failed to allocate the dsh */
lwsl_notice("%s: dsh init failed\n", __func__);
return LWSSSSRET_DESTROY_ME;
}
break;
case LWSSSCS_DESTROYING:
@ -208,7 +242,7 @@ ss_proxy_onward_state(void *userobj, void *sh,
if (!m->conn) {
lwsl_warn("%s: dropping state due to conn not up\n", __func__);
return 0;
return LWSSSSRET_OK;
}
lws_ss_serialize_state(m->conn->dsh, state, ack);
@ -216,7 +250,7 @@ ss_proxy_onward_state(void *userobj, void *sh,
if (m->conn->wsi) /* if possible, request client conn write */
lws_callback_on_writable(m->conn->wsi);
return 0;
return LWSSSSRET_OK;
}
void
@ -241,7 +275,6 @@ static int
callback_ss_proxy(struct lws *wsi, enum lws_callback_reasons reason,
void *user, void *in, size_t len)
{
struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];
struct raw_pss *pss = (struct raw_pss *)user;
const lws_ss_policy_t *rsp;
struct conn *conn = NULL;
@ -278,13 +311,7 @@ callback_ss_proxy(struct lws *wsi, enum lws_callback_reasons reason,
return -1;
memset(pss->conn, 0, sizeof(*pss->conn));
pss->conn->dsh = lws_dsh_create(&pt->ss_dsh_owner,
LWS_SS_MTU * 160, 2);
if (!pss->conn->dsh) {
free(pss->conn);
return -1;
}
/* dsh is allocated when the onward ss is done */
pss->conn->wsi = wsi;
wsi->bound_ss_proxy_conn = 1;

View file

@ -363,8 +363,8 @@ lws_ss_deserialize_parse(struct lws_ss_serialization_parser *par,
lws_ss_metadata_t *pm;
lws_sspc_handle_t *h;
uint8_t pre[23];
lws_usec_t us;
uint32_t flags;
lws_usec_t us;
uint8_t *p;
int n;
@ -1116,8 +1116,6 @@ payload_ff:
*state = LPCSPROX_REPORTING_OK;
}
__lws_ss_proxy_bind_ss_to_conn_wsi(parconn);
if (*pss) {
(*pss)->being_serialized = 1;
#if defined(LWS_WITH_SYS_SMD)