2020-02-29 12:37:24 +00:00
|
|
|
/*
|
|
|
|
* libwebsockets - small server side websockets and web server implementation
|
|
|
|
*
|
|
|
|
* Copyright (C) 2019 - 2020 Andy Green <andy@warmcat.com>
|
|
|
|
*
|
|
|
|
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
|
|
* of this software and associated documentation files (the "Software"), to
|
|
|
|
* deal in the Software without restriction, including without limitation the
|
|
|
|
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
|
|
|
|
* sell copies of the Software, and to permit persons to whom the Software is
|
|
|
|
* furnished to do so, subject to the following conditions:
|
|
|
|
*
|
|
|
|
* The above copyright notice and this permission notice shall be included in
|
|
|
|
* all copies or substantial portions of the Software.
|
|
|
|
*
|
|
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
|
|
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
|
|
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
|
|
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
|
|
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
|
|
|
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
|
|
|
* IN THE SOFTWARE.
|
|
|
|
*
|
|
|
|
* This is the glue that wires up h1 to Secure Streams.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <private-lib-core.h>
|
|
|
|
|
|
|
|
#if !defined(LWS_PLAT_FREERTOS) || defined(LWS_ROLE_H2)
|
2020-05-28 13:09:56 +01:00
|
|
|
#define LWS_WITH_SS_RIDESHARE
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#if defined(LWS_WITH_SS_RIDESHARE)
|
2020-02-29 12:37:24 +00:00
|
|
|
/*
 * Incremental parser for a multipart http body: scans each rx chunk for
 * "--boundary" markers using a state machine persisted in h->u.http, and
 * forwards the payload between markers to the user rx callback with the
 * appropriate SOM / EOM / RELATED_END flags.
 *
 * h:   secure stream handle (boundary match state lives in h->u.http)
 * in:  pointer to this chunk of received body bytes
 * len: number of bytes at `in`
 *
 * Returns 0 to continue, or -1 if the boundary framing is malformed
 * (full boundary match not followed by "--" or CRLF).
 *
 * State carried across calls in h->u.http:
 *  - boundary_seq:    count of boundary chars matched so far
 *  - boundary_dashes: count of '-' seen after a complete boundary match
 *  - boundary_post:   CR / LF progress after a complete boundary match
 *  - som, any:        message-open flag / whether any payload was issued yet
 */
static int
ss_http_multipart_parser(lws_ss_handle_t *h, void *in, size_t len)
{
	uint8_t *q = (uint8_t *)in;
	int pending_issue = 0, n = 0;

	/* let's stick it in the boundary state machine first */

	while (n < (int)len) {
		if (h->u.http.boundary_seq != h->u.http.boundary_len) {
			if (q[n] == h->u.http.boundary[h->u.http.boundary_seq])
				h->u.http.boundary_seq++;
			else {
				/* mismatch: restart the boundary match */
				h->u.http.boundary_seq = 0;
				h->u.http.boundary_dashes = 0;
				h->u.http.boundary_post = 0;
			}
			goto around;
		}

		/*
		 * We already matched the boundary string, now we're
		 * looking if there's a -- afterwards
		 */
		if (h->u.http.boundary_dashes < 2) {
			if (q[n] == '-') {
				h->u.http.boundary_dashes++;
				goto around;
			}
			/* there was no final -- ... */
		}

		if (h->u.http.boundary_dashes == 2) {
			/*
			 * It's an EOM boundary: issue pending + multipart EOP
			 */
			lwsl_debug("%s: seen EOP, n %d pi %d\n",
				   __func__, n, pending_issue);

			/*
			 * It's possible we already started the decode before
			 * the end of the last packet.  Then there is no
			 * remainder to send.
			 */
			if (n >= pending_issue + h->u.http.boundary_len +
				 (h->u.http.any ? 2 : 0) + 1)
				h->info.rx(ss_to_userobj(h),
					   &q[pending_issue],
					   (unsigned int)(n - pending_issue -
						h->u.http.boundary_len - 1 -
						(h->u.http.any ? 2 : 0) /* crlf */),
					   (!h->u.http.som ? LWSSS_FLAG_SOM : 0) |
					   LWSSS_FLAG_EOM | LWSSS_FLAG_RELATED_END);

			/*
			 * Peer may not END_STREAM us
			 */
			return 0;
			//return -1;
		}

		/* how about --boundaryCRLF */

		if (h->u.http.boundary_post < 2) {
			if ((!h->u.http.boundary_post && q[n] == '\x0d') ||
			    (h->u.http.boundary_post && q[n] == '\x0a')) {
				h->u.http.boundary_post++;
				goto around;
			}

			/* there was no final CRLF ... it's wrong */

			return -1;
		}
		if (h->u.http.boundary_post != 2)
			goto around;

		/*
		 * We have a starting "--boundaryCRLF" or intermediate
		 * "CRLF--boundaryCRLF" boundary
		 */
		lwsl_debug("%s: b_post = 2 (pi %d)\n", __func__, pending_issue);
		h->u.http.boundary_seq = 0;
		h->u.http.boundary_post = 0;

		if (n >= pending_issue && (h->u.http.any || !h->u.http.som)) {
			/* Intermediate... do the EOM */
			lwsl_debug("%s: seen interm EOP n %d pi %d\n", __func__,
				   n, pending_issue);

			/*
			 * It's possible we already started the decode before
			 * the end of the last packet.  Then there is no
			 * remainder to send.
			 */
			if (n >= pending_issue + h->u.http.boundary_len +
				 (h->u.http.any ? 2 : 0))
				h->info.rx(ss_to_userobj(h), &q[pending_issue],
					   (unsigned int)(n - pending_issue -
						h->u.http.boundary_len -
						(h->u.http.any ? 2 /* crlf */ : 0)),
					   (!h->u.http.som ? LWSSS_FLAG_SOM : 0) |
					   LWSSS_FLAG_EOM);
		}

		/* Next message starts after this boundary */

		pending_issue = n;
		h->u.http.som = 0;

around:
		n++;
	}

	/* whatever is left unissued at the chunk end goes out as payload */
	if (pending_issue != n) {
		h->info.rx(ss_to_userobj(h), &q[pending_issue],
			   (unsigned int)(n - pending_issue),
			   (!h->u.http.som ? LWSSS_FLAG_SOM : 0));
		h->u.http.any = 1;
		h->u.http.som = 1;
	}

	return 0;
}
|
|
|
|
#endif
|
|
|
|
|
ss: policy: response code mapping
This adds a per-streamtype JSON mapping table in the policy.
In addition to the previous flow, it lets you generate custom
SS state notifications for specific http response codes, eg:
"http_resp_map": [ { "530": 1530 }, { "531": 1531 } ],
It's not recommended to overload the transport-layer response
code with application layer responses. It's better to return
a 200 and then in the application protocol inside http, explain
what happened from the application perspective, usually with
JSON. But this is designed to let you handle existing systems
that do overload the transport layer response code.
SS states for user use start at LWSSSCS_USER_BASE, which is
1000.
You can do a basic test with minimal-secure-streams and --respmap
flag, this will go to httpbin.org and get a 404, and the warmcat.com
policy has the mapping for 404 -> LWSSSCS_USER_BASE (1000).
Since the mapping emits states, these are serialized and handled
like any other state in the proxy case.
The policy2c example / tool is also updated to handle the additional
mapping tables.
2020-09-15 09:11:06 +01:00
|
|
|
/*
|
|
|
|
* Returns 0, or the ss state resp maps on to
|
|
|
|
*/
|
|
|
|
|
|
|
|
static int
|
|
|
|
lws_ss_http_resp_to_state(lws_ss_handle_t *h, int resp)
|
|
|
|
{
|
|
|
|
const lws_ss_http_respmap_t *r = h->policy->u.http.respmap;
|
|
|
|
int n = h->policy->u.http.count_respmap;
|
|
|
|
|
|
|
|
while (n--)
|
|
|
|
if (resp == r->resp)
|
|
|
|
return r->state;
|
|
|
|
else
|
|
|
|
r++;
|
|
|
|
|
|
|
|
return 0; /* no hit */
|
|
|
|
}
|
|
|
|
|
ss: rx metadata
At the moment you can define and set per-stream metadata at the client,
which will be string-substituted and if configured in the policy, set in
related outgoing protocol specific content like h1 headers.
This patch extends the metadata concept to also check incoming protocol-
specific content like h1 headers and where it matches the binding in the
streamtype's metadata entry, make it available to the client by name, via
a new lws_ss_get_metadata() api.
Currently warmcat.com has additional headers for
server: lwsws (well-known header name)
test-custom-header: hello (custom header name)
minimal-secure-streams test is updated to try to recover these both
in direct and -client (via proxy) versions. The corresponding metadata
part of the "mintest" stream policy from warmcat.com is
{
"srv": "server:"
}, {
"test": "test-custom-header:"
},
If built direct, or at the proxy, the stream has access to the static
policy metadata definitions and can store the rx metadata in the stream
metadata allocation, with heap-allocated a value. For client side that
talks to a proxy, only the proxy knows the policy, and it returns rx
metadata inside the serialized link to the client, which stores it on
the heap attached to the stream.
In addition an optimization for mapping static policy metadata definitions
to individual stream handle metadata is changed to match by name.
2020-09-10 06:43:43 +01:00
|
|
|
/*
|
|
|
|
* This converts any set metadata items into outgoing http headers
|
|
|
|
*/
|
|
|
|
|
2020-07-27 10:03:12 +01:00
|
|
|
static int
|
|
|
|
lws_apply_metadata(lws_ss_handle_t *h, struct lws *wsi, uint8_t *buf,
|
|
|
|
uint8_t **pp, uint8_t *end)
|
|
|
|
{
|
ss: rx metadata
At the moment you can define and set per-stream metadata at the client,
which will be string-substituted and if configured in the policy, set in
related outgoing protocol specific content like h1 headers.
This patch extends the metadata concept to also check incoming protocol-
specific content like h1 headers and where it matches the binding in the
streamtype's metadata entry, make it available to the client by name, via
a new lws_ss_get_metadata() api.
Currently warmcat.com has additional headers for
server: lwsws (well-known header name)
test-custom-header: hello (custom header name)
minimal-secure-streams test is updated to try to recover these both
in direct and -client (via proxy) versions. The corresponding metadata
part of the "mintest" stream policy from warmcat.com is
{
"srv": "server:"
}, {
"test": "test-custom-header:"
},
If built direct, or at the proxy, the stream has access to the static
policy metadata definitions and can store the rx metadata in the stream
metadata allocation, with heap-allocated a value. For client side that
talks to a proxy, only the proxy knows the policy, and it returns rx
metadata inside the serialized link to the client, which stores it on
the heap attached to the stream.
In addition an optimization for mapping static policy metadata definitions
to individual stream handle metadata is changed to match by name.
2020-09-10 06:43:43 +01:00
|
|
|
lws_ss_metadata_t *polmd = h->policy->metadata;
|
|
|
|
int m = 0;
|
2020-07-27 10:03:12 +01:00
|
|
|
|
ss: rx metadata
At the moment you can define and set per-stream metadata at the client,
which will be string-substituted and if configured in the policy, set in
related outgoing protocol specific content like h1 headers.
This patch extends the metadata concept to also check incoming protocol-
specific content like h1 headers and where it matches the binding in the
streamtype's metadata entry, make it available to the client by name, via
a new lws_ss_get_metadata() api.
Currently warmcat.com has additional headers for
server: lwsws (well-known header name)
test-custom-header: hello (custom header name)
minimal-secure-streams test is updated to try to recover these both
in direct and -client (via proxy) versions. The corresponding metadata
part of the "mintest" stream policy from warmcat.com is
{
"srv": "server:"
}, {
"test": "test-custom-header:"
},
If built direct, or at the proxy, the stream has access to the static
policy metadata definitions and can store the rx metadata in the stream
metadata allocation, with heap-allocated a value. For client side that
talks to a proxy, only the proxy knows the policy, and it returns rx
metadata inside the serialized link to the client, which stores it on
the heap attached to the stream.
In addition an optimization for mapping static policy metadata definitions
to individual stream handle metadata is changed to match by name.
2020-09-10 06:43:43 +01:00
|
|
|
while (polmd) {
|
2020-07-27 10:03:12 +01:00
|
|
|
|
ss: rx metadata
At the moment you can define and set per-stream metadata at the client,
which will be string-substituted and if configured in the policy, set in
related outgoing protocol specific content like h1 headers.
This patch extends the metadata concept to also check incoming protocol-
specific content like h1 headers and where it matches the binding in the
streamtype's metadata entry, make it available to the client by name, via
a new lws_ss_get_metadata() api.
Currently warmcat.com has additional headers for
server: lwsws (well-known header name)
test-custom-header: hello (custom header name)
minimal-secure-streams test is updated to try to recover these both
in direct and -client (via proxy) versions. The corresponding metadata
part of the "mintest" stream policy from warmcat.com is
{
"srv": "server:"
}, {
"test": "test-custom-header:"
},
If built direct, or at the proxy, the stream has access to the static
policy metadata definitions and can store the rx metadata in the stream
metadata allocation, with heap-allocated a value. For client side that
talks to a proxy, only the proxy knows the policy, and it returns rx
metadata inside the serialized link to the client, which stores it on
the heap attached to the stream.
In addition an optimization for mapping static policy metadata definitions
to individual stream handle metadata is changed to match by name.
2020-09-10 06:43:43 +01:00
|
|
|
/* has to have a non-empty header string */
|
2020-07-27 10:03:12 +01:00
|
|
|
|
2020-10-11 07:29:47 +01:00
|
|
|
if (polmd->value__may_own_heap &&
|
2020-11-17 11:59:24 +00:00
|
|
|
((uint8_t *)polmd->value__may_own_heap)[0] &&
|
|
|
|
h->metadata[m].value__may_own_heap) {
|
2020-07-27 10:03:12 +01:00
|
|
|
if (lws_add_http_header_by_name(wsi,
|
2020-10-11 07:29:47 +01:00
|
|
|
polmd->value__may_own_heap,
|
|
|
|
h->metadata[m].value__may_own_heap,
|
2020-07-27 10:03:12 +01:00
|
|
|
(int)h->metadata[m].length, pp, end))
|
|
|
|
return -1;
|
2020-11-24 11:11:19 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Check for the case he's setting a non-zero
|
|
|
|
* content-length "via the backdoor" metadata-
|
|
|
|
* driven headers, and set the body_pending()
|
|
|
|
* state if so...
|
|
|
|
*/
|
|
|
|
|
|
|
|
if (!strncmp(polmd->value__may_own_heap,
|
|
|
|
"content-length", 14) &&
|
|
|
|
atoi(h->metadata[m].value__may_own_heap))
|
|
|
|
lws_client_http_body_pending(wsi, 1);
|
2020-07-27 10:03:12 +01:00
|
|
|
}
|
ss: rx metadata
At the moment you can define and set per-stream metadata at the client,
which will be string-substituted and if configured in the policy, set in
related outgoing protocol specific content like h1 headers.
This patch extends the metadata concept to also check incoming protocol-
specific content like h1 headers and where it matches the binding in the
streamtype's metadata entry, make it available to the client by name, via
a new lws_ss_get_metadata() api.
Currently warmcat.com has additional headers for
server: lwsws (well-known header name)
test-custom-header: hello (custom header name)
minimal-secure-streams test is updated to try to recover these both
in direct and -client (via proxy) versions. The corresponding metadata
part of the "mintest" stream policy from warmcat.com is
{
"srv": "server:"
}, {
"test": "test-custom-header:"
},
If built direct, or at the proxy, the stream has access to the static
policy metadata definitions and can store the rx metadata in the stream
metadata allocation, with heap-allocated a value. For client side that
talks to a proxy, only the proxy knows the policy, and it returns rx
metadata inside the serialized link to the client, which stores it on
the heap attached to the stream.
In addition an optimization for mapping static policy metadata definitions
to individual stream handle metadata is changed to match by name.
2020-09-10 06:43:43 +01:00
|
|
|
|
|
|
|
m++;
|
|
|
|
polmd = polmd->next;
|
2020-07-27 10:03:12 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Content-length on POST / PUT if we have the length information
|
|
|
|
*/
|
|
|
|
|
|
|
|
if (h->policy->u.http.method && (
|
|
|
|
(!strcmp(h->policy->u.http.method, "POST") ||
|
|
|
|
!strcmp(h->policy->u.http.method, "PUT"))) &&
|
|
|
|
wsi->http.writeable_len) {
|
|
|
|
if (!(h->policy->flags &
|
|
|
|
LWSSSPOLF_HTTP_NO_CONTENT_LENGTH)) {
|
|
|
|
int n = lws_snprintf((char *)buf, 20, "%u",
|
|
|
|
(unsigned int)wsi->http.writeable_len);
|
|
|
|
if (lws_add_http_header_by_token(wsi,
|
|
|
|
WSI_TOKEN_HTTP_CONTENT_LENGTH,
|
|
|
|
buf, n, pp, end))
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
lws_client_http_body_pending(wsi, 1);
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
ss: rx metadata
At the moment you can define and set per-stream metadata at the client,
which will be string-substituted and if configured in the policy, set in
related outgoing protocol specific content like h1 headers.
This patch extends the metadata concept to also check incoming protocol-
specific content like h1 headers and where it matches the binding in the
streamtype's metadata entry, make it available to the client by name, via
a new lws_ss_get_metadata() api.
Currently warmcat.com has additional headers for
server: lwsws (well-known header name)
test-custom-header: hello (custom header name)
minimal-secure-streams test is updated to try to recover these both
in direct and -client (via proxy) versions. The corresponding metadata
part of the "mintest" stream policy from warmcat.com is
{
"srv": "server:"
}, {
"test": "test-custom-header:"
},
If built direct, or at the proxy, the stream has access to the static
policy metadata definitions and can store the rx metadata in the stream
metadata allocation, with heap-allocated a value. For client side that
talks to a proxy, only the proxy knows the policy, and it returns rx
metadata inside the serialized link to the client, which stores it on
the heap attached to the stream.
In addition an optimization for mapping static policy metadata definitions
to individual stream handle metadata is changed to match by name.
2020-09-10 06:43:43 +01:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Check if any metadata headers present in the server headers, and record
|
|
|
|
* them into the associated metadata item if so.
|
|
|
|
*/
|
|
|
|
|
|
|
|
static int
|
|
|
|
lws_extract_metadata(lws_ss_handle_t *h, struct lws *wsi)
|
|
|
|
{
|
|
|
|
lws_ss_metadata_t *polmd = h->policy->metadata, *omd;
|
|
|
|
int n, m = 0;
|
|
|
|
|
|
|
|
while (polmd) {
|
|
|
|
|
2020-10-04 07:27:22 +01:00
|
|
|
if (polmd->value_is_http_token != LWS_HTTP_NO_KNOWN_HEADER) {
|
ss: rx metadata
At the moment you can define and set per-stream metadata at the client,
which will be string-substituted and if configured in the policy, set in
related outgoing protocol specific content like h1 headers.
This patch extends the metadata concept to also check incoming protocol-
specific content like h1 headers and where it matches the binding in the
streamtype's metadata entry, make it available to the client by name, via
a new lws_ss_get_metadata() api.
Currently warmcat.com has additional headers for
server: lwsws (well-known header name)
test-custom-header: hello (custom header name)
minimal-secure-streams test is updated to try to recover these both
in direct and -client (via proxy) versions. The corresponding metadata
part of the "mintest" stream policy from warmcat.com is
{
"srv": "server:"
}, {
"test": "test-custom-header:"
},
If built direct, or at the proxy, the stream has access to the static
policy metadata definitions and can store the rx metadata in the stream
metadata allocation, with heap-allocated a value. For client side that
talks to a proxy, only the proxy knows the policy, and it returns rx
metadata inside the serialized link to the client, which stores it on
the heap attached to the stream.
In addition an optimization for mapping static policy metadata definitions
to individual stream handle metadata is changed to match by name.
2020-09-10 06:43:43 +01:00
|
|
|
|
|
|
|
/* it's a well-known header token */
|
|
|
|
|
|
|
|
n = lws_hdr_total_length(wsi, polmd->value_is_http_token);
|
|
|
|
if (n) {
|
|
|
|
const char *cp = lws_hdr_simple_ptr(wsi,
|
|
|
|
polmd->value_is_http_token);
|
2020-10-07 17:51:58 +01:00
|
|
|
omd = lws_ss_get_handle_metadata(h, polmd->name);
|
|
|
|
if (!omd)
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
assert(!strcmp(omd->name, polmd->name));
|
ss: rx metadata
At the moment you can define and set per-stream metadata at the client,
which will be string-substituted and if configured in the policy, set in
related outgoing protocol specific content like h1 headers.
This patch extends the metadata concept to also check incoming protocol-
specific content like h1 headers and where it matches the binding in the
streamtype's metadata entry, make it available to the client by name, via
a new lws_ss_get_metadata() api.
Currently warmcat.com has additional headers for
server: lwsws (well-known header name)
test-custom-header: hello (custom header name)
minimal-secure-streams test is updated to try to recover these both
in direct and -client (via proxy) versions. The corresponding metadata
part of the "mintest" stream policy from warmcat.com is
{
"srv": "server:"
}, {
"test": "test-custom-header:"
},
If built direct, or at the proxy, the stream has access to the static
policy metadata definitions and can store the rx metadata in the stream
metadata allocation, with heap-allocated a value. For client side that
talks to a proxy, only the proxy knows the policy, and it returns rx
metadata inside the serialized link to the client, which stores it on
the heap attached to the stream.
In addition an optimization for mapping static policy metadata definitions
to individual stream handle metadata is changed to match by name.
2020-09-10 06:43:43 +01:00
|
|
|
|
|
|
|
/*
|
|
|
|
* it's present on the wsi, we want to
|
|
|
|
* set the related metadata name to it then
|
|
|
|
*/
|
|
|
|
|
2020-12-12 06:21:40 +00:00
|
|
|
_lws_ss_set_metadata(omd, polmd->name, cp, (unsigned int)n);
|
ss: rx metadata
At the moment you can define and set per-stream metadata at the client,
which will be string-substituted and if configured in the policy, set in
related outgoing protocol specific content like h1 headers.
This patch extends the metadata concept to also check incoming protocol-
specific content like h1 headers and where it matches the binding in the
streamtype's metadata entry, make it available to the client by name, via
a new lws_ss_get_metadata() api.
Currently warmcat.com has additional headers for
server: lwsws (well-known header name)
test-custom-header: hello (custom header name)
minimal-secure-streams test is updated to try to recover these both
in direct and -client (via proxy) versions. The corresponding metadata
part of the "mintest" stream policy from warmcat.com is
{
"srv": "server:"
}, {
"test": "test-custom-header:"
},
If built direct, or at the proxy, the stream has access to the static
policy metadata definitions and can store the rx metadata in the stream
metadata allocation, with heap-allocated a value. For client side that
talks to a proxy, only the proxy knows the policy, and it returns rx
metadata inside the serialized link to the client, which stores it on
the heap attached to the stream.
In addition an optimization for mapping static policy metadata definitions
to individual stream handle metadata is changed to match by name.
2020-09-10 06:43:43 +01:00
|
|
|
|
2020-10-07 17:51:58 +01:00
|
|
|
#if defined(LWS_WITH_SECURE_STREAMS_PROXY_API)
|
ss: rx metadata
At the moment you can define and set per-stream metadata at the client,
which will be string-substituted and if configured in the policy, set in
related outgoing protocol specific content like h1 headers.
This patch extends the metadata concept to also check incoming protocol-
specific content like h1 headers and where it matches the binding in the
streamtype's metadata entry, make it available to the client by name, via
a new lws_ss_get_metadata() api.
Currently warmcat.com has additional headers for
server: lwsws (well-known header name)
test-custom-header: hello (custom header name)
minimal-secure-streams test is updated to try to recover these both
in direct and -client (via proxy) versions. The corresponding metadata
part of the "mintest" stream policy from warmcat.com is
{
"srv": "server:"
}, {
"test": "test-custom-header:"
},
If built direct, or at the proxy, the stream has access to the static
policy metadata definitions and can store the rx metadata in the stream
metadata allocation, with heap-allocated a value. For client side that
talks to a proxy, only the proxy knows the policy, and it returns rx
metadata inside the serialized link to the client, which stores it on
the heap attached to the stream.
In addition an optimization for mapping static policy metadata definitions
to individual stream handle metadata is changed to match by name.
2020-09-10 06:43:43 +01:00
|
|
|
/*
|
|
|
|
* ...and because we are doing it from parsing
|
|
|
|
* onward rx, we want to mark the metadata as
|
|
|
|
* needing passing to the client
|
|
|
|
*/
|
|
|
|
omd->pending_onward = 1;
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
#if defined(LWS_WITH_CUSTOM_HEADERS)
|
|
|
|
else
|
|
|
|
|
|
|
|
/* has to have a non-empty header string */
|
|
|
|
|
2020-10-11 07:29:47 +01:00
|
|
|
if (polmd->value__may_own_heap &&
|
|
|
|
((uint8_t *)polmd->value__may_own_heap)[0]) {
|
ss: rx metadata
At the moment you can define and set per-stream metadata at the client,
which will be string-substituted and if configured in the policy, set in
related outgoing protocol specific content like h1 headers.
This patch extends the metadata concept to also check incoming protocol-
specific content like h1 headers and where it matches the binding in the
streamtype's metadata entry, make it available to the client by name, via
a new lws_ss_get_metadata() api.
Currently warmcat.com has additional headers for
server: lwsws (well-known header name)
test-custom-header: hello (custom header name)
minimal-secure-streams test is updated to try to recover these both
in direct and -client (via proxy) versions. The corresponding metadata
part of the "mintest" stream policy from warmcat.com is
{
"srv": "server:"
}, {
"test": "test-custom-header:"
},
If built direct, or at the proxy, the stream has access to the static
policy metadata definitions and can store the rx metadata in the stream
metadata allocation, with heap-allocated a value. For client side that
talks to a proxy, only the proxy knows the policy, and it returns rx
metadata inside the serialized link to the client, which stores it on
the heap attached to the stream.
In addition an optimization for mapping static policy metadata definitions
to individual stream handle metadata is changed to match by name.
2020-09-10 06:43:43 +01:00
|
|
|
char *p;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Can it be a custom header?
|
|
|
|
*/
|
|
|
|
|
2020-10-11 07:29:47 +01:00
|
|
|
n = lws_hdr_custom_length(wsi, (const char *)
|
|
|
|
polmd->value__may_own_heap,
|
|
|
|
polmd->value_length);
|
ss: rx metadata
At the moment you can define and set per-stream metadata at the client,
which will be string-substituted and if configured in the policy, set in
related outgoing protocol specific content like h1 headers.
This patch extends the metadata concept to also check incoming protocol-
specific content like h1 headers and where it matches the binding in the
streamtype's metadata entry, make it available to the client by name, via
a new lws_ss_get_metadata() api.
Currently warmcat.com has additional headers for
server: lwsws (well-known header name)
test-custom-header: hello (custom header name)
minimal-secure-streams test is updated to try to recover these both
in direct and -client (via proxy) versions. The corresponding metadata
part of the "mintest" stream policy from warmcat.com is
{
"srv": "server:"
}, {
"test": "test-custom-header:"
},
If built direct, or at the proxy, the stream has access to the static
policy metadata definitions and can store the rx metadata in the stream
metadata allocation, with heap-allocated a value. For client side that
talks to a proxy, only the proxy knows the policy, and it returns rx
metadata inside the serialized link to the client, which stores it on
the heap attached to the stream.
In addition an optimization for mapping static policy metadata definitions
to individual stream handle metadata is changed to match by name.
2020-09-10 06:43:43 +01:00
|
|
|
if (n > 0) {
|
|
|
|
|
2020-12-12 06:21:40 +00:00
|
|
|
p = lws_malloc((unsigned int)n + 1, __func__);
|
ss: rx metadata
At the moment you can define and set per-stream metadata at the client,
which will be string-substituted and if configured in the policy, set in
related outgoing protocol specific content like h1 headers.
This patch extends the metadata concept to also check incoming protocol-
specific content like h1 headers and where it matches the binding in the
streamtype's metadata entry, make it available to the client by name, via
a new lws_ss_get_metadata() api.
Currently warmcat.com has additional headers for
server: lwsws (well-known header name)
test-custom-header: hello (custom header name)
minimal-secure-streams test is updated to try to recover these both
in direct and -client (via proxy) versions. The corresponding metadata
part of the "mintest" stream policy from warmcat.com is
{
"srv": "server:"
}, {
"test": "test-custom-header:"
},
If built direct, or at the proxy, the stream has access to the static
policy metadata definitions and can store the rx metadata in the stream
metadata allocation, with heap-allocated a value. For client side that
talks to a proxy, only the proxy knows the policy, and it returns rx
metadata inside the serialized link to the client, which stores it on
the heap attached to the stream.
In addition an optimization for mapping static policy metadata definitions
to individual stream handle metadata is changed to match by name.
2020-09-10 06:43:43 +01:00
|
|
|
if (!p)
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
/* if needed, free any previous value */
|
|
|
|
|
|
|
|
if (polmd->value_on_lws_heap) {
|
2020-10-11 07:29:47 +01:00
|
|
|
lws_free(
|
|
|
|
polmd->value__may_own_heap);
|
ss: rx metadata
At the moment you can define and set per-stream metadata at the client,
which will be string-substituted and if configured in the policy, set in
related outgoing protocol specific content like h1 headers.
This patch extends the metadata concept to also check incoming protocol-
specific content like h1 headers and where it matches the binding in the
streamtype's metadata entry, make it available to the client by name, via
a new lws_ss_get_metadata() api.
Currently warmcat.com has additional headers for
server: lwsws (well-known header name)
test-custom-header: hello (custom header name)
minimal-secure-streams test is updated to try to recover these both
in direct and -client (via proxy) versions. The corresponding metadata
part of the "mintest" stream policy from warmcat.com is
{
"srv": "server:"
}, {
"test": "test-custom-header:"
},
If built direct, or at the proxy, the stream has access to the static
policy metadata definitions and can store the rx metadata in the stream
metadata allocation, with heap-allocated a value. For client side that
talks to a proxy, only the proxy knows the policy, and it returns rx
metadata inside the serialized link to the client, which stores it on
the heap attached to the stream.
In addition an optimization for mapping static policy metadata definitions
to individual stream handle metadata is changed to match by name.
2020-09-10 06:43:43 +01:00
|
|
|
polmd->value_on_lws_heap = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2020-10-04 07:27:22 +01:00
|
|
|
* copy the named custom header value
|
|
|
|
* into the malloc'd buffer
|
ss: rx metadata
At the moment you can define and set per-stream metadata at the client,
which will be string-substituted and if configured in the policy, set in
related outgoing protocol specific content like h1 headers.
This patch extends the metadata concept to also check incoming protocol-
specific content like h1 headers and where it matches the binding in the
streamtype's metadata entry, make it available to the client by name, via
a new lws_ss_get_metadata() api.
Currently warmcat.com has additional headers for
server: lwsws (well-known header name)
test-custom-header: hello (custom header name)
minimal-secure-streams test is updated to try to recover these both
in direct and -client (via proxy) versions. The corresponding metadata
part of the "mintest" stream policy from warmcat.com is
{
"srv": "server:"
}, {
"test": "test-custom-header:"
},
If built direct, or at the proxy, the stream has access to the static
policy metadata definitions and can store the rx metadata in the stream
metadata allocation, with heap-allocated a value. For client side that
talks to a proxy, only the proxy knows the policy, and it returns rx
metadata inside the serialized link to the client, which stores it on
the heap attached to the stream.
In addition an optimization for mapping static policy metadata definitions
to individual stream handle metadata is changed to match by name.
2020-09-10 06:43:43 +01:00
|
|
|
*/
|
|
|
|
|
|
|
|
if (lws_hdr_custom_copy(wsi, p, n + 1,
|
2020-10-11 07:29:47 +01:00
|
|
|
(const char *)
|
|
|
|
polmd->value__may_own_heap,
|
ss: rx metadata
At the moment you can define and set per-stream metadata at the client,
which will be string-substituted and if configured in the policy, set in
related outgoing protocol specific content like h1 headers.
This patch extends the metadata concept to also check incoming protocol-
specific content like h1 headers and where it matches the binding in the
streamtype's metadata entry, make it available to the client by name, via
a new lws_ss_get_metadata() api.
Currently warmcat.com has additional headers for
server: lwsws (well-known header name)
test-custom-header: hello (custom header name)
minimal-secure-streams test is updated to try to recover these both
in direct and -client (via proxy) versions. The corresponding metadata
part of the "mintest" stream policy from warmcat.com is
{
"srv": "server:"
}, {
"test": "test-custom-header:"
},
If built direct, or at the proxy, the stream has access to the static
policy metadata definitions and can store the rx metadata in the stream
metadata allocation, with heap-allocated a value. For client side that
talks to a proxy, only the proxy knows the policy, and it returns rx
metadata inside the serialized link to the client, which stores it on
the heap attached to the stream.
In addition an optimization for mapping static policy metadata definitions
to individual stream handle metadata is changed to match by name.
2020-09-10 06:43:43 +01:00
|
|
|
polmd->value_length) < 0) {
|
|
|
|
lws_free(p);
|
|
|
|
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
omd = lws_ss_get_handle_metadata(h,
|
|
|
|
polmd->name);
|
|
|
|
|
2020-10-07 17:51:58 +01:00
|
|
|
_lws_ss_set_metadata(omd, polmd->name,
|
|
|
|
p, (size_t)n);
|
|
|
|
omd->value_on_lws_heap = 1;
|
|
|
|
|
ss: rx metadata
At the moment you can define and set per-stream metadata at the client,
which will be string-substituted and if configured in the policy, set in
related outgoing protocol specific content like h1 headers.
This patch extends the metadata concept to also check incoming protocol-
specific content like h1 headers and where it matches the binding in the
streamtype's metadata entry, make it available to the client by name, via
a new lws_ss_get_metadata() api.
Currently warmcat.com has additional headers for
server: lwsws (well-known header name)
test-custom-header: hello (custom header name)
minimal-secure-streams test is updated to try to recover these both
in direct and -client (via proxy) versions. The corresponding metadata
part of the "mintest" stream policy from warmcat.com is
{
"srv": "server:"
}, {
"test": "test-custom-header:"
},
If built direct, or at the proxy, the stream has access to the static
policy metadata definitions and can store the rx metadata in the stream
metadata allocation, with heap-allocated a value. For client side that
talks to a proxy, only the proxy knows the policy, and it returns rx
metadata inside the serialized link to the client, which stores it on
the heap attached to the stream.
In addition an optimization for mapping static policy metadata definitions
to individual stream handle metadata is changed to match by name.
2020-09-10 06:43:43 +01:00
|
|
|
#if defined(LWS_WITH_SECURE_STREAMS_PROXY_API)
|
|
|
|
omd->pending_onward = 1;
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
m++;
|
|
|
|
polmd = polmd->next;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-02-29 12:37:24 +00:00
|
|
|
static const uint8_t blob_idx[] = {
|
|
|
|
LWS_SYSBLOB_TYPE_AUTH,
|
|
|
|
LWS_SYSBLOB_TYPE_DEVICE_SERIAL,
|
|
|
|
LWS_SYSBLOB_TYPE_DEVICE_FW_VERSION,
|
|
|
|
LWS_SYSBLOB_TYPE_DEVICE_TYPE,
|
|
|
|
};
|
|
|
|
|
|
|
|
int
|
|
|
|
secstream_h1(struct lws *wsi, enum lws_callback_reasons reason, void *user,
|
|
|
|
void *in, size_t len)
|
|
|
|
{
|
2020-11-10 11:27:28 +00:00
|
|
|
#if defined(LWS_WITH_SERVER)
|
|
|
|
struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];
|
|
|
|
#endif
|
2020-02-29 12:37:24 +00:00
|
|
|
lws_ss_handle_t *h = (lws_ss_handle_t *)lws_get_opaque_user_data(wsi);
|
|
|
|
uint8_t buf[LWS_PRE + 1520], *p = &buf[LWS_PRE],
|
2020-07-27 10:03:12 +01:00
|
|
|
#if defined(LWS_WITH_SERVER)
|
|
|
|
*start = p,
|
|
|
|
#endif
|
2020-02-29 12:37:24 +00:00
|
|
|
*end = &buf[sizeof(buf) - 1];
|
2020-08-26 11:05:41 +01:00
|
|
|
lws_ss_state_return_t r;
|
2020-06-01 07:33:37 +01:00
|
|
|
int f = 0, m, status;
|
2020-05-28 13:09:56 +01:00
|
|
|
char conceal_eom = 0;
|
2020-09-17 12:43:31 +01:00
|
|
|
lws_usec_t inter;
|
2020-02-29 12:37:24 +00:00
|
|
|
size_t buflen;
|
|
|
|
|
|
|
|
switch (reason) {
|
|
|
|
|
|
|
|
case LWS_CALLBACK_CLIENT_CONNECTION_ERROR:
|
2020-12-25 05:54:19 +00:00
|
|
|
if (!h) {
|
|
|
|
lwsl_err("%s: CCE with no ss handle %s\n", __func__, lws_wsi_tag(wsi));
|
2020-07-31 10:44:09 +01:00
|
|
|
break;
|
2020-12-25 05:54:19 +00:00
|
|
|
}
|
2020-02-29 12:37:24 +00:00
|
|
|
assert(h->policy);
|
2020-12-25 05:54:19 +00:00
|
|
|
lwsl_info("%s: %s CLIENT_CONNECTION_ERROR: %s\n", __func__,
|
|
|
|
h->lc.gutag, in ? (const char *)in : "none");
|
2020-06-01 07:33:37 +01:00
|
|
|
/* already disconnected, no action for DISCONNECT_ME */
|
2020-08-26 11:05:41 +01:00
|
|
|
r = lws_ss_event_helper(h, LWSSSCS_UNREACHABLE);
|
|
|
|
if (r)
|
2020-12-22 15:56:41 +00:00
|
|
|
return _lws_ss_handle_state_ret_CAN_DESTROY_HANDLE(r, wsi, &h);
|
2020-07-04 21:16:49 +01:00
|
|
|
|
2020-02-29 12:37:24 +00:00
|
|
|
h->wsi = NULL;
|
2020-08-26 11:05:41 +01:00
|
|
|
r = lws_ss_backoff(h);
|
|
|
|
if (r != LWSSSSRET_OK)
|
2020-12-22 15:56:41 +00:00
|
|
|
return _lws_ss_handle_state_ret_CAN_DESTROY_HANDLE(r, wsi, &h);
|
2020-02-29 12:37:24 +00:00
|
|
|
break;
|
|
|
|
|
2020-03-11 12:44:01 +00:00
|
|
|
case LWS_CALLBACK_CLIENT_HTTP_REDIRECT:
|
2020-12-27 16:05:48 +00:00
|
|
|
|
|
|
|
if (!h)
|
|
|
|
return -1;
|
|
|
|
|
2020-03-11 12:44:01 +00:00
|
|
|
if (h->policy->u.http.fail_redirect)
|
|
|
|
lws_system_cpd_set(lws_get_context(wsi),
|
|
|
|
LWS_CPD_CAPTIVE_PORTAL);
|
2020-07-31 11:12:54 +01:00
|
|
|
/* unless it's explicitly allowed, reject to follow it */
|
|
|
|
return !(h->policy->flags & LWSSSPOLF_ALLOW_REDIRECTS);
|
2020-03-11 12:44:01 +00:00
|
|
|
|
2020-07-27 10:03:12 +01:00
|
|
|
case LWS_CALLBACK_CLOSED_HTTP: /* server */
|
2020-02-29 12:37:24 +00:00
|
|
|
case LWS_CALLBACK_CLOSED_CLIENT_HTTP:
|
|
|
|
if (!h)
|
|
|
|
break;
|
2020-06-30 16:42:37 +01:00
|
|
|
|
|
|
|
lws_sul_cancel(&h->sul_timeout);
|
2020-12-25 05:54:19 +00:00
|
|
|
lwsl_notice("%s: %s LWS_CALLBACK_CLOSED_CLIENT_HTTP\n",
|
|
|
|
__func__, wsi->lc.gutag);
|
2020-02-29 12:37:24 +00:00
|
|
|
h->wsi = NULL;
|
2020-08-26 11:05:41 +01:00
|
|
|
|
2020-11-10 11:27:28 +00:00
|
|
|
#if defined(LWS_WITH_SERVER)
|
|
|
|
lws_pt_lock(pt, __func__);
|
|
|
|
lws_dll2_remove(&h->cli_list);
|
|
|
|
lws_pt_unlock(pt);
|
|
|
|
#endif
|
|
|
|
|
2020-02-29 12:37:24 +00:00
|
|
|
if (h->policy && !(h->policy->flags & LWSSSPOLF_OPPORTUNISTIC) &&
|
2020-07-27 10:03:12 +01:00
|
|
|
#if defined(LWS_WITH_SERVER)
|
|
|
|
!(h->info.flags & LWSSSINFLAGS_ACCEPTED) && /* not server */
|
|
|
|
#endif
|
fakewsi: replace with smaller substructure
Currently we always reserve a fakewsi per pt so events that don't have a related actual
wsi, like vhost-protocol-init or vhost cert init via protocol callback can make callbacks
that look reasonable to user protocol handler code expecting a valid wsi every time.
This patch splits out stuff that user callbacks often unconditionally expect to be in
a wsi, like context pointer, vhost pointer etc into a substructure, which is composed
into struct lws at the top of it. Internal references (struct lws is opaque, so there
are only internal references) are all updated to go via the substructre, the compiler
should make that a NOP.
Helpers are added when fakewsi is used and referenced.
If not PLAT_FREERTOS, we continue to provide a full fakewsi in the pt as before,
although the helpers improve consistency by zeroing down the substructure. There is
a huge amount of user code out there over the last 10 years that did not always have
the minimal examples to follow, some of it does some unexpected things.
If it is PLAT_FREERTOS, that is a newer thing in lws and users have the benefit of
being able to follow the minimal examples' approach. For PLAT_FREERTOS we don't
reserve the fakewsi in the pt any more, saving around 800 bytes. The helpers then
create a struct lws_a (the substructure) on the stack, zero it down (but it is only
like 4 pointers) and prepare it with whatever we know like the context.
Then we cast it to a struct lws * and use it in the user protocol handler call.
In this case, the remainder of the struct lws is undefined. However the amount of
old protocol handlers that might touch things outside of the substructure in
PLAT_FREERTOS is very limited compared to legacy lws user code and the saving is
significant on constrained devices.
User handlers should not be touching everything in a wsi every time anyway, there
are several cases where there is no valid wsi to do the call with. Dereference of
things outside the substructure should only happen when the callback reason shows
there is a valid wsi bound to the activity (as in all the minimal examples).
2020-07-19 08:33:46 +01:00
|
|
|
!h->txn_ok && !wsi->a.context->being_destroyed) {
|
2020-08-26 11:05:41 +01:00
|
|
|
r = lws_ss_backoff(h);
|
|
|
|
if (r != LWSSSSRET_OK)
|
2020-12-22 15:56:41 +00:00
|
|
|
return _lws_ss_handle_state_ret_CAN_DESTROY_HANDLE(r, wsi, &h);
|
2020-08-26 11:05:41 +01:00
|
|
|
break;
|
2020-07-04 21:16:49 +01:00
|
|
|
} else
|
2020-06-19 18:25:56 +01:00
|
|
|
h->seqstate = SSSEQ_IDLE;
|
2020-06-01 07:33:37 +01:00
|
|
|
/* already disconnected, no action for DISCONNECT_ME */
|
2020-08-26 11:05:41 +01:00
|
|
|
r = lws_ss_event_helper(h, LWSSSCS_DISCONNECTED);
|
|
|
|
if (r != LWSSSSRET_OK)
|
2020-12-22 15:56:41 +00:00
|
|
|
return _lws_ss_handle_state_ret_CAN_DESTROY_HANDLE(r, wsi, &h);
|
2020-02-29 12:37:24 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
|
|
case LWS_CALLBACK_ESTABLISHED_CLIENT_HTTP:
|
2020-12-27 16:05:48 +00:00
|
|
|
|
|
|
|
if (!h)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
status = (int)lws_http_client_http_response(wsi);
|
2020-02-29 12:37:24 +00:00
|
|
|
lwsl_info("%s: LWS_CALLBACK_ESTABLISHED_CLIENT_HTTP: %d\n", __func__, status);
|
|
|
|
// if (!status)
|
|
|
|
/* it's just telling use we connected / joined the nwsi */
|
|
|
|
// break;
|
2020-03-11 12:44:01 +00:00
|
|
|
|
2020-12-07 18:53:00 +00:00
|
|
|
if (status == HTTP_STATUS_SERVICE_UNAVAILABLE /* 503 */ ||
|
|
|
|
status == 429 /* Too many requests */) {
|
2020-09-17 12:43:31 +01:00
|
|
|
/*
|
|
|
|
* We understand this attempt failed, and that we should
|
|
|
|
* conceal this attempt. If there's a specified
|
|
|
|
* retry-after, we should use that if larger than our
|
|
|
|
* computed backoff
|
|
|
|
*/
|
|
|
|
|
|
|
|
inter = 0;
|
|
|
|
lws_http_check_retry_after(wsi, &inter);
|
|
|
|
|
|
|
|
r = _lws_ss_backoff(h, inter);
|
|
|
|
if (r != LWSSSSRET_OK)
|
2020-12-22 15:56:41 +00:00
|
|
|
return _lws_ss_handle_state_ret_CAN_DESTROY_HANDLE(r, wsi, &h);
|
2020-09-17 12:43:31 +01:00
|
|
|
|
|
|
|
return -1; /* end this stream */
|
|
|
|
}
|
|
|
|
|
2020-03-11 12:44:01 +00:00
|
|
|
if (h->policy->u.http.resp_expect)
|
|
|
|
h->u.http.good_respcode =
|
|
|
|
status == h->policy->u.http.resp_expect;
|
|
|
|
else
|
|
|
|
h->u.http.good_respcode = (status >= 200 && status < 300);
|
2020-02-29 12:37:24 +00:00
|
|
|
// lwsl_err("%s: good resp %d %d\n", __func__, status, h->u.http.good_respcode);
|
|
|
|
|
ss: rx metadata
At the moment you can define and set per-stream metadata at the client,
which will be string-substituted and if configured in the policy, set in
related outgoing protocol specific content like h1 headers.
This patch extends the metadata concept to also check incoming protocol-
specific content like h1 headers and where it matches the binding in the
streamtype's metadata entry, make it available to the client by name, via
a new lws_ss_get_metadata() api.
Currently warmcat.com has additional headers for
server: lwsws (well-known header name)
test-custom-header: hello (custom header name)
minimal-secure-streams test is updated to try to recover these both
in direct and -client (via proxy) versions. The corresponding metadata
part of the "mintest" stream policy from warmcat.com is
{
"srv": "server:"
}, {
"test": "test-custom-header:"
},
If built direct, or at the proxy, the stream has access to the static
policy metadata definitions and can store the rx metadata in the stream
metadata allocation, with heap-allocated a value. For client side that
talks to a proxy, only the proxy knows the policy, and it returns rx
metadata inside the serialized link to the client, which stores it on
the heap attached to the stream.
In addition an optimization for mapping static policy metadata definitions
to individual stream handle metadata is changed to match by name.
2020-09-10 06:43:43 +01:00
|
|
|
if (lws_extract_metadata(h, wsi)) {
|
|
|
|
lwsl_info("%s: rx metadata extract failed\n", __func__);
|
|
|
|
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
ss: policy: response code mapping
This adds a per-streamtype JSON mapping table in the policy.
In addition to the previous flow, it lets you generate custom
SS state notifications for specific http response codes, eg:
"http_resp_map": [ { "530": 1530 }, { "531": 1531 } ],
It's not recommended to overload the transport-layer response
code with application layer responses. It's better to return
a 200 and then in the application protocol inside http, explain
what happened from the application perspective, usually with
JSON. But this is designed to let you handle existing systems
that do overload the transport layer response code.
SS states for user use start at LWSSSCS_USER_BASE, which is
1000.
You can do a basic test with minimal-secure-streams and --respmap
flag, this will go to httpbin.org and get a 404, and the warmcat.com
policy has the mapping for 404 -> LWSSSCS_USER_BASE (1000).
Since the mapping emits states, these are serialized and handled
like any other state in the proxy case.
The policy2c example / tool is also updated to handle the additional
mapping tables.
2020-09-15 09:11:06 +01:00
|
|
|
if (status) {
|
|
|
|
/*
|
|
|
|
* Check and see if it's something from the response
|
|
|
|
* map, if so, generate the requested status. If we're
|
|
|
|
* the proxy onward connection, metadata has priority
|
|
|
|
* over state updates on the serialization, so the
|
|
|
|
* state callback will see the right metadata.
|
|
|
|
*/
|
|
|
|
int n = lws_ss_http_resp_to_state(h, status);
|
|
|
|
if (n) {
|
2020-12-12 06:21:40 +00:00
|
|
|
r = lws_ss_event_helper(h, (lws_ss_constate_t)n);
|
ss: policy: response code mapping
This adds a per-streamtype JSON mapping table in the policy.
In addition to the previous flow, it lets you generate custom
SS state notifications for specific http response codes, eg:
"http_resp_map": [ { "530": 1530 }, { "531": 1531 } ],
It's not recommended to overload the transport-layer response
code with application layer responses. It's better to return
a 200 and then in the application protocol inside http, explain
what happened from the application perspective, usually with
JSON. But this is designed to let you handle existing systems
that do overload the transport layer response code.
SS states for user use start at LWSSSCS_USER_BASE, which is
1000.
You can do a basic test with minimal-secure-streams and --respmap
flag, this will go to httpbin.org and get a 404, and the warmcat.com
policy has the mapping for 404 -> LWSSSCS_USER_BASE (1000).
Since the mapping emits states, these are serialized and handled
like any other state in the proxy case.
The policy2c example / tool is also updated to handle the additional
mapping tables.
2020-09-15 09:11:06 +01:00
|
|
|
if (r != LWSSSSRET_OK)
|
2020-12-22 15:56:41 +00:00
|
|
|
return _lws_ss_handle_state_ret_CAN_DESTROY_HANDLE(r, wsi,
|
ss: policy: response code mapping
This adds a per-streamtype JSON mapping table in the policy.
In addition to the previous flow, it lets you generate custom
SS state notifications for specific http response codes, eg:
"http_resp_map": [ { "530": 1530 }, { "531": 1531 } ],
It's not recommended to overload the transport-layer response
code with application layer responses. It's better to return
a 200 and then in the application protocol inside http, explain
what happened from the application perspective, usually with
JSON. But this is designed to let you handle existing systems
that do overload the transport layer response code.
SS states for user use start at LWSSSCS_USER_BASE, which is
1000.
You can do a basic test with minimal-secure-streams and --respmap
flag, this will go to httpbin.org and get a 404, and the warmcat.com
policy has the mapping for 404 -> LWSSSCS_USER_BASE (1000).
Since the mapping emits states, these are serialized and handled
like any other state in the proxy case.
The policy2c example / tool is also updated to handle the additional
mapping tables.
2020-09-15 09:11:06 +01:00
|
|
|
&h);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-02-29 12:37:24 +00:00
|
|
|
if (h->u.http.good_respcode)
|
|
|
|
lwsl_info("%s: Connected streamtype %s, %d\n", __func__,
|
|
|
|
h->policy->streamtype, status);
|
|
|
|
else
|
2020-07-31 10:44:09 +01:00
|
|
|
if (h->u.http.good_respcode)
|
|
|
|
lwsl_warn("%s: Connected streamtype %s, BAD %d\n",
|
|
|
|
__func__, h->policy->streamtype,
|
|
|
|
status);
|
2020-02-29 12:37:24 +00:00
|
|
|
|
|
|
|
h->hanging_som = 0;
|
|
|
|
|
|
|
|
h->retry = 0;
|
|
|
|
h->seqstate = SSSEQ_CONNECTED;
|
2020-05-28 12:48:17 +01:00
|
|
|
lws_sul_cancel(&h->sul);
|
2020-07-31 10:44:09 +01:00
|
|
|
|
2020-08-26 11:05:41 +01:00
|
|
|
r = lws_ss_event_helper(h, LWSSSCS_CONNECTED);
|
|
|
|
if (r != LWSSSSRET_OK)
|
2020-12-22 15:56:41 +00:00
|
|
|
return _lws_ss_handle_state_ret_CAN_DESTROY_HANDLE(r, wsi, &h);
|
2020-02-29 12:37:24 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Since it's an http transaction we initiated... this is
|
|
|
|
* proof of connection validity
|
|
|
|
*/
|
|
|
|
lws_validity_confirmed(wsi);
|
|
|
|
|
2020-05-28 13:09:56 +01:00
|
|
|
#if defined(LWS_WITH_SS_RIDESHARE)
|
2020-02-29 12:37:24 +00:00
|
|
|
|
2020-07-29 20:36:19 +01:00
|
|
|
/*
|
2020-08-10 10:38:04 +01:00
|
|
|
* There are two ways we might want to deal with multipart,
|
|
|
|
* one is pass it through raw (although the user code needs
|
|
|
|
* a helping hand for learning the boundary), and the other
|
|
|
|
* is to deframe it and provide basically submessages in the
|
|
|
|
* different parts.
|
2020-07-29 20:36:19 +01:00
|
|
|
*/
|
|
|
|
|
2020-02-29 12:37:24 +00:00
|
|
|
if (lws_hdr_copy(wsi, (char *)buf, sizeof(buf),
|
|
|
|
WSI_TOKEN_HTTP_CONTENT_TYPE) > 0 &&
|
|
|
|
/* multipart/form-data;
|
|
|
|
* boundary=----WebKitFormBoundarycc7YgAPEIHvgE9Bf */
|
|
|
|
|
|
|
|
(!strncmp((char *)buf, "multipart/form-data", 19) ||
|
|
|
|
!strncmp((char *)buf, "multipart/related", 17))) {
|
|
|
|
struct lws_tokenize ts;
|
|
|
|
lws_tokenize_elem e;
|
|
|
|
|
|
|
|
// puts((const char *)buf);
|
|
|
|
|
|
|
|
memset(&ts, 0, sizeof(ts));
|
|
|
|
ts.start = (char *)buf;
|
|
|
|
ts.len = strlen(ts.start);
|
|
|
|
ts.flags = LWS_TOKENIZE_F_RFC7230_DELIMS |
|
|
|
|
LWS_TOKENIZE_F_SLASH_NONTERM |
|
|
|
|
LWS_TOKENIZE_F_MINUS_NONTERM;
|
|
|
|
|
|
|
|
h->u.http.boundary[0] = '\0';
|
|
|
|
do {
|
|
|
|
e = lws_tokenize(&ts);
|
|
|
|
if (e == LWS_TOKZE_TOKEN_NAME_EQUALS &&
|
|
|
|
!strncmp(ts.token, "boundary", 8) &&
|
|
|
|
ts.token_len == 8) {
|
|
|
|
e = lws_tokenize(&ts);
|
|
|
|
if (e != LWS_TOKZE_TOKEN)
|
|
|
|
goto malformed;
|
|
|
|
h->u.http.boundary[0] = '\x0d';
|
|
|
|
h->u.http.boundary[1] = '\x0a';
|
|
|
|
h->u.http.boundary[2] = '-';
|
|
|
|
h->u.http.boundary[3] = '-';
|
|
|
|
lws_strnncpy(h->u.http.boundary + 4,
|
|
|
|
ts.token, ts.token_len,
|
|
|
|
sizeof(h->u.http.boundary) - 4);
|
2020-04-06 20:25:06 +01:00
|
|
|
h->u.http.boundary_len =
|
|
|
|
(uint8_t)(ts.token_len + 4);
|
2020-02-29 12:37:24 +00:00
|
|
|
h->u.http.boundary_seq = 2;
|
|
|
|
h->u.http.boundary_dashes = 0;
|
|
|
|
}
|
|
|
|
} while (e > 0);
|
|
|
|
lwsl_info("%s: multipart boundary '%s' len %d\n", __func__,
|
|
|
|
h->u.http.boundary, h->u.http.boundary_len);
|
|
|
|
|
|
|
|
/* inform the ss that a related message group begins */
|
|
|
|
|
2020-08-10 10:38:04 +01:00
|
|
|
if ((h->policy->flags & LWSSSPOLF_HTTP_MULTIPART_IN) &&
|
|
|
|
h->u.http.boundary[0])
|
2020-02-29 12:37:24 +00:00
|
|
|
h->info.rx(ss_to_userobj(h), NULL, 0,
|
|
|
|
LWSSS_FLAG_RELATED_START);
|
|
|
|
|
|
|
|
// lws_header_table_detach(wsi, 0);
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
malformed:
|
|
|
|
lwsl_notice("%s: malformed multipart header\n", __func__);
|
|
|
|
return -1;
|
|
|
|
#else
|
|
|
|
break;
|
|
|
|
#endif
|
|
|
|
|
|
|
|
case LWS_CALLBACK_CLIENT_APPEND_HANDSHAKE_HEADER:
|
2020-12-27 16:05:48 +00:00
|
|
|
if (!h)
|
|
|
|
return -1;
|
2020-02-29 12:37:24 +00:00
|
|
|
if (h->writeable_len)
|
|
|
|
wsi->http.writeable_len = h->writeable_len;
|
|
|
|
|
|
|
|
{
|
|
|
|
uint8_t **p = (uint8_t **)in, *end = (*p) + len,
|
|
|
|
*oin = *(uint8_t **)in;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* blob-based headers
|
|
|
|
*/
|
|
|
|
|
|
|
|
for (m = 0; m < _LWSSS_HBI_COUNT; m++) {
|
2020-08-18 12:45:21 +01:00
|
|
|
lws_system_blob_t *ab;
|
2020-02-29 12:37:24 +00:00
|
|
|
int o = 0, n;
|
|
|
|
|
|
|
|
if (!h->policy->u.http.blob_header[m])
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (m == LWSSS_HBI_AUTH &&
|
|
|
|
h->policy->u.http.auth_preamble)
|
|
|
|
o = lws_snprintf((char *)buf, sizeof(buf), "%s",
|
|
|
|
h->policy->u.http.auth_preamble);
|
|
|
|
|
|
|
|
if (o > (int)sizeof(buf) - 2)
|
|
|
|
return -1;
|
|
|
|
|
2020-08-18 12:45:21 +01:00
|
|
|
ab = lws_system_get_blob(wsi->a.context, blob_idx[m], 0);
|
|
|
|
if (!ab)
|
|
|
|
return -1;
|
|
|
|
|
2020-12-12 06:21:40 +00:00
|
|
|
buflen = sizeof(buf) - (unsigned int)o - 2u;
|
2020-08-18 12:45:21 +01:00
|
|
|
n = lws_system_blob_get(ab, buf + o, &buflen, 0);
|
2020-02-29 12:37:24 +00:00
|
|
|
if (n < 0)
|
|
|
|
return -1;
|
|
|
|
|
2020-12-12 06:21:40 +00:00
|
|
|
buf[(unsigned int)o + buflen] = '\0';
|
2020-02-29 12:37:24 +00:00
|
|
|
lwsl_debug("%s: adding blob %d: %s\n", __func__, m, buf);
|
|
|
|
|
|
|
|
if (lws_add_http_header_by_name(wsi,
|
2020-07-31 10:44:09 +01:00
|
|
|
(uint8_t *)h->policy->u.http.blob_header[m],
|
2020-12-12 06:21:40 +00:00
|
|
|
buf, (int)((int)buflen + o), p, end))
|
2020-02-29 12:37:24 +00:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* metadata-based headers
|
|
|
|
*/
|
|
|
|
|
2020-07-27 10:03:12 +01:00
|
|
|
if (lws_apply_metadata(h, wsi, buf, p, end))
|
|
|
|
return -1;
|
2020-02-29 12:37:24 +00:00
|
|
|
|
|
|
|
(void)oin;
|
|
|
|
// if (*p != oin)
|
|
|
|
// lwsl_hexdump_notice(oin, lws_ptr_diff(*p, oin));
|
|
|
|
|
|
|
|
}
|
|
|
|
|
2020-07-31 10:44:09 +01:00
|
|
|
/*
|
|
|
|
* So when proxied, for POST we have to synthesize a CONNECTED
|
|
|
|
* state, so it can request a writeable and deliver the POST
|
|
|
|
* body
|
|
|
|
*/
|
|
|
|
if ((h->policy->protocol == LWSSSP_H1 ||
|
|
|
|
h->policy->protocol == LWSSSP_H2) &&
|
2020-08-10 14:22:26 +01:00
|
|
|
h->being_serialized && (
|
|
|
|
!strcmp(h->policy->u.http.method, "PUT") ||
|
2020-08-26 11:05:41 +01:00
|
|
|
!strcmp(h->policy->u.http.method, "POST"))) {
|
|
|
|
r = lws_ss_event_helper(h, LWSSSCS_CONNECTED);
|
|
|
|
if (r)
|
2020-12-22 15:56:41 +00:00
|
|
|
return _lws_ss_handle_state_ret_CAN_DESTROY_HANDLE(r, wsi, &h);
|
2020-08-26 11:05:41 +01:00
|
|
|
}
|
2020-07-31 10:44:09 +01:00
|
|
|
|
2020-02-29 12:37:24 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
/* chunks of chunked content, with header removed */
|
2020-07-27 10:03:12 +01:00
|
|
|
case LWS_CALLBACK_HTTP_BODY:
|
2020-02-29 12:37:24 +00:00
|
|
|
case LWS_CALLBACK_RECEIVE_CLIENT_HTTP_READ:
|
|
|
|
lwsl_debug("%s: RECEIVE_CLIENT_HTTP_READ: read %d\n",
|
|
|
|
__func__, (int)len);
|
2020-06-01 07:17:48 +01:00
|
|
|
if (!h || !h->info.rx)
|
2020-02-29 12:37:24 +00:00
|
|
|
return 0;
|
|
|
|
|
2020-05-28 13:09:56 +01:00
|
|
|
#if defined(LWS_WITH_SS_RIDESHARE)
|
2020-08-10 10:38:04 +01:00
|
|
|
if ((h->policy->flags & LWSSSPOLF_HTTP_MULTIPART_IN) &&
|
|
|
|
h->u.http.boundary[0])
|
2020-02-29 12:37:24 +00:00
|
|
|
return ss_http_multipart_parser(h, in, len);
|
|
|
|
#endif
|
|
|
|
|
|
|
|
if (!h->subseq) {
|
|
|
|
f |= LWSSS_FLAG_SOM;
|
|
|
|
h->hanging_som = 1;
|
|
|
|
h->subseq = 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
// lwsl_notice("%s: HTTP_READ: client side sent len %d fl 0x%x\n",
|
|
|
|
// __func__, (int)len, (int)f);
|
|
|
|
|
2020-08-26 11:05:41 +01:00
|
|
|
r = h->info.rx(ss_to_userobj(h), (const uint8_t *)in, len, f);
|
|
|
|
if (r != LWSSSSRET_OK)
|
2020-12-22 15:56:41 +00:00
|
|
|
return _lws_ss_handle_state_ret_CAN_DESTROY_HANDLE(r, wsi, &h);
|
2020-02-29 12:37:24 +00:00
|
|
|
|
|
|
|
return 0; /* don't passthru */
|
|
|
|
|
|
|
|
/* uninterpreted http content */
|
|
|
|
case LWS_CALLBACK_RECEIVE_CLIENT_HTTP:
|
|
|
|
{
|
|
|
|
char *px = (char *)buf + LWS_PRE; /* guarantees LWS_PRE */
|
|
|
|
int lenx = sizeof(buf) - LWS_PRE;
|
|
|
|
|
ss: static policy: dynamic vhost instantiation
Presently a vh is allocated per trust store at policy parsing-time, this
is no problem on a linux-class device or if you decide you need a dynamic
policy for functionality reasons.
However if you're in a constrained enough situation that the static policy
makes sense, in the case your trust stores do not have 100% duty cycle, ie,
are anyway always in use, the currently-unused vhosts and their x.509 stack
are sitting there taking up heap for no immediate benefit.
This patch modifies behaviour in ..._STATIC_POLICY_ONLY so that vhosts and
associated x.509 tls contexts are not instantiated until a secure stream using
them is created; they are refcounted, and when the last logical secure
stream using a vhost is destroyed, the vhost and its tls context is also
destroyed.
If another ss connection is created that wants to use the trust store, the
vhost and x.509 context is regenerated again as needed.
Currently the refcounting is by ss, it's also possible to move the refcounting
to be by connection. The choice is between the delay to generate the vh
being visisble at logical ss creation-time, or at connection-time. It's anyway
not preferable to have ss instantiated and taking up space with no associated
connection or connection attempt underway.
NB you will need to reprocess any static policies after this patch so they
conform to the trust_store changes.
2020-07-20 07:28:28 +01:00
|
|
|
m = lws_http_client_read(wsi, &px, &lenx);
|
|
|
|
if (m < 0)
|
|
|
|
return m;
|
2020-02-29 12:37:24 +00:00
|
|
|
}
|
|
|
|
lws_set_timeout(wsi, 99, 30);
|
|
|
|
|
|
|
|
return 0; /* don't passthru */
|
|
|
|
|
|
|
|
case LWS_CALLBACK_COMPLETED_CLIENT_HTTP:
|
|
|
|
lwsl_debug("%s: LWS_CALLBACK_COMPLETED_CLIENT_HTTP\n", __func__);
|
2020-12-27 16:05:48 +00:00
|
|
|
|
|
|
|
if (!h)
|
|
|
|
return -1;
|
|
|
|
|
2020-12-27 08:41:54 +00:00
|
|
|
if (h->hanging_som) {
|
2020-02-29 12:37:24 +00:00
|
|
|
h->info.rx(ss_to_userobj(h), NULL, 0, LWSSS_FLAG_EOM);
|
2020-12-27 08:41:54 +00:00
|
|
|
h->hanging_som = 0;
|
|
|
|
}
|
2020-02-29 12:37:24 +00:00
|
|
|
|
|
|
|
wsi->http.writeable_len = h->writeable_len = 0;
|
2020-06-30 16:42:37 +01:00
|
|
|
lws_sul_cancel(&h->sul_timeout);
|
2020-02-29 12:37:24 +00:00
|
|
|
|
2020-07-03 08:57:24 +01:00
|
|
|
h->txn_ok = 1;
|
|
|
|
|
2020-08-26 11:05:41 +01:00
|
|
|
r = lws_ss_event_helper(h, h->u.http.good_respcode ?
|
|
|
|
LWSSSCS_QOS_ACK_REMOTE :
|
|
|
|
LWSSSCS_QOS_NACK_REMOTE);
|
|
|
|
if (r != LWSSSSRET_OK)
|
2020-12-22 15:56:41 +00:00
|
|
|
return _lws_ss_handle_state_ret_CAN_DESTROY_HANDLE(r, wsi, &h);
|
2020-06-01 07:33:37 +01:00
|
|
|
|
2020-02-29 12:37:24 +00:00
|
|
|
lws_cancel_service(lws_get_context(wsi)); /* abort poll wait */
|
|
|
|
break;
|
|
|
|
|
2020-07-27 10:03:12 +01:00
|
|
|
case LWS_CALLBACK_HTTP_WRITEABLE:
|
2020-02-29 12:37:24 +00:00
|
|
|
case LWS_CALLBACK_CLIENT_HTTP_WRITEABLE:
|
2020-12-25 05:54:19 +00:00
|
|
|
|
2020-07-27 10:03:12 +01:00
|
|
|
if (!h || !h->info.tx) {
|
2020-12-25 05:54:19 +00:00
|
|
|
lwsl_notice("%s: %s no handle / tx\n", __func__, h->lc.gutag);
|
2020-07-27 10:03:12 +01:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
#if defined(LWS_WITH_SERVER)
|
|
|
|
if (h->txn_resp_pending) {
|
|
|
|
/*
|
|
|
|
* If we're going to start sending something, we need to
|
|
|
|
* to take care of the http response header for it first
|
|
|
|
*/
|
|
|
|
h->txn_resp_pending = 0;
|
|
|
|
|
|
|
|
if (lws_add_http_common_headers(wsi,
|
2020-12-12 06:21:40 +00:00
|
|
|
(unsigned int)(h->txn_resp_set ?
|
2020-07-27 10:03:12 +01:00
|
|
|
(h->txn_resp ? h->txn_resp : 200) :
|
2020-12-12 06:21:40 +00:00
|
|
|
HTTP_STATUS_NOT_FOUND),
|
2020-07-27 10:03:12 +01:00
|
|
|
NULL, h->wsi->http.writeable_len,
|
|
|
|
&p, end))
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* metadata-based headers
|
|
|
|
*/
|
|
|
|
|
|
|
|
if (lws_apply_metadata(h, wsi, buf, &p, end))
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
if (lws_finalize_write_http_header(wsi, start, &p, end))
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
/* write the body separately */
|
|
|
|
lws_callback_on_writable(wsi);
|
|
|
|
|
2020-02-29 12:37:24 +00:00
|
|
|
return 0;
|
2020-07-27 10:03:12 +01:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
if (
|
|
|
|
#if defined(LWS_WITH_SERVER)
|
|
|
|
!(h->info.flags & LWSSSINFLAGS_ACCEPTED) && /* not accepted */
|
|
|
|
#endif
|
|
|
|
!h->rideshare)
|
2020-02-29 12:37:24 +00:00
|
|
|
|
|
|
|
h->rideshare = h->policy;
|
|
|
|
|
2020-05-28 13:09:56 +01:00
|
|
|
#if defined(LWS_WITH_SS_RIDESHARE)
|
2020-07-27 10:03:12 +01:00
|
|
|
if (
|
|
|
|
#if defined(LWS_WITH_SERVER)
|
|
|
|
!(h->info.flags & LWSSSINFLAGS_ACCEPTED) && /* not accepted */
|
|
|
|
#endif
|
|
|
|
!h->inside_msg && h->rideshare->u.http.multipart_name)
|
2020-02-29 12:37:24 +00:00
|
|
|
lws_client_http_multipart(wsi,
|
|
|
|
h->rideshare->u.http.multipart_name,
|
|
|
|
h->rideshare->u.http.multipart_filename,
|
|
|
|
h->rideshare->u.http.multipart_content_type,
|
|
|
|
(char **)&p, (char *)end);
|
|
|
|
|
2020-12-12 06:21:40 +00:00
|
|
|
buflen = lws_ptr_diff_size_t(end, p);
|
2020-02-29 12:37:24 +00:00
|
|
|
if (h->policy->u.http.multipart_name)
|
|
|
|
buflen -= 24; /* allow space for end of multipart */
|
2020-07-14 11:50:04 +01:00
|
|
|
#else
|
2020-12-12 06:21:40 +00:00
|
|
|
buflen = lws_ptr_diff_size_t(end, p);
|
2020-02-29 12:37:24 +00:00
|
|
|
#endif
|
2020-12-12 06:21:40 +00:00
|
|
|
r = h->info.tx(ss_to_userobj(h), h->txord++, p, &buflen, &f);
|
2020-08-26 11:05:41 +01:00
|
|
|
if (r == LWSSSSRET_TX_DONT_SEND)
|
2020-02-29 12:37:24 +00:00
|
|
|
return 0;
|
2020-08-26 11:05:41 +01:00
|
|
|
if (r < 0)
|
2020-12-22 15:56:41 +00:00
|
|
|
return _lws_ss_handle_state_ret_CAN_DESTROY_HANDLE(r, wsi, &h);
|
2020-02-29 12:37:24 +00:00
|
|
|
|
2020-07-27 10:03:12 +01:00
|
|
|
// lwsl_notice("%s: WRITEABLE: user tx says len %d fl 0x%x\n",
|
|
|
|
// __func__, (int)buflen, (int)f);
|
2020-02-29 12:37:24 +00:00
|
|
|
|
|
|
|
p += buflen;
|
|
|
|
|
|
|
|
if (f & LWSSS_FLAG_EOM) {
|
2020-07-27 10:03:12 +01:00
|
|
|
#if defined(LWS_WITH_SERVER)
|
|
|
|
if (!(h->info.flags & LWSSSINFLAGS_ACCEPTED)) {
|
|
|
|
#endif
|
2020-05-28 13:09:56 +01:00
|
|
|
conceal_eom = 1;
|
2020-02-29 12:37:24 +00:00
|
|
|
/* end of rideshares */
|
|
|
|
if (!h->rideshare->rideshare_streamtype) {
|
|
|
|
lws_client_http_body_pending(wsi, 0);
|
2020-06-26 10:35:06 +01:00
|
|
|
#if defined(LWS_WITH_SS_RIDESHARE)
|
2020-02-29 12:37:24 +00:00
|
|
|
if (h->rideshare->u.http.multipart_name)
|
|
|
|
lws_client_http_multipart(wsi, NULL, NULL, NULL,
|
|
|
|
(char **)&p, (char *)end);
|
2020-05-28 13:09:56 +01:00
|
|
|
conceal_eom = 0;
|
2020-02-29 12:37:24 +00:00
|
|
|
#endif
|
2020-06-26 10:35:06 +01:00
|
|
|
} else {
|
fakewsi: replace with smaller substructure
Currently we always reserve a fakewsi per pt so events that don't have a related actual
wsi, like vhost-protocol-init or vhost cert init via protocol callback can make callbacks
that look reasonable to user protocol handler code expecting a valid wsi every time.
This patch splits out stuff that user callbacks often unconditionally expect to be in
a wsi, like context pointer, vhost pointer etc into a substructure, which is composed
into struct lws at the top of it. Internal references (struct lws is opaque, so there
are only internal references) are all updated to go via the substructre, the compiler
should make that a NOP.
Helpers are added when fakewsi is used and referenced.
If not PLAT_FREERTOS, we continue to provide a full fakewsi in the pt as before,
although the helpers improve consistency by zeroing down the substructure. There is
a huge amount of user code out there over the last 10 years that did not always have
the minimal examples to follow, some of it does some unexpected things.
If it is PLAT_FREERTOS, that is a newer thing in lws and users have the benefit of
being able to follow the minimal examples' approach. For PLAT_FREERTOS we don't
reserve the fakewsi in the pt any more, saving around 800 bytes. The helpers then
create a struct lws_a (the substructure) on the stack, zero it down (but it is only
like 4 pointers) and prepare it with whatever we know like the context.
Then we cast it to a struct lws * and use it in the user protocol handler call.
In this case, the remainder of the struct lws is undefined. However the amount of
old protocol handlers that might touch things outside of the substructure in
PLAT_FREERTOS is very limited compared to legacy lws user code and the saving is
significant on constrained devices.
User handlers should not be touching everything in a wsi every time anyway, there
are several cases where there is no valid wsi to do the call with. Dereference of
things outside the substructure should only happen when the callback reason shows
there is a valid wsi bound to the activity (as in all the minimal examples).
2020-07-19 08:33:46 +01:00
|
|
|
h->rideshare = lws_ss_policy_lookup(wsi->a.context,
|
2020-02-29 12:37:24 +00:00
|
|
|
h->rideshare->rideshare_streamtype);
|
|
|
|
lws_callback_on_writable(wsi);
|
|
|
|
}
|
2020-07-27 10:03:12 +01:00
|
|
|
#if defined(LWS_WITH_SERVER)
|
|
|
|
}
|
|
|
|
#endif
|
2020-02-29 12:37:24 +00:00
|
|
|
|
|
|
|
h->inside_msg = 0;
|
|
|
|
} else {
|
|
|
|
/* otherwise we can spin with zero length writes */
|
|
|
|
if (!f && !lws_ptr_diff(p, buf + LWS_PRE))
|
|
|
|
break;
|
|
|
|
h->inside_msg = 1;
|
|
|
|
lws_callback_on_writable(wsi);
|
|
|
|
}
|
|
|
|
|
|
|
|
lwsl_info("%s: lws_write %d %d\n", __func__,
|
|
|
|
lws_ptr_diff(p, buf + LWS_PRE), f);
|
|
|
|
|
2020-12-12 06:21:40 +00:00
|
|
|
if (lws_write(wsi, buf + LWS_PRE, lws_ptr_diff_size_t(p, buf + LWS_PRE),
|
2020-05-28 13:09:56 +01:00
|
|
|
(!conceal_eom && (f & LWSSS_FLAG_EOM)) ?
|
|
|
|
LWS_WRITE_HTTP_FINAL : LWS_WRITE_HTTP) !=
|
2020-05-11 15:08:52 +01:00
|
|
|
(int)lws_ptr_diff(p, buf + LWS_PRE)) {
|
2020-02-29 12:37:24 +00:00
|
|
|
lwsl_err("%s: write failed\n", __func__);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2020-07-27 10:03:12 +01:00
|
|
|
#if defined(LWS_WITH_SERVER)
|
|
|
|
if (!(h->info.flags & LWSSSINFLAGS_ACCEPTED) &&
|
|
|
|
(f & LWSSS_FLAG_EOM) &&
|
|
|
|
lws_http_transaction_completed(wsi))
|
|
|
|
return -1;
|
|
|
|
#else
|
2020-02-29 12:37:24 +00:00
|
|
|
lws_set_timeout(wsi, 0, 0);
|
2020-07-27 10:03:12 +01:00
|
|
|
#endif
|
2020-02-29 12:37:24 +00:00
|
|
|
break;
|
|
|
|
|
2020-07-27 10:03:12 +01:00
|
|
|
#if defined(LWS_WITH_SERVER)
|
|
|
|
case LWS_CALLBACK_HTTP:
|
|
|
|
|
2020-12-27 16:05:48 +00:00
|
|
|
if (!h)
|
|
|
|
return -1;
|
|
|
|
|
2020-07-27 10:03:12 +01:00
|
|
|
lwsl_notice("%s: LWS_CALLBACK_HTTP\n", __func__);
|
|
|
|
{
|
|
|
|
|
|
|
|
h->txn_resp_set = 0;
|
|
|
|
h->txn_resp_pending = 1;
|
|
|
|
h->writeable_len = 0;
|
|
|
|
|
|
|
|
#if defined(LWS_ROLE_H2)
|
|
|
|
m = lws_hdr_total_length(wsi, WSI_TOKEN_HTTP_COLON_METHOD);
|
|
|
|
if (m) {
|
|
|
|
lws_ss_set_metadata(h, "method",
|
|
|
|
lws_hdr_simple_ptr(wsi,
|
2020-12-12 06:21:40 +00:00
|
|
|
WSI_TOKEN_HTTP_COLON_METHOD), (unsigned int)m);
|
2020-07-27 10:03:12 +01:00
|
|
|
m = lws_hdr_total_length(wsi, WSI_TOKEN_HTTP_COLON_PATH);
|
|
|
|
lws_ss_set_metadata(h, "path",
|
|
|
|
lws_hdr_simple_ptr(wsi,
|
2020-12-12 06:21:40 +00:00
|
|
|
WSI_TOKEN_HTTP_COLON_PATH), (unsigned int)m);
|
2020-07-27 10:03:12 +01:00
|
|
|
} else
|
|
|
|
#endif
|
|
|
|
{
|
|
|
|
m = lws_hdr_total_length(wsi, WSI_TOKEN_GET_URI);
|
|
|
|
if (m) {
|
|
|
|
lws_ss_set_metadata(h, "path",
|
|
|
|
lws_hdr_simple_ptr(wsi,
|
2020-12-12 06:21:40 +00:00
|
|
|
WSI_TOKEN_GET_URI), (unsigned int)m);
|
2020-07-27 10:03:12 +01:00
|
|
|
lws_ss_set_metadata(h, "method", "GET", 3);
|
|
|
|
} else {
|
|
|
|
m = lws_hdr_total_length(wsi, WSI_TOKEN_POST_URI);
|
|
|
|
if (m) {
|
|
|
|
lws_ss_set_metadata(h, "path",
|
|
|
|
lws_hdr_simple_ptr(wsi,
|
2020-12-12 06:21:40 +00:00
|
|
|
WSI_TOKEN_POST_URI), (unsigned int)m);
|
2020-07-27 10:03:12 +01:00
|
|
|
lws_ss_set_metadata(h, "method", "POST", 4);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-08-26 11:05:41 +01:00
|
|
|
r = lws_ss_event_helper(h, LWSSSCS_SERVER_TXN);
|
|
|
|
if (r)
|
2020-12-22 15:56:41 +00:00
|
|
|
return _lws_ss_handle_state_ret_CAN_DESTROY_HANDLE(r, wsi, &h);
|
2020-07-27 10:03:12 +01:00
|
|
|
return 0;
|
|
|
|
#endif
|
|
|
|
|
2020-02-29 12:37:24 +00:00
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return lws_callback_http_dummy(wsi, reason, user, in, len);
|
|
|
|
}
|
|
|
|
|
|
|
|
const struct lws_protocols protocol_secstream_h1 = {
|
|
|
|
"lws-secstream-h1",
|
|
|
|
secstream_h1,
|
|
|
|
0,
|
|
|
|
0,
|
|
|
|
};
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Munge connect info according to protocol-specific considerations... this
|
|
|
|
* usually means interpreting aux in a protocol-specific way and using the
|
|
|
|
* pieces at connection setup time, eg, http url pieces.
|
|
|
|
*
|
|
|
|
* len bytes of buf can be used for things with scope until after the actual
|
|
|
|
* connect.
|
|
|
|
*/
|
|
|
|
|
|
|
|
static int
|
|
|
|
secstream_connect_munge_h1(lws_ss_handle_t *h, char *buf, size_t len,
|
|
|
|
struct lws_client_connect_info *i,
|
|
|
|
union lws_ss_contemp *ct)
|
|
|
|
{
|
2020-03-14 06:56:41 +00:00
|
|
|
const char *pbasis = h->policy->u.http.url;
|
2020-02-29 12:37:24 +00:00
|
|
|
size_t used_in, used_out;
|
|
|
|
lws_strexp_t exp;
|
|
|
|
|
2020-03-14 06:56:41 +00:00
|
|
|
/* i.path on entry is used to override the policy urlpath if not "" */
|
|
|
|
|
|
|
|
if (i->path[0])
|
|
|
|
pbasis = i->path;
|
|
|
|
|
|
|
|
if (!pbasis)
|
2020-02-29 12:37:24 +00:00
|
|
|
return 0;
|
|
|
|
|
ss: rx metadata
At the moment you can define and set per-stream metadata at the client,
which will be string-substituted and if configured in the policy, set in
related outgoing protocol specific content like h1 headers.
This patch extends the metadata concept to also check incoming protocol-
specific content like h1 headers and where it matches the binding in the
streamtype's metadata entry, make it available to the client by name, via
a new lws_ss_get_metadata() api.
Currently warmcat.com has additional headers for
server: lwsws (well-known header name)
test-custom-header: hello (custom header name)
minimal-secure-streams test is updated to try to recover these both
in direct and -client (via proxy) versions. The corresponding metadata
part of the "mintest" stream policy from warmcat.com is
{
"srv": "server:"
}, {
"test": "test-custom-header:"
},
If built direct, or at the proxy, the stream has access to the static
policy metadata definitions and can store the rx metadata in the stream
metadata allocation, with heap-allocated a value. For client side that
talks to a proxy, only the proxy knows the policy, and it returns rx
metadata inside the serialized link to the client, which stores it on
the heap attached to the stream.
In addition an optimization for mapping static policy metadata definitions
to individual stream handle metadata is changed to match by name.
2020-09-10 06:43:43 +01:00
|
|
|
/* uncomment to force h1 */
|
|
|
|
// i->alpn = "http/1.1";
|
|
|
|
|
2020-05-28 13:09:56 +01:00
|
|
|
#if defined(LWS_WITH_SS_RIDESHARE)
|
2020-02-29 12:37:24 +00:00
|
|
|
if (h->policy->flags & LWSSSPOLF_HTTP_MULTIPART)
|
|
|
|
i->ssl_connection |= LCCSCF_HTTP_MULTIPART_MIME;
|
|
|
|
|
|
|
|
if (h->policy->flags & LWSSSPOLF_HTTP_X_WWW_FORM_URLENCODED)
|
|
|
|
i->ssl_connection |= LCCSCF_HTTP_X_WWW_FORM_URLENCODED;
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/* protocol aux is the path part */
|
|
|
|
|
|
|
|
i->path = buf;
|
2020-12-15 17:53:38 -08:00
|
|
|
|
|
|
|
/* skip the unnessary '/' */
|
|
|
|
if (*pbasis == '/')
|
|
|
|
pbasis = pbasis + 1;
|
|
|
|
|
2020-02-29 12:37:24 +00:00
|
|
|
buf[0] = '/';
|
|
|
|
|
|
|
|
lws_strexp_init(&exp, (void *)h, lws_ss_exp_cb_metadata, buf + 1, len - 1);
|
|
|
|
|
2020-03-14 06:56:41 +00:00
|
|
|
if (lws_strexp_expand(&exp, pbasis, strlen(pbasis),
|
2020-02-29 12:37:24 +00:00
|
|
|
&used_in, &used_out) != LSTRX_DONE)
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
const struct ss_pcols ss_pcol_h1 = {
|
|
|
|
"h1",
|
|
|
|
"http/1.1",
|
2020-07-27 10:03:12 +01:00
|
|
|
&protocol_secstream_h1,
|
2020-02-29 12:37:24 +00:00
|
|
|
secstream_connect_munge_h1,
|
|
|
|
NULL
|
|
|
|
};
|