1
0
Fork 0
mirror of https://github.com/warmcat/libwebsockets.git synced 2025-03-09 00:00:04 +01:00

ss: sspc: add conmon performance telemetry

This provides a way to get ahold of LWS_WITH_CONMON telemetry from Secure
Streams, it works the same with direct onward connections or via the proxy.

You can mark streamtypes with a "perf": true policy attribute... this
causes the onward connections on those streamtypes to collect information
about the connection performance, and the unsorted DNS results.

Streams with that policy attribute receive extra data in their rx callback,
with the LWSSS_FLAG_PERF_JSON flag set on it, containing JSON describing the
performance of the onward connection taken from CONMON data, in a JSON
representation.  Streams without the "perf" attribute set never receive
this extra rx.

The received JSON is based on the CONMON struct info and looks like

{"peer":"46.105.127.147","dns_us":596,"sockconn_us":31382,"tls_us":28180,"txn_resp_us":23015,"dns":["2001:41d0:2:ee93::1","46.105.127.147"]}

A new minimal example minimal-secure-streams-perf is added that collects
this data on an HTTP GET from warmcat.com, and is built with a -client
version as well if LWS_WITH_SECURE_STREAMS_PROXY_API is set, that operates
via the ss proxy and produces the same result at the client.
This commit is contained in:
Andy Green 2021-03-31 13:20:34 +01:00
parent 51490ae6e6
commit d291c02a23
20 changed files with 1069 additions and 13 deletions

View file

@ -38,6 +38,17 @@ typedef uint32_t lws_conmon_interval_us_t;
* Connection latency information... note that not all wsi actually make
* connections, for example h2 streams after the initial one will have 0
* for everything except ciu_txn_resp.
*
* If represented in JSON, it should look like this
*
* {
* "peer": "46.105.127.147",
* "dns_us": 1234,
* "sockconn_us": 1234,
* "tls_us": 1234,
* "txn_resp_us": 1234,
* "dns":["46.105.127.147", "2001:41d0:2:ee93::1"]
* }
*/
struct lws_conmon {

View file

@ -156,6 +156,8 @@ enum {
/**< stream requires high reliability */
LWSSSPOLF_ATTR_LOW_COST = (1 << 21),
/**< stream is not critical and should be handled as cheap as poss */
LWSSSPOLF_PERF = (1 << 22),
/**< capture and report performance information */
};
typedef struct lws_ss_trust_store {

View file

@ -151,6 +151,11 @@
* - 3: 1 byte state index if state < 256, else 4-byte MSB-first state index
* - 4 or 7: 4-byte MSB-first ordinal
*
* - Proxied performance information
*
* - 0: LWSSS_SER_RXPRE_PERF
* - 1: 2-byte MSB-first rest-of-frame length
* - 3: ... performance JSON (for rest of packet)
*
* Proxied tx may be read by the proxy but rejected due to lack of buffer space
* at the proxy. For that reason, tx must be held at the sender until it has
@ -236,6 +241,9 @@ enum {
LWSSS_FLAG_RIDESHARE = (1 << 5),
/* Serialized payload starts with non-default rideshare name length and
* name string without NUL, then payload */
LWSSS_FLAG_PERF_JSON = (1 << 6),
/* This RX is JSON performance data, only on streams with "perf" flag
* set */
/*
* In the case the secure stream is proxied across a process or thread
@ -255,6 +263,7 @@ enum {
LWSSS_SER_RXPRE_TXCR_UPDATE,
LWSSS_SER_RXPRE_METADATA,
LWSSS_SER_RXPRE_TLSNEG_ENCLAVE_SIGN,
LWSSS_SER_RXPRE_PERF,
/* tx (send by client) prepends for proxied connections */

View file

@ -138,6 +138,7 @@ lws_conmon_wsi_take(struct lws *wsi, struct lws_conmon *dest)
/* wsi no longer has to free it... */
wsi->conmon.dns_results_copy = NULL;
wsi->perf_done = 1;
}
void

View file

@ -835,6 +835,7 @@ struct lws {
/**< because the client connection creation api is still the parent of
* this activity, and will report the failure */
unsigned int tls_session_reused:1;
unsigned int perf_done:1;
#endif
#ifdef _WIN32

View file

@ -320,9 +320,13 @@ static const char * const opts_str =
#if defined(LWS_WITH_SECURE_STREAMS_PROXY_API)
"SSPROX "
#endif
#if defined(LWS_WITH_MBEDTLS)
"MbedTLS "
#endif
#if defined(LWS_WITH_CONMON)
"ConMon "
#endif
#if defined(LWS_WITH_SYS_FAULT_INJECTION)
"FLTINJ "
#endif

View file

@ -337,6 +337,30 @@ If user code applies the `lws_ss_start_timeout()` api on a stream with a
timeout of LWSSS_TIMEOUT_FROM_POLICY, the `timeout_ms` entry given in the
policy is applied.
### `perf`
If set to true, and lws was built with `LWS_WITH_CONMON`, causes this streamtype
to receive additional rx payload with the `LWSSS_FLAG_PERF_JSON` flag set on it,
that is JSON representing the onward connection performance information.
These are based on the information captured in the struct defined in
libwebsockets/lws-conmon.h, represented in JSON
```
{
"peer": "46.105.127.147",
"dns_us": 1234,
"sockconn_us": 1234,
"tls_us": 1234,
"txn_resp_us": 1234,
"dns":["46.105.127.147", "2001:41d0:2:ee93::1"]
}
```
Streamtypes without "perf": true will never see the special rx payloads.
Notice that the `LWSSS_FLAG_PERF_JSON` payloads must be handled out of band
from the normal payloads, as they can appear inside normal payload messages.
### `tls_trust_store`
The name of the trust store described in the `trust_stores` section to apply

View file

@ -66,6 +66,7 @@ static const char * const lejp_tokens_policy[] = {
"s[].*.long_poll",
"s[].*.retry",
"s[].*.timeout_ms",
"s[].*.perf",
"s[].*.tls_trust_store",
"s[].*.proxy_buflen",
"s[].*.proxy_buflen_rxflow_on_above",
@ -166,6 +167,7 @@ typedef enum {
LSSPPT_LONG_POLL,
LSSPPT_RETRYPTR,
LSSPPT_DEFAULT_TIMEOUT_MS,
LSSPPT_PERF,
LSSPPT_TRUST,
LSSPPT_PROXY_BUFLEN,
LSSPPT_PROXY_BUFLEN_RXFLOW_ON_ABOVE,
@ -796,6 +798,11 @@ lws_ss_policy_parser_cb(struct lejp_ctx *ctx, char reason)
a->curr[LTY_POLICY].p->flags |= LWSSSPOLF_ATTR_LOW_COST;
return 0;
case LSSPPT_PERF:
if (reason == LEJPCB_VAL_TRUE)
a->curr[LTY_POLICY].p->flags |= LWSSSPOLF_PERF;
return 0;
case LSSPPT_RETRYPTR:
bot = a->heads[LTY_BACKOFF].b;
while (bot) {

View file

@ -78,6 +78,10 @@ typedef struct lws_ss_handle {
lws_ss_metadata_t *metadata;
const lws_ss_policy_t *rideshare;
#if defined(LWS_WITH_CONMON)
char *conmon_json;
#endif
//struct lws_ss_handle *h_sink; /**< sink we are bound to, or NULL */
//void *sink_obj;/**< sink's private object representing us */
@ -159,6 +163,9 @@ typedef struct lws_ss_handle {
#endif
uint16_t retry; /**< retry / backoff tracking */
#if defined(LWS_WITH_CONMON)
uint16_t conmon_len;
#endif
int16_t temp16;
uint8_t tsi; /**< service thread idx, usually 0 */
@ -534,8 +541,11 @@ struct ss_pcols {
* The last one of the accepted side and the onward side to close frees it.
*/
lws_ss_state_return_t
lws_conmon_ss_json(lws_ss_handle_t *h);
void
ss_proxy_onward_link_req_writeable(lws_ss_handle_t *h_onward);
struct conn {
struct lws_ss_serialization_parser parser;

View file

@ -416,6 +416,9 @@ secstream_h1(struct lws *wsi, enum lws_callback_reasons reason, void *user,
break;
}
assert(h->policy);
lws_conmon_ss_json(h);
lws_metrics_caliper_report_hist(h->cal_txn, wsi);
lwsl_info("%s: %s CLIENT_CONNECTION_ERROR: %s\n", __func__,
h->lc.gutag, in ? (const char *)in : "none");
@ -448,6 +451,8 @@ secstream_h1(struct lws *wsi, enum lws_callback_reasons reason, void *user,
lws_sul_cancel(&h->sul_timeout);
lws_conmon_ss_json(h);
lws_metrics_caliper_report_hist(h->cal_txn, wsi);
//lwsl_notice("%s: %s LWS_CALLBACK_CLOSED_CLIENT_HTTP\n",
// __func__, wsi->lc.gutag);
@ -486,6 +491,8 @@ secstream_h1(struct lws *wsi, enum lws_callback_reasons reason, void *user,
if (!h)
return -1;
lws_conmon_ss_json(h);
status = (int)lws_http_client_http_response(wsi);
lwsl_info("%s: LWS_CALLBACK_ESTABLISHED_CLIENT_HTTP: %d\n", __func__, status);
// if (!status)

View file

@ -43,6 +43,9 @@ secstream_mqtt(struct lws *wsi, enum lws_callback_reasons reason, void *user,
in ? (char *)in : "(null)");
if (!h)
break;
lws_conmon_ss_json(h);
r = lws_ss_event_helper(h, LWSSSCS_UNREACHABLE);
h->wsi = NULL;
@ -64,6 +67,8 @@ secstream_mqtt(struct lws *wsi, enum lws_callback_reasons reason, void *user,
if (!h)
break;
lws_sul_cancel(&h->sul_timeout);
lws_conmon_ss_json(h);
r= lws_ss_event_helper(h, LWSSSCS_DISCONNECTED);
if (h->wsi)
lws_set_opaque_user_data(h->wsi, NULL);

View file

@ -47,6 +47,9 @@ secstream_raw(struct lws *wsi, enum lws_callback_reasons reason, void *user,
assert(h->policy);
lwsl_info("%s: %s, %s CLIENT_CONNECTION_ERROR: %s\n", __func__,
lws_ss_tag(h), h->policy->streamtype, in ? (char *)in : "(null)");
lws_conmon_ss_json(h);
r = lws_ss_event_helper(h, LWSSSCS_UNREACHABLE);
if (r == LWSSSSRET_DESTROY_ME)
return _lws_ss_handle_state_ret_CAN_DESTROY_HANDLE(r, wsi, &h);
@ -60,6 +63,9 @@ secstream_raw(struct lws *wsi, enum lws_callback_reasons reason, void *user,
if (!h)
break;
lws_sul_cancel(&h->sul_timeout);
lws_conmon_ss_json(h);
lwsl_info("%s: %s, %s RAW_CLOSE\n", __func__, lws_ss_tag(h),
h->policy ? h->policy->streamtype : "no policy");
h->wsi = NULL;

View file

@ -45,6 +45,9 @@ secstream_ws(struct lws *wsi, enum lws_callback_reasons reason, void *user,
in ? (char *)in : "(null)");
if (!h)
break;
lws_conmon_ss_json(h);
r = lws_ss_event_helper(h, LWSSSCS_UNREACHABLE);
if (r == LWSSSSRET_DESTROY_ME)
return _lws_ss_handle_state_ret_CAN_DESTROY_HANDLE(r, wsi, &h);
@ -60,6 +63,9 @@ secstream_ws(struct lws *wsi, enum lws_callback_reasons reason, void *user,
if (!h)
break;
lws_sul_cancel(&h->sul_timeout);
lws_conmon_ss_json(h);
r = lws_ss_event_helper(h, LWSSSCS_DISCONNECTED);
if (r == LWSSSSRET_DESTROY_ME)
return _lws_ss_handle_state_ret_CAN_DESTROY_HANDLE(r, wsi, &h);

View file

@ -80,6 +80,16 @@ lws_proxy_clean_conn_ss(struct lws *wsi)
#endif
}
/*
 * Request a writeable callback on the proxy <-> client link wsi, if one
 * is currently up, so pending data (e.g. conmon JSON) can be forwarded.
 *
 * h_onward: the proxy-side onward SS handle; its ss_proxy_t lives
 * immediately after the handle in the same allocation.
 */
void
ss_proxy_onward_link_req_writeable(lws_ss_handle_t *h_onward)
{
	ss_proxy_t *proxy = (ss_proxy_t *)(h_onward + 1);

	if (!proxy->conn->wsi)
		/* no client link to schedule a write on right now */
		return;

	lws_callback_on_writable(proxy->conn->wsi);
}
int
__lws_ss_proxy_bind_ss_to_conn_wsi(void *parconn, size_t dsh_size)
{
@ -315,7 +325,7 @@ callback_ss_proxy(struct lws *wsi, enum lws_callback_reasons reason,
lws_ss_metadata_t *md;
lws_ss_info_t ssi;
const uint8_t *cp;
char s[256];
char s[512];
uint8_t *p;
size_t si;
char pay;
@ -550,7 +560,7 @@ callback_ss_proxy(struct lws *wsi, enum lws_callback_reasons reason,
/*
* returning [onward -> ] proxy]-> client
* rx metadata has priority
* rx metadata has priority 1
*/
md = conn->ss->metadata;
@ -586,6 +596,30 @@ callback_ss_proxy(struct lws *wsi, enum lws_callback_reasons reason,
md = md->next;
}
/*
* If we have performance data, render it in JSON
* and send that in LWSSS_SER_RXPRE_PERF has
* priority 2
*/
if (conn->ss->conmon_json) {
unsigned int xlen = conn->ss->conmon_len;
if (xlen > sizeof(s) - 3)
xlen = sizeof(s) - 3;
cp = (uint8_t *)s;
p = (uint8_t *)s;
p[0] = LWSSS_SER_RXPRE_PERF;
lws_ser_wu16be(&p[1], (uint16_t)xlen);
memcpy(&p[3], conn->ss->conmon_json, xlen);
lws_free_set_NULL(conn->ss->conmon_json);
n = (int)(xlen + 3);
pay = 0;
goto again;
}
/*
* if no fresh rx metadata, just pass through incoming
* dsh

View file

@ -57,6 +57,8 @@ typedef enum {
RPAR_RIDESHARE_LEN,
RPAR_RIDESHARE,
RPAR_PERF,
RPAR_RESULT_CREATION_DSH,
RPAR_RESULT_CREATION_RIDESHARE,
@ -532,6 +534,13 @@ lws_ss_deserialize_parse(struct lws_ss_serialization_parser *par,
par->ps = RPAR_RX_TXCR_UPDATE;
break;
case LWSSS_SER_RXPRE_PERF:
par->ctr = 0;
if (!par->rem)
goto hangup;
par->ps = RPAR_PERF;
break;
default:
lwsl_notice("%s: bad type 0x%x\n", __func__,
par->type);
@ -602,6 +611,44 @@ lws_ss_deserialize_parse(struct lws_ss_serialization_parser *par,
goto hangup;
break;
case RPAR_PERF:
n = (int)len + 1;
if (n > par->rem)
n = par->rem;
if (client &&
client_pss_to_sspc_h(pss, ssi) &&
ssi->rx) {
int ret;
/* we still have an sspc handle */
ret = ssi->rx(client_pss_to_userdata(pss),
(uint8_t *)cp, (unsigned int)n,
(int)(LWSSS_FLAG_SOM | LWSSS_FLAG_EOM |
LWSSS_FLAG_PERF_JSON));
if (lws_fi(&client_pss_to_sspc_h(pss, ssi)->fic,
"sspc_perf_rx_fake_destroy_me"))
ret = LWSSSSRET_DESTROY_ME;
switch (ret) {
case LWSSSSRET_OK:
break;
case LWSSSSRET_DISCONNECT_ME:
goto hangup;
case LWSSSSRET_DESTROY_ME:
return LWSSSSRET_DESTROY_ME;
}
}
if (n) {
cp += n;
par->rem = (uint16_t)(par->rem - (uint16_t)(unsigned int)n);
len = (len + 1) - (unsigned int)n;
}
if (!par->rem)
par->ps = RPAR_TYPE;
break;
case RPAR_RIDESHARE:
par->rideshare[par->ctr++] = (char)*cp++;
if (!par->rem--)

View file

@ -175,6 +175,99 @@ static const uint32_t ss_state_txn_validity[] = {
(1 << LWSSSCS_DESTROYING),
};
#if defined(LWS_WITH_CONMON)
/*
 * Render any conmon data collected on the wsi bound to the handle into a
 * JSON blob attached to the handle (h->conmon_json / h->conmon_len).
 *
 * Only acts on streams whose policy has the "perf" flag (LWSSSPOLF_PERF),
 * that still have a wsi, and whose conmon data was not already drained
 * (wsi->perf_done).
 *
 * If the stream is proxied onward, the blob is left on the handle and the
 * proxy link is asked to become writeable so it can forward it; otherwise
 * it is delivered directly to the stream's rx handler with
 * LWSSS_FLAG_PERF_JSON set and then freed.
 *
 * Returns the rx handler's return code, or LWSSSSRET_OK if nothing was
 * collected or delivery was deferred to the proxy link.
 */
lws_ss_state_return_t
lws_conmon_ss_json(lws_ss_handle_t *h)
{
	char ads[48], *end, *buf, *obuf;
	const struct addrinfo *ai;
	lws_ss_state_return_t ret = LWSSSSRET_OK;
	struct lws_conmon cm;
	size_t len = 500;

	if (!h->policy || !(h->policy->flags & LWSSSPOLF_PERF) || !h->wsi ||
	    h->wsi->perf_done)
		return LWSSSSRET_OK;

	/* replace any stale blob from an earlier connection phase */
	if (h->conmon_json)
		lws_free_set_NULL(h->conmon_json);

	h->conmon_json = lws_malloc(len, __func__);
	if (!h->conmon_json)
		return LWSSSSRET_OK;

	obuf = buf = h->conmon_json;
	end = buf + len - 1;

	/* we take ownership of the copied DNS results list inside cm */
	lws_conmon_wsi_take(h->wsi, &cm);

	lws_sa46_write_numeric_address(&cm.peer46, ads, sizeof(ads));

	/*
	 * Fix: the "txn_resp_us" key needs its closing quote, so the blob
	 * is valid JSON matching the documented format
	 * {"peer":...,"txn_resp_us":1234,"dns":[...]}
	 */
	buf += lws_snprintf(buf, lws_ptr_diff_size_t(end, buf),
			    "{\"peer\":\"%s\","
			     "\"dns_us\":%u,"
			     "\"sockconn_us\":%u,"
			     "\"tls_us\":%u,"
			     "\"txn_resp_us\":%u,"
			     "\"dns\":[",
			    ads,
			    (unsigned int)cm.ciu_dns,
			    (unsigned int)cm.ciu_sockconn,
			    (unsigned int)cm.ciu_tls,
			    (unsigned int)cm.ciu_txn_resp);

	ai = cm.dns_results_copy;
	while (ai) {
		lws_sa46_write_numeric_address((lws_sockaddr46 *)ai->ai_addr,
					       ads, sizeof(ads));
		buf += lws_snprintf(buf, lws_ptr_diff_size_t(end, buf),
				    "\"%s\"", ads);
		/* only emit the separator if another entry follows and fits */
		if (ai->ai_next && buf < end - 2)
			*buf++ = ',';
		ai = ai->ai_next;
	}

	buf += lws_snprintf(buf, lws_ptr_diff_size_t(end, buf), "]}");

	/*
	 * This destroys the DNS list in the lws_conmon that we took
	 * responsibility for when we used lws_conmon_wsi_take()
	 */
	lws_conmon_release(&cm);

	h->conmon_len = (uint16_t)lws_ptr_diff(buf, obuf);

#if defined(LWS_WITH_SECURE_STREAMS_PROXY_API)
	if (h->proxy_onward) {
		/*
		 * ask to forward it on the proxy link; the blob stays on
		 * the handle until the proxy side consumes it
		 */
		ss_proxy_onward_link_req_writeable(h);

		return LWSSSSRET_OK;
	}
#endif

	/*
	 * We can deliver it directly
	 */
	if (h->info.rx)
		ret = h->info.rx(h, (uint8_t *)h->conmon_json,
				 (unsigned int)h->conmon_len,
				 (int)(LWSSS_FLAG_SOM | LWSSS_FLAG_EOM |
						LWSSS_FLAG_PERF_JSON));

	lws_free_set_NULL(h->conmon_json);

	return ret;
}
#endif
int
lws_ss_check_next_state(lws_lifecycle_t *lc, uint8_t *prevstate,
lws_ss_constate_t cs)
@ -529,7 +622,7 @@ _lws_ss_client_connect(lws_ss_handle_t *h, int is_retry, void *conn_if_sspc_onw)
if (lws_ss_event_helper(h, LWSSSCS_CONNECTING))
return LWSSSSRET_TX_DONT_SEND;
// lwsl_err("%s: registered SS SMD\n", __func__);
if (lws_ss_event_helper(h, LWSSSCS_CONNECTED))
return LWSSSSRET_TX_DONT_SEND;
return LWSSSSRET_OK;
@ -607,14 +700,16 @@ _lws_ss_client_connect(lws_ss_handle_t *h, int is_retry, void *conn_if_sspc_onw)
/* translate policy attributes to IP ToS flags */
if (h->policy->flags & LCCSCF_IP_LOW_LATENCY)
i.ssl_connection |= LWSSSPOLF_ATTR_LOW_LATENCY;
if (h->policy->flags & LCCSCF_IP_HIGH_THROUGHPUT)
i.ssl_connection |= LWSSSPOLF_ATTR_HIGH_THROUGHPUT;
if (h->policy->flags & LCCSCF_IP_HIGH_RELIABILITY)
i.ssl_connection |= LWSSSPOLF_ATTR_HIGH_RELIABILITY;
if (h->policy->flags & LCCSCF_IP_LOW_COST)
i.ssl_connection |= LWSSSPOLF_ATTR_LOW_COST;
if (h->policy->flags & LWSSSPOLF_ATTR_LOW_LATENCY)
i.ssl_connection |= LCCSCF_IP_LOW_LATENCY;
if (h->policy->flags & LWSSSPOLF_ATTR_HIGH_THROUGHPUT)
i.ssl_connection |= LCCSCF_IP_HIGH_THROUGHPUT;
if (h->policy->flags & LWSSSPOLF_ATTR_HIGH_RELIABILITY)
i.ssl_connection |= LCCSCF_IP_HIGH_RELIABILITY;
if (h->policy->flags & LWSSSPOLF_ATTR_LOW_COST)
i.ssl_connection |= LCCSCF_IP_LOW_COST;
if (h->policy->flags & LWSSSPOLF_PERF) /* collect conmon stats on this */
i.ssl_connection |= LCCSCF_CONMON;
/* mark the connection with the streamtype priority from the policy */
@ -1136,6 +1231,11 @@ lws_ss_destroy(lws_ss_handle_t **ppss)
}
h->destroying = 1;
#if defined(LWS_WITH_CONMON)
if (h->conmon_json)
lws_free_set_NULL(h->conmon_json);
#endif
if (h->wsi) {
/*
* Don't let the wsi point to us any more,

View file

@ -0,0 +1,133 @@
project(lws-minimal-secure-streams-perf C)
cmake_minimum_required(VERSION 2.8.12)
find_package(libwebsockets CONFIG REQUIRED)
list(APPEND CMAKE_MODULE_PATH ${LWS_CMAKE_DIR})
include(CheckCSourceCompiles)
include(LwsCheckRequirements)
set(SAMP lws-minimal-secure-streams-perf)
set(requirements 1)
require_lws_config(LWS_ROLE_H1 1 requirements)
require_lws_config(LWS_WITHOUT_CLIENT 0 requirements)
require_lws_config(LWS_WITH_SECURE_STREAMS 1 requirements)
require_lws_config(LWS_WITH_SECURE_STREAMS_STATIC_POLICY_ONLY 0 requirements)
require_lws_config(LWS_WITH_SYS_STATE 1 requirements)
if (requirements)
add_executable(${SAMP} minimal-secure-streams.c)
find_program(VALGRIND "valgrind")
if (LWS_CTEST_INTERNET_AVAILABLE AND NOT WIN32)
#
# When running in CI, wait for a lease on the resources
# before starting this test, so the server does not get
# thousands of simultaneous tls connection attempts
#
# sai-resource holds the lease on the resources until
# the time given in seconds or the sai-resource instance
# exits, whichever happens first
#
# If running under Sai, creates a lock test called "res_sspcmin"
#
sai_resource(warmcat_conns 1 40 ssperfpcmin)
#
# simple test not via proxy
#
if (VALGRIND)
message("testing via valgrind")
add_test(NAME ssperf-warmcat COMMAND
${VALGRIND} --tool=memcheck --leak-check=yes --num-callers=20
$<TARGET_FILE:lws-minimal-secure-streams>)
else()
add_test(NAME ssperf-warmcat COMMAND lws-minimal-secure-streams)
endif()
set_tests_properties(ssperf-warmcat
PROPERTIES
WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/minimal-examples/secure-streams/minimal-secure-streams
TIMEOUT 20)
if (DEFINED ENV{SAI_OVN})
set_tests_properties(ssperf-warmcat PROPERTIES FIXTURES_REQUIRED "res_ssperfpcmin")
endif()
if (HAS_LWS_WITH_SECURE_STREAMS_PROXY_API OR LWS_WITH_SECURE_STREAMS_PROXY_API)
#
# Define test dep to bring up and take down the test
# proxy
#
if (${CMAKE_SYSTEM_NAME} MATCHES "Linux")
# uds abstract namespace for linux
set(CTEST_SOCKET_PATH "@ctest-ssperfp-$ENV{SAI_PROJECT}-$ENV{SAI_OVN}")
else()
# filesystem socket for others
set(CTEST_SOCKET_PATH "/tmp/ctest-ssperfp-$ENV{SAI_PROJECT}-$ENV{SAI_OVN}")
endif()
add_test(NAME st_ssperfproxy COMMAND
${CMAKE_SOURCE_DIR}/scripts/ctest-background.sh
ssperfproxy $<TARGET_FILE:lws-minimal-secure-streams-proxy>
-i ${CTEST_SOCKET_PATH} )
set_tests_properties(st_ssperfproxy PROPERTIES WORKING_DIRECTORY . FIXTURES_SETUP ssperfproxy TIMEOUT 800)
add_test(NAME ki_ssperfproxy COMMAND
${CMAKE_SOURCE_DIR}/scripts/ctest-background-kill.sh
ssperfproxy $<TARGET_FILE:lws-minimal-secure-streams-proxy>
-i ${CTEST_SOCKET_PATH})
set_tests_properties(ki_ssperfproxy PROPERTIES FIXTURES_CLEANUP ssperfproxy)
#
# the client part that will connect to the proxy
#
if (VALGRIND)
message("testing via valgrind")
add_test(NAME ssperfpc-minimal COMMAND
${VALGRIND} --tool=memcheck --leak-check=yes --num-callers=20
$<TARGET_FILE:lws-minimal-secure-streams-client> -i +${CTEST_SOCKET_PATH})
else()
add_test(NAME ssperfpc-minimal COMMAND lws-minimal-secure-streams-client -i +${CTEST_SOCKET_PATH})
endif()
set(fixlist "ssperfproxy")
if (DEFINED ENV{SAI_OVN})
list(APPEND fixlist "res_ssperfproxy")
endif()
set_tests_properties(ssperfpc-minimal PROPERTIES
WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/minimal-examples/secure-streams/minimal-secure-streams
FIXTURES_REQUIRED "${fixlist}"
TIMEOUT 40)
endif()
endif()
if (websockets_shared)
target_link_libraries(${SAMP} websockets_shared ${LIBWEBSOCKETS_DEP_LIBS})
add_dependencies(${SAMP} websockets_shared)
else()
target_link_libraries(${SAMP} websockets ${LIBWEBSOCKETS_DEP_LIBS})
endif()
# Probe whether the installed lws build has the SS proxy API; the stray "i"
# before "#if" made this test program never compile, so the probe always
# reported false and the -client variant was never enabled from it.
CHECK_C_SOURCE_COMPILES("#include <libwebsockets.h>\nint main(void) {\n#if defined(LWS_WITH_SECURE_STREAMS_PROXY_API)\n return 0;\n #else\n fail\n #endif\n return 0;\n}\n" HAS_LWS_WITH_SECURE_STREAMS_PROXY_API)
if (HAS_LWS_WITH_SECURE_STREAMS_PROXY_API OR LWS_WITH_SECURE_STREAMS_PROXY_API)
add_compile_options(-DLWS_SS_USE_SSPC)
add_executable(${SAMP}-client minimal-secure-streams.c)
if (websockets_shared)
target_link_libraries(${SAMP}-client websockets_shared ${LIBWEBSOCKETS_DEP_LIBS})
add_dependencies(${SAMP}-client websockets_shared)
else()
target_link_libraries(${SAMP}-client websockets ${LIBWEBSOCKETS_DEP_LIBS})
endif()
endif()
endif()

View file

@ -0,0 +1,104 @@
# lws minimal secure streams perf
The application goes to https://warmcat.com and reads index.html there.
The streamtype used is marked with a "perf": true policy, it returns additional
rx payload marked with the `LWSSS_FLAG_PERF_JSON` flag containing a JSON rundown
of the connection performance.
This builds both lws-minimal-secure-streams-perf that connects directly, and
lws-minimal-secure-streams-perf-client that connects via the proxy, giving the
same results.
## build
```
$ cmake . && make
```
## usage
Commandline option|Meaning
---|---
-d <loglevel>|Debug verbosity in decimal, eg, -d15
```
[2021/03/31 15:29:46:5162] U: LWS secure streams test client [-d<verb>]
[2021/03/31 15:29:46:5625] N: LWS: 4.1.99-v4.2-rc1-50-g8b5acf835c, loglevel 1031
[2021/03/31 15:29:46:5629] N: NET CLI SRV H1 H2 WS SS-JSON-POL SSPROX ConMon IPV6-on
[2021/03/31 15:29:46:5829] N: ++ [795209|wsi|0|pipe] (1)
[2021/03/31 15:29:46:5892] N: ++ [795209|vh|0|netlink] (1)
[2021/03/31 15:29:46:5983] N: ++ [795209|vh|1|default||-1] (2)
[2021/03/31 15:29:46:7638] N: ++ [795209|SSPcli|0|mintest] (1)
[2021/03/31 15:29:46:7957] N: ++ [795209|wsiSSPcli|0|RAW/raw-skt/+@proxy.ss.lws/([795209|SSPcli|0|mintest])] (1)
[2021/03/31 15:29:46:8335] N: -- [795209|wsiSSPcli|0|RAW/raw-skt/+@proxy.ss.lws/([795209|SSPcli|0|mintest])] (0) 35.608ms
[2021/03/31 15:29:47:9096] N: ++ [795209|wsiSSPcli|1|RAW/raw-skt/+@proxy.ss.lws/([795209|SSPcli|0|mintest])] (1)
[2021/03/31 15:29:47:9103] N: -- [795209|wsiSSPcli|1|RAW/raw-skt/+@proxy.ss.lws/([795209|SSPcli|0|mintest])] (0) 215μs
[2021/03/31 15:29:48:9117] N: ++ [795209|wsiSSPcli|2|RAW/raw-skt/+@proxy.ss.lws/([795209|SSPcli|0|mintest])] (1)
[2021/03/31 15:29:48:9339] N: lws_sspc_sul_retry_cb: [795209|wsiSSPcli|2|RAW/raw-skt/+@proxy.ss.lws/([795209|SSPcli|0|mintest])]
[2021/03/31 15:29:48:9625] N: lws_ss_check_next_state: [795209|SSPcli|0|mintest]: (unset) -> LWSSSCS_CREATING
[2021/03/31 15:29:48:9633] U: myss_state: LWSSSCS_CREATING (1), ord 0x0
[2021/03/31 15:29:48:9728] N: lws_ss_check_next_state: [795209|SSPcli|0|mintest]: LWSSSCS_CREATING -> LWSSSCS_CONNECTING
[2021/03/31 15:29:48:9731] U: myss_state: LWSSSCS_CONNECTING (6), ord 0x0
[2021/03/31 15:29:49:0670] N: lws_ss_deserialize_parse: RX METADATA test
[2021/03/31 15:29:49:0696] N: lws_ss_check_next_state: [795209|SSPcli|0|mintest]: LWSSSCS_CONNECTING -> LWSSSCS_CONNECTED
[2021/03/31 15:29:49:0698] U: myss_state: LWSSSCS_CONNECTED (5), ord 0x0
[2021/03/31 15:29:49:0716] N: lws_ss_deserialize_parse: RX METADATA srv
[2021/03/31 15:29:49:0882] U: myss_rx: len 1380, flags: 1, srv: lwsws, test: hello
[2021/03/31 15:29:49:0907] U: myss_rx: len 140, flags: 0, srv: lwsws, test: hello
[2021/03/31 15:29:49:0926] U: {"peer":"46.105.127.147","dns_us":536,"sockconn_us":30183,"tls_us":29343,"txn_resp_us":25990,"dns":["2001:41d0:2:ee93::1","46.105.127.147"]}
[2021/03/31 15:29:49:0937] U: myss_rx: len 1380, flags: 0, srv: lwsws, test: hello
[2021/03/31 15:29:49:0938] U: myss_rx: len 140, flags: 0, srv: lwsws, test: hello
[2021/03/31 15:29:49:0940] U: myss_rx: len 829, flags: 0, srv: lwsws, test: hello
[2021/03/31 15:29:49:0942] U: myss_rx: len 691, flags: 0, srv: lwsws, test: hello
[2021/03/31 15:29:49:0943] U: myss_rx: len 1380, flags: 0, srv: lwsws, test: hello
[2021/03/31 15:29:49:0944] U: myss_rx: len 140, flags: 0, srv: lwsws, test: hello
[2021/03/31 15:29:49:0945] U: myss_rx: len 1380, flags: 0, srv: lwsws, test: hello
[2021/03/31 15:29:49:0947] U: myss_rx: len 140, flags: 0, srv: lwsws, test: hello
[2021/03/31 15:29:49:0948] U: myss_rx: len 292, flags: 0, srv: lwsws, test: hello
[2021/03/31 15:29:49:0950] U: myss_rx: len 291, flags: 0, srv: lwsws, test: hello
[2021/03/31 15:29:49:0951] U: myss_rx: len 1380, flags: 0, srv: lwsws, test: hello
[2021/03/31 15:29:49:0952] U: myss_rx: len 140, flags: 0, srv: lwsws, test: hello
[2021/03/31 15:29:49:0953] U: myss_rx: len 1380, flags: 0, srv: lwsws, test: hello
[2021/03/31 15:29:49:0955] U: myss_rx: len 140, flags: 0, srv: lwsws, test: hello
[2021/03/31 15:29:49:0956] U: myss_rx: len 692, flags: 0, srv: lwsws, test: hello
[2021/03/31 15:29:49:0957] U: myss_rx: len 828, flags: 0, srv: lwsws, test: hello
[2021/03/31 15:29:49:0958] U: myss_rx: len 1380, flags: 0, srv: lwsws, test: hello
[2021/03/31 15:29:49:0960] U: myss_rx: len 140, flags: 0, srv: lwsws, test: hello
[2021/03/31 15:29:49:0961] U: myss_rx: len 1380, flags: 0, srv: lwsws, test: hello
[2021/03/31 15:29:49:0962] U: myss_rx: len 140, flags: 0, srv: lwsws, test: hello
[2021/03/31 15:29:49:0963] U: myss_rx: len 155, flags: 0, srv: lwsws, test: hello
[2021/03/31 15:29:49:0965] U: myss_rx: len 428, flags: 0, srv: lwsws, test: hello
[2021/03/31 15:29:49:0966] U: myss_rx: len 1380, flags: 0, srv: lwsws, test: hello
[2021/03/31 15:29:49:0967] U: myss_rx: len 140, flags: 0, srv: lwsws, test: hello
[2021/03/31 15:29:49:0968] U: myss_rx: len 1380, flags: 0, srv: lwsws, test: hello
[2021/03/31 15:29:49:0969] U: myss_rx: len 140, flags: 0, srv: lwsws, test: hello
[2021/03/31 15:29:49:0970] U: myss_rx: len 555, flags: 0, srv: lwsws, test: hello
[2021/03/31 15:29:49:0972] U: myss_rx: len 965, flags: 0, srv: lwsws, test: hello
[2021/03/31 15:29:49:0973] U: myss_rx: len 1380, flags: 0, srv: lwsws, test: hello
[2021/03/31 15:29:49:0975] U: myss_rx: len 140, flags: 0, srv: lwsws, test: hello
[2021/03/31 15:29:49:0976] U: myss_rx: len 1380, flags: 0, srv: lwsws, test: hello
[2021/03/31 15:29:49:0977] U: myss_rx: len 140, flags: 0, srv: lwsws, test: hello
[2021/03/31 15:29:49:0978] U: myss_rx: len 18, flags: 0, srv: lwsws, test: hello
[2021/03/31 15:29:49:0979] U: myss_rx: len 565, flags: 0, srv: lwsws, test: hello
[2021/03/31 15:29:49:0980] U: myss_rx: len 1380, flags: 0, srv: lwsws, test: hello
[2021/03/31 15:29:49:0981] U: myss_rx: len 140, flags: 0, srv: lwsws, test: hello
[2021/03/31 15:29:49:0982] U: myss_rx: len 1380, flags: 0, srv: lwsws, test: hello
[2021/03/31 15:29:49:0983] U: myss_rx: len 140, flags: 0, srv: lwsws, test: hello
[2021/03/31 15:29:49:0984] U: myss_rx: len 418, flags: 0, srv: lwsws, test: hello
[2021/03/31 15:29:49:0985] U: myss_rx: len 44, flags: 0, srv: lwsws, test: hello
[2021/03/31 15:29:49:0989] U: myss_rx: len 0, flags: 2, srv: lwsws, test: hello
[2021/03/31 15:29:49:0994] N: lws_ss_check_next_state: [795209|SSPcli|0|mintest]: LWSSSCS_CONNECTED -> LWSSSCS_QOS_ACK_REMOTE
[2021/03/31 15:29:49:0995] U: myss_state: LWSSSCS_QOS_ACK_REMOTE (10), ord 0x0
[2021/03/31 15:29:49:0998] N: myss_state: LWSSSCS_QOS_ACK_REMOTE
[2021/03/31 15:29:49:1008] N: lws_ss_check_next_state: [795209|SSPcli|0|mintest]: LWSSSCS_QOS_ACK_REMOTE -> LWSSSCS_DISCONNECTED
[2021/03/31 15:29:49:1010] U: myss_state: LWSSSCS_DISCONNECTED (2), ord 0x0
[2021/03/31 15:29:49:1106] N: -- [795209|wsi|0|pipe] (0) 2.527s
[2021/03/31 15:29:49:1169] N: -- [795209|vh|1|default||-1] (1) 2.518s
[2021/03/31 15:29:49:1172] N: -- [795209|wsiSSPcli|2|RAW/raw-skt/+@proxy.ss.lws/([795209|SSPcli|0|mintest])] (0) 205.495ms
[2021/03/31 15:29:49:1174] N: -- [795209|vh|0|netlink] (0) 2.528s
[2021/03/31 15:29:49:1203] N: lws_ss_check_next_state: [795209|SSPcli|0|mintest]: LWSSSCS_DISCONNECTED -> LWSSSCS_DESTROYING
[2021/03/31 15:29:49:1206] U: myss_state: LWSSSCS_DESTROYING (7), ord 0x0
[2021/03/31 15:29:49:1210] N: -- [795209|SSPcli|0|mintest] (0) 2.357s
[2021/03/31 15:29:49:1292] U: Completed: OK (seen expected 0)
```

View file

@ -0,0 +1,545 @@
/*
* lws-minimal-secure-streams
*
* Written in 2010-2020 by Andy Green <andy@warmcat.com>
*
* This file is made available under the Creative Commons CC0 1.0
* Universal Public Domain Dedication.
*
*
* This demonstrates a minimal http client using secure streams api.
*
* It visits https://warmcat.com/ and receives the html page there.
*
* This example is built two different ways from the same source... one includes
* the policy everything needed to fulfil the stream directly. The other -client
* variant has no policy itself and some other minor init changes, and connects
* to the -proxy example to actually get the connection done.
*
* In the -client build case, the example does not even init the tls libraries
* since the proxy part will take care of all that.
*/
#include <libwebsockets.h>
#include <string.h>
#include <signal.h>
/*
* uncomment to force network traffic through 127.0.0.1:1080
*
* On your local machine, you can run a SOCKS5 proxy like this
*
* $ ssh -N -D 0.0.0.0:1080 localhost -v
*
* If enabled, this also fetches a remote policy that also
* specifies that all traffic should go through the remote
* proxy.
*/
// #define VIA_LOCALHOST_SOCKS
static int interrupted, bad = 1, force_cpd_fail_portal,
force_cpd_fail_no_internet, test_respmap;
static unsigned int timeout_ms = 3000;
static lws_state_notify_link_t nl;
/*
* If the -proxy app is fulfilling our connection, then we don't need to have
* the policy in the client.
*
* When we build with LWS_SS_USE_SSPC, the apis hook up to a proxy process over
* a Unix Domain Socket. To test that, you need to separately run the
* ./lws-minimal-secure-streams-proxy test app on the same machine.
*/
#if !defined(LWS_SS_USE_SSPC)
static const char * const default_ss_policy =
"{"
"\"release\":" "\"01234567\","
"\"product\":" "\"myproduct\","
"\"schema-version\":" "1,"
#if defined(VIA_LOCALHOST_SOCKS)
"\"via-socks5\":" "\"127.0.0.1:1080\","
#endif
"\"retry\": [" /* named backoff / retry strategies */
"{\"default\": {"
"\"backoff\": [" "1000,"
"2000,"
"3000,"
"5000,"
"10000"
"],"
"\"conceal\":" "5,"
"\"jitterpc\":" "20,"
"\"svalidping\":" "30,"
"\"svalidhup\":" "35"
"}}"
"],"
"\"certs\": [" /* named individual certificates in BASE64 DER */
/*
* Let's Encrypt certs for warmcat.com / libwebsockets.org
*
* We fetch the real policy from there using SS and switch to
* using that.
*/
"{\"dst_root_x3\": \""
"MIIDSjCCAjKgAwIBAgIQRK+wgNajJ7qJMDmGLvhAazANBgkqhkiG9w0BAQUFADA/"
"MSQwIgYDVQQKExtEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdCBDby4xFzAVBgNVBAMT"
"DkRTVCBSb290IENBIFgzMB4XDTAwMDkzMDIxMTIxOVoXDTIxMDkzMDE0MDExNVow"
"PzEkMCIGA1UEChMbRGlnaXRhbCBTaWduYXR1cmUgVHJ1c3QgQ28uMRcwFQYDVQQD"
"Ew5EU1QgUm9vdCBDQSBYMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB"
"AN+v6ZdQCINXtMxiZfaQguzH0yxrMMpb7NnDfcdAwRgUi+DoM3ZJKuM/IUmTrE4O"
"rz5Iy2Xu/NMhD2XSKtkyj4zl93ewEnu1lcCJo6m67XMuegwGMoOifooUMM0RoOEq"
"OLl5CjH9UL2AZd+3UWODyOKIYepLYYHsUmu5ouJLGiifSKOeDNoJjj4XLh7dIN9b"
"xiqKqy69cK3FCxolkHRyxXtqqzTWMIn/5WgTe1QLyNau7Fqckh49ZLOMxt+/yUFw"
"7BZy1SbsOFU5Q9D8/RhcQPGX69Wam40dutolucbY38EVAjqr2m7xPi71XAicPNaD"
"aeQQmxkqtilX4+U9m5/wAl0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNV"
"HQ8BAf8EBAMCAQYwHQYDVR0OBBYEFMSnsaR7LHH62+FLkHX/xBVghYkQMA0GCSqG"
"SIb3DQEBBQUAA4IBAQCjGiybFwBcqR7uKGY3Or+Dxz9LwwmglSBd49lZRNI+DT69"
"ikugdB/OEIKcdBodfpga3csTS7MgROSR6cz8faXbauX+5v3gTt23ADq1cEmv8uXr"
"AvHRAosZy5Q6XkjEGB5YGV8eAlrwDPGxrancWYaLbumR9YbK+rlmM6pZW87ipxZz"
"R8srzJmwN0jP41ZL9c8PDHIyh8bwRLtTcm1D9SZImlJnt1ir/md2cXjbDaJWFBM5"
"JDGFoqgCWjBH4d1QB7wCCZAA62RjYJsWvIjJEubSfZGL+T0yjWW06XyxV3bqxbYo"
"Ob8VZRzI9neWagqNdwvYkQsEjgfbKbYK7p2CNTUQ"
"\"}"
"],"
"\"trust_stores\": [" /* named cert chains */
"{"
"\"name\": \"le_via_dst\","
"\"stack\": ["
"\"dst_root_x3\""
"]"
"}"
"],"
"\"s\": ["
/*
* "fetch_policy" decides from where the real policy
* will be fetched, if present. Otherwise the initial
* policy is treated as the whole, hardcoded, policy.
*/
"{\"fetch_policy\": {"
"\"endpoint\":" "\"warmcat.com\","
"\"port\":" "443,"
"\"protocol\":" "\"h1\","
"\"http_method\":" "\"GET\","
#if defined(VIA_LOCALHOST_SOCKS)
"\"http_url\":" "\"policy/minimal-proxy-socks.json\","
#else
"\"http_url\":" "\"policy/minimal-proxy-v4.2-v2.json\","
#endif
"\"tls\":" "true,"
"\"opportunistic\":" "true,"
"\"retry\":" "\"default\","
"\"tls_trust_store\":" "\"le_via_dst\""
"}},{"
/*
* "captive_portal_detect" describes
* what to do in order to check if the path to
* the Internet is being interrupted by a
* captive portal. If there's a larger policy
* fetched from elsewhere, it should also include
* this since it needs to be done at least after
* every DHCP acquisition
*/
"\"captive_portal_detect\": {"
"\"endpoint\": \"connectivitycheck.android.com\","
"\"http_url\": \"generate_204\","
"\"port\": 80,"
"\"protocol\": \"h1\","
"\"http_method\": \"GET\","
"\"opportunistic\": true,"
"\"http_expect\": 204,"
"\"http_fail_redirect\": true"
"}}"
"]}"
;
#endif
/*
 * Per-stream user object: lws_ss_create() allocates one of these per stream
 * (ssi.user_alloc == sizeof(myss_t)) and passes it to the rx / tx / state
 * callbacks as userobj.
 */
typedef struct myss {
	struct lws_ss_handle	*ss;		/* filled in via ssi.handle_offset */
	void			*opaque_data;	/* bound via ssi.opaque_user_data_offset */
	/* ... application specific state ... */

	lws_sorted_usec_list_t	sul;		/* scheduler list entry; NOTE(review):
						 * not used in this chunk, confirm usage
						 * elsewhere in the file */
} myss_t;
#if !defined(LWS_SS_USE_SSPC)
/*
 * Canned OAuth2 refresh-token POST body, registered as the AUTH root blob in
 * app_system_state_nf() at LWS_SYSTATE_REGISTERED so the auth plumbing has
 * something to work with.  Only needed when we connect directly; when going
 * via the SS proxy (LWS_SS_USE_SSPC) the proxy owns auth.
 *
 * NOTE(review): this is example/test data, not a live credential.
 */
static const char *canned_root_token_payload =
	"grant_type=refresh_token"
	"&refresh_token=Atzr|IwEBIJedGXjDqsU_vMxykqOMg"
	"SHfYe3CPcedueWEMWSDMaDnEmiW8RlR1Kns7Cb4B-TOSnqp7ifVsY4BMY2B8tpHfO39XP"
	"zfu9HapGjTR458IyHX44FE71pWJkGZ79uVBpljP4sazJuk8XS3Oe_yLnm_DIO6fU1nU3Y"
	"0flYmsOiOAQE_gRk_pdlmEtHnpMA-9rLw3mkY5L89Ty9kUygBsiFaYatouROhbsTn8-jW"
	"k1zZLUDpT6ICtBXSnrCIg0pUbZevPFhTwdXd6eX-u4rq0W-XaDvPWFO7au-iPb4Zk5eZE"
	"iX6sissYrtNmuEXc2uHu7MnQO1hHCaTdIO2CANVumf-PHSD8xseamyh04sLV5JgFzY45S"
	"KvKMajiUZuLkMokOx86rjC2Hdkx5DO7G-dbG1ufBDG-N79pFMSs7Ck5pc283IdLoJkCQc"
	"AGvTX8o8I29QqkcGou-9TKhOJmpX8As94T61ok0UqqEKPJ7RhfQHHYdCtsdwxgvfVr9qI"
	"xL_hDCcTho8opCVX-6QhJHl6SQFlTw13"
	"&client_id="
		"amzn1.application-oa2-client.4823334c434b4190a2b5a42c07938a2d";
#endif
/* secure streams payload interface */
/*
 * Secure Stream rx callback: dumps incoming payload plus the "srv" / "test"
 * metadata, prints any CONMON perf JSON as-is, and ends the test app with
 * success once the whole message has arrived.
 */
static lws_ss_state_return_t
myss_rx(void *userobj, const uint8_t *buf, size_t len, int flags)
{
	myss_t *h = (myss_t *)userobj;
	const char *srv = "not set", *test = "not set";
	size_t srv_len = 7, test_len = 7;	/* strlen("not set") */

	/*
	 * Streams marked "perf": true receive connection telemetry JSON with
	 * this flag set; just emit it and consume the event
	 */
	if (flags & LWSSS_FLAG_PERF_JSON) {
		lwsl_user("%.*s\n", (int)len, (const char *)buf);

		return LWSSSSRET_OK;
	}

	/* defaults above survive if either metadata item is absent */
	lws_ss_get_metadata(h->ss, "srv", (const void **)&srv, &srv_len);
	lws_ss_get_metadata(h->ss, "test", (const void **)&test, &test_len);

	lwsl_user("%s: len %d, flags: %d, srv: %.*s, test: %.*s\n", __func__,
		  (int)len, flags, (int)srv_len, srv, (int)test_len, test);
	lwsl_hexdump_info(buf, len);

	/*
	 * If we received the whole message, for our example it means
	 * we are done.
	 */
	if (flags & LWSSS_FLAG_EOM) {
		bad = 0;
		interrupted = 1;
	}

	return LWSSSSRET_OK;
}
/*
 * Secure Stream tx callback: this example only consumes data, so we always
 * decline to produce any payload.
 */
static lws_ss_state_return_t
myss_tx(void *userobj, lws_ss_tx_ordinal_t ord, uint8_t *buf, size_t *len,
	int *flags)
{
	/* rx-only stream: nothing ever to send */

	return LWSSSSRET_TX_DONT_SEND;
}
/*
 * Secure Stream state callback: logs every state change, kicks off the
 * connection at CREATING, arms the timeout and per-connection metadata at
 * CONNECTING, and ends the app (with distinct exit codes in "bad") when the
 * retries run out or the timeout fires.
 */
static lws_ss_state_return_t
myss_state(void *userobj, void *sh, lws_ss_constate_t state,
	   lws_ss_tx_ordinal_t ack)
{
	myss_t *h = (myss_t *)userobj;

	lwsl_user("%s: %s (%d), ord 0x%x\n", __func__,
		  lws_ss_state_name((int)state), state, (unsigned int)ack);

	switch (state) {
	case LWSSSCS_CREATING:
		/* ask for the onward connection as soon as we exist */
		return lws_ss_client_connect(h->ss);

	case LWSSSCS_CONNECTING:
		lws_ss_start_timeout(h->ss, timeout_ms);

		/* either set_metadata can fail, eg due to OOM; if so drop the
		 * connection and retry later */
		if (lws_ss_set_metadata(h->ss, "uptag", "myuptag123", 10) ||
		    lws_ss_set_metadata(h->ss, "ctype", "myctype", 7))
			return LWSSSSRET_DISCONNECT_ME;
		break;

	case LWSSSCS_ALL_RETRIES_FAILED:
		/* if we're out of retries, we want to close the app and FAIL */
		bad = 2;
		interrupted = 1;
		break;

	case LWSSSCS_QOS_ACK_REMOTE:
		lwsl_notice("%s: LWSSSCS_QOS_ACK_REMOTE\n", __func__);
		break;

	case LWSSSCS_TIMEOUT:
		lwsl_notice("%s: LWSSSCS_TIMEOUT\n", __func__);
		/* if we're out of time */
		bad = 3;
		interrupted = 1;
		break;

	case LWSSSCS_USER_BASE:
		lwsl_notice("%s: LWSSSCS_USER_BASE\n", __func__);
		break;

	default:
		break;
	}

	return LWSSSSRET_OK;
}
/*
 * lws_system state notifier: lets the app participate in the context's
 * system state machine.  We overlay forced captive-portal test policies,
 * register the canned auth token, and create our secure stream once the
 * system reaches OPERATIONAL.
 *
 * Return 0 to allow the state transition, -1 to fail it.
 */
static int
app_system_state_nf(lws_state_manager_t *mgr, lws_state_notify_link_t *link,
		    int current, int target)
{
	struct lws_context *context = lws_system_context_from_system_mgr(mgr);
#if !defined(LWS_SS_USE_SSPC)
	/* blob slot where the root auth token lives */
	lws_system_blob_t *ab = lws_system_get_blob(context,
				LWS_SYSBLOB_TYPE_AUTH, 1 /* AUTH_IDX_ROOT */);
	size_t size;
#endif

	/*
	 * For the things we care about, let's notice if we are trying to get
	 * past them when we haven't solved them yet, and make the system
	 * state wait while we trigger the dependent action.
	 */
	switch (target) {

#if !defined(LWS_SS_USE_SSPC)
	/*
	 * The proxy takes responsibility for this stuff if we get things
	 * done through that
	 */

	case LWS_SYSTATE_INITIALIZED: /* overlay on the hardcoded policy */
	case LWS_SYSTATE_POLICY_VALID: /* overlay on the loaded policy */
		/* only act when we have actually arrived at this state */
		if (target != current)
			break;

		if (force_cpd_fail_portal)

			/* this makes it look like we're behind a captive portal
			 * because the overriden address does a redirect */

			lws_ss_policy_overlay(context,
				      "{\"s\": [{\"captive_portal_detect\": {"
					 "\"endpoint\": \"google.com\","
					 "\"http_url\": \"/\","
					 "\"port\": 80"
				      "}}]}");

		if (force_cpd_fail_no_internet)

			/* this looks like no internet, because the overridden
			 * port doesn't have anything that will connect to us */

			lws_ss_policy_overlay(context,
				      "{\"s\": [{\"captive_portal_detect\": {"
					 "\"endpoint\": \"warmcat.com\","
					 "\"http_url\": \"/\","
					 "\"port\": 999"
				      "}}]}");
		break;

	case LWS_SYSTATE_REGISTERED:
		/* if a token is already set (eg, externally), leave it */
		size = lws_system_blob_get_size(ab);
		if (size)
			break;

		/* let's register our canned root token so auth can use it */
		lws_system_blob_direct_set(ab,
				(const uint8_t *)canned_root_token_payload,
				strlen(canned_root_token_payload));
		break;
#endif

	case LWS_SYSTATE_OPERATIONAL:
		if (current == LWS_SYSTATE_OPERATIONAL) {
			lws_ss_info_t ssi;

			/* We're making an outgoing secure stream ourselves */

			memset(&ssi, 0, sizeof(ssi));
			ssi.handle_offset = offsetof(myss_t, ss);
			ssi.opaque_user_data_offset = offsetof(myss_t,
							       opaque_data);
			ssi.rx = myss_rx;
			ssi.tx = myss_tx;
			ssi.state = myss_state;
			ssi.user_alloc = sizeof(myss_t);
			ssi.streamtype = test_respmap ? "respmap" : "mintest";

			if (lws_ss_create(context, 0, &ssi, NULL, NULL,
					  NULL, NULL)) {
				lwsl_err("%s: failed to create secure stream\n",
					 __func__);
				return -1;
			}
		}
		break;
	}

	return 0;
}
/*
 * NULL-terminated list of system state notifiers we register at context
 * creation time ("nl" is declared earlier in this file)
 */
static lws_state_notify_link_t * const app_notifier_list[] = {
	&nl, NULL
};
#if defined(LWS_WITH_SYS_METRICS)
/*
 * lws_system metrics sink: formats each published metric into a line of
 * text and logs it.  Histogram metrics format one line per bucket, so we
 * keep iterating until the bucket list is exhausted.
 */
static int
my_metric_report(lws_metric_pub_t *mp)
{
	lws_metric_bucket_t *sub = mp->u.hist.head;
	char line[192];

	for (;;) {
		if (lws_metrics_format(mp, &sub, line, sizeof(line)))
			lwsl_user("%s: %s\n", __func__, line);

		/* non-histogram metrics are done after one pass; histograms
		 * continue while buckets remain */
		if (!(mp->flags & LWSMTFL_REPORT_HIST) || !sub)
			break;
	}

	/* 0 = leave metric to accumulate, 1 = reset the metric */

	return 1;
}
/* hook our metrics dumper into lws_system via info.system_ops in main() */
static const lws_system_ops_t system_ops = {
	.metric_report = my_metric_report,
};
#endif
/*
 * SIGINT handler: ask the service loop in main() to exit.
 *
 * NOTE(review): for strict async-signal safety "interrupted" should be
 * declared volatile sig_atomic_t -- its declaration is earlier in the
 * file, confirm there.
 */
static void
sigint_handler(int sig)
{
	interrupted = 1;
}
/*
 * Entry point: parse options, create the lws context (either connecting
 * directly using the hardcoded policy, or via the SS proxy when built with
 * LWS_SS_USE_SSPC), run the event loop until the stream completes or we are
 * interrupted, then compare the outcome in "bad" against --expected-exit.
 *
 * Returns 0 when the observed result matches the expected one, else 1.
 */
int main(int argc, const char **argv)
{
	struct lws_context_creation_info info;
	struct lws_context *context;
	int n = 0, expected = 0;
	const char *p;

	signal(SIGINT, sigint_handler);

	memset(&info, 0, sizeof info);
	lws_cmdline_option_handle_builtin(argc, argv, &info);

	lwsl_user("LWS secure streams test client [-d<verb>]\n");

	/* these options are mutually exclusive if given */

	if (lws_cmdline_option(argc, argv, "--force-portal"))
		force_cpd_fail_portal = 1;

	if (lws_cmdline_option(argc, argv, "--force-no-internet"))
		force_cpd_fail_no_internet = 1;

	if (lws_cmdline_option(argc, argv, "--respmap"))
		test_respmap = 1;

	if ((p = lws_cmdline_option(argc, argv, "--timeout_ms")))
		timeout_ms = (unsigned int)atoi(p);

	/* keep the fd budget tiny: 1 listen + 6 service + 1 spare */
	info.fd_limit_per_thread = 1 + 6 + 1;
	info.port = CONTEXT_PORT_NO_LISTEN;

#if defined(LWS_SS_USE_SSPC)
	/* proxied build: we only need the sspc client protocols */
	info.protocols = lws_sspc_protocols;
	{
		const char *p;

		/* connect to ssproxy via UDS by default, else via
		 * tcp connection to this port */
		if ((p = lws_cmdline_option(argc, argv, "-p")))
			info.ss_proxy_port = (uint16_t)atoi(p);

		/* UDS "proxy.ss.lws" in abstract namespace, else this socket
		 * path; when -p given this can specify the network interface
		 * to bind to */
		if ((p = lws_cmdline_option(argc, argv, "-i")))
			info.ss_proxy_bind = p;

		/* if -p given, -a specifies the proxy address to connect to */
		if ((p = lws_cmdline_option(argc, argv, "-a")))
			info.ss_proxy_address = p;
	}
#else
	/* direct build: we carry the policy and do tls ourselves */
	info.pss_policies_json = default_ss_policy;
	info.options = LWS_SERVER_OPTION_EXPLICIT_VHOSTS |
		       LWS_SERVER_OPTION_H2_JUST_FIX_WINDOW_UPDATE_OVERFLOW |
		       LWS_SERVER_OPTION_DO_SSL_GLOBAL_INIT;
#endif

	/* integrate us with lws system state management when context created */

	nl.name = "app";
	nl.notify_cb = app_system_state_nf;
	info.register_notifier_list = app_notifier_list;

#if defined(LWS_WITH_SYS_METRICS)
	info.system_ops = &system_ops;
	info.metrics_prefix = "ssmex";
#endif

	/* create the context */

	context = lws_create_context(&info);
	if (!context) {
		lwsl_err("lws init failed\n");
		goto bail;
	}

#if !defined(LWS_SS_USE_SSPC)

	/*
	 * If we're being a proxied client, the proxy does all this
	 */

	/*
	 * Set the related lws_system blobs
	 *
	 * ...direct_set() sets a pointer, so the thing pointed to has to have
	 * a suitable lifetime, eg, something that already exists on the heap or
	 * a const string in .rodata like this
	 */
	lws_system_blob_direct_set(lws_system_get_blob(context,
				   LWS_SYSBLOB_TYPE_DEVICE_SERIAL, 0),
				   (const uint8_t *)"SN12345678", 10);
	lws_system_blob_direct_set(lws_system_get_blob(context,
				   LWS_SYSBLOB_TYPE_DEVICE_FW_VERSION, 0),
				   (const uint8_t *)"v0.01", 5);

	/*
	 * ..._heap_append() appends to a buflist kind of arrangement on heap,
	 * just one block is fine, otherwise it will concatenate the fragments
	 * in the order they were appended (and take care of freeing them at
	 * context destroy time). ..._heap_empty() is also available to remove
	 * everything that was already allocated.
	 *
	 * Here we use _heap_append() just so it's tested as well as direct set.
	 */
	lws_system_blob_heap_append(lws_system_get_blob(context,
				    LWS_SYSBLOB_TYPE_DEVICE_TYPE, 0),
				   (const uint8_t *)"spacerocket", 11);
#endif

	/* the event loop */

	while (n >= 0 && !interrupted)
		n = lws_service(context, 0);

	lws_context_destroy(context);

bail:
	/* "bad" is set by the rx / state callbacks: 0 = success */
	if ((p = lws_cmdline_option(argc, argv, "--expected-exit")))
		expected = atoi(p);

	if (bad == expected) {
		lwsl_user("Completed: OK (seen expected %d)\n", expected);

		return 0;
	} else
		lwsl_err("Completed: failed: exit %d, expected %d\n", bad, expected);

	return 1;
}

View file

@ -111,7 +111,7 @@ static const char * const default_ss_policy =
"\"port\":" "443,"
"\"protocol\":" "\"h1\","
"\"http_method\":" "\"GET\","
"\"http_url\":" "\"policy/minimal-proxy-v4.2.json\","
"\"http_url\":" "\"policy/minimal-proxy-v4.2-v2.json\","
"\"tls\":" "true,"
"\"opportunistic\":" "true,"
"\"retry\":" "\"default\","