
sspc: cleanup bridged wsi

Andy Green 2020-12-27 19:34:30 +00:00
parent ef6bebda3c
commit e11d78dc5e
10 changed files with 110 additions and 50 deletions


@@ -318,10 +318,13 @@ lws_client_connect_via_info(const struct lws_client_connect_info *i)
i->opaque_user_data;
#if defined(LWS_WITH_SECURE_STREAMS)
/* any of these imply we are a client wsi bound to an SS, which
* implies our opaque user ptr is the ss (or sspc if PROXY_LINK) handle
*/
wsi->for_ss = !!(i->ssl_connection & (LCCSCF_SECSTREAM_CLIENT | LCCSCF_SECSTREAM_PROXY_LINK | LCCSCF_SECSTREAM_PROXY_ONWARD));
wsi->client_bound_sspc = !!(i->ssl_connection & LCCSCF_SECSTREAM_PROXY_LINK); /* so wsi close understands need to remove sspc ptr to wsi */
/* implies our opaque user ptr is the ss handle */
wsi->client_proxy_onward = !!(i->ssl_connection & LCCSCF_SECSTREAM_PROXY_ONWARD);
if (wsi->for_ss) {
/* it's related to ss... the options are
*
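These flag assignments mean the meaning of opaque_user_data is fixed at connect time by the LCCSCF_ bits the caller sets. A caller-side sketch under assumed placeholder names (the endpoint and handle here are illustrative, not from the patch):

#include <libwebsockets.h>
#include <string.h>

/* minimal sketch: mark a client connection as bound to an SS handle so
 * the close path knows what the opaque pointer means */
static int
connect_for_ss(struct lws_context *cx, void *ss_handle, int via_proxy_link)
{
	struct lws_client_connect_info i;

	memset(&i, 0, sizeof(i));
	i.context = cx;
	i.address = "example.com";	/* placeholder endpoint */
	i.port = 443;
	i.ssl_connection = LCCSCF_USE_SSL | LCCSCF_SECSTREAM_CLIENT;
	if (via_proxy_link)
		/* opaque will be read as an lws_sspc_handle_t * on close */
		i.ssl_connection |= LCCSCF_SECSTREAM_PROXY_LINK;
	i.opaque_user_data = ss_handle;

	return !lws_client_connect_via_info(&i);
}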


@@ -462,11 +462,17 @@ __lws_close_free_wsi(struct lws *wsi, enum lws_close_status reason,
if (lws_rops_fidx(wsi->role_ops, LWS_ROPS_close_via_role_protocol) &&
lws_rops_func_fidx(wsi->role_ops, LWS_ROPS_close_via_role_protocol).
close_via_role_protocol(wsi, reason))
close_via_role_protocol(wsi, reason)) {
lwsl_info("%s: clsoe_via_role took over: %s (sockfd %d)\n", __func__,
lws_wsi_tag(wsi), wsi->desc.sockfd);
return;
}
just_kill_connection:
lwsl_debug("%s: real just_kill_connection A: %s (sockfd %d)\n", __func__,
lws_wsi_tag(wsi), wsi->desc.sockfd);
#if defined(LWS_WITH_FILE_OPS) && (defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2))
if (lwsi_role_http(wsi) && lwsi_role_server(wsi) &&
wsi->http.fop_fd != NULL)
@@ -511,40 +517,6 @@ just_kill_connection:
wsi->protocol_bind_balance = 0;
}
#if defined(LWS_WITH_SECURE_STREAMS) && defined(LWS_WITH_SERVER)
if (wsi->for_ss) {
lwsl_debug("%s: for_ss\n", __func__);
/*
* We were adopted for a particular ss, but, eg, we may not
* have succeeded with the connection... we are closing which is
* good, but we have to invalidate any pointer the related ss
* handle may be holding on us
*/
#if defined(LWS_WITH_SECURE_STREAMS_PROXY_API)
if (wsi->client_bound_sspc) {
lws_sspc_handle_t *h = (lws_sspc_handle_t *)wsi->a.opaque_user_data;
if (h) { // && (h->info.flags & LWSSSINFLAGS_ACCEPTED)) {
h->cwsi = NULL;
wsi->a.opaque_user_data = NULL;
}
} else
#endif
{
lws_ss_handle_t *h = (lws_ss_handle_t *)wsi->a.opaque_user_data;
if (h) { // && (h->info.flags & LWSSSINFLAGS_ACCEPTED)) {
if (h->ss_dangling_connected)
(void)lws_ss_event_helper(h, LWSSSCS_DISCONNECTED);
h->wsi = NULL;
wsi->a.opaque_user_data = NULL;
}
}
}
#endif
#if defined(LWS_WITH_CLIENT)
if ((
#if defined(LWS_ROLE_WS)
@@ -629,7 +601,7 @@ just_kill_connection:
#endif
}
lwsl_debug("%s: real just_kill_connection: %s (sockfd %d)\n", __func__,
lwsl_info("%s: real just_kill_connection: %s (sockfd %d)\n", __func__,
lws_wsi_tag(wsi), wsi->desc.sockfd);
#ifdef LWS_WITH_HUBBUB
@@ -692,6 +664,8 @@ just_kill_connection:
*/
ccb = 1;
lwsl_info("%s: %s: cce=%d\n", __func__, lws_wsi_tag(wsi), ccb);
pro = wsi->a.protocol;
if (wsi->already_did_cce)
@@ -722,6 +696,56 @@ just_kill_connection:
#if defined(LWS_ROLE_RAW_FILE)
async_close:
#endif
#if defined(LWS_WITH_SECURE_STREAMS) && defined(LWS_WITH_SERVER)
if (wsi->for_ss) {
lwsl_debug("%s: for_ss\n", __func__);
/*
* We were adopted for a particular ss, but, eg, we may not
* have succeeded with the connection... we are closing which is
* good, but we have to invalidate any pointer the related ss
* handle may be holding on us
*/
#if defined(LWS_WITH_SECURE_STREAMS_PROXY_API)
if (wsi->client_proxy_onward) {
/*
* We are an onward proxied wsi at the proxy,
* opaque is the proxy's "conn", we must remove its pointer
* to us since we are destroying
*/
lws_proxy_clean_conn_ss(wsi);
} else
if (wsi->client_bound_sspc) {
lws_sspc_handle_t *h = (lws_sspc_handle_t *)wsi->a.opaque_user_data;
if (h) { // && (h->info.flags & LWSSSINFLAGS_ACCEPTED)) {
h->cwsi = NULL;
//wsi->a.opaque_user_data = NULL;
}
} else
#endif
{
lws_ss_handle_t *h = (lws_ss_handle_t *)wsi->a.opaque_user_data;
if (h) { // && (h->info.flags & LWSSSINFLAGS_ACCEPTED)) {
h->wsi = NULL;
wsi->a.opaque_user_data = NULL;
if (h->ss_dangling_connected &&
lws_ss_event_helper(h, LWSSSCS_DISCONNECTED) ==
LWSSSSRET_DESTROY_ME) {
lws_ss_destroy(&h);
}
}
}
}
#endif
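The relocated cleanup above severs the handle/wsi association in both directions and honours a DESTROY_ME verdict from the DISCONNECTED event. A condensed sketch of that pattern, using the private SS types that appear in the hunk:

/* sketch: sever the handle<->wsi pointers both ways, then deliver
 * DISCONNECTED; honour a DESTROY_ME request from the event helper */
static void
detach_ss_on_close(struct lws *wsi)
{
	lws_ss_handle_t *h = (lws_ss_handle_t *)wsi->a.opaque_user_data;

	if (!h)
		return;

	h->wsi = NULL;			/* handle forgets the wsi */
	wsi->a.opaque_user_data = NULL;	/* wsi forgets the handle */

	if (h->ss_dangling_connected &&
	    lws_ss_event_helper(h, LWSSSCS_DISCONNECTED) ==
						      LWSSSSRET_DESTROY_ME)
		lws_ss_destroy(&h);	/* user state cb asked us to */
}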
lws_remove_child_from_any_parent(wsi);
wsi->socket_is_permanently_unusable = 1;


@@ -845,6 +845,7 @@ struct lws {
unsigned int for_ss:1;
unsigned int bound_ss_proxy_conn:1;
unsigned int client_bound_sspc:1;
unsigned int client_proxy_onward:1;
#endif
#ifdef LWS_WITH_ACCESS_LOG
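Taken together with for_ss and bound_ss_proxy_conn, these bits disambiguate what a wsi's opaque_user_data points at. An illustrative decoder against the private struct lws fields, not part of the patch, with the semantics inferred from the comments in this diff:

/* illustrative only: what opaque_user_data means for each flag combo */
static const char *
ss_opaque_kind(const struct lws *wsi)
{
	if (wsi->client_bound_sspc)
		return "lws_sspc_handle_t * (client side of the proxy link)";
	if (wsi->client_proxy_onward)
		return "struct conn * (proxy's onward connection)";
	if (wsi->bound_ss_proxy_conn)
		return "struct conn * (accepted proxy link, server side)";
	if (wsi->for_ss)
		return "lws_ss_handle_t * (directly bound secure stream)";
	return "not a secure-streams wsi";
}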


@@ -161,6 +161,7 @@ typedef struct lws_ss_handle {
uint8_t being_serialized:1; /* we are not the consumer */
uint8_t destroying:1;
uint8_t ss_dangling_connected:1;
uint8_t proxy_onward:1; /* opaque is conn */
} lws_ss_handle_t;
/* connection helper that doesn't need to hang around after connection starts */
@@ -440,7 +441,7 @@ _lws_ss_set_metadata(lws_ss_metadata_t *omd, const char *name,
const void *value, size_t len);
lws_ss_state_return_t
_lws_ss_client_connect(lws_ss_handle_t *h, int is_retry);
_lws_ss_client_connect(lws_ss_handle_t *h, int is_retry, void *conn_if_sspc_onw);
int
__lws_ss_proxy_bind_ss_to_conn_wsi(void *parconn, size_t dsh_size);
@@ -449,6 +450,9 @@ struct lws_vhost *
lws_ss_policy_ref_trust_store(struct lws_context *context,
const lws_ss_policy_t *pol, char doref);
void
lws_proxy_clean_conn_ss(struct lws *wsi);
#if defined(LWS_WITH_SECURE_STREAMS_STATIC_POLICY_ONLY)
int
lws_ss_policy_unref_trust_store(struct lws_context *context,
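The extra parameter distinguishes the proxy's onward connects from every other caller of _lws_ss_client_connect(). An illustrative wrapper, where conn_or_null stands in for the proxy-side struct conn (a placeholder name in this sketch):

/* illustrative: the two modes of the widened internal connect api */
static lws_ss_state_return_t
connect_direct_or_onward(lws_ss_handle_t *h, void *conn_or_null)
{
	/*
	 * NULL         : ordinary client / retry path
	 * proxy's conn : sets LCCSCF_SECSTREAM_PROXY_ONWARD on the
	 *                onward wsi so close can clean the conn
	 */
	return _lws_ss_client_connect(h, 0, conn_or_null);
}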


@@ -80,7 +80,7 @@ secstream_ws(struct lws *wsi, enum lws_callback_reasons reason, void *user,
#if defined(LWS_WITH_SERVER)
!(h->info.flags & LWSSSINFLAGS_ACCEPTED) && /* not server */
#endif
!h->txn_ok && !wsi->a.context->being_destroyed) {
!wsi->a.context->being_destroyed) {
r = lws_ss_backoff(h);
if (r != LWSSSSRET_OK)
return _lws_ss_handle_state_ret_CAN_DESTROY_HANDLE(r, wsi, &h);


@@ -156,8 +156,8 @@ callback_sspc_client(struct lws *wsi, enum lws_callback_reasons reason,
/*
* our ss proxy Unix Domain socket has closed...
*/
lwsl_info("%s: LWS_CALLBACK_RAW_CLOSE: proxy conn down\n",
__func__);
lwsl_info("%s: LWS_CALLBACK_RAW_CLOSE: %s proxy conn down, sspc h %s\n",
__func__, lws_wsi_tag(wsi), lws_sspc_tag(h));
if (h) {
h->cwsi = NULL;
/*
@@ -165,7 +165,8 @@ callback_sspc_client(struct lws *wsi, enum lws_callback_reasons reason,
*/
lws_sul_schedule(h->context, 0, &h->sul_retry,
lws_sspc_sul_retry_cb, LWS_US_PER_SEC);
}
} else
lwsl_notice("%s: no sspc on client proxy link close\n", __func__);
break;
case LWS_CALLBACK_RAW_RX:
@@ -547,6 +548,7 @@ lws_sspc_destroy(lws_sspc_handle_t **ph)
lws_dsh_destroy(&h->dsh);
if (h->cwsi) {
lws_set_opaque_user_data(h->cwsi, NULL);
lws_wsi_close(h->cwsi, LWS_TO_KILL_ASYNC);
h->cwsi = NULL;
}
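The added close is ordered so the wsi's opaque pointer is detached before the close is requested; that way the close path finds no stale pointer into the handle that is mid-destruction. Condensed from the hunk above:

/* sketch of the teardown ordering for the proxy link wsi */
if (h->cwsi) {
	lws_set_opaque_user_data(h->cwsi, NULL);   /* wsi -> h link cut first */
	lws_wsi_close(h->cwsi, LWS_TO_KILL_ASYNC); /* then ask for the close */
	h->cwsi = NULL;                            /* h -> wsi link cut */
}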


@@ -84,6 +84,21 @@ typedef struct ss_proxy_onward {
struct conn *conn;
} ss_proxy_t;
void
lws_proxy_clean_conn_ss(struct lws *wsi)
{
#if 0
struct conn *conn;
if (!wsi)
return;
conn = (struct conn *)wsi->a.opaque_user_data;
if (conn && conn->ss)
conn->ss->wsi = NULL;
#endif
}
int
__lws_ss_proxy_bind_ss_to_conn_wsi(void *parconn, size_t dsh_size)
@@ -314,7 +329,8 @@ callback_ss_proxy(struct lws *wsi, enum lws_callback_reasons reason,
/* dsh is allocated when the onward ss is done */
pss->conn->wsi = wsi;
wsi->bound_ss_proxy_conn = 1;
wsi->bound_ss_proxy_conn = 1; /* opaque is conn */
pss->conn->state = LPCSPROX_WAIT_INITIAL_TX;
/*


@@ -419,7 +419,7 @@ lws_ss_deserialize_parse(struct lws_ss_serialization_parser *par,
* of the ways like DESTROY_ME etc
*/
switch (_lws_ss_client_connect(
proxy_pss_to_ss_h(pss), 0)) {
proxy_pss_to_ss_h(pss), 0, parconn)) {
case LWSSSSRET_OK:
/* well, connect is ongoing */
break;
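This is the producer end of the onward tagging: when the proxy's deserializer connects onward on a client's behalf, it now passes parconn so the resulting wsi carries client_proxy_onward and can reach lws_proxy_clean_conn_ss() at close time. A trimmed sketch of the dispatch (outcome handling beyond OK is elided here, as in the hunk):

/* trimmed sketch: the deserializer's onward connect, with the proxy's
 * conn threaded through to tag the resulting wsi */
switch (_lws_ss_client_connect(proxy_pss_to_ss_h(pss), 0, parconn)) {
case LWSSSSRET_OK:
	break;		/* connect is ongoing */
default:
	/* DESTROY_ME etc are handled by the surrounding code,
	 * which this hunk elides */
	break;
}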


@@ -322,7 +322,7 @@ lws_ss_smd_tx_cb(lws_sorted_usec_list_t *sul)
#endif
lws_ss_state_return_t
_lws_ss_client_connect(lws_ss_handle_t *h, int is_retry)
_lws_ss_client_connect(lws_ss_handle_t *h, int is_retry, void *conn_if_sspc_onw)
{
const char *prot, *_prot, *ipath, *_ipath, *ads, *_ads;
struct lws_client_connect_info i;
@@ -447,6 +447,12 @@ _lws_ss_client_connect(lws_ss_handle_t *h, int is_retry)
i.ssl_connection |= LCCSCF_SECSTREAM_CLIENT;
if (conn_if_sspc_onw) {
i.ssl_connection |= LCCSCF_SECSTREAM_PROXY_ONWARD;
// i.opaque_user_data = conn_if_sspc_onw;
}
i.address = ads;
i.port = port;
i.host = i.address;
@@ -518,7 +524,7 @@ _lws_ss_client_connect(lws_ss_handle_t *h, int is_retry)
lws_ss_state_return_t
lws_ss_client_connect(lws_ss_handle_t *h)
{
return _lws_ss_client_connect(h, 0);
return _lws_ss_client_connect(h, 0, 0);
}
/*
@@ -558,6 +564,7 @@ lws_ss_create(struct lws_context *context, int tsi, const lws_ss_info_t *ssi,
}
#endif
#if 0
if (ssi->flags & LWSSSINFLAGS_REGISTER_SINK) {
/*
* This can register a secure streams sink as well as normal
@@ -583,6 +590,7 @@ lws_ss_create(struct lws_context *context, int tsi, const lws_ss_info_t *ssi,
}
// lws_dll2_foreach_safe(&pt->ss_owner, NULL, lws_ss_destroy_dll);
}
#endif
/*
* We overallocate and point to things in the overallocation...
@@ -618,6 +626,9 @@ lws_ss_create(struct lws_context *context, int tsi, const lws_ss_info_t *ssi,
h->tsi = tsi;
h->seq = seq_owner;
if (h->info.flags & LWSSSINFLAGS_PROXIED)
h->proxy_onward = 1;
/* start of overallocated area */
p = (char *)&h[1];
@@ -826,7 +837,7 @@ late_bail:
)
#endif
))
switch (_lws_ss_client_connect(h, 0)) {
switch (_lws_ss_client_connect(h, 0, 0)) {
case LWSSSSRET_OK:
break;
case LWSSSSRET_TX_DONT_SEND:
@@ -1035,7 +1046,7 @@ lws_ss_request_tx(lws_ss_handle_t *h)
* Retries operate via lws_ss_request_tx(), explicitly ask for a
* reconnection to clear the retry limit
*/
r = _lws_ss_client_connect(h, 1);
r = _lws_ss_client_connect(h, 1, 0);
if (r == LWSSSSRET_DESTROY_ME)
return r;
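The public surface is untouched by all this: retries still enter through lws_ss_request_tx(), which now forwards the extra argument as 0 while is_retry = 1 clears the retry limit. A hedged usage sketch, with my_ss as a placeholder handle:

/* usage sketch: requesting tx doubles as the reconnect request; the
 * caller must honour DESTROY_ME ("my_ss" is a placeholder handle) */
lws_ss_state_return_t r = lws_ss_request_tx(my_ss);

if (r == LWSSSSRET_DESTROY_ME)
	lws_ss_destroy(&my_ss);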


@@ -56,7 +56,6 @@ lws_ss_sys_auth_api_amazon_com_kick(lws_sorted_usec_list_t *sul)
struct lws_context *context = lws_container_of(sul, struct lws_context,
sul_api_amazon_com_kick);
lwsl_notice("%s\n", __func__);
lws_state_transition_steps(&context->mgr_system,
LWS_SYSTATE_OPERATIONAL);
}