mirror of https://github.com/warmcat/libwebsockets.git
synced 2025-03-09 00:00:04 +01:00
refactor: absorb other caches into buflist
1) Remove the whole ah rxbuf and put things on to the wsi buflist.
   This eliminates the whole detachability thing based on ah rxbuf
   state... ah can always be detached.

2) Remove h2 scratch and put it on the wsi buflist.

3) Remove preamble_rx and use the wsi buflist.
   This was used in the case adopted sockets had already been read.

Basically there are now only three forced service scenarios:

 - something in buflist (and not in state LRS_DEFERRING_ACTION)
 - tls layer has buffered rx
 - extension has buffered rx

This is a net removal of around 400 lines of special-casing.
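For orientation, a minimal sketch of the single-buflist pattern the diff below converges on: leftover rx is appended to the per-wsi buflist as a segment and later drained segment by segment during service. It only illustrates the lws_buflist calls touched by this commit; the helper name, the stash-then-drain framing and the error handling are illustrative assumptions, not code from the tree.

    #include <libwebsockets.h>

    /*
     * Illustrative helper (not from the tree): stash unconsumed rx on the
     * buflist, then drain whatever is buffered on a later service pass.
     */
    static int
    stash_then_drain(struct lws_buflist **head, const uint8_t *rx, size_t len)
    {
            uint8_t *seg;
            size_t left;

            /* copies len bytes of rx into a new segment; < 0 means OOM */
            if (lws_buflist_append_segment(head, rx, len) < 0)
                    return -1;

            /* later: walk the buffered segments until nothing is left */
            while ((left = lws_buflist_next_segment_len(head, &seg)) > 0) {
                    /* ... hand seg / left to lws_read_h1() or lws_read_h2() ... */

                    /* report how much was consumed; exhausted segments go away */
                    lws_buflist_use_segment(head, left);
            }

            return 0;
    }

In this commit the real callers are the role handlers (h1/h2/cgi), which replace the old ah rxbuf, h2 rx_scratch and preamble_rx paths with exactly this kind of append/drain on wsi->buflist.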
parent 1bf30c8620
commit 1d2094996e

26 changed files with 553 additions and 945 deletions
@@ -1156,13 +1156,6 @@ lws_create_context(struct lws_context_creation_info *info)
else
context->max_http_header_data = LWS_DEF_HEADER_LEN;

/*
* HTTP/1 piplining after POST gets read in pt_serv_buf_size but
* may need stashing in ah->rx, so ensure it's always big enough
*/
if ((int)context->max_http_header_data < (int)context->pt_serv_buf_size)
context->max_http_header_data = context->pt_serv_buf_size;

if (info->max_http_header_pool)
context->max_http_header_pool = info->max_http_header_pool;
else

@ -93,15 +93,13 @@ __lws_free_wsi(struct lws *wsi)
|
|||
wsi->user_space && !wsi->user_space_externally_allocated)
|
||||
lws_free(wsi->user_space);
|
||||
|
||||
lws_buflist_destroy_all_segments(&wsi->buflist_rxflow);
|
||||
lws_buflist_destroy_all_segments(&wsi->buflist);
|
||||
lws_free_set_NULL(wsi->trunc_alloc);
|
||||
lws_free_set_NULL(wsi->ws);
|
||||
lws_free_set_NULL(wsi->udp);
|
||||
|
||||
/* we may not have an ah, but may be on the waiting list... */
|
||||
lwsl_info("ah det due to close\n");
|
||||
/* we're closing, losing some rx is OK */
|
||||
lws_header_table_force_to_detachable_state(wsi);
|
||||
__lws_header_table_detach(wsi, 0);
|
||||
|
||||
if (wsi->vhost && wsi->vhost->lserv_wsi == wsi)
|
||||
|
@ -536,7 +534,7 @@ __lws_close_free_wsi(struct lws *wsi, enum lws_close_status reason, const char *
|
|||
struct lws_context_per_thread *pt;
|
||||
struct lws *wsi1, *wsi2;
|
||||
struct lws_context *context;
|
||||
struct lws_tokens eff_buf;
|
||||
struct lws_tokens ebuf;
|
||||
int n, m;
|
||||
|
||||
lwsl_info("%s: %p: caller: %s\n", __func__, wsi, caller);
|
||||
|
@ -546,9 +544,6 @@ __lws_close_free_wsi(struct lws *wsi, enum lws_close_status reason, const char *
|
|||
|
||||
lws_access_log(wsi);
|
||||
|
||||
/* we're closing, losing some rx is OK */
|
||||
lws_header_table_force_to_detachable_state(wsi);
|
||||
|
||||
context = wsi->context;
|
||||
pt = &context->pt[(int)wsi->tsi];
|
||||
lws_stats_atomic_bump(wsi->context, pt, LWSSTATS_C_API_CLOSE, 1);
|
||||
|
@ -706,13 +701,13 @@ __lws_close_free_wsi(struct lws *wsi, enum lws_close_status reason, const char *
|
|||
* if there are problems with send, just nuke the connection
|
||||
*/
|
||||
do {
|
||||
eff_buf.token = NULL;
|
||||
eff_buf.token_len = 0;
|
||||
ebuf.token = NULL;
|
||||
ebuf.len = 0;
|
||||
|
||||
/* show every extension the new incoming data */
|
||||
|
||||
m = lws_ext_cb_active(wsi,
|
||||
LWS_EXT_CB_FLUSH_PENDING_TX, &eff_buf, 0);
|
||||
LWS_EXT_CB_FLUSH_PENDING_TX, &ebuf, 0);
|
||||
if (m < 0) {
|
||||
lwsl_ext("Extension reports fatal error\n");
|
||||
goto just_kill_connection;
|
||||
|
@ -720,10 +715,10 @@ __lws_close_free_wsi(struct lws *wsi, enum lws_close_status reason, const char *
|
|||
|
||||
/* assuming they left us something to send, send it */
|
||||
|
||||
if (eff_buf.token_len)
|
||||
if (lws_issue_raw(wsi, (unsigned char *)eff_buf.token,
|
||||
eff_buf.token_len) !=
|
||||
eff_buf.token_len) {
|
||||
if (ebuf.len)
|
||||
if (lws_issue_raw(wsi, (unsigned char *)ebuf.token,
|
||||
ebuf.len) !=
|
||||
ebuf.len) {
|
||||
lwsl_debug("close: ext spill failed\n");
|
||||
goto just_kill_connection;
|
||||
}
|
||||
|
@ -856,8 +851,8 @@ just_kill_connection:
|
|||
lws_same_vh_protocol_remove(wsi);
|
||||
|
||||
lwsi_set_state(wsi, LRS_DEAD_SOCKET);
|
||||
lws_buflist_destroy_all_segments(&wsi->buflist_rxflow);
|
||||
lws_dll_lws_remove(&wsi->dll_rxflow);
|
||||
lws_buflist_destroy_all_segments(&wsi->buflist);
|
||||
lws_dll_lws_remove(&wsi->dll_buflist);
|
||||
|
||||
if (wsi->role_ops->close_role)
|
||||
wsi->role_ops->close_role(pt, wsi);
|
||||
|
@ -963,7 +958,8 @@ lws_close_free_wsi(struct lws *wsi, enum lws_close_status reason, const char *ca
|
|||
/* lws_buflist */
|
||||
|
||||
int
|
||||
lws_buflist_append_segment(struct lws_buflist **head, uint8_t *buf, size_t len)
|
||||
lws_buflist_append_segment(struct lws_buflist **head, const uint8_t *buf,
|
||||
size_t len)
|
||||
{
|
||||
int first = !*head;
|
||||
void *p = *head;
|
||||
|
@ -1066,6 +1062,24 @@ lws_buflist_use_segment(struct lws_buflist **head, size_t len)
|
|||
return (int)((*head)->len - (*head)->pos);
|
||||
}
|
||||
|
||||
void
|
||||
lws_buflist_describe(struct lws_buflist **head, void *id)
|
||||
{
|
||||
int n = 0;
|
||||
|
||||
if (*head == NULL)
|
||||
lwsl_notice("%p: buflist empty\n", id);
|
||||
|
||||
while (*head) {
|
||||
lwsl_notice("%p: %d: %llu / %llu (%llu left)\n", id, n,
|
||||
(unsigned long long)(*head)->pos,
|
||||
(unsigned long long)(*head)->len,
|
||||
(unsigned long long)(*head)->len - (*head)->pos);
|
||||
head = &((*head)->next);
|
||||
n++;
|
||||
}
|
||||
}
|
||||
|
||||
/* ... */
|
||||
|
||||
LWS_VISIBLE LWS_EXTERN const char *
|
||||
|
@ -2242,7 +2256,7 @@ __lws_rx_flow_control(struct lws *wsi)
|
|||
return 0;
|
||||
|
||||
/* stuff is still buffered, not ready to really accept new input */
|
||||
if (lws_buflist_next_segment_len(&wsi->buflist_rxflow, NULL)) {
|
||||
if (lws_buflist_next_segment_len(&wsi->buflist, NULL)) {
|
||||
/* get ourselves called back to deal with stashed buffer */
|
||||
lws_callback_on_writable(wsi);
|
||||
return 0;
|
||||
|
|
|
@ -2247,7 +2247,7 @@ struct lws_ext_option_arg {
|
|||
* change the data, eg, decompress it. user is pointing to the
|
||||
* extension's private connection context data, in is pointing
|
||||
* to an lws_tokens struct, it consists of a char * pointer called
|
||||
* token, and an int called token_len. At entry, these are
|
||||
* token, and an int called len. At entry, these are
|
||||
* set to point to the received buffer and set to the content
|
||||
* length. If the extension will grow the content, it should use
|
||||
* a new buffer allocated in its private user context data and
|
||||
|
@ -3005,10 +3005,6 @@ struct lws_context_creation_info {
|
|||
* VHOST: If non-NULL, per-vhost list of advertised alpn, comma-
|
||||
* separated
|
||||
*/
|
||||
unsigned int h2_rx_scratch_size;
|
||||
/**< VHOST: size of the rx scratch buffer for each stream. 0 =
|
||||
* default (512 bytes). This affects the RX chunk size
|
||||
* at the callback. */
|
||||
|
||||
/* Add new things just above here ---^
|
||||
* This is part of the ABI, don't needlessly break compatibility
|
||||
|
@ -3981,12 +3977,12 @@ lws_chunked_html_process(struct lws_process_html_args *args,
|
|||
/** struct lws_tokens
|
||||
* you need these to look at headers that have been parsed if using the
|
||||
* LWS_CALLBACK_FILTER_CONNECTION callback. If a header from the enum
|
||||
* list below is absent, .token = NULL and token_len = 0. Otherwise .token
|
||||
* points to .token_len chars containing that header content.
|
||||
* list below is absent, .token = NULL and len = 0. Otherwise .token
|
||||
* points to .len chars containing that header content.
|
||||
*/
|
||||
struct lws_tokens {
|
||||
char *token; /**< pointer to start of the token */
|
||||
int token_len; /**< length of the token's value */
|
||||
int len; /**< length of the token's value */
|
||||
};
|
||||
|
||||
/* enum lws_token_indexes
|
||||
|
@ -5749,7 +5745,8 @@ struct lws_buflist;
|
|||
* it was a subsequent segment.
|
||||
*/
|
||||
LWS_VISIBLE LWS_EXTERN int
|
||||
lws_buflist_append_segment(struct lws_buflist **head, uint8_t *buf, size_t len);
|
||||
lws_buflist_append_segment(struct lws_buflist **head, const uint8_t *buf,
|
||||
size_t len);
|
||||
/**
|
||||
* lws_buflist_next_segment_len(): number of bytes left in current segment
|
||||
*
|
||||
|
@ -5790,6 +5787,9 @@ lws_buflist_use_segment(struct lws_buflist **head, size_t len);
|
|||
LWS_VISIBLE LWS_EXTERN void
|
||||
lws_buflist_destroy_all_segments(struct lws_buflist **head);
|
||||
|
||||
void
|
||||
lws_buflist_describe(struct lws_buflist **head, void *id);
|
||||
|
||||
/**
|
||||
* lws_ptr_diff(): helper to report distance between pointers as an int
|
||||
*
|
||||
|
|
|
@ -34,7 +34,7 @@ int lws_issue_raw(struct lws *wsi, unsigned char *buf, size_t len)
|
|||
int m;
|
||||
#endif
|
||||
|
||||
// lwsl_hexdump_notice(buf, len);
|
||||
// lwsl_hexdump_err(buf, len);
|
||||
|
||||
/*
|
||||
* Detect if we got called twice without going through the
|
||||
|
|
|
@ -647,9 +647,6 @@ struct lws_role_ops {
|
|||
int (*write_role_protocol)(struct lws *wsi, unsigned char *buf,
|
||||
size_t len, enum lws_write_protocol *wp);
|
||||
|
||||
/* cache rx due to rx flow control */
|
||||
int (*rxflow_cache)(struct lws *wsi, unsigned char *buf, int n,
|
||||
int len);
|
||||
/* get encapsulation parent */
|
||||
struct lws * (*encapsulation_parent)(struct lws *wsi);
|
||||
|
||||
|
@ -685,6 +682,11 @@ extern struct lws_role_ops role_ops_h1, role_ops_h2, role_ops_raw_skt,
|
|||
|
||||
#define lwsi_role_ws(wsi) (wsi->role_ops == &role_ops_ws)
|
||||
#define lwsi_role_h1(wsi) (wsi->role_ops == &role_ops_h1)
|
||||
#if defined(LWS_ROLE_CGI)
|
||||
#define lwsi_role_cgi(wsi) (wsi->role_ops == &role_ops_cgi)
|
||||
#else
|
||||
#define lwsi_role_cgi(wsi) (0)
|
||||
#endif
|
||||
#if defined(LWS_ROLE_H2)
|
||||
#define lwsi_role_h2(wsi) (wsi->role_ops == &role_ops_h2)
|
||||
#else
|
||||
|
@ -697,9 +699,9 @@ enum {
|
|||
LWS_HP_RET_BAIL_DIE,
|
||||
LWS_HP_RET_USER_SERVICE,
|
||||
|
||||
LWS_HPI_RET_DIE, /* we closed it */
|
||||
LWS_HPI_RET_WSI_ALREADY_DIED, /* we closed it */
|
||||
LWS_HPI_RET_HANDLED, /* no probs */
|
||||
LWS_HPI_RET_CLOSE_HANDLED, /* close it for us */
|
||||
LWS_HPI_RET_PLEASE_CLOSE_ME, /* close it for us */
|
||||
|
||||
LWS_UPG_RET_DONE,
|
||||
LWS_UPG_RET_CONTINUE,
|
||||
|
@ -913,11 +915,7 @@ struct allocated_headers {
|
|||
* the actual header data gets dumped as it comes in, into data[]
|
||||
*/
|
||||
uint8_t frag_index[WSI_TOKEN_COUNT];
|
||||
#if defined(LWS_WITH_ESP32)
|
||||
uint8_t rx[256];
|
||||
#else
|
||||
uint8_t rx[2048];
|
||||
#endif
|
||||
|
||||
#ifndef LWS_NO_CLIENT
|
||||
char initial_handshake_hash_base64[30];
|
||||
#endif
|
||||
|
@ -927,8 +925,6 @@ struct allocated_headers {
|
|||
uint32_t current_token_limit;
|
||||
int hdr_token_idx;
|
||||
|
||||
int16_t rxpos;
|
||||
int16_t rxlen;
|
||||
int16_t lextable_pos;
|
||||
|
||||
uint8_t in_use;
|
||||
|
@ -959,7 +955,7 @@ struct lws_context_per_thread {
|
|||
struct lws *tx_draining_ext_list;
|
||||
struct lws_dll_lws dll_head_timeout;
|
||||
struct lws_dll_lws dll_head_hrtimer;
|
||||
struct lws_dll_lws dll_head_rxflow; /* guys with pending rxflow */
|
||||
struct lws_dll_lws dll_head_buflist; /* guys with pending rxflow */
|
||||
#if defined(LWS_WITH_LIBUV) || defined(LWS_WITH_LIBEVENT)
|
||||
struct lws_context *context;
|
||||
#endif
|
||||
|
@ -1139,7 +1135,6 @@ struct lws_vhost {
|
|||
|
||||
int listen_port;
|
||||
unsigned int http_proxy_port;
|
||||
unsigned int h2_rx_scratch_size;
|
||||
#if defined(LWS_WITH_SOCKS5)
|
||||
unsigned int socks_proxy_port;
|
||||
#endif
|
||||
|
@ -1815,7 +1810,6 @@ struct lws_h2_netconn {
|
|||
char goaway_str[32]; /* for rx */
|
||||
struct lws *swsi;
|
||||
struct lws_h2_protocol_send *pps; /* linked list */
|
||||
char *rx_scratch;
|
||||
|
||||
enum http2_hpack_state hpack;
|
||||
enum http2_hpack_type hpack_type;
|
||||
|
@ -1848,9 +1842,6 @@ struct lws_h2_netconn {
|
|||
uint32_t goaway_err;
|
||||
uint32_t hpack_hdr_len;
|
||||
|
||||
uint32_t rx_scratch_pos;
|
||||
uint32_t rx_scratch_len;
|
||||
|
||||
uint16_t hpack_pos;
|
||||
|
||||
uint8_t frame_state;
|
||||
|
@ -2066,7 +2057,7 @@ struct lws {
|
|||
|
||||
struct lws_dll_lws dll_timeout;
|
||||
struct lws_dll_lws dll_hrtimer;
|
||||
struct lws_dll_lws dll_rxflow; /* guys with pending rxflow */
|
||||
struct lws_dll_lws dll_buflist; /* guys with pending rxflow */
|
||||
|
||||
#if defined(LWS_WITH_PEER_LIMITS)
|
||||
struct lws_peer *peer;
|
||||
|
@ -2074,7 +2065,6 @@ struct lws {
|
|||
struct allocated_headers *ah;
|
||||
struct lws *ah_wait_list;
|
||||
struct lws_udp *udp;
|
||||
unsigned char *preamble_rx;
|
||||
#ifndef LWS_NO_CLIENT
|
||||
struct client_info_stash *stash;
|
||||
char *client_hostname_copy;
|
||||
|
@ -2085,7 +2075,7 @@ struct lws {
|
|||
void *user_space;
|
||||
void *opaque_parent_data;
|
||||
|
||||
struct lws_buflist *buflist_rxflow;
|
||||
struct lws_buflist *buflist;
|
||||
|
||||
/* truncated send handling */
|
||||
unsigned char *trunc_alloc; /* non-NULL means buffering in progress */
|
||||
|
@ -2125,7 +2115,6 @@ struct lws {
|
|||
|
||||
/* ints */
|
||||
int position_in_fds_table;
|
||||
uint32_t preamble_rx_len;
|
||||
unsigned int trunc_alloc_len; /* size of malloc */
|
||||
unsigned int trunc_offset; /* where we are in terms of spilling */
|
||||
unsigned int trunc_len; /* how much is buffered */
|
||||
|
@ -2486,11 +2475,6 @@ _lws_header_table_reset(struct allocated_headers *ah);
|
|||
void
|
||||
__lws_header_table_reset(struct lws *wsi, int autoservice);
|
||||
|
||||
void
|
||||
lws_header_table_force_to_detachable_state(struct lws *wsi);
|
||||
int
|
||||
lws_header_table_is_in_detachable_state(struct lws *wsi);
|
||||
|
||||
LWS_EXTERN char * LWS_WARN_UNUSED_RESULT
|
||||
lws_hdr_simple_ptr(struct lws *wsi, enum lws_token_indexes h);
|
||||
|
||||
|
@ -3006,7 +2990,11 @@ lws_read_h1(struct lws *wsi, unsigned char *buf, lws_filepos_t len);
|
|||
int
|
||||
lws_callback_as_writeable(struct lws *wsi);
|
||||
int
|
||||
lws_read_or_use_preamble(struct lws_context_per_thread *pt, struct lws *wsi);
|
||||
lws_buflist_aware_read(struct lws_context_per_thread *pt, struct lws *wsi,
|
||||
struct lws_tokens *ebuf);
|
||||
int
|
||||
lws_buflist_aware_consume(struct lws *wsi, struct lws_tokens *ebuf, int used,
|
||||
int buffered);
|
||||
int
|
||||
lws_process_ws_upgrade(struct lws *wsi);
|
||||
int
|
||||
|
|
|
@ -368,8 +368,7 @@ lws_cgi(struct lws *wsi, const char * const *exec_array, int script_uri_path_len
|
|||
* Actually having made the env, as a cgi we don't need the ah
|
||||
* any more
|
||||
*/
|
||||
if (script_uri_path_len >= 0 &&
|
||||
lws_header_table_is_in_detachable_state(wsi))
|
||||
if (script_uri_path_len >= 0)
|
||||
lws_header_table_detach(wsi, 0);
|
||||
|
||||
/* we are ready with the redirection pipes... run the thing */
|
||||
|
|
|
@ -40,7 +40,7 @@ rops_handle_POLLIN_cgi(struct lws_context_per_thread *pt, struct lws *wsi,
|
|||
if (wsi->cgi_channel == LWS_STDIN &&
|
||||
lws_change_pollfd(wsi, LWS_POLLOUT, 0)) {
|
||||
lwsl_info("failed at set pollfd\n");
|
||||
return LWS_HPI_RET_DIE;
|
||||
return LWS_HPI_RET_WSI_ALREADY_DIED;
|
||||
}
|
||||
|
||||
args.ch = wsi->cgi_channel;
|
||||
|
@ -89,7 +89,6 @@ struct lws_role_ops role_ops_cgi = {
|
|||
/* callback_on_writable */ NULL,
|
||||
/* tx_credit */ NULL,
|
||||
/* write_role_protocol */ NULL,
|
||||
/* rxflow_cache */ NULL,
|
||||
/* encapsulation_parent */ NULL,
|
||||
/* alpn_negotiated */ NULL,
|
||||
/* close_via_role_protocol */ NULL,
|
||||
|
|
|
@ -53,10 +53,10 @@ lws_handshake_client(struct lws *wsi, unsigned char **buf, size_t len)
|
|||
continue;
|
||||
}
|
||||
/* account for what we're using in rxflow buffer */
|
||||
if (lws_buflist_next_segment_len(&wsi->buflist_rxflow, NULL) &&
|
||||
!lws_buflist_use_segment(&wsi->buflist_rxflow, 1)) {
|
||||
if (lws_buflist_next_segment_len(&wsi->buflist, NULL) &&
|
||||
!lws_buflist_use_segment(&wsi->buflist, 1)) {
|
||||
lwsl_debug("%s: removed wsi %p from rxflow list\n", __func__, wsi);
|
||||
lws_dll_lws_remove(&wsi->dll_rxflow);
|
||||
lws_dll_lws_remove(&wsi->dll_buflist);
|
||||
}
|
||||
|
||||
if (lws_client_rx_sm(wsi, *(*buf)++)) {
|
||||
|
|
|
@ -117,7 +117,8 @@ lws_read_h1(struct lws *wsi, unsigned char *buf, lws_filepos_t len)
|
|||
|
||||
case LRS_BODY:
|
||||
http_postbody:
|
||||
//lwsl_notice("http post body\n");
|
||||
lwsl_notice("%s: http post body: remain %d\n", __func__,
|
||||
(int)wsi->http.rx_content_remain);
|
||||
while (len && wsi->http.rx_content_remain) {
|
||||
/* Copy as much as possible, up to the limit of:
|
||||
* what we have in the read buffer (len)
|
||||
|
@ -178,7 +179,8 @@ postbody_completion:
|
|||
if (!wsi->cgi)
|
||||
#endif
|
||||
{
|
||||
lwsl_info("HTTP_BODY_COMPLETION: %p (%s)\n", wsi, wsi->protocol->name);
|
||||
lwsl_info("HTTP_BODY_COMPLETION: %p (%s)\n",
|
||||
wsi, wsi->protocol->name);
|
||||
n = wsi->protocol->callback(wsi,
|
||||
LWS_CALLBACK_HTTP_BODY_COMPLETION,
|
||||
wsi->user_space, NULL, 0);
|
||||
|
@ -256,8 +258,8 @@ int
|
|||
lws_h1_server_socket_service(struct lws *wsi, struct lws_pollfd *pollfd)
|
||||
{
|
||||
struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
|
||||
struct allocated_headers *ah;
|
||||
int n, len;
|
||||
struct lws_tokens ebuf;
|
||||
int n, buffered;
|
||||
|
||||
if (lwsi_state(wsi) == LRS_DEFERRING_ACTION)
|
||||
goto try_pollout;
|
||||
|
@ -268,11 +270,12 @@ lws_h1_server_socket_service(struct lws *wsi, struct lws_pollfd *pollfd)
|
|||
goto try_pollout;
|
||||
|
||||
/*
|
||||
* If we previously just did POLLIN when IN and OUT were
|
||||
* signalled (because POLLIN processing may have used up
|
||||
* the POLLOUT), don't let that happen twice in a row...
|
||||
* next time we see the situation favour POLLOUT
|
||||
* If we previously just did POLLIN when IN and OUT were signaled
|
||||
* (because POLLIN processing may have used up the POLLOUT), don't let
|
||||
* that happen twice in a row... next time we see the situation favour
|
||||
* POLLOUT
|
||||
*/
|
||||
|
||||
if (wsi->favoured_pollin &&
|
||||
(pollfd->revents & pollfd->events & LWS_POLLOUT)) {
|
||||
// lwsl_notice("favouring pollout\n");
|
||||
|
@ -284,6 +287,7 @@ lws_h1_server_socket_service(struct lws *wsi, struct lws_pollfd *pollfd)
|
|||
* We haven't processed that the tunnel is set up yet, so
|
||||
* defer reading
|
||||
*/
|
||||
|
||||
if (lwsi_state(wsi) == LRS_SSL_ACK_PENDING)
|
||||
return LWS_HPI_RET_HANDLED;
|
||||
|
||||
|
@ -291,158 +295,89 @@ lws_h1_server_socket_service(struct lws *wsi, struct lws_pollfd *pollfd)
|
|||
|
||||
if ((lwsi_state(wsi) == LRS_ESTABLISHED ||
|
||||
lwsi_state(wsi) == LRS_ISSUING_FILE ||
|
||||
lwsi_state(wsi) == LRS_HEADERS)) {
|
||||
if (!wsi->ah) {
|
||||
/* no autoservice beacuse we will do it next */
|
||||
if (lws_header_table_attach(wsi, 0)) {
|
||||
lwsl_info("wsi %p: ah get fail\n", wsi);
|
||||
goto try_pollout;
|
||||
}
|
||||
}
|
||||
ah = wsi->ah;
|
||||
|
||||
assert(ah->rxpos <= ah->rxlen);
|
||||
/* if nothing in ah rx buffer, get some fresh rx */
|
||||
if (ah->rxpos == ah->rxlen) {
|
||||
|
||||
if (wsi->preamble_rx) {
|
||||
memcpy(ah->rx, wsi->preamble_rx, wsi->preamble_rx_len);
|
||||
lws_free_set_NULL(wsi->preamble_rx);
|
||||
ah->rxlen = wsi->preamble_rx_len;
|
||||
wsi->preamble_rx_len = 0;
|
||||
} else {
|
||||
ah->rxlen = lws_ssl_capable_read(wsi, ah->rx,
|
||||
sizeof(ah->rx));
|
||||
}
|
||||
|
||||
ah->rxpos = 0;
|
||||
switch (ah->rxlen) {
|
||||
case 0:
|
||||
lwsl_info("%s: read 0 len a\n",
|
||||
__func__);
|
||||
wsi->seen_zero_length_recv = 1;
|
||||
lws_change_pollfd(wsi, LWS_POLLIN, 0);
|
||||
goto try_pollout;
|
||||
//goto fail;
|
||||
|
||||
case LWS_SSL_CAPABLE_ERROR:
|
||||
goto fail;
|
||||
case LWS_SSL_CAPABLE_MORE_SERVICE:
|
||||
ah->rxlen = ah->rxpos = 0;
|
||||
goto try_pollout;
|
||||
}
|
||||
lwsi_state(wsi) == LRS_HEADERS ||
|
||||
lwsi_state(wsi) == LRS_BODY)) {
|
||||
if (!wsi->ah &&
|
||||
lws_header_table_attach(wsi, 0)) {
|
||||
lwsl_info("wsi %p: ah get fail\n", wsi);
|
||||
goto try_pollout;
|
||||
}
|
||||
|
||||
if (!(ah->rxpos != ah->rxlen && ah->rxlen)) {
|
||||
lwsl_err("%s: assert: rxpos %d, rxlen %d\n",
|
||||
__func__, ah->rxpos, ah->rxlen);
|
||||
buffered = lws_buflist_aware_read(pt, wsi, &ebuf);
|
||||
switch (ebuf.len) {
|
||||
case 0:
|
||||
lwsl_info("%s: read 0 len a\n",
|
||||
__func__);
|
||||
wsi->seen_zero_length_recv = 1;
|
||||
lws_change_pollfd(wsi, LWS_POLLIN, 0);
|
||||
goto try_pollout;
|
||||
//goto fail;
|
||||
|
||||
assert(0);
|
||||
case LWS_SSL_CAPABLE_ERROR:
|
||||
goto fail;
|
||||
case LWS_SSL_CAPABLE_MORE_SERVICE:
|
||||
goto try_pollout;
|
||||
}
|
||||
|
||||
/* just ignore incoming if waiting for close */
|
||||
if (lwsi_state(wsi) == LRS_FLUSHING_BEFORE_CLOSE ||
|
||||
lwsi_state(wsi) == LRS_ISSUING_FILE)
|
||||
if (lwsi_state(wsi) == LRS_FLUSHING_BEFORE_CLOSE) {
|
||||
lwsl_notice("%s: just ignoring\n", __func__);
|
||||
goto try_pollout;
|
||||
}
|
||||
|
||||
if (lwsi_state(wsi) == LRS_ISSUING_FILE) {
|
||||
// lwsl_notice("stashing: wsi %p: bd %d\n", wsi, buffered);
|
||||
if (lws_buflist_aware_consume(wsi, &ebuf, 0, buffered))
|
||||
return LWS_HPI_RET_PLEASE_CLOSE_ME;
|
||||
|
||||
goto try_pollout;
|
||||
}
|
||||
|
||||
/*
|
||||
* otherwise give it to whoever wants it
|
||||
* according to the connection state
|
||||
* Otherwise give it to whoever wants it according to the
|
||||
* connection state
|
||||
*/
|
||||
#if defined(LWS_ROLE_H2)
|
||||
if (lwsi_role_h2(wsi) && lwsi_state(wsi) != LRS_BODY)
|
||||
n = lws_read_h2(wsi, ah->rx + ah->rxpos,
|
||||
ah->rxlen - ah->rxpos);
|
||||
n = lws_read_h2(wsi, (uint8_t *)ebuf.token, ebuf.len);
|
||||
else
|
||||
#endif
|
||||
n = lws_read_h1(wsi, ah->rx + ah->rxpos,
|
||||
ah->rxlen - ah->rxpos);
|
||||
n = lws_read_h1(wsi, (uint8_t *)ebuf.token, ebuf.len);
|
||||
if (n < 0) /* we closed wsi */
|
||||
return LWS_HPI_RET_DIE;
|
||||
return LWS_HPI_RET_WSI_ALREADY_DIED;
|
||||
|
||||
if (!wsi->ah)
|
||||
return LWS_HPI_RET_HANDLED;
|
||||
if (wsi->ah->rxlen)
|
||||
wsi->ah->rxpos += n;
|
||||
lwsl_debug("%s: consumed %d\n", __func__, n);
|
||||
|
||||
lwsl_debug("%s: wsi %p: ah read rxpos %d, rxlen %d\n",
|
||||
__func__, wsi, wsi->ah->rxpos,
|
||||
wsi->ah->rxlen);
|
||||
if (lws_buflist_aware_consume(wsi, &ebuf, n, buffered))
|
||||
return LWS_HPI_RET_PLEASE_CLOSE_ME;
|
||||
|
||||
if (lws_header_table_is_in_detachable_state(wsi) &&
|
||||
(wsi->role_ops == &role_ops_raw_skt ||
|
||||
wsi->role_ops == &role_ops_raw_file)) // ???
|
||||
lws_header_table_detach(wsi, 1);
|
||||
/*
|
||||
* during the parsing our role changed to something non-http,
|
||||
* so the ah has no further meaning
|
||||
*/
|
||||
|
||||
/* during the parsing we upgraded to ws */
|
||||
|
||||
if (wsi->ah && wsi->ah->rxpos == wsi->ah->rxlen &&
|
||||
lwsi_role_ws(wsi)) {
|
||||
lwsl_info("%s: %p: dropping ah on ws post-upgrade\n",
|
||||
__func__, wsi);
|
||||
lws_header_table_force_to_detachable_state(wsi);
|
||||
if (wsi->ah &&
|
||||
!lwsi_role_h1(wsi) &&
|
||||
!lwsi_role_h2(wsi) &&
|
||||
!lwsi_role_cgi(wsi))
|
||||
lws_header_table_detach(wsi, 0);
|
||||
}
|
||||
|
||||
return LWS_HPI_RET_HANDLED;
|
||||
}
|
||||
|
||||
len = lws_read_or_use_preamble(pt, wsi);
|
||||
if (len < 0)
|
||||
goto fail;
|
||||
|
||||
if (!len)
|
||||
goto try_pollout;
|
||||
|
||||
/* just ignore incoming if waiting for close */
|
||||
if (lwsi_state(wsi) != LRS_FLUSHING_BEFORE_CLOSE &&
|
||||
lwsi_state(wsi) != LRS_ISSUING_FILE) {
|
||||
/*
|
||||
* this may want to send
|
||||
* (via HTTP callback for example)
|
||||
*
|
||||
* returns number of bytes used
|
||||
*/
|
||||
#if defined(LWS_ROLE_H2)
|
||||
if (lwsi_role_h2(wsi) && lwsi_state(wsi) != LRS_BODY)
|
||||
n = lws_read_h2(wsi, pt->serv_buf, len);
|
||||
else
|
||||
#endif
|
||||
n = lws_read_h1(wsi, pt->serv_buf, len);
|
||||
if (n < 0) /* we closed wsi */
|
||||
return LWS_HPI_RET_DIE;
|
||||
|
||||
if (n != len) {
|
||||
if (wsi->preamble_rx) {
|
||||
lwsl_err("DISCARDING %d (ah %p)\n", len - n, wsi->ah);
|
||||
|
||||
goto fail;
|
||||
}
|
||||
assert(n < len);
|
||||
wsi->preamble_rx = lws_malloc(len - n, "preamble_rx");
|
||||
if (!wsi->preamble_rx) {
|
||||
lwsl_err("OOM\n");
|
||||
goto fail;
|
||||
}
|
||||
memcpy(wsi->preamble_rx, pt->serv_buf + n, len - n);
|
||||
wsi->preamble_rx_len = (int)len - n;
|
||||
lwsl_debug("stashed %d\n", (int)wsi->preamble_rx_len);
|
||||
}
|
||||
|
||||
/*
|
||||
* he may have used up the
|
||||
* writability above, if we will defer POLLOUT
|
||||
* processing in favour of POLLIN, note it
|
||||
* He may have used up the writability above, if we will defer
|
||||
* POLLOUT processing in favour of POLLIN, note it
|
||||
*/
|
||||
|
||||
if (pollfd->revents & LWS_POLLOUT)
|
||||
wsi->favoured_pollin = 1;
|
||||
|
||||
return LWS_HPI_RET_HANDLED;
|
||||
}
|
||||
|
||||
/*
|
||||
* he may have used up the
|
||||
* writability above, if we will defer POLLOUT
|
||||
* He may have used up the writability above, if we will defer POLLOUT
|
||||
* processing in favour of POLLIN, note it
|
||||
*/
|
||||
|
||||
if (pollfd->revents & LWS_POLLOUT)
|
||||
wsi->favoured_pollin = 1;
|
||||
|
||||
|
@ -463,12 +398,8 @@ try_pollout:
|
|||
wsi->could_have_pending = 0;
|
||||
|
||||
if (lwsi_state(wsi) == LRS_DEFERRING_ACTION) {
|
||||
lwsl_debug("%s: LRS_DEFERRING_ACTION now writable\n",
|
||||
__func__);
|
||||
lwsl_debug("%s: LRS_DEFERRING_ACTION now writable\n", __func__);
|
||||
|
||||
if (wsi->ah)
|
||||
lwsl_debug(" existing ah rxpos %d / rxlen %d\n",
|
||||
wsi->ah->rxpos, wsi->ah->rxlen);
|
||||
lwsi_set_state(wsi, LRS_ESTABLISHED);
|
||||
if (lws_change_pollfd(wsi, LWS_POLLOUT, 0)) {
|
||||
lwsl_info("failed at set pollfd\n");
|
||||
|
@ -496,9 +427,9 @@ try_pollout:
|
|||
}
|
||||
#endif
|
||||
|
||||
n = user_callback_handle_rxflow(wsi->protocol->callback,
|
||||
wsi, LWS_CALLBACK_HTTP_WRITEABLE,
|
||||
wsi->user_space, NULL, 0);
|
||||
n = user_callback_handle_rxflow(wsi->protocol->callback, wsi,
|
||||
LWS_CALLBACK_HTTP_WRITEABLE,
|
||||
wsi->user_space, NULL, 0);
|
||||
if (n < 0) {
|
||||
lwsl_info("writeable_fail\n");
|
||||
goto fail;
|
||||
|
@ -523,7 +454,7 @@ try_pollout:
|
|||
fail:
|
||||
lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS, "server socket svc fail");
|
||||
|
||||
return LWS_HPI_RET_DIE;
|
||||
return LWS_HPI_RET_WSI_ALREADY_DIED;
|
||||
}
|
||||
|
||||
static int
|
||||
|
@ -537,7 +468,7 @@ rops_handle_POLLIN_h1(struct lws_context_per_thread *pt, struct lws *wsi,
|
|||
#ifdef LWS_WITH_CGI
|
||||
if (wsi->cgi && (pollfd->revents & LWS_POLLOUT)) {
|
||||
if (lws_handle_POLLOUT_event(wsi, pollfd))
|
||||
return LWS_HPI_RET_CLOSE_HANDLED;
|
||||
return LWS_HPI_RET_PLEASE_CLOSE_ME;
|
||||
|
||||
return LWS_HPI_RET_HANDLED;
|
||||
}
|
||||
|
@ -572,7 +503,7 @@ rops_handle_POLLIN_h1(struct lws_context_per_thread *pt, struct lws *wsi,
|
|||
return n;
|
||||
if (lwsi_state(wsi) != LRS_SSL_INIT)
|
||||
if (lws_server_socket_service_ssl(wsi, LWS_SOCK_INVALID))
|
||||
return LWS_HPI_RET_CLOSE_HANDLED;
|
||||
return LWS_HPI_RET_PLEASE_CLOSE_ME;
|
||||
|
||||
return LWS_HPI_RET_HANDLED;
|
||||
}
|
||||
|
@ -606,7 +537,7 @@ rops_handle_POLLIN_h1(struct lws_context_per_thread *pt, struct lws *wsi,
|
|||
wsi, LWS_CALLBACK_RECEIVE_CLIENT_HTTP,
|
||||
wsi->user_space, NULL, 0)) {
|
||||
lwsl_info("RECEIVE_CLIENT_HTTP closed it\n");
|
||||
return LWS_HPI_RET_CLOSE_HANDLED;
|
||||
return LWS_HPI_RET_PLEASE_CLOSE_ME;
|
||||
}
|
||||
|
||||
return LWS_HPI_RET_HANDLED;
|
||||
|
@ -620,11 +551,11 @@ rops_handle_POLLIN_h1(struct lws_context_per_thread *pt, struct lws *wsi,
|
|||
if ((pollfd->revents & LWS_POLLOUT) &&
|
||||
lws_handle_POLLOUT_event(wsi, pollfd)) {
|
||||
lwsl_debug("POLLOUT event closed it\n");
|
||||
return LWS_HPI_RET_CLOSE_HANDLED;
|
||||
return LWS_HPI_RET_PLEASE_CLOSE_ME;
|
||||
}
|
||||
|
||||
if (lws_client_socket_service(wsi, pollfd, NULL))
|
||||
return LWS_HPI_RET_DIE;
|
||||
return LWS_HPI_RET_WSI_ALREADY_DIED;
|
||||
#endif
|
||||
|
||||
return LWS_HPI_RET_HANDLED;
|
||||
|
@ -641,40 +572,6 @@ int rops_handle_POLLOUT_h1(struct lws *wsi)
|
|||
return LWS_HP_RET_BAIL_OK;
|
||||
}
|
||||
|
||||
static int
|
||||
rops_service_flag_pending_h1(struct lws_context *context, int tsi)
|
||||
{
|
||||
struct lws_context_per_thread *pt = &context->pt[tsi];
|
||||
struct allocated_headers *ah;
|
||||
int forced = 0;
|
||||
|
||||
/* POLLIN faking (the pt lock is taken by the parent) */
|
||||
|
||||
/*
|
||||
* 3) For any wsi who have an ah with pending RX who did not
|
||||
* complete their current headers, and are not flowcontrolled,
|
||||
* fake their POLLIN status so they will be able to drain the
|
||||
* rx buffered in the ah
|
||||
*/
|
||||
ah = pt->ah_list;
|
||||
while (ah) {
|
||||
if ((ah->rxpos != ah->rxlen &&
|
||||
!ah->wsi->hdr_parsing_completed) || ah->wsi->preamble_rx) {
|
||||
pt->fds[ah->wsi->position_in_fds_table].revents |=
|
||||
pt->fds[ah->wsi->position_in_fds_table].events &
|
||||
LWS_POLLIN;
|
||||
if (pt->fds[ah->wsi->position_in_fds_table].revents &
|
||||
LWS_POLLIN) {
|
||||
forced = 1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
ah = ah->next;
|
||||
}
|
||||
|
||||
return forced;
|
||||
}
|
||||
|
||||
static int
|
||||
rops_write_role_protocol_h1(struct lws *wsi, unsigned char *buf, size_t len,
|
||||
enum lws_write_protocol *wp)
|
||||
|
@ -721,14 +618,13 @@ struct lws_role_ops role_ops_h1 = {
|
|||
/* init_context */ NULL,
|
||||
/* init_vhost */ NULL,
|
||||
/* periodic_checks */ NULL,
|
||||
/* service_flag_pending */ rops_service_flag_pending_h1,
|
||||
/* service_flag_pending */ NULL,
|
||||
/* handle_POLLIN */ rops_handle_POLLIN_h1,
|
||||
/* handle_POLLOUT */ rops_handle_POLLOUT_h1,
|
||||
/* perform_user_POLLOUT */ NULL,
|
||||
/* callback_on_writable */ NULL,
|
||||
/* tx_credit */ NULL,
|
||||
/* write_role_protocol */ rops_write_role_protocol_h1,
|
||||
/* rxflow_cache */ NULL,
|
||||
/* encapsulation_parent */ NULL,
|
||||
/* alpn_negotiated */ rops_alpn_negotiated_h1,
|
||||
/* close_via_role_protocol */ NULL,
|
||||
|
|
|
@ -1715,7 +1715,7 @@ lws_h2_parser(struct lws *wsi, unsigned char *in, lws_filepos_t inlen,
|
|||
|
||||
case LWS_H2_FRAME_TYPE_DATA:
|
||||
|
||||
// lwsl_notice("%s: LWS_H2_FRAME_TYPE_DATA\n", __func__);
|
||||
lwsl_notice("%s: LWS_H2_FRAME_TYPE_DATA\n", __func__);
|
||||
|
||||
/* let the network wsi live a bit longer if subs are active...
|
||||
* our frame may take a long time to chew through */
|
||||
|
@ -1725,6 +1725,9 @@ lws_h2_parser(struct lws *wsi, unsigned char *in, lws_filepos_t inlen,
|
|||
if (!h2n->swsi)
|
||||
break;
|
||||
|
||||
if (lws_buflist_next_segment_len(&h2n->swsi->buflist, NULL))
|
||||
lwsl_err("%s: substream has pending !!!\n", __func__);
|
||||
|
||||
if (lwsi_role_http(h2n->swsi) &&
|
||||
lwsi_state(h2n->swsi) == LRS_ESTABLISHED) {
|
||||
lwsi_set_state(h2n->swsi, LRS_BODY);
|
||||
|
@ -1774,15 +1777,16 @@ lws_h2_parser(struct lws *wsi, unsigned char *in, lws_filepos_t inlen,
|
|||
} else {
|
||||
|
||||
if (lwsi_state(h2n->swsi) == LRS_DEFERRING_ACTION) {
|
||||
lwsl_notice("appending because we are in LRS_DEFERRING_ACTION\n");
|
||||
m = lws_buflist_append_segment(
|
||||
&h2n->swsi->buflist_rxflow,
|
||||
&h2n->swsi->buflist,
|
||||
in - 1, n);
|
||||
if (m < 0)
|
||||
return -1;
|
||||
if (m) {
|
||||
struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
|
||||
lwsl_debug("%s: added %p to rxflow list\n", __func__, wsi);
|
||||
lws_dll_lws_add_front(&h2n->swsi->dll_rxflow, &pt->dll_head_rxflow);
|
||||
lws_dll_lws_add_front(&h2n->swsi->dll_buflist, &pt->dll_head_buflist);
|
||||
}
|
||||
in += n - 1;
|
||||
h2n->inside += n;
|
||||
|
@ -1801,13 +1805,14 @@ lws_h2_parser(struct lws *wsi, unsigned char *in, lws_filepos_t inlen,
|
|||
*/
|
||||
|
||||
n = lws_read_h1(h2n->swsi, in - 1, n);
|
||||
lwsl_notice("%s: lws_read_h1 %d\n", __func__, n);
|
||||
h2n->swsi->outer_will_close = 0;
|
||||
/*
|
||||
* can return 0 in POST body with
|
||||
* content len exhausted somehow.
|
||||
*/
|
||||
if (n < 0 ||
|
||||
(!n && !lws_buflist_next_segment_len(&wsi->buflist_rxflow, NULL))) {
|
||||
(!n && !lws_buflist_next_segment_len(&wsi->buflist, NULL))) {
|
||||
lwsl_info("%s: lws_read_h1 told %d %d / %d\n",
|
||||
__func__, n, h2n->count, h2n->length);
|
||||
in += h2n->length - h2n->count;
|
||||
|
|
|
@ -83,20 +83,24 @@ const struct http2_settings lws_h2_stock_settings = { {
|
|||
/* H2SET_ENABLE_CONNECT_PROTOCOL */ 1,
|
||||
}};
|
||||
|
||||
/*
|
||||
* The wsi at this level is the network wsi
|
||||
*/
|
||||
|
||||
static int
|
||||
rops_handle_POLLIN_h2(struct lws_context_per_thread *pt, struct lws *wsi,
|
||||
struct lws_pollfd *pollfd)
|
||||
{
|
||||
struct lws_tokens eff_buf;
|
||||
struct lws_tokens ebuf;
|
||||
unsigned int pending = 0;
|
||||
char draining_flow = 0;
|
||||
char buffered = 0;
|
||||
struct lws *wsi1;
|
||||
int n, m;
|
||||
|
||||
#ifdef LWS_WITH_CGI
|
||||
if (wsi->cgi && (pollfd->revents & LWS_POLLOUT)) {
|
||||
if (lws_handle_POLLOUT_event(wsi, pollfd))
|
||||
return LWS_HPI_RET_CLOSE_HANDLED;
|
||||
return LWS_HPI_RET_PLEASE_CLOSE_ME;
|
||||
|
||||
return LWS_HPI_RET_HANDLED;
|
||||
}
|
||||
|
@ -111,7 +115,7 @@ rops_handle_POLLIN_h2(struct lws_context_per_thread *pt, struct lws *wsi,
|
|||
*/
|
||||
if (lwsi_state(wsi) == LRS_PRE_WS_SERVING_ACCEPT) {
|
||||
wsi->socket_is_permanently_unusable = 1;
|
||||
return LWS_HPI_RET_CLOSE_HANDLED;
|
||||
return LWS_HPI_RET_PLEASE_CLOSE_ME;
|
||||
}
|
||||
|
||||
if (lwsi_state(wsi) == LRS_WAITING_CONNECT) {
|
||||
|
@ -119,12 +123,12 @@ rops_handle_POLLIN_h2(struct lws_context_per_thread *pt, struct lws *wsi,
|
|||
if ((pollfd->revents & LWS_POLLOUT) &&
|
||||
lws_handle_POLLOUT_event(wsi, pollfd)) {
|
||||
lwsl_debug("POLLOUT event closed it\n");
|
||||
return LWS_HPI_RET_CLOSE_HANDLED;
|
||||
return LWS_HPI_RET_PLEASE_CLOSE_ME;
|
||||
}
|
||||
|
||||
n = lws_client_socket_service(wsi, pollfd, NULL);
|
||||
if (n)
|
||||
return LWS_HPI_RET_DIE;
|
||||
return LWS_HPI_RET_WSI_ALREADY_DIED;
|
||||
#endif
|
||||
return LWS_HPI_RET_HANDLED;
|
||||
}
|
||||
|
@ -139,7 +143,7 @@ rops_handle_POLLIN_h2(struct lws_context_per_thread *pt, struct lws *wsi,
|
|||
/* the write failed... it's had it */
|
||||
wsi->socket_is_permanently_unusable = 1;
|
||||
|
||||
return LWS_HPI_RET_CLOSE_HANDLED;
|
||||
return LWS_HPI_RET_PLEASE_CLOSE_ME;
|
||||
}
|
||||
|
||||
if (lwsi_state(wsi) == LRS_RETURNED_CLOSE ||
|
||||
|
@ -155,18 +159,11 @@ rops_handle_POLLIN_h2(struct lws_context_per_thread *pt, struct lws *wsi,
|
|||
wsi->ws->tx_draining_ext = 0;
|
||||
}
|
||||
|
||||
#if 0 /* not so for h2 */
|
||||
if (lws_is_flowcontrolled(wsi))
|
||||
/* We cannot deal with any kind of new RX because we are
|
||||
* RX-flowcontrolled.
|
||||
*/
|
||||
return LWS_HPI_RET_HANDLED;
|
||||
#endif
|
||||
|
||||
if (wsi->http2_substream || wsi->upgraded_to_http2) {
|
||||
wsi1 = lws_get_network_wsi(wsi);
|
||||
if (wsi1 && wsi1->trunc_len)
|
||||
/* We cannot deal with any kind of new RX
|
||||
/*
|
||||
* We cannot deal with any kind of new RX
|
||||
* because we are dealing with a partial send
|
||||
* (new RX may trigger new http_action() that
|
||||
* expect to be able to send)
|
||||
|
@ -174,108 +171,47 @@ rops_handle_POLLIN_h2(struct lws_context_per_thread *pt, struct lws *wsi,
|
|||
return LWS_HPI_RET_HANDLED;
|
||||
}
|
||||
|
||||
/* 3: RX Flowcontrol buffer / h2 rx scratch needs to be drained
|
||||
*/
|
||||
read:
|
||||
/* 3: network wsi buflist needs to be drained */
|
||||
|
||||
eff_buf.token_len = (int)lws_buflist_next_segment_len(&wsi->buflist_rxflow,
|
||||
(uint8_t **)&eff_buf.token);
|
||||
if (eff_buf.token_len) {
|
||||
lwsl_info("draining rxflow (len %d)\n", eff_buf.token_len);
|
||||
draining_flow = 1;
|
||||
// lws_buflist_describe(&wsi->buflist, wsi);
|
||||
|
||||
ebuf.len = (int)lws_buflist_next_segment_len(&wsi->buflist,
|
||||
(uint8_t **)&ebuf.token);
|
||||
if (ebuf.len) {
|
||||
lwsl_info("draining buflist (len %d)\n", ebuf.len);
|
||||
buffered = 1;
|
||||
goto drain;
|
||||
}
|
||||
|
||||
if (wsi->upgraded_to_http2) {
|
||||
struct lws_h2_netconn *h2n = wsi->h2.h2n;
|
||||
|
||||
if (h2n->rx_scratch_len) {
|
||||
lwsl_info("%s: %p: h2 rx pos = %d len = %d\n",
|
||||
__func__, wsi, h2n->rx_scratch_pos,
|
||||
h2n->rx_scratch_len);
|
||||
eff_buf.token = (char *)h2n->rx_scratch +
|
||||
h2n->rx_scratch_pos;
|
||||
eff_buf.token_len = h2n->rx_scratch_len;
|
||||
|
||||
h2n->rx_scratch_len = 0;
|
||||
goto drain;
|
||||
}
|
||||
}
|
||||
|
||||
/* 4: any incoming (or ah-stashed incoming rx) data ready?
|
||||
* notice if rx flow going off raced poll(), rx flow wins
|
||||
*/
|
||||
|
||||
if (!(pollfd->revents & pollfd->events & LWS_POLLIN))
|
||||
if (!lws_ssl_pending(wsi) &&
|
||||
!(pollfd->revents & pollfd->events & LWS_POLLIN))
|
||||
return LWS_HPI_RET_HANDLED;
|
||||
|
||||
read:
|
||||
if (lws_is_flowcontrolled(wsi)) {
|
||||
lwsl_info("%s: %p should be rxflow (bm 0x%x)..\n",
|
||||
__func__, wsi, wsi->rxflow_bitmap);
|
||||
return LWS_HPI_RET_HANDLED;
|
||||
}
|
||||
if (!(lwsi_role_client(wsi) &&
|
||||
(lwsi_state(wsi) != LRS_ESTABLISHED &&
|
||||
lwsi_state(wsi) != LRS_H2_WAITING_TO_SEND_HEADERS))) {
|
||||
|
||||
if (wsi->ah && wsi->ah->rxlen - wsi->ah->rxpos) {
|
||||
lwsl_info("%s: %p: inherited ah rx %d\n", __func__,
|
||||
wsi, wsi->ah->rxlen - wsi->ah->rxpos);
|
||||
eff_buf.token_len = wsi->ah->rxlen - wsi->ah->rxpos;
|
||||
eff_buf.token = (char *)wsi->ah->rx + wsi->ah->rxpos;
|
||||
} else {
|
||||
if (!(lwsi_role_client(wsi) &&
|
||||
(lwsi_state(wsi) != LRS_ESTABLISHED &&
|
||||
lwsi_state(wsi) != LRS_H2_WAITING_TO_SEND_HEADERS))) {
|
||||
/*
|
||||
* extension may not consume everything
|
||||
* (eg, pmd may be constrained
|
||||
* as to what it can output...) has to go in
|
||||
* per-wsi rx buf area.
|
||||
* Otherwise in large temp serv_buf area.
|
||||
*/
|
||||
|
||||
if (wsi->upgraded_to_http2) {
|
||||
if (!wsi->h2.h2n->rx_scratch) {
|
||||
wsi->h2.h2n->rx_scratch =
|
||||
lws_malloc(
|
||||
wsi->vhost->h2_rx_scratch_size,
|
||||
"h2 rx scratch");
|
||||
if (!wsi->h2.h2n->rx_scratch)
|
||||
return LWS_HPI_RET_CLOSE_HANDLED;
|
||||
}
|
||||
eff_buf.token = wsi->h2.h2n->rx_scratch;
|
||||
eff_buf.token_len = wsi->vhost->h2_rx_scratch_size;
|
||||
} else {
|
||||
eff_buf.token = (char *)pt->serv_buf;
|
||||
eff_buf.token_len =
|
||||
wsi->context->pt_serv_buf_size;
|
||||
|
||||
if ((unsigned int)eff_buf.token_len >
|
||||
wsi->context->pt_serv_buf_size)
|
||||
eff_buf.token_len =
|
||||
wsi->context->pt_serv_buf_size;
|
||||
}
|
||||
|
||||
if ((int)pending > eff_buf.token_len)
|
||||
pending = eff_buf.token_len;
|
||||
|
||||
eff_buf.token_len = lws_ssl_capable_read(wsi,
|
||||
(unsigned char *)eff_buf.token,
|
||||
pending ? (int)pending :
|
||||
eff_buf.token_len);
|
||||
switch (eff_buf.token_len) {
|
||||
case 0:
|
||||
lwsl_info("%s: zero length read\n",
|
||||
__func__);
|
||||
return LWS_HPI_RET_CLOSE_HANDLED;
|
||||
case LWS_SSL_CAPABLE_MORE_SERVICE:
|
||||
lwsl_info("SSL Capable more service\n");
|
||||
return LWS_HPI_RET_HANDLED;
|
||||
case LWS_SSL_CAPABLE_ERROR:
|
||||
lwsl_info("%s: LWS_SSL_CAPABLE_ERROR\n",
|
||||
__func__);
|
||||
return LWS_HPI_RET_CLOSE_HANDLED;
|
||||
}
|
||||
// lwsl_notice("Actual RX %d\n", eff_buf.token_len);
|
||||
ebuf.token = (char *)pt->serv_buf;
|
||||
ebuf.len = lws_ssl_capable_read(wsi,
|
||||
(unsigned char *)ebuf.token,
|
||||
wsi->context->pt_serv_buf_size);
|
||||
switch (ebuf.len) {
|
||||
case 0:
|
||||
lwsl_info("%s: zero length read\n",
|
||||
__func__);
|
||||
return LWS_HPI_RET_PLEASE_CLOSE_ME;
|
||||
case LWS_SSL_CAPABLE_MORE_SERVICE:
|
||||
lwsl_info("SSL Capable more service\n");
|
||||
return LWS_HPI_RET_HANDLED;
|
||||
case LWS_SSL_CAPABLE_ERROR:
|
||||
lwsl_info("%s: LWS_SSL_CAPABLE_ERROR\n",
|
||||
__func__);
|
||||
return LWS_HPI_RET_PLEASE_CLOSE_ME;
|
||||
}
|
||||
|
||||
// lwsl_notice("Actual RX %d\n", ebuf.len);
|
||||
// lwsl_hexdump_notice(ebuf.token, 64);
|
||||
}
|
||||
|
||||
drain:
|
||||
|
@ -305,7 +241,7 @@ drain:
|
|||
wsi, LWS_CALLBACK_RECEIVE_CLIENT_HTTP,
|
||||
wsi->user_space, NULL, 0)) {
|
||||
lwsl_info("RECEIVE_CLIENT_HTTP closed it\n");
|
||||
return LWS_HPI_RET_CLOSE_HANDLED;
|
||||
return LWS_HPI_RET_PLEASE_CLOSE_ME;
|
||||
}
|
||||
|
||||
return LWS_HPI_RET_HANDLED;
|
||||
|
@ -314,64 +250,58 @@ drain:
|
|||
|
||||
/* service incoming data */
|
||||
|
||||
if (eff_buf.token_len) {
|
||||
if (lwsi_role_h2(wsi) && lwsi_state(wsi) != LRS_BODY)
|
||||
n = lws_read_h2(wsi, (unsigned char *)eff_buf.token,
|
||||
eff_buf.token_len);
|
||||
else
|
||||
n = lws_read_h1(wsi, (unsigned char *)eff_buf.token,
|
||||
eff_buf.token_len);
|
||||
if (ebuf.len) {
|
||||
n = 0;
|
||||
if (lwsi_role_h2(wsi) && lwsi_state(wsi) != LRS_BODY) {
|
||||
n = lws_read_h2(wsi, (unsigned char *)ebuf.token,
|
||||
ebuf.len);
|
||||
// lwsl_notice("h2 n = %d\n", n);
|
||||
} else {
|
||||
n = lws_read_h1(wsi, (unsigned char *)ebuf.token,
|
||||
ebuf.len);
|
||||
// lwsl_notice("h1 n = %d\n", n);
|
||||
}
|
||||
|
||||
if (n < 0) {
|
||||
/* we closed wsi */
|
||||
n = 0;
|
||||
return LWS_HPI_RET_DIE;
|
||||
return LWS_HPI_RET_WSI_ALREADY_DIED;
|
||||
}
|
||||
|
||||
if (draining_flow) {
|
||||
m = lws_buflist_use_segment(&wsi->buflist_rxflow, n);
|
||||
if (buffered) {
|
||||
m = lws_buflist_use_segment(&wsi->buflist, n);
|
||||
lwsl_info("%s: draining rxflow: used %d, next %d\n",
|
||||
__func__, n, m);
|
||||
__func__, n, m);
|
||||
if (!m) {
|
||||
lwsl_notice("%s: removed wsi %p from rxflow list\n",
|
||||
lwsl_notice("%s: removed %p from dll_buflist\n",
|
||||
__func__, wsi);
|
||||
lws_dll_lws_remove(&wsi->dll_rxflow);
|
||||
lws_dll_lws_remove(&wsi->dll_buflist);
|
||||
}
|
||||
}
|
||||
} else
|
||||
if (n != ebuf.len &&
|
||||
lws_buflist_append_segment(&wsi->buflist,
|
||||
(uint8_t *)ebuf.token + n,
|
||||
ebuf.len - n) < 0)
|
||||
return LWS_HPI_RET_PLEASE_CLOSE_ME;
|
||||
}
|
||||
|
||||
eff_buf.token = NULL;
|
||||
eff_buf.token_len = 0;
|
||||
// lws_buflist_describe(&wsi->buflist, wsi);
|
||||
|
||||
if (wsi->ah
|
||||
#if !defined(LWS_NO_CLIENT)
|
||||
&& !wsi->client_h2_alpn
|
||||
#endif
|
||||
) {
|
||||
lwsl_info("%s: %p: detaching ah\n", __func__, wsi);
|
||||
lws_header_table_force_to_detachable_state(wsi);
|
||||
)
|
||||
lws_header_table_detach(wsi, 0);
|
||||
}
|
||||
|
||||
pending = lws_ssl_pending(wsi);
|
||||
if (pending) {
|
||||
pending = pending > wsi->context->pt_serv_buf_size ?
|
||||
wsi->context->pt_serv_buf_size : pending;
|
||||
lwsl_err("going around\n");
|
||||
goto read;
|
||||
}
|
||||
|
||||
if (draining_flow && /* were draining, now nothing left */
|
||||
!lws_buflist_next_segment_len(&wsi->buflist_rxflow, NULL)) {
|
||||
lwsl_info("%s: %p flow buf: drained\n", __func__, wsi);
|
||||
/* having drained the rxflow buffer, can rearm POLLIN */
|
||||
#ifdef LWS_NO_SERVER
|
||||
n =
|
||||
#endif
|
||||
__lws_rx_flow_control(wsi);
|
||||
/* n ignored, needed for NO_SERVER case */
|
||||
}
|
||||
|
||||
/* n = 0 */
|
||||
return LWS_HPI_RET_HANDLED;
|
||||
}
|
||||
|
||||
|
@ -418,50 +348,6 @@ int rops_handle_POLLOUT_h2(struct lws *wsi)
|
|||
return LWS_HP_RET_USER_SERVICE;
|
||||
}
|
||||
|
||||
static int
|
||||
rops_service_flag_pending_h2(struct lws_context *context, int tsi)
|
||||
{
|
||||
/* h1 will deal with this if both h1 and h2 enabled */
|
||||
|
||||
#if !defined(LWS_ROLE_H1)
|
||||
struct lws_context_per_thread *pt = &context->pt[tsi];
|
||||
struct allocated_headers *ah;
|
||||
int forced = 0;
|
||||
#endif
|
||||
|
||||
/* POLLIN faking (the pt lock is taken by the parent) */
|
||||
|
||||
|
||||
|
||||
#if !defined(LWS_ROLE_H1)
|
||||
/*
|
||||
* 3) For any wsi who have an ah with pending RX who did not
|
||||
* complete their current headers, and are not flowcontrolled,
|
||||
* fake their POLLIN status so they will be able to drain the
|
||||
* rx buffered in the ah
|
||||
*/
|
||||
ah = pt->ah_list;
|
||||
while (ah) {
|
||||
if ((ah->rxpos != ah->rxlen &&
|
||||
!ah->wsi->hdr_parsing_completed) || ah->wsi->preamble_rx) {
|
||||
pt->fds[ah->wsi->position_in_fds_table].revents |=
|
||||
pt->fds[ah->wsi->position_in_fds_table].events &
|
||||
LWS_POLLIN;
|
||||
if (pt->fds[ah->wsi->position_in_fds_table].revents &
|
||||
LWS_POLLIN) {
|
||||
forced = 1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
ah = ah->next;
|
||||
}
|
||||
|
||||
return forced;
|
||||
#else
|
||||
return 0;
|
||||
#endif
|
||||
}
|
||||
|
||||
static int
|
||||
rops_write_role_protocol_h2(struct lws *wsi, unsigned char *buf, size_t len,
|
||||
enum lws_write_protocol *wp)
|
||||
|
@ -585,11 +471,6 @@ static int
|
|||
rops_init_vhost_h2(struct lws_vhost *vh,
|
||||
struct lws_context_creation_info *info)
|
||||
{
|
||||
if (!info->h2_rx_scratch_size)
|
||||
vh->h2_rx_scratch_size = LWS_H2_RX_SCRATCH_SIZE;
|
||||
else
|
||||
vh->h2_rx_scratch_size = info->h2_rx_scratch_size;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -708,10 +589,6 @@ rops_close_kill_connection_h2(struct lws *wsi, enum lws_close_status reason)
|
|||
PENDING_TIMEOUT_HTTP_KEEPALIVE_IDLE, 31);
|
||||
}
|
||||
|
||||
if (wsi->upgraded_to_http2 && wsi->h2.h2n &&
|
||||
wsi->h2.h2n->rx_scratch)
|
||||
lws_free_set_NULL(wsi->h2.h2n->rx_scratch);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -1086,31 +963,6 @@ next_child:
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
rops_rxflow_cache_h2(struct lws *wsi, unsigned char *buf, int n, int len)
|
||||
{
|
||||
struct lws_h2_netconn *h2n;
|
||||
|
||||
if (!wsi->upgraded_to_http2)
|
||||
return 0; /* parent interprets as continue */
|
||||
|
||||
h2n = wsi->h2.h2n;
|
||||
|
||||
assert(h2n->rx_scratch);
|
||||
buf += n;
|
||||
len -= n;
|
||||
assert ((char *)buf >= (char *)h2n->rx_scratch &&
|
||||
(char *)&buf[len] <=
|
||||
(char *)&h2n->rx_scratch[wsi->vhost->h2_rx_scratch_size]);
|
||||
|
||||
h2n->rx_scratch_pos = lws_ptr_diff(buf, h2n->rx_scratch);
|
||||
h2n->rx_scratch_len = len;
|
||||
|
||||
lwsl_info("%s: %p: pausing h2 rx_scratch\n", __func__, wsi);
|
||||
|
||||
return 1; /* parent interprets as return 0 */
|
||||
}
|
||||
|
||||
static struct lws *
|
||||
rops_encapsulation_parent_h2(struct lws *wsi)
|
||||
{
|
||||
|
@ -1172,14 +1024,13 @@ struct lws_role_ops role_ops_h2 = {
|
|||
/* init_context */ rops_init_context_h2,
|
||||
/* init_vhost */ rops_init_vhost_h2,
|
||||
/* periodic_checks */ NULL,
|
||||
/* service_flag_pending */ rops_service_flag_pending_h2,
|
||||
/* service_flag_pending */ NULL,
|
||||
/* handle_POLLIN */ rops_handle_POLLIN_h2,
|
||||
/* handle_POLLOUT */ rops_handle_POLLOUT_h2,
|
||||
/* perform_user_POLLOUT */ rops_perform_user_POLLOUT_h2,
|
||||
/* callback_on_writable */ rops_callback_on_writable_h2,
|
||||
/* tx_credit */ rops_tx_credit_h2,
|
||||
/* write_role_protocol */ rops_write_role_protocol_h2,
|
||||
/* rxflow_cache */ rops_rxflow_cache_h2,
|
||||
/* encapsulation_parent */ rops_encapsulation_parent_h2,
|
||||
/* alpn_negotiated */ rops_alpn_negotiated_h2,
|
||||
/* close_via_role_protocol */ NULL,
|
||||
|
|
|
@ -551,9 +551,6 @@ send_hs:
|
|||
return wsi;
|
||||
|
||||
oom4:
|
||||
/* we're closing, losing some rx is OK */
|
||||
lws_header_table_force_to_detachable_state(wsi);
|
||||
|
||||
if (lwsi_role_client(wsi) && lwsi_state_est(wsi)) {
|
||||
wsi->protocol->callback(wsi,
|
||||
LWS_CALLBACK_CLIENT_CONNECTION_ERROR,
|
||||
|
|
|
@ -35,24 +35,16 @@ struct lws *
|
|||
lws_client_wsi_effective(struct lws *wsi)
|
||||
{
|
||||
struct lws *wsi_eff = wsi;
|
||||
struct lws_dll_lws *d;
|
||||
|
||||
if (!wsi->transaction_from_pipeline_queue ||
|
||||
!wsi->dll_client_transaction_queue_head.next)
|
||||
return wsi;
|
||||
|
||||
/*
|
||||
* The head is the last queued transaction... so
|
||||
* the guy we are fulfilling here is the tail
|
||||
*/
|
||||
|
||||
lws_vhost_lock(wsi->vhost);
|
||||
lws_start_foreach_dll_safe(struct lws_dll_lws *, d, d1,
|
||||
wsi->dll_client_transaction_queue_head.next) {
|
||||
if (d->next == NULL)
|
||||
wsi_eff = lws_container_of(d, struct lws,
|
||||
d = wsi->dll_client_transaction_queue_head.next;
|
||||
if (d)
|
||||
wsi_eff = lws_container_of(d, struct lws,
|
||||
dll_client_transaction_queue);
|
||||
} lws_end_foreach_dll_safe(d, d1);
|
||||
lws_vhost_unlock(wsi->vhost);
|
||||
|
||||
return wsi_eff;
|
||||
}
|
||||
|
@ -103,6 +95,7 @@ lws_client_socket_service(struct lws *wsi, struct lws_pollfd *pollfd,
|
|||
if ((pollfd->revents & LWS_POLLOUT) &&
|
||||
wsi->keepalive_active &&
|
||||
wsi->dll_client_transaction_queue_head.next) {
|
||||
int found = 0;
|
||||
|
||||
lwsl_debug("%s: pollout HANDSHAKE2\n", __func__);
|
||||
|
||||
|
@ -113,6 +106,7 @@ lws_client_socket_service(struct lws *wsi, struct lws_pollfd *pollfd,
|
|||
struct lws *w = lws_container_of(d, struct lws,
|
||||
dll_client_transaction_queue);
|
||||
|
||||
lwsl_notice("%s: %p states 0x%x\n", __func__, w, w->wsistate);
|
||||
if (lwsi_state(w) == LRS_H1C_ISSUE_HANDSHAKE2) {
|
||||
/*
|
||||
* pollfd has the master sockfd in it... we
|
||||
|
@ -121,11 +115,15 @@ lws_client_socket_service(struct lws *wsi, struct lws_pollfd *pollfd,
|
|||
*/
|
||||
lws_client_socket_service(w, pollfd, wsi);
|
||||
lws_callback_on_writable(wsi);
|
||||
found = 1;
|
||||
break;
|
||||
}
|
||||
} lws_end_foreach_dll_safe(d, d1);
|
||||
lws_vhost_unlock(wsi->vhost);
|
||||
|
||||
if (!found)
|
||||
lwsl_err("%s: didn't find anything in HS2\n", __func__);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -375,7 +373,7 @@ start_ws_handshake:
|
|||
lws_latency_pre(context, wsi);
|
||||
|
||||
w = lws_client_wsi_master(wsi);
|
||||
lwsl_debug("%s: HANDSHAKE2: %p: sending headers on %p (wsistate 0x%x 0x%x)\n",
|
||||
lwsl_info("%s: HANDSHAKE2: %p: sending headers on %p (wsistate 0x%x 0x%x)\n",
|
||||
__func__, wsi, w, wsi->wsistate, w->wsistate);
|
||||
|
||||
n = lws_ssl_capable_write(w, (unsigned char *)sb, (int)(p - sb));
|
||||
|
@ -908,11 +906,9 @@ lws_client_interpret_server_handshake(struct lws *wsi)
|
|||
* queued on him can drop it now though.
|
||||
*/
|
||||
|
||||
if (w != wsi) {
|
||||
if (w != wsi)
|
||||
/* free up parsing allocations for queued guy */
|
||||
lws_header_table_force_to_detachable_state(w);
|
||||
lws_header_table_detach(w, 0);
|
||||
}
|
||||
|
||||
lwsl_info("%s: client connection up\n", __func__);
|
||||
|
||||
|
@ -992,7 +988,6 @@ lws_generate_client_handshake(struct lws *wsi, char *pkt)
|
|||
wsi->user_space, NULL, 0))
|
||||
return NULL;
|
||||
|
||||
lws_header_table_force_to_detachable_state(wsi);
|
||||
lws_role_transition(wsi, 0, LRS_ESTABLISHED, &role_ops_raw_skt);
|
||||
lws_header_table_detach(wsi, 1);
|
||||
|
||||
|
|
|
@ -111,30 +111,19 @@ __lws_header_table_reset(struct lws *wsi, int autoservice)
|
|||
|
||||
time(&ah->assigned);
|
||||
|
||||
/*
|
||||
* if we inherited pending rx (from socket adoption deferred
|
||||
* processing), apply and free it.
|
||||
*/
|
||||
if (wsi->preamble_rx) {
|
||||
memcpy(ah->rx, wsi->preamble_rx, wsi->preamble_rx_len);
|
||||
ah->rxlen = wsi->preamble_rx_len;
|
||||
lws_free_set_NULL(wsi->preamble_rx);
|
||||
wsi->preamble_rx_len = 0;
|
||||
ah->rxpos = 0;
|
||||
if (lws_buflist_next_segment_len(&wsi->buflist, NULL) &&
|
||||
autoservice) {
|
||||
lwsl_debug("%s: service on readbuf ah\n", __func__);
|
||||
|
||||
if (autoservice) {
|
||||
lwsl_debug("%s: service on readbuf ah\n", __func__);
|
||||
|
||||
pt = &wsi->context->pt[(int)wsi->tsi];
|
||||
/*
|
||||
* Unlike a normal connect, we have the headers already
|
||||
* (or the first part of them anyway)
|
||||
*/
|
||||
pfd = &pt->fds[wsi->position_in_fds_table];
|
||||
pfd->revents |= LWS_POLLIN;
|
||||
lwsl_err("%s: calling service\n", __func__);
|
||||
lws_service_fd_tsi(wsi->context, pfd, wsi->tsi);
|
||||
}
|
||||
pt = &wsi->context->pt[(int)wsi->tsi];
|
||||
/*
|
||||
* Unlike a normal connect, we have the headers already
|
||||
* (or the first part of them anyway)
|
||||
*/
|
||||
pfd = &pt->fds[wsi->position_in_fds_table];
|
||||
pfd->revents |= LWS_POLLIN;
|
||||
lwsl_err("%s: calling service\n", __func__);
|
||||
lws_service_fd_tsi(wsi->context, pfd, wsi->tsi);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -262,11 +251,6 @@ lws_header_table_attach(struct lws *wsi, int autoservice)
|
|||
(void *)wsi, (void *)wsi->ah, pt->ah_count_in_use);
|
||||
|
||||
reset:
|
||||
|
||||
/* and reset the rx state */
|
||||
wsi->ah->rxpos = 0;
|
||||
wsi->ah->rxlen = 0;
|
||||
|
||||
__lws_header_table_reset(wsi, autoservice);
|
||||
|
||||
lws_pt_unlock(pt);
|
||||
|
@ -288,24 +272,6 @@ bail:
|
|||
return 1;
|
||||
}
|
||||
|
||||
void
|
||||
lws_header_table_force_to_detachable_state(struct lws *wsi)
|
||||
{
|
||||
if (wsi->ah) {
|
||||
wsi->ah->rxpos = -1;
|
||||
wsi->ah->rxlen = -1;
|
||||
wsi->hdr_parsing_completed = 1;
|
||||
}
|
||||
}
|
||||
|
||||
int
|
||||
lws_header_table_is_in_detachable_state(struct lws *wsi)
|
||||
{
|
||||
struct allocated_headers *ah = wsi->ah;
|
||||
|
||||
return ah && ah->rxpos == ah->rxlen && wsi->hdr_parsing_completed;
|
||||
}
|
||||
|
||||
int __lws_header_table_detach(struct lws *wsi, int autoservice)
|
||||
{
|
||||
struct lws_context *context = wsi->context;
|
||||
|
@ -324,19 +290,6 @@ int __lws_header_table_detach(struct lws *wsi, int autoservice)
|
|||
(void *)wsi, (void *)ah, wsi->tsi,
|
||||
pt->ah_count_in_use);
|
||||
|
||||
if (wsi->preamble_rx) {
|
||||
lws_free_set_NULL(wsi->preamble_rx);
|
||||
wsi->preamble_rx_len = 0;
|
||||
}
|
||||
|
||||
/* may not be detached while he still has unprocessed rx */
|
||||
if (!lws_header_table_is_in_detachable_state(wsi)) {
|
||||
lwsl_err("%s: %p: CANNOT DETACH rxpos:%d, rxlen:%d, "
|
||||
"wsi->hdr_parsing_completed = %d\n", __func__, wsi,
|
||||
ah->rxpos, ah->rxlen, wsi->hdr_parsing_completed);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* we did have an ah attached */
|
||||
time(&now);
|
||||
if (ah->assigned && now - ah->assigned > 3) {
|
||||
|
@ -344,11 +297,10 @@ int __lws_header_table_detach(struct lws *wsi, int autoservice)
|
|||
* we're detaching the ah, but it was held an
|
||||
* unreasonably long time
|
||||
*/
|
||||
lwsl_debug("%s: wsi %p: ah held %ds, "
|
||||
"ah.rxpos %d, ah.rxlen %d, role/state 0x%x 0x%x,"
|
||||
lwsl_debug("%s: wsi %p: ah held %ds, role/state 0x%x 0x%x,"
|
||||
"\n", __func__, wsi,
|
||||
(int)(now - ah->assigned),
|
||||
ah->rxpos, ah->rxlen, lwsi_role(wsi), lwsi_state(wsi));
|
||||
lwsi_role(wsi), lwsi_state(wsi));
|
||||
}
|
||||
|
||||
ah->assigned = 0;
|
||||
|
@ -403,9 +355,6 @@ int __lws_header_table_detach(struct lws *wsi, int autoservice)
|
|||
wsi->ah = ah;
|
||||
ah->wsi = wsi; /* new owner */
|
||||
|
||||
/* and reset the rx state */
|
||||
ah->rxpos = 0;
|
||||
ah->rxlen = 0;
|
||||
__lws_header_table_reset(wsi, autoservice);
|
||||
#if defined(LWS_WITH_PEER_LIMITS)
|
||||
if (wsi->peer)
|
||||
|
|
|
@ -1324,19 +1324,40 @@ deal_body:
|
|||
(long long)wsi->http.rx_content_length,
|
||||
wsi->upgraded_to_http2, wsi->http2_substream);
|
||||
if (wsi->http.rx_content_length > 0) {
|
||||
struct lws_tokens ebuf;
|
||||
int m;
|
||||
|
||||
lwsi_set_state(wsi, LRS_BODY);
|
||||
lwsl_info("%s: %p: LRS_BODY state set (0x%x)\n",
|
||||
__func__, wsi, wsi->wsistate);
|
||||
wsi->http.rx_content_remain =
|
||||
wsi->http.rx_content_length;
|
||||
|
||||
/*
|
||||
* At this point we have transitioned from deferred
|
||||
* action to expecting BODY on the stream wsi, if it's
|
||||
* in a bundle like h2. So if the stream wsi has its
|
||||
* own buflist, we need to deal with that first.
|
||||
*/
|
||||
|
||||
while (1) {
|
||||
ebuf.len = lws_buflist_next_segment_len(
|
||||
&wsi->buflist, (uint8_t **)&ebuf.token);
|
||||
if (!ebuf.len)
|
||||
break;
|
||||
lwsl_notice("%s: consuming %d\n", __func__, (int)ebuf.len);
|
||||
m = lws_read_h1(wsi, (uint8_t *)ebuf.token, ebuf.len);
|
||||
if (m < 0)
|
||||
return -1;
|
||||
|
||||
lws_buflist_aware_consume(wsi, &ebuf, m, 1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
bail_nuke_ah:
|
||||
/* we're closing, losing some rx is OK */
|
||||
lws_header_table_force_to_detachable_state(wsi);
|
||||
lws_header_table_detach(wsi, 1);
|
||||
|
||||
return 1;
|
||||
|
@ -1400,7 +1421,6 @@ raw_transition:
|
|||
wsi->user_space, NULL, 0))
|
||||
goto bail_nuke_ah;
|
||||
|
||||
lws_header_table_force_to_detachable_state(wsi);
|
||||
lws_role_transition(wsi, 0, LRS_ESTABLISHED,
|
||||
&role_ops_raw_skt);
|
||||
lws_header_table_detach(wsi, 1);
|
||||
|
@ -1598,8 +1618,6 @@ upgrade_ws:
|
|||
|
||||
bail_nuke_ah:
|
||||
/* drop the header info */
|
||||
/* we're closing, losing some rx is OK */
|
||||
lws_header_table_force_to_detachable_state(wsi);
|
||||
lws_header_table_detach(wsi, 1);
|
||||
|
||||
return 1;
|
||||
|
@ -1688,8 +1706,7 @@ lws_http_transaction_completed(struct lws *wsi)
|
|||
int n = NO_PENDING_TIMEOUT;
|
||||
|
||||
lwsl_info("%s: wsi %p\n", __func__, wsi);
|
||||
if (wsi->ah)
|
||||
lwsl_info("ah attached, pos %d, len %d\n", wsi->ah->rxpos, wsi->ah->rxlen);
|
||||
|
||||
lws_access_log(wsi);
|
||||
|
||||
if (!wsi->hdr_parsing_completed) {
|
||||
|
@ -1744,8 +1761,9 @@ lws_http_transaction_completed(struct lws *wsi)
|
|||
* reset the existing header table and keep it.
|
||||
*/
|
||||
if (wsi->ah) {
|
||||
if (wsi->ah->rxpos == wsi->ah->rxlen && !wsi->preamble_rx) {
|
||||
lws_header_table_force_to_detachable_state(wsi);
|
||||
// lws_buflist_describe(&wsi->buflist, wsi);
|
||||
if (!lws_buflist_next_segment_len(&wsi->buflist, NULL)) {
|
||||
lwsl_debug("%s: nothing in buflist so detaching ah\n", __func__);
|
||||
lws_header_table_detach(wsi, 1);
|
||||
#ifdef LWS_WITH_TLS
|
||||
/*
|
||||
|
@ -1764,6 +1782,7 @@ lws_http_transaction_completed(struct lws *wsi)
|
|||
}
|
||||
#endif
|
||||
} else {
|
||||
lwsl_debug("%s: resetting and keeping ah as more pipeline stuff available\n", __func__);
|
||||
lws_header_table_reset(wsi, 0);
|
||||
/*
|
||||
* If we kept the ah, we should restrict the amount
|
||||
|
@ -1780,7 +1799,7 @@ lws_http_transaction_completed(struct lws *wsi)
|
|||
|
||||
lwsi_set_state(wsi, LRS_ESTABLISHED);
|
||||
} else
|
||||
if (wsi->preamble_rx)
|
||||
if (lws_buflist_next_segment_len(&wsi->buflist, NULL))
|
||||
if (lws_header_table_attach(wsi, 0))
|
||||
lwsl_debug("acquired ah\n");
|
||||
|
||||
|
@ -2025,7 +2044,6 @@ static struct lws*
|
|||
adopt_socket_readbuf(struct lws *wsi, const char *readbuf, size_t len)
|
||||
{
|
||||
struct lws_context_per_thread *pt;
|
||||
struct allocated_headers *ah;
|
||||
struct lws_pollfd *pfd;
|
||||
|
||||
if (!wsi)
|
||||
|
@ -2034,10 +2052,8 @@ adopt_socket_readbuf(struct lws *wsi, const char *readbuf, size_t len)
|
|||
if (!readbuf || len == 0)
|
||||
return wsi;
|
||||
|
||||
if (len > sizeof(ah->rx)) {
|
||||
lwsl_err("%s: rx in too big\n", __func__);
|
||||
if (lws_buflist_append_segment(&wsi->buflist, (const uint8_t *)readbuf, len) < 0)
|
||||
goto bail;
|
||||
}
|
||||
|
||||
/*
|
||||
* we can't process the initial read data until we can attach an ah.
|
||||
|
@ -2050,10 +2066,6 @@ adopt_socket_readbuf(struct lws *wsi, const char *readbuf, size_t len)
|
|||
* the ah.
|
||||
*/
|
||||
if (wsi->ah || !lws_header_table_attach(wsi, 0)) {
|
||||
ah = wsi->ah;
|
||||
memcpy(ah->rx, readbuf, len);
|
||||
ah->rxpos = 0;
|
||||
ah->rxlen = (int16_t)len;
|
||||
|
||||
lwsl_notice("%s: calling service on readbuf ah\n", __func__);
|
||||
pt = &wsi->context->pt[(int)wsi->tsi];
|
||||
|
@ -2073,20 +2085,6 @@ adopt_socket_readbuf(struct lws *wsi, const char *readbuf, size_t len)
|
|||
return wsi;
|
||||
}
|
||||
lwsl_err("%s: deferring handling ah\n", __func__);
|
||||
/*
|
||||
* hum if no ah came, we are on the wait list and must defer
|
||||
* dealing with this until the ah arrives.
|
||||
*
|
||||
* later successful lws_header_table_attach() will apply the
|
||||
* below to the rx buffer (via lws_header_table_reset()).
|
||||
*/
|
||||
wsi->preamble_rx = lws_malloc(len, "preamble_rx");
|
||||
if (!wsi->preamble_rx) {
|
||||
lwsl_err("OOM\n");
|
||||
goto bail;
|
||||
}
|
||||
memcpy(wsi->preamble_rx, readbuf, len);
|
||||
wsi->preamble_rx_len = (int)len;
|
||||
|
||||
return wsi;
|
||||
|
||||
|
|
|
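From the application's point of view the adoption path is unchanged; the pre-read bytes now simply land on the wsi buflist instead of the ah rx buffer. A hedged sketch of the caller side, assuming the usual lws_adopt_socket_readbuf() entry point and that context, fd, preread and n already exist in the application:

/* sketch: hand lws an accepted socket plus bytes already read from it */
struct lws *new_wsi = lws_adopt_socket_readbuf(context, fd, preread, n);
if (!new_wsi)
	lwsl_err("adoption failed\n");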
@ -127,12 +127,12 @@ rops_handle_POLLIN_listen(struct lws_context_per_thread *pt, struct lws *wsi,
|
|||
NULL, NULL);
|
||||
if (!cwsi)
|
||||
/* already closed cleanly as necessary */
|
||||
return LWS_HPI_RET_DIE;
|
||||
return LWS_HPI_RET_WSI_ALREADY_DIED;
|
||||
|
||||
if (lws_server_socket_service_ssl(cwsi, accept_fd)) {
|
||||
lws_close_free_wsi(cwsi, LWS_CLOSE_STATUS_NOSTATUS,
|
||||
"listen svc fail");
|
||||
return LWS_HPI_RET_DIE;
|
||||
return LWS_HPI_RET_WSI_ALREADY_DIED;
|
||||
}
|
||||
|
||||
lwsl_info("%s: new wsi %p: wsistate 0x%x, role_ops %s\n",
|
||||
|
@ -163,7 +163,6 @@ struct lws_role_ops role_ops_listen = {
|
|||
/* callback_on_writable */ NULL,
|
||||
/* tx_credit */ NULL,
|
||||
/* write_role_protocol */ NULL,
|
||||
/* rxflow_cache */ NULL,
|
||||
/* encapsulation_parent */ NULL,
|
||||
/* alpn_negotiated */ NULL,
|
||||
/* close_via_role_protocol */ NULL,
|
||||
|
|
|
@ -37,7 +37,7 @@ rops_handle_POLLIN_pipe(struct lws_context_per_thread *pt, struct lws *wsi,
|
|||
n = read(wsi->desc.sockfd, s, sizeof(s));
|
||||
(void)n;
|
||||
if (n < 0)
|
||||
return LWS_HPI_RET_CLOSE_HANDLED;
|
||||
return LWS_HPI_RET_PLEASE_CLOSE_ME;
|
||||
#endif
|
||||
/*
|
||||
* the poll() wait, or the event loop for libuv etc is a
|
||||
|
@ -48,7 +48,7 @@ rops_handle_POLLIN_pipe(struct lws_context_per_thread *pt, struct lws *wsi,
|
|||
if (lws_broadcast(wsi->context, LWS_CALLBACK_EVENT_WAIT_CANCELLED,
|
||||
NULL, 0)) {
|
||||
lwsl_info("closed in event cancel\n");
|
||||
return LWS_HPI_RET_CLOSE_HANDLED;
|
||||
return LWS_HPI_RET_PLEASE_CLOSE_ME;
|
||||
}
|
||||
|
||||
return LWS_HPI_RET_HANDLED;
|
||||
|
@ -68,7 +68,6 @@ struct lws_role_ops role_ops_pipe = {
|
|||
/* callback_on_writable */ NULL,
|
||||
/* tx_credit */ NULL,
|
||||
/* write_role_protocol */ NULL,
|
||||
/* rxflow_cache */ NULL,
|
||||
/* encapsulation_parent */ NULL,
|
||||
/* alpn_negotiated */ NULL,
|
||||
/* close_via_role_protocol */ NULL,
|
||||
|
|
|
@ -25,7 +25,8 @@ static int
|
|||
rops_handle_POLLIN_raw_skt(struct lws_context_per_thread *pt, struct lws *wsi,
|
||||
struct lws_pollfd *pollfd)
|
||||
{
|
||||
int len, n;
|
||||
struct lws_tokens ebuf;
|
||||
int n, buffered;
|
||||
|
||||
/* pending truncated sends have uber priority */
|
||||
|
||||
|
@ -50,21 +51,33 @@ rops_handle_POLLIN_raw_skt(struct lws_context_per_thread *pt, struct lws *wsi,
|
|||
!(wsi->favoured_pollin &&
|
||||
(pollfd->revents & pollfd->events & LWS_POLLOUT))) {
|
||||
|
||||
len = lws_read_or_use_preamble(pt, wsi);
|
||||
if (len < 0)
|
||||
goto fail;
|
||||
|
||||
if (!len)
|
||||
buffered = lws_buflist_aware_read(pt, wsi, &ebuf);
|
||||
switch (ebuf.len) {
|
||||
case 0:
|
||||
lwsl_info("%s: read 0 len a\n",
|
||||
__func__);
|
||||
wsi->seen_zero_length_recv = 1;
|
||||
lws_change_pollfd(wsi, LWS_POLLIN, 0);
|
||||
goto try_pollout;
|
||||
//goto fail;
|
||||
|
||||
case LWS_SSL_CAPABLE_ERROR:
|
||||
goto fail;
|
||||
case LWS_SSL_CAPABLE_MORE_SERVICE:
|
||||
goto try_pollout;
|
||||
}
|
||||
|
||||
n = user_callback_handle_rxflow(wsi->protocol->callback,
|
||||
wsi, LWS_CALLBACK_RAW_RX,
|
||||
wsi->user_space, pt->serv_buf,
|
||||
len);
|
||||
wsi->user_space, ebuf.token,
|
||||
ebuf.len);
|
||||
if (n < 0) {
|
||||
lwsl_info("LWS_CALLBACK_RAW_RX_fail\n");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
if (lws_buflist_aware_consume(wsi, &ebuf, ebuf.len, buffered))
|
||||
return LWS_HPI_RET_PLEASE_CLOSE_ME;
|
||||
} else
|
||||
if (wsi->favoured_pollin &&
|
||||
(pollfd->revents & pollfd->events & LWS_POLLOUT))
|
||||
|
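What lws_buflist_aware_consume() does with the buffered flag used above (the helper itself is defined later in this diff, in lib/service.c) can be summarised roughly as:

/* paraphrase of the consume step, for orientation only */
if (buffered)
	/* rx came off the buflist: retire 'used' bytes of that segment */
	lws_buflist_use_segment(&wsi->buflist, used);
else if (used != ebuf.len)
	/* fresh rx not fully handled: stash the tail for the next service */
	lws_buflist_append_segment(&wsi->buflist,
				   (uint8_t *)ebuf.token + used,
				   ebuf.len - used);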
@ -114,7 +127,7 @@ try_pollout:
|
|||
fail:
|
||||
lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS, "raw svc fail");
|
||||
|
||||
return LWS_HPI_RET_CLOSE_HANDLED;
|
||||
return LWS_HPI_RET_PLEASE_CLOSE_ME;
|
||||
}
|
||||
|
||||
|
||||
|
@ -128,10 +141,10 @@ rops_handle_POLLIN_raw_file(struct lws_context_per_thread *pt, struct lws *wsi,
|
|||
n = lws_callback_as_writeable(wsi);
|
||||
if (lws_change_pollfd(wsi, LWS_POLLOUT, 0)) {
|
||||
lwsl_info("failed at set pollfd\n");
|
||||
return LWS_HPI_RET_DIE;
|
||||
return LWS_HPI_RET_WSI_ALREADY_DIED;
|
||||
}
|
||||
if (n)
|
||||
return LWS_HPI_RET_CLOSE_HANDLED;
|
||||
return LWS_HPI_RET_PLEASE_CLOSE_ME;
|
||||
}
|
||||
|
||||
if (pollfd->revents & LWS_POLLIN) {
|
||||
|
@ -139,12 +152,12 @@ rops_handle_POLLIN_raw_file(struct lws_context_per_thread *pt, struct lws *wsi,
|
|||
wsi, LWS_CALLBACK_RAW_RX_FILE,
|
||||
wsi->user_space, NULL, 0)) {
|
||||
lwsl_debug("raw rx callback closed it\n");
|
||||
return LWS_HPI_RET_CLOSE_HANDLED;
|
||||
return LWS_HPI_RET_PLEASE_CLOSE_ME;
|
||||
}
|
||||
}
|
||||
|
||||
if (pollfd->revents & LWS_POLLHUP)
|
||||
return LWS_HPI_RET_CLOSE_HANDLED;
|
||||
return LWS_HPI_RET_PLEASE_CLOSE_ME;
|
||||
|
||||
return LWS_HPI_RET_HANDLED;
|
||||
}
|
||||
|
@ -164,7 +177,6 @@ struct lws_role_ops role_ops_raw_skt = {
|
|||
/* callback_on_writable */ NULL,
|
||||
/* tx_credit */ NULL,
|
||||
/* write_role_protocol */ NULL,
|
||||
/* rxflow_cache */ NULL,
|
||||
/* encapsulation_parent */ NULL,
|
||||
/* alpn_negotiated */ NULL,
|
||||
/* close_via_role_protocol */ NULL,
|
||||
|
@ -191,7 +203,6 @@ struct lws_role_ops role_ops_raw_file = {
|
|||
/* callback_on_writable */ NULL,
|
||||
/* tx_credit */ NULL,
|
||||
/* write_role_protocol */ NULL,
|
||||
/* rxflow_cache */ NULL,
|
||||
/* encapsulation_parent */ NULL,
|
||||
/* alpn_negotiated */ NULL,
|
||||
/* close_via_role_protocol */ NULL,
|
||||
|
|
|
@ -31,13 +31,13 @@ int lws_client_rx_sm(struct lws *wsi, unsigned char c)
|
|||
int callback_action = LWS_CALLBACK_CLIENT_RECEIVE;
|
||||
int handled, n, m, rx_draining_ext = 0;
|
||||
unsigned short close_code;
|
||||
struct lws_tokens eff_buf;
|
||||
struct lws_tokens ebuf;
|
||||
unsigned char *pp;
|
||||
|
||||
if (wsi->ws->rx_draining_ext) {
|
||||
assert(!c);
|
||||
eff_buf.token = NULL;
|
||||
eff_buf.token_len = 0;
|
||||
ebuf.token = NULL;
|
||||
ebuf.len = 0;
|
||||
lws_remove_wsi_from_draining_ext_list(wsi);
|
||||
rx_draining_ext = 1;
|
||||
lwsl_debug("%s: doing draining flow\n", __func__);
|
||||
|
@ -476,12 +476,12 @@ ping_drop:
|
|||
* state machine.
|
||||
*/
|
||||
|
||||
eff_buf.token = &wsi->ws->rx_ubuf[LWS_PRE];
|
||||
eff_buf.token_len = wsi->ws->rx_ubuf_head;
|
||||
ebuf.token = &wsi->ws->rx_ubuf[LWS_PRE];
|
||||
ebuf.len = wsi->ws->rx_ubuf_head;
|
||||
|
||||
if (lws_ext_cb_active(wsi,
|
||||
LWS_EXT_CB_EXTENDED_PAYLOAD_RX,
|
||||
&eff_buf, 0) <= 0) {
|
||||
&ebuf, 0) <= 0) {
|
||||
/* not handled or failed */
|
||||
lwsl_ext("Unhandled ext opc 0x%x\n",
|
||||
wsi->ws->opcode);
|
||||
|
@ -501,17 +501,17 @@ ping_drop:
|
|||
if (handled)
|
||||
goto already_done;
|
||||
|
||||
eff_buf.token = &wsi->ws->rx_ubuf[LWS_PRE];
|
||||
eff_buf.token_len = wsi->ws->rx_ubuf_head;
|
||||
ebuf.token = &wsi->ws->rx_ubuf[LWS_PRE];
|
||||
ebuf.len = wsi->ws->rx_ubuf_head;
|
||||
|
||||
if (wsi->ws->opcode == LWSWSOPC_PONG && !eff_buf.token_len)
|
||||
if (wsi->ws->opcode == LWSWSOPC_PONG && !ebuf.len)
|
||||
goto already_done;
|
||||
|
||||
drain_extension:
|
||||
#if !defined(LWS_WITHOUT_EXTENSIONS)
|
||||
lwsl_ext("%s: passing %d to ext\n", __func__, eff_buf.token_len);
|
||||
lwsl_ext("%s: passing %d to ext\n", __func__, ebuf.len);
|
||||
|
||||
n = lws_ext_cb_active(wsi, LWS_EXT_CB_PAYLOAD_RX, &eff_buf, 0);
|
||||
n = lws_ext_cb_active(wsi, LWS_EXT_CB_PAYLOAD_RX, &ebuf, 0);
|
||||
lwsl_ext("Ext RX returned %d\n", n);
|
||||
if (n < 0) {
|
||||
wsi->socket_is_permanently_unusable = 1;
|
||||
|
@ -520,17 +520,17 @@ drain_extension:
|
|||
#else
|
||||
n = 0;
|
||||
#endif
|
||||
lwsl_ext("post inflate eff_buf len %d\n", eff_buf.token_len);
|
||||
lwsl_ext("post inflate ebuf len %d\n", ebuf.len);
|
||||
|
||||
if (rx_draining_ext && !eff_buf.token_len) {
|
||||
if (rx_draining_ext && !ebuf.len) {
|
||||
lwsl_debug(" --- ending drain on 0 read result\n");
|
||||
goto already_done;
|
||||
}
|
||||
|
||||
if (wsi->ws->check_utf8 && !wsi->ws->defeat_check_utf8) {
|
||||
if (lws_check_utf8(&wsi->ws->utf8,
|
||||
(unsigned char *)eff_buf.token,
|
||||
eff_buf.token_len))
|
||||
(unsigned char *)ebuf.token,
|
||||
ebuf.len))
|
||||
goto utf8_fail;
|
||||
|
||||
/* we are ending partway through utf-8 character? */
|
||||
|
@ -543,14 +543,14 @@ utf8_fail:
|
|||
}
|
||||
}
|
||||
|
||||
if (eff_buf.token_len < 0 &&
|
||||
if (ebuf.len < 0 &&
|
||||
callback_action != LWS_CALLBACK_CLIENT_RECEIVE_PONG)
|
||||
goto already_done;
|
||||
|
||||
if (!eff_buf.token)
|
||||
if (!ebuf.token)
|
||||
goto already_done;
|
||||
|
||||
eff_buf.token[eff_buf.token_len] = '\0';
|
||||
ebuf.token[ebuf.len] = '\0';
|
||||
|
||||
if (!wsi->protocol->callback)
|
||||
goto already_done;
|
||||
|
@ -563,7 +563,7 @@ utf8_fail:
|
|||
#if !defined(LWS_WITHOUT_EXTENSIONS)
|
||||
n &&
|
||||
#endif
|
||||
eff_buf.token_len)
|
||||
ebuf.len)
|
||||
/* extension had more... main loop will come back
|
||||
* we want callback to be done with this set, if so,
|
||||
* because lws_is_final() hides it was final until the
|
||||
|
@ -580,7 +580,7 @@ utf8_fail:
|
|||
|
||||
m = wsi->protocol->callback(wsi,
|
||||
(enum lws_callback_reasons)callback_action,
|
||||
wsi->user_space, eff_buf.token, eff_buf.token_len);
|
||||
wsi->user_space, ebuf.token, ebuf.len);
|
||||
|
||||
/* if user code wants to close, let caller know */
|
||||
if (m)
|
||||
|
|
|
@ -72,7 +72,7 @@ lws_extension_callback_pm_deflate(struct lws_context *context,
|
|||
{
|
||||
struct lws_ext_pm_deflate_priv *priv =
|
||||
(struct lws_ext_pm_deflate_priv *)user;
|
||||
struct lws_tokens *eff_buf = (struct lws_tokens *)in;
|
||||
struct lws_tokens *ebuf = (struct lws_tokens *)in;
|
||||
static unsigned char trail[] = { 0, 0, 0xff, 0xff };
|
||||
int n, ret = 0, was_fin = 0, extra;
|
||||
struct lws_ext_option_arg *oa;
|
||||
|
@ -175,13 +175,13 @@ lws_extension_callback_pm_deflate(struct lws_context *context,
|
|||
|
||||
case LWS_EXT_CB_PAYLOAD_RX:
|
||||
lwsl_ext(" %s: LWS_EXT_CB_PAYLOAD_RX: in %d, existing in %d\n",
|
||||
__func__, eff_buf->token_len, priv->rx.avail_in);
|
||||
__func__, ebuf->len, priv->rx.avail_in);
|
||||
if (!(wsi->ws->rsv_first_msg & 0x40))
|
||||
return 0;
|
||||
|
||||
#if 0
|
||||
for (n = 0; n < eff_buf->token_len; n++) {
|
||||
printf("%02X ", (unsigned char)eff_buf->token[n]);
|
||||
for (n = 0; n < ebuf->len; n++) {
|
||||
printf("%02X ", (unsigned char)ebuf->token[n]);
|
||||
if ((n & 15) == 15)
|
||||
printf("\n");
|
||||
}
|
||||
|
@ -209,13 +209,13 @@ lws_extension_callback_pm_deflate(struct lws_context *context,
|
|||
* rx buffer by the caller, so this assumption is safe while
|
||||
* we block new rx while draining the existing rx
|
||||
*/
|
||||
if (!priv->rx.avail_in && eff_buf->token &&
|
||||
eff_buf->token_len) {
|
||||
priv->rx.next_in = (unsigned char *)eff_buf->token;
|
||||
priv->rx.avail_in = eff_buf->token_len;
|
||||
if (!priv->rx.avail_in && ebuf->token &&
|
||||
ebuf->len) {
|
||||
priv->rx.next_in = (unsigned char *)ebuf->token;
|
||||
priv->rx.avail_in = ebuf->len;
|
||||
}
|
||||
priv->rx.next_out = priv->buf_rx_inflated + LWS_PRE;
|
||||
eff_buf->token = (char *)priv->rx.next_out;
|
||||
ebuf->token = (char *)priv->rx.next_out;
|
||||
priv->rx.avail_out = 1 << priv->args[PMD_RX_BUF_PWR2];
|
||||
|
||||
if (priv->rx_held_valid) {
|
||||
|
@ -307,13 +307,13 @@ lws_extension_callback_pm_deflate(struct lws_context *context,
|
|||
priv->rx_held_valid = 1;
|
||||
}
|
||||
|
||||
eff_buf->token_len = lws_ptr_diff(priv->rx.next_out,
|
||||
eff_buf->token);
|
||||
priv->count_rx_between_fin += eff_buf->token_len;
|
||||
ebuf->len = lws_ptr_diff(priv->rx.next_out,
|
||||
ebuf->token);
|
||||
priv->count_rx_between_fin += ebuf->len;
|
||||
|
||||
lwsl_ext(" %s: RX leaving with new effbuff len %d, "
|
||||
"ret %d, rx.avail_in=%d, TOTAL RX since FIN %lu\n",
|
||||
__func__, eff_buf->token_len, priv->rx_held_valid,
|
||||
__func__, ebuf->len, priv->rx_held_valid,
|
||||
priv->rx.avail_in,
|
||||
(unsigned long)priv->count_rx_between_fin);
|
||||
|
||||
|
@ -325,8 +325,8 @@ lws_extension_callback_pm_deflate(struct lws_context *context,
|
|||
}
|
||||
}
|
||||
#if 0
|
||||
for (n = 0; n < eff_buf->token_len; n++)
|
||||
putchar(eff_buf->token[n]);
|
||||
for (n = 0; n < ebuf->len; n++)
|
||||
putchar(ebuf->token[n]);
|
||||
puts("\n");
|
||||
#endif
|
||||
|
||||
|
@ -356,16 +356,16 @@ lws_extension_callback_pm_deflate(struct lws_context *context,
|
|||
return -1;
|
||||
}
|
||||
|
||||
if (eff_buf->token) {
|
||||
lwsl_ext("%s: TX: eff_buf length %d\n", __func__,
|
||||
eff_buf->token_len);
|
||||
priv->tx.next_in = (unsigned char *)eff_buf->token;
|
||||
priv->tx.avail_in = eff_buf->token_len;
|
||||
if (ebuf->token) {
|
||||
lwsl_ext("%s: TX: ebuf length %d\n", __func__,
|
||||
ebuf->len);
|
||||
priv->tx.next_in = (unsigned char *)ebuf->token;
|
||||
priv->tx.avail_in = ebuf->len;
|
||||
}
|
||||
|
||||
#if 0
|
||||
for (n = 0; n < eff_buf->token_len; n++) {
|
||||
printf("%02X ", (unsigned char)eff_buf->token[n]);
|
||||
for (n = 0; n < ebuf->len; n++) {
|
||||
printf("%02X ", (unsigned char)ebuf->token[n]);
|
||||
if ((n & 15) == 15)
|
||||
printf("\n");
|
||||
}
|
||||
|
@ -373,7 +373,7 @@ lws_extension_callback_pm_deflate(struct lws_context *context,
|
|||
#endif
|
||||
|
||||
priv->tx.next_out = priv->buf_tx_deflated + LWS_PRE + 5;
|
||||
eff_buf->token = (char *)priv->tx.next_out;
|
||||
ebuf->token = (char *)priv->tx.next_out;
|
||||
priv->tx.avail_out = 1 << priv->args[PMD_TX_BUF_PWR2];
|
||||
|
||||
n = deflate(&priv->tx, Z_SYNC_FLUSH);
|
||||
|
@ -395,18 +395,18 @@ lws_extension_callback_pm_deflate(struct lws_context *context,
|
|||
* place the pending trailer 00 00 FF FF, just
|
||||
* the 1 byte of live data
|
||||
*/
|
||||
*(--eff_buf->token) = priv->tx_held[0];
|
||||
*(--ebuf->token) = priv->tx_held[0];
|
||||
else {
|
||||
/* he generated data, prepend whole pending */
|
||||
eff_buf->token -= 5;
|
||||
ebuf->token -= 5;
|
||||
for (n = 0; n < 5; n++)
|
||||
eff_buf->token[n] = priv->tx_held[n];
|
||||
ebuf->token[n] = priv->tx_held[n];
|
||||
|
||||
}
|
||||
}
|
||||
priv->compressed_out = 1;
|
||||
eff_buf->token_len = lws_ptr_diff(priv->tx.next_out,
|
||||
eff_buf->token);
|
||||
ebuf->len = lws_ptr_diff(priv->tx.next_out,
|
||||
ebuf->token);
|
||||
|
||||
/*
|
||||
* we must announce in our returncode now if there is more
|
||||
|
@ -431,15 +431,15 @@ lws_extension_callback_pm_deflate(struct lws_context *context,
|
|||
|
||||
extra = !!(len & LWS_WRITE_NO_FIN) || !priv->tx.avail_out;
|
||||
|
||||
if (eff_buf->token_len >= 4 + extra) {
|
||||
if (ebuf->len >= 4 + extra) {
|
||||
lwsl_ext("tx held %d\n", 4 + extra);
|
||||
priv->tx_held_valid = extra;
|
||||
for (n = 3 + extra; n >= 0; n--)
|
||||
priv->tx_held[n] = *(--priv->tx.next_out);
|
||||
eff_buf->token_len -= 4 + extra;
|
||||
ebuf->len -= 4 + extra;
|
||||
}
|
||||
lwsl_ext(" TX rewritten with new effbuff len %d, ret %d\n",
|
||||
eff_buf->token_len, !priv->tx.avail_out);
|
||||
ebuf->len, !priv->tx.avail_out);
|
||||
|
||||
return !priv->tx.avail_out; /* 1 == have more tx pending */
|
||||
|
||||
|
@ -448,27 +448,27 @@ lws_extension_callback_pm_deflate(struct lws_context *context,
|
|||
break;
|
||||
priv->compressed_out = 0;
|
||||
|
||||
if ((*(eff_buf->token) & 0x80) &&
|
||||
if ((*(ebuf->token) & 0x80) &&
|
||||
priv->args[PMD_CLIENT_NO_CONTEXT_TAKEOVER]) {
|
||||
lwsl_debug("PMD_CLIENT_NO_CONTEXT_TAKEOVER\n");
|
||||
(void)deflateEnd(&priv->tx);
|
||||
priv->tx_init = 0;
|
||||
}
|
||||
|
||||
n = *(eff_buf->token) & 15;
|
||||
n = *(ebuf->token) & 15;
|
||||
/* set RSV1, but not on CONTINUATION */
|
||||
if (n == LWSWSOPC_TEXT_FRAME || n == LWSWSOPC_BINARY_FRAME)
|
||||
*eff_buf->token |= 0x40;
|
||||
*ebuf->token |= 0x40;
|
||||
#if 0
|
||||
for (n = 0; n < eff_buf->token_len; n++) {
|
||||
printf("%02X ", (unsigned char)eff_buf->token[n]);
|
||||
for (n = 0; n < ebuf->len; n++) {
|
||||
printf("%02X ", (unsigned char)ebuf->token[n]);
|
||||
if ((n & 15) == 15)
|
||||
puts("\n");
|
||||
}
|
||||
puts("\n");
|
||||
#endif
|
||||
lwsl_ext("%s: tx opcode 0x%02X\n", __func__,
|
||||
(unsigned char)*eff_buf->token);
|
||||
(unsigned char)*ebuf->token);
|
||||
break;
|
||||
|
||||
default:
|
||||
|
|
|
@ -214,11 +214,11 @@ int lws_ext_cb_all_exts(struct lws_context *context, struct lws *wsi,
|
|||
int
|
||||
lws_issue_raw_ext_access(struct lws *wsi, unsigned char *buf, size_t len)
|
||||
{
|
||||
struct lws_tokens eff_buf;
|
||||
struct lws_tokens ebuf;
|
||||
int ret, m, n = 0;
|
||||
|
||||
eff_buf.token = (char *)buf;
|
||||
eff_buf.token_len = (int)len;
|
||||
ebuf.token = (char *)buf;
|
||||
ebuf.len = (int)len;
|
||||
|
||||
/*
|
||||
* while we have original buf to spill ourselves, or extensions report
|
||||
|
@ -234,13 +234,13 @@ lws_issue_raw_ext_access(struct lws *wsi, unsigned char *buf, size_t len)
|
|||
|
||||
/* show every extension the new incoming data */
|
||||
m = lws_ext_cb_active(wsi,
|
||||
LWS_EXT_CB_PACKET_TX_PRESEND, &eff_buf, 0);
|
||||
LWS_EXT_CB_PACKET_TX_PRESEND, &ebuf, 0);
|
||||
if (m < 0)
|
||||
return -1;
|
||||
if (m) /* handled */
|
||||
ret = 1;
|
||||
|
||||
if ((char *)buf != eff_buf.token)
|
||||
if ((char *)buf != ebuf.token)
|
||||
/*
|
||||
* extension recreated it:
|
||||
* need to buffer this if not all sent
|
||||
|
@ -249,9 +249,9 @@ lws_issue_raw_ext_access(struct lws *wsi, unsigned char *buf, size_t len)
|
|||
|
||||
/* assuming they left us something to send, send it */
|
||||
|
||||
if (eff_buf.token_len) {
|
||||
n = lws_issue_raw(wsi, (unsigned char *)eff_buf.token,
|
||||
eff_buf.token_len);
|
||||
if (ebuf.len) {
|
||||
n = lws_issue_raw(wsi, (unsigned char *)ebuf.token,
|
||||
ebuf.len);
|
||||
if (n < 0) {
|
||||
lwsl_info("closing from ext access\n");
|
||||
return -1;
|
||||
|
@ -271,8 +271,8 @@ lws_issue_raw_ext_access(struct lws *wsi, unsigned char *buf, size_t len)
|
|||
|
||||
/* we used up what we had */
|
||||
|
||||
eff_buf.token = NULL;
|
||||
eff_buf.token_len = 0;
|
||||
ebuf.token = NULL;
|
||||
ebuf.len = 0;
|
||||
|
||||
/*
|
||||
* Did that leave the pipe choked?
|
||||
|
|
|
@ -33,21 +33,21 @@ lws_ws_rx_sm(struct lws *wsi, unsigned char c)
|
|||
{
|
||||
int callback_action = LWS_CALLBACK_RECEIVE;
|
||||
int ret = 0, rx_draining_ext = 0;
|
||||
struct lws_tokens eff_buf;
|
||||
struct lws_tokens ebuf;
|
||||
#if !defined(LWS_WITHOUT_EXTENSIONS)
|
||||
int n;
|
||||
#endif
|
||||
|
||||
eff_buf.token = NULL;
|
||||
eff_buf.token_len = 0;
|
||||
ebuf.token = NULL;
|
||||
ebuf.len = 0;
|
||||
if (wsi->socket_is_permanently_unusable)
|
||||
return -1;
|
||||
|
||||
switch (wsi->lws_rx_parse_state) {
|
||||
case LWS_RXPS_NEW:
|
||||
if (wsi->ws->rx_draining_ext) {
|
||||
eff_buf.token = NULL;
|
||||
eff_buf.token_len = 0;
|
||||
ebuf.token = NULL;
|
||||
ebuf.len = 0;
|
||||
lws_remove_wsi_from_draining_ext_list(wsi);
|
||||
rx_draining_ext = 1;
|
||||
lwsl_debug("%s: doing draining flow\n", __func__);
|
||||
|
@ -453,12 +453,12 @@ ping_drop:
|
|||
* state machine.
|
||||
*/
|
||||
|
||||
eff_buf.token = &wsi->ws->rx_ubuf[LWS_PRE];
|
||||
eff_buf.token_len = wsi->ws->rx_ubuf_head;
|
||||
ebuf.token = &wsi->ws->rx_ubuf[LWS_PRE];
|
||||
ebuf.len = wsi->ws->rx_ubuf_head;
|
||||
|
||||
if (lws_ext_cb_active(wsi,
|
||||
LWS_EXT_CB_EXTENDED_PAYLOAD_RX,
|
||||
&eff_buf, 0) <= 0)
|
||||
&ebuf, 0) <= 0)
|
||||
/* not handle or fail */
|
||||
lwsl_ext("ext opc opcode 0x%x unknown\n",
|
||||
wsi->ws->opcode);
|
||||
|
@ -473,23 +473,23 @@ ping_drop:
|
|||
* so it can be sent straight out again using lws_write
|
||||
*/
|
||||
|
||||
eff_buf.token = &wsi->ws->rx_ubuf[LWS_PRE];
|
||||
eff_buf.token_len = wsi->ws->rx_ubuf_head;
|
||||
ebuf.token = &wsi->ws->rx_ubuf[LWS_PRE];
|
||||
ebuf.len = wsi->ws->rx_ubuf_head;
|
||||
|
||||
if (wsi->ws->opcode == LWSWSOPC_PONG && !eff_buf.token_len)
|
||||
if (wsi->ws->opcode == LWSWSOPC_PONG && !ebuf.len)
|
||||
goto already_done;
|
||||
|
||||
drain_extension:
|
||||
lwsl_ext("%s: passing %d to ext\n", __func__, eff_buf.token_len);
|
||||
lwsl_ext("%s: passing %d to ext\n", __func__, ebuf.len);
|
||||
|
||||
if (lwsi_state(wsi) == LRS_RETURNED_CLOSE ||
|
||||
lwsi_state(wsi) == LRS_AWAITING_CLOSE_ACK)
|
||||
goto already_done;
|
||||
#if !defined(LWS_WITHOUT_EXTENSIONS)
|
||||
n = lws_ext_cb_active(wsi, LWS_EXT_CB_PAYLOAD_RX, &eff_buf, 0);
|
||||
n = lws_ext_cb_active(wsi, LWS_EXT_CB_PAYLOAD_RX, &ebuf, 0);
|
||||
#endif
|
||||
/*
|
||||
* eff_buf may be pointing somewhere completely different now,
|
||||
* ebuf may be pointing somewhere completely different now,
|
||||
* it's the output
|
||||
*/
|
||||
wsi->ws->first_fragment = 0;
|
||||
|
@ -502,22 +502,22 @@ drain_extension:
|
|||
return -1;
|
||||
}
|
||||
#endif
|
||||
if (rx_draining_ext && eff_buf.token_len == 0)
|
||||
if (rx_draining_ext && ebuf.len == 0)
|
||||
goto already_done;
|
||||
|
||||
if (
|
||||
#if !defined(LWS_WITHOUT_EXTENSIONS)
|
||||
n &&
|
||||
#endif
|
||||
eff_buf.token_len)
|
||||
ebuf.len)
|
||||
/* extension had more... main loop will come back */
|
||||
lws_add_wsi_to_draining_ext_list(wsi);
|
||||
else
|
||||
lws_remove_wsi_from_draining_ext_list(wsi);
|
||||
|
||||
if (eff_buf.token_len > 0 ||
|
||||
if (ebuf.len > 0 ||
|
||||
callback_action == LWS_CALLBACK_RECEIVE_PONG) {
|
||||
eff_buf.token[eff_buf.token_len] = '\0';
|
||||
ebuf.token[ebuf.len] = '\0';
|
||||
|
||||
if (wsi->protocol->callback) {
|
||||
if (callback_action == LWS_CALLBACK_RECEIVE_PONG)
|
||||
|
@ -528,8 +528,8 @@ drain_extension:
|
|||
wsi, (enum lws_callback_reasons)
|
||||
callback_action,
|
||||
wsi->user_space,
|
||||
eff_buf.token,
|
||||
eff_buf.token_len);
|
||||
ebuf.token,
|
||||
ebuf.len);
|
||||
}
|
||||
else
|
||||
lwsl_err("No callback on payload spill!\n");
|
||||
|
@ -819,9 +819,9 @@ static int
|
|||
rops_handle_POLLIN_ws(struct lws_context_per_thread *pt, struct lws *wsi,
|
||||
struct lws_pollfd *pollfd)
|
||||
{
|
||||
struct lws_tokens eff_buf;
|
||||
struct lws_tokens ebuf;
|
||||
unsigned int pending = 0;
|
||||
char draining_flow = 0;
|
||||
char draining_flow = 0, buffered = 0;
|
||||
int n = 0, m;
|
||||
#if defined(LWS_WITH_HTTP2)
|
||||
struct lws *wsi1;
|
||||
|
@ -838,20 +838,23 @@ rops_handle_POLLIN_ws(struct lws_context_per_thread *pt, struct lws *wsi,
|
|||
*/
|
||||
if (lwsi_state(wsi) == LRS_PRE_WS_SERVING_ACCEPT) {
|
||||
wsi->socket_is_permanently_unusable = 1;
|
||||
return LWS_HPI_RET_CLOSE_HANDLED;
|
||||
return LWS_HPI_RET_PLEASE_CLOSE_ME;
|
||||
}
|
||||
|
||||
ebuf.token = NULL;
|
||||
ebuf.len = 0;
|
||||
|
||||
if (lwsi_state(wsi) == LRS_WAITING_CONNECT) {
|
||||
#if !defined(LWS_NO_CLIENT)
|
||||
if ((pollfd->revents & LWS_POLLOUT) &&
|
||||
lws_handle_POLLOUT_event(wsi, pollfd)) {
|
||||
lwsl_debug("POLLOUT event closed it\n");
|
||||
return LWS_HPI_RET_CLOSE_HANDLED;
|
||||
return LWS_HPI_RET_PLEASE_CLOSE_ME;
|
||||
}
|
||||
|
||||
n = lws_client_socket_service(wsi, pollfd, NULL);
|
||||
if (n)
|
||||
return LWS_HPI_RET_DIE;
|
||||
return LWS_HPI_RET_WSI_ALREADY_DIED;
|
||||
#endif
|
||||
return LWS_HPI_RET_HANDLED;
|
||||
}
|
||||
|
@ -865,7 +868,7 @@ rops_handle_POLLIN_ws(struct lws_context_per_thread *pt, struct lws *wsi,
|
|||
lwsi_set_state(wsi, LRS_FLUSHING_BEFORE_CLOSE);
|
||||
/* the write failed... it's had it */
|
||||
wsi->socket_is_permanently_unusable = 1;
|
||||
return LWS_HPI_RET_CLOSE_HANDLED;
|
||||
return LWS_HPI_RET_PLEASE_CLOSE_ME;
|
||||
}
|
||||
|
||||
if (lwsi_state(wsi) == LRS_RETURNED_CLOSE ||
|
||||
|
@ -938,39 +941,17 @@ rops_handle_POLLIN_ws(struct lws_context_per_thread *pt, struct lws *wsi,
|
|||
*/
|
||||
return LWS_HPI_RET_HANDLED;
|
||||
|
||||
/* 3: RX Flowcontrol buffer / h2 rx scratch needs to be drained
|
||||
/* 3: buflist needs to be drained
|
||||
*/
|
||||
|
||||
eff_buf.token_len = (int)lws_buflist_next_segment_len(&wsi->buflist_rxflow,
|
||||
(uint8_t **)&eff_buf.token);
|
||||
if (eff_buf.token_len) {
|
||||
lwsl_info("draining rxflow (len %d)\n", eff_buf.token_len);
|
||||
draining_flow = 1;
|
||||
ebuf.len = (int)lws_buflist_next_segment_len(&wsi->buflist,
|
||||
(uint8_t **)&ebuf.token);
|
||||
if (ebuf.len) {
|
||||
lwsl_info("draining buflist (len %d)\n", ebuf.len);
|
||||
buffered = 1;
|
||||
goto drain;
|
||||
}
|
||||
|
||||
#if defined(LWS_WITH_HTTP2)
|
||||
if (wsi->upgraded_to_http2) {
|
||||
struct lws_h2_netconn *h2n = wsi->h2.h2n;
|
||||
|
||||
if (h2n->rx_scratch_len) {
|
||||
lwsl_info("%s: %p: h2 rx pos = %d len = %d\n",
|
||||
__func__, wsi, h2n->rx_scratch_pos,
|
||||
h2n->rx_scratch_len);
|
||||
eff_buf.token = (char *)h2n->rx_scratch +
|
||||
h2n->rx_scratch_pos;
|
||||
eff_buf.token_len = h2n->rx_scratch_len;
|
||||
|
||||
h2n->rx_scratch_len = 0;
|
||||
goto drain;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
/* 4: any incoming (or ah-stashed incoming rx) data ready?
|
||||
* notice if rx flow going off raced poll(), rx flow wins
|
||||
*/
|
||||
|
||||
if (!(pollfd->revents & pollfd->events & LWS_POLLIN) && !wsi->ah)
|
||||
return LWS_HPI_RET_HANDLED;
|
||||
|
||||
|
@ -981,91 +962,53 @@ read:
|
|||
return LWS_HPI_RET_HANDLED;
|
||||
}
|
||||
|
||||
if (wsi->ah && wsi->ah->rxlen == wsi->ah->rxpos) {
|
||||
/* we drained the excess data in the ah */
|
||||
lwsl_info("%s: %p: dropping ah on ws post-upgrade\n", __func__, wsi);
|
||||
lws_header_table_force_to_detachable_state(wsi);
|
||||
lws_header_table_detach(wsi, 0);
|
||||
} else
|
||||
if (wsi->ah)
|
||||
lwsl_info("%s: %p: unable to drop yet %d vs %d\n",
|
||||
__func__, wsi, wsi->ah->rxpos, wsi->ah->rxlen);
|
||||
if (!(lwsi_role_client(wsi) &&
|
||||
(lwsi_state(wsi) != LRS_ESTABLISHED &&
|
||||
lwsi_state(wsi) != LRS_H2_WAITING_TO_SEND_HEADERS))) {
|
||||
/*
|
||||
* extension may not consume everything
|
||||
* (eg, pmd may be constrained
|
||||
* as to what it can output...) has to go in
|
||||
* per-wsi rx buf area.
|
||||
* Otherwise in large temp serv_buf area.
|
||||
*/
|
||||
|
||||
if (wsi->ah && wsi->ah->rxlen - wsi->ah->rxpos) {
|
||||
lwsl_info("%s: %p: inherited ah rx %d\n", __func__,
|
||||
wsi, wsi->ah->rxlen - wsi->ah->rxpos);
|
||||
eff_buf.token_len = wsi->ah->rxlen - wsi->ah->rxpos;
|
||||
eff_buf.token = (char *)wsi->ah->rx + wsi->ah->rxpos;
|
||||
} else {
|
||||
if (!(lwsi_role_client(wsi) &&
|
||||
(lwsi_state(wsi) != LRS_ESTABLISHED &&
|
||||
lwsi_state(wsi) != LRS_H2_WAITING_TO_SEND_HEADERS))) {
|
||||
/*
|
||||
* extension may not consume everything
|
||||
* (eg, pmd may be constrained
|
||||
* as to what it can output...) has to go in
|
||||
* per-wsi rx buf area.
|
||||
* Otherwise in large temp serv_buf area.
|
||||
*/
|
||||
buffered = 0;
|
||||
ebuf.token = (char *)pt->serv_buf;
|
||||
if (lws_is_ws_with_ext(wsi))
|
||||
ebuf.len = wsi->ws->rx_ubuf_alloc;
|
||||
else
|
||||
ebuf.len = wsi->context->pt_serv_buf_size;
|
||||
|
||||
#if defined(LWS_WITH_HTTP2)
|
||||
if (wsi->upgraded_to_http2) {
|
||||
if (!wsi->h2.h2n->rx_scratch) {
|
||||
wsi->h2.h2n->rx_scratch =
|
||||
lws_malloc(
|
||||
wsi->vhost->h2_rx_scratch_size,
|
||||
"h2 rx scratch");
|
||||
if (!wsi->h2.h2n->rx_scratch)
|
||||
return LWS_HPI_RET_CLOSE_HANDLED;
|
||||
}
|
||||
eff_buf.token = wsi->h2.h2n->rx_scratch;
|
||||
eff_buf.token_len = wsi->vhost->h2_rx_scratch_size;
|
||||
} else
|
||||
#endif
|
||||
{
|
||||
eff_buf.token = (char *)pt->serv_buf;
|
||||
if (lws_is_ws_with_ext(wsi)) {
|
||||
eff_buf.token_len =
|
||||
wsi->ws->rx_ubuf_alloc;
|
||||
} else {
|
||||
eff_buf.token_len =
|
||||
wsi->context->pt_serv_buf_size;
|
||||
}
|
||||
if ((unsigned int)ebuf.len > wsi->context->pt_serv_buf_size)
|
||||
ebuf.len = wsi->context->pt_serv_buf_size;
|
||||
|
||||
if ((unsigned int)eff_buf.token_len >
|
||||
wsi->context->pt_serv_buf_size)
|
||||
eff_buf.token_len =
|
||||
wsi->context->pt_serv_buf_size;
|
||||
}
|
||||
if ((int)pending > ebuf.len)
|
||||
pending = ebuf.len;
|
||||
|
||||
if ((int)pending > eff_buf.token_len)
|
||||
pending = eff_buf.token_len;
|
||||
|
||||
eff_buf.token_len = lws_ssl_capable_read(wsi,
|
||||
(unsigned char *)eff_buf.token,
|
||||
pending ? (int)pending :
|
||||
eff_buf.token_len);
|
||||
switch (eff_buf.token_len) {
|
||||
case 0:
|
||||
lwsl_info("%s: zero length read\n",
|
||||
__func__);
|
||||
return LWS_HPI_RET_CLOSE_HANDLED;
|
||||
case LWS_SSL_CAPABLE_MORE_SERVICE:
|
||||
lwsl_info("SSL Capable more service\n");
|
||||
return LWS_HPI_RET_HANDLED;
|
||||
case LWS_SSL_CAPABLE_ERROR:
|
||||
lwsl_info("%s: LWS_SSL_CAPABLE_ERROR\n",
|
||||
__func__);
|
||||
return LWS_HPI_RET_CLOSE_HANDLED;
|
||||
}
|
||||
// lwsl_notice("Actual RX %d\n", eff_buf.token_len);
|
||||
|
||||
/*
|
||||
* coverity thinks ssl_capable_read() may read over
|
||||
* 2GB. Dissuade it...
|
||||
*/
|
||||
eff_buf.token_len &= 0x7fffffff;
|
||||
ebuf.len = lws_ssl_capable_read(wsi, (uint8_t *)ebuf.token,
|
||||
pending ? (int)pending :
|
||||
ebuf.len);
|
||||
switch (ebuf.len) {
|
||||
case 0:
|
||||
lwsl_info("%s: zero length read\n",
|
||||
__func__);
|
||||
return LWS_HPI_RET_PLEASE_CLOSE_ME;
|
||||
case LWS_SSL_CAPABLE_MORE_SERVICE:
|
||||
lwsl_info("SSL Capable more service\n");
|
||||
return LWS_HPI_RET_HANDLED;
|
||||
case LWS_SSL_CAPABLE_ERROR:
|
||||
lwsl_info("%s: LWS_SSL_CAPABLE_ERROR\n",
|
||||
__func__);
|
||||
return LWS_HPI_RET_PLEASE_CLOSE_ME;
|
||||
}
|
||||
// lwsl_notice("Actual RX %d\n", ebuf.len);
|
||||
|
||||
/*
|
||||
* coverity thinks ssl_capable_read() may read over
|
||||
* 2GB. Dissuade it...
|
||||
*/
|
||||
ebuf.len &= 0x7fffffff;
|
||||
}
|
||||
|
||||
drain:
|
||||
|
@ -1085,40 +1028,34 @@ drain:
|
|||
do {
|
||||
#if !defined(LWS_WITHOUT_EXTENSIONS)
|
||||
m = lws_ext_cb_active(wsi, LWS_EXT_CB_PACKET_RX_PREPARSE,
|
||||
&eff_buf, 0);
|
||||
&ebuf, 0);
|
||||
if (m < 0)
|
||||
return LWS_HPI_RET_CLOSE_HANDLED;
|
||||
return LWS_HPI_RET_PLEASE_CLOSE_ME;
|
||||
#endif
|
||||
|
||||
/* service incoming data */
|
||||
|
||||
if (eff_buf.token_len) {
|
||||
if (ebuf.len) {
|
||||
#if defined(LWS_ROLE_H2)
|
||||
if (lwsi_role_h2(wsi) && lwsi_state(wsi) != LRS_BODY)
|
||||
n = lws_read_h2(wsi, (unsigned char *)eff_buf.token,
|
||||
eff_buf.token_len);
|
||||
n = lws_read_h2(wsi, (unsigned char *)ebuf.token,
|
||||
ebuf.len);
|
||||
else
|
||||
#endif
|
||||
n = lws_read_h1(wsi, (unsigned char *)eff_buf.token,
|
||||
eff_buf.token_len);
|
||||
n = lws_read_h1(wsi, (unsigned char *)ebuf.token,
|
||||
ebuf.len);
|
||||
|
||||
if (n < 0) {
|
||||
/* we closed wsi */
|
||||
n = 0;
|
||||
return LWS_HPI_RET_DIE;
|
||||
}
|
||||
if (draining_flow) {
|
||||
m = lws_buflist_use_segment(&wsi->buflist_rxflow, n);
|
||||
lwsl_debug("%s: draining rxflow: used %d, next %d\n", __func__, n, m);
|
||||
if (!m) {
|
||||
lwsl_notice("%s: removed wsi %p from rxflow list\n", __func__, wsi);
|
||||
lws_dll_lws_remove(&wsi->dll_rxflow);
|
||||
}
|
||||
return LWS_HPI_RET_WSI_ALREADY_DIED;
|
||||
}
|
||||
if (lws_buflist_aware_consume(wsi, &ebuf, n, buffered))
|
||||
return LWS_HPI_RET_PLEASE_CLOSE_ME;
|
||||
}
|
||||
|
||||
eff_buf.token = NULL;
|
||||
eff_buf.token_len = 0;
|
||||
ebuf.token = NULL;
|
||||
ebuf.len = 0;
|
||||
} while (m);
|
||||
|
||||
if (wsi->ah
|
||||
|
@ -1127,7 +1064,6 @@ drain:
|
|||
#endif
|
||||
) {
|
||||
lwsl_info("%s: %p: detaching ah\n", __func__, wsi);
|
||||
lws_header_table_force_to_detachable_state(wsi);
|
||||
lws_header_table_detach(wsi, 0);
|
||||
}
|
||||
|
||||
|
@ -1143,7 +1079,7 @@ drain:
|
|||
}
|
||||
|
||||
if (draining_flow && /* were draining, now nothing left */
|
||||
!lws_buflist_next_segment_len(&wsi->buflist_rxflow, NULL)) {
|
||||
!lws_buflist_next_segment_len(&wsi->buflist, NULL)) {
|
||||
lwsl_info("%s: %p flow buf: drained\n", __func__, wsi);
|
||||
/* having drained the rxflow buffer, can rearm POLLIN */
|
||||
#ifdef LWS_NO_SERVER
|
||||
|
@ -1162,7 +1098,7 @@ int rops_handle_POLLOUT_ws(struct lws *wsi)
|
|||
{
|
||||
int write_type = LWS_WRITE_PONG;
|
||||
#if !defined(LWS_WITHOUT_EXTENSIONS)
|
||||
struct lws_tokens eff_buf;
|
||||
struct lws_tokens ebuf;
|
||||
int ret, m;
|
||||
#endif
|
||||
int n;
|
||||
|
@ -1275,7 +1211,7 @@ int rops_handle_POLLOUT_ws(struct lws *wsi)
|
|||
* had pending stuff to spill... they need to get the
|
||||
* first look-in otherwise sequence will be disordered
|
||||
*
|
||||
* NULL, zero-length eff_buf means just spill pending
|
||||
* NULL, zero-length ebuf means just spill pending
|
||||
*/
|
||||
|
||||
ret = 1;
|
||||
|
@ -1288,13 +1224,13 @@ int rops_handle_POLLOUT_ws(struct lws *wsi)
|
|||
/* default to nobody has more to spill */
|
||||
|
||||
ret = 0;
|
||||
eff_buf.token = NULL;
|
||||
eff_buf.token_len = 0;
|
||||
ebuf.token = NULL;
|
||||
ebuf.len = 0;
|
||||
|
||||
/* give every extension a chance to spill */
|
||||
|
||||
m = lws_ext_cb_active(wsi, LWS_EXT_CB_PACKET_TX_PRESEND,
|
||||
&eff_buf, 0);
|
||||
&ebuf, 0);
|
||||
if (m < 0) {
|
||||
lwsl_err("ext reports fatal error\n");
|
||||
return LWS_HP_RET_BAIL_DIE;
|
||||
|
@ -1308,9 +1244,9 @@ int rops_handle_POLLOUT_ws(struct lws *wsi)
|
|||
|
||||
/* assuming they gave us something to send, send it */
|
||||
|
||||
if (eff_buf.token_len) {
|
||||
n = lws_issue_raw(wsi, (unsigned char *)eff_buf.token,
|
||||
eff_buf.token_len);
|
||||
if (ebuf.len) {
|
||||
n = lws_issue_raw(wsi, (unsigned char *)ebuf.token,
|
||||
ebuf.len);
|
||||
if (n < 0) {
|
||||
lwsl_info("closing from POLLOUT spill\n");
|
||||
return LWS_HP_RET_BAIL_DIE;
|
||||
|
@ -1318,9 +1254,9 @@ int rops_handle_POLLOUT_ws(struct lws *wsi)
|
|||
/*
|
||||
* Keep amount spilled small to minimize chance of this
|
||||
*/
|
||||
if (n != eff_buf.token_len) {
|
||||
if (n != ebuf.len) {
|
||||
lwsl_err("Unable to spill ext %d vs %d\n",
|
||||
eff_buf.token_len, n);
|
||||
ebuf.len, n);
|
||||
return LWS_HP_RET_BAIL_DIE;
|
||||
}
|
||||
} else
|
||||
|
@ -1513,7 +1449,7 @@ rops_write_role_protocol_ws(struct lws *wsi, unsigned char *buf, size_t len,
|
|||
int masked7 = lwsi_role_client(wsi);
|
||||
unsigned char is_masked_bit = 0;
|
||||
unsigned char *dropmask = NULL;
|
||||
struct lws_tokens eff_buf;
|
||||
struct lws_tokens ebuf;
|
||||
size_t orig_len = len;
|
||||
int pre = 0, n;
|
||||
|
||||
|
@ -1569,8 +1505,8 @@ rops_write_role_protocol_ws(struct lws *wsi, unsigned char *buf, size_t len,
|
|||
* a size that can be sent without partial sends or blocking, allows
|
||||
* interleaving of control frames and other connection service.
|
||||
*/
|
||||
eff_buf.token = (char *)buf;
|
||||
eff_buf.token_len = (int)len;
|
||||
ebuf.token = (char *)buf;
|
||||
ebuf.len = (int)len;
|
||||
|
||||
switch ((int)*wp) {
|
||||
case LWS_WRITE_PING:
|
||||
|
@ -1580,12 +1516,12 @@ rops_write_role_protocol_ws(struct lws *wsi, unsigned char *buf, size_t len,
|
|||
default:
|
||||
#if !defined(LWS_WITHOUT_EXTENSIONS)
|
||||
lwsl_debug("LWS_EXT_CB_PAYLOAD_TX\n");
|
||||
n = lws_ext_cb_active(wsi, LWS_EXT_CB_PAYLOAD_TX, &eff_buf, *wp);
|
||||
n = lws_ext_cb_active(wsi, LWS_EXT_CB_PAYLOAD_TX, &ebuf, *wp);
|
||||
if (n < 0)
|
||||
return -1;
|
||||
|
||||
if (n && eff_buf.token_len) {
|
||||
lwsl_debug("drain len %d\n", (int)eff_buf.token_len);
|
||||
if (n && ebuf.len) {
|
||||
lwsl_debug("drain len %d\n", (int)ebuf.len);
|
||||
/* extension requires further draining */
|
||||
wsi->ws->tx_draining_ext = 1;
|
||||
wsi->ws->tx_draining_ext_list =
|
||||
|
@ -1607,7 +1543,7 @@ rops_write_role_protocol_ws(struct lws *wsi, unsigned char *buf, size_t len,
|
|||
*wp |= LWS_WRITE_NO_FIN;
|
||||
}
|
||||
#endif
|
||||
if (eff_buf.token_len && wsi->ws->stashed_write_pending) {
|
||||
if (ebuf.len && wsi->ws->stashed_write_pending) {
|
||||
wsi->ws->stashed_write_pending = 0;
|
||||
*wp = ((*wp) & 0xc0) | (int)wsi->ws->stashed_write_type;
|
||||
}
|
||||
|
@ -1618,13 +1554,13 @@ rops_write_role_protocol_ws(struct lws *wsi, unsigned char *buf, size_t len,
|
|||
* compression extension, it has already updated its state according
|
||||
* to this being issued
|
||||
*/
|
||||
if ((char *)buf != eff_buf.token) {
|
||||
if ((char *)buf != ebuf.token) {
|
||||
/*
|
||||
* ext might eat it, but not have anything to issue yet.
|
||||
* In that case we have to follow his lead, but stash and
|
||||
* replace the write type that was lost here the first time.
|
||||
*/
|
||||
if (len && !eff_buf.token_len) {
|
||||
if (len && !ebuf.len) {
|
||||
if (!wsi->ws->stashed_write_pending)
|
||||
wsi->ws->stashed_write_type = (char)(*wp) & 0x3f;
|
||||
wsi->ws->stashed_write_pending = 1;
|
||||
|
@ -1637,8 +1573,8 @@ rops_write_role_protocol_ws(struct lws *wsi, unsigned char *buf, size_t len,
|
|||
wsi->ws->clean_buffer = 0;
|
||||
}
|
||||
|
||||
buf = (unsigned char *)eff_buf.token;
|
||||
len = eff_buf.token_len;
|
||||
buf = (unsigned char *)ebuf.token;
|
||||
len = ebuf.len;
|
||||
|
||||
if (!buf) {
|
||||
lwsl_err("null buf (%d)\n", (int)len);
|
||||
|
@ -1856,7 +1792,6 @@ struct lws_role_ops role_ops_ws = {
|
|||
/* callback_on_writable */ rops_callback_on_writable_ws,
|
||||
/* tx_credit */ NULL,
|
||||
/* write_role_protocol */ rops_write_role_protocol_ws,
|
||||
/* rxflow_cache */ NULL,
|
||||
/* encapsulation_parent */ NULL,
|
||||
/* alpn_negotiated */ NULL,
|
||||
/* close_via_role_protocol */ rops_close_via_role_protocol_ws,
|
||||
|
|
|
@ -387,8 +387,6 @@ lws_process_ws_upgrade(struct lws *wsi)
|
|||
* ah rx buffer.
|
||||
*/
|
||||
|
||||
lwsl_debug("%s: %p: inheriting ws ah (rxpos:%d, rxlen:%d)\n",
|
||||
__func__, wsi, wsi->ah->rxpos, wsi->ah->rxlen);
|
||||
lws_pt_lock(pt, __func__);
|
||||
|
||||
if (wsi->h2_stream_carries_ws)
|
||||
|
@ -397,24 +395,14 @@ lws_process_ws_upgrade(struct lws *wsi)
|
|||
else
|
||||
lws_role_transition(wsi, LWSIFR_SERVER, LRS_ESTABLISHED,
|
||||
&role_ops_ws);
|
||||
/*
|
||||
* Because rxpos/rxlen shows something in the ah, we will get
|
||||
* service guaranteed next time around the event loop
|
||||
*/
|
||||
|
||||
lws_pt_unlock(pt);
|
||||
|
||||
lws_server_init_wsi_for_ws(wsi);
|
||||
lwsl_parser("accepted v%02d connection\n", wsi->ws->ietf_spec_revision);
|
||||
|
||||
/* !!! drop ah unreservedly after ESTABLISHED */
|
||||
if (wsi->ah->rxpos == wsi->ah->rxlen) {
|
||||
lwsl_info("%s: %p: dropping ah on ws upgrade\n", __func__, wsi);
|
||||
lws_header_table_force_to_detachable_state(wsi);
|
||||
lws_header_table_detach(wsi, 1);
|
||||
} else
|
||||
lwsl_info("%s: %p: unable to drop ah at ws upgrade %d vs %d\n",
|
||||
__func__, wsi, wsi->ah->rxpos, wsi->ah->rxlen);
|
||||
lwsl_info("%s: %p: dropping ah on ws upgrade\n", __func__, wsi);
|
||||
lws_header_table_detach(wsi, 1);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -560,7 +548,7 @@ lws_interpret_incoming_packet(struct lws *wsi, unsigned char **buf, size_t len)
|
|||
{
|
||||
int m, draining_flow = 0;
|
||||
|
||||
if (lws_buflist_next_segment_len(&wsi->buflist_rxflow, NULL))
|
||||
if (lws_buflist_next_segment_len(&wsi->buflist, NULL))
|
||||
draining_flow = 1;
|
||||
|
||||
lwsl_parser("%s: received %d byte packet\n", __func__, (int)len);
|
||||
|
@ -590,8 +578,8 @@ lws_interpret_incoming_packet(struct lws *wsi, unsigned char **buf, size_t len)
|
|||
LWS_RXPS_PAYLOAD_UNTIL_LENGTH_EXHAUSTED) {
|
||||
m = lws_payload_until_length_exhausted(wsi, buf, &len);
|
||||
if (draining_flow &&
|
||||
!lws_buflist_use_segment(&wsi->buflist_rxflow, m))
|
||||
lws_dll_lws_remove(&wsi->dll_rxflow);
|
||||
!lws_buflist_use_segment(&wsi->buflist, m))
|
||||
lws_dll_lws_remove(&wsi->dll_buflist);
|
||||
}
|
||||
|
||||
/* process the byte */
|
||||
|
@ -602,8 +590,8 @@ lws_interpret_incoming_packet(struct lws *wsi, unsigned char **buf, size_t len)
|
|||
|
||||
/* account for what we're using in rxflow buffer */
|
||||
if (draining_flow &&
|
||||
!lws_buflist_use_segment(&wsi->buflist_rxflow, 1)) {
|
||||
lws_dll_lws_remove(&wsi->dll_rxflow);
|
||||
!lws_buflist_use_segment(&wsi->buflist, 1)) {
|
||||
lws_dll_lws_remove(&wsi->dll_buflist);
|
||||
|
||||
lwsl_debug("%s: %p flow buf: drained\n", __func__, wsi);
|
||||
|
||||
|
|
126 lib/service.c
|
@ -285,12 +285,8 @@ int lws_rxflow_cache(struct lws *wsi, unsigned char *buf, int n, int len)
|
|||
size_t blen;
|
||||
int ret = 0, m;
|
||||
|
||||
if (wsi->role_ops->rxflow_cache)
|
||||
if (wsi->role_ops->rxflow_cache(wsi, buf, n, len))
|
||||
return 0;
|
||||
|
||||
/* his RX is flowcontrolled, don't send remaining now */
|
||||
blen = lws_buflist_next_segment_len(&wsi->buflist_rxflow, &buffered);
|
||||
blen = lws_buflist_next_segment_len(&wsi->buflist, &buffered);
|
||||
if (blen) {
|
||||
if (buf >= buffered && buf + len <= buffered + blen) {
|
||||
/* rxflow while we were spilling prev rxflow */
|
||||
|
@ -303,13 +299,13 @@ int lws_rxflow_cache(struct lws *wsi, unsigned char *buf, int n, int len)
|
|||
|
||||
/* a new rxflow, buffer it and warn caller */
|
||||
|
||||
m = lws_buflist_append_segment(&wsi->buflist_rxflow, buf + n, len - n);
|
||||
m = lws_buflist_append_segment(&wsi->buflist, buf + n, len - n);
|
||||
|
||||
if (m < 0)
|
||||
return -1;
|
||||
if (m) {
|
||||
lwsl_debug("%s: added %p to rxflow list\n", __func__, wsi);
|
||||
lws_dll_lws_add_front(&wsi->dll_rxflow, &pt->dll_head_rxflow);
|
||||
lws_dll_lws_add_front(&wsi->dll_buflist, &pt->dll_head_buflist);
|
||||
}
|
||||
|
||||
return ret;
|
||||
|
@ -323,7 +319,6 @@ LWS_VISIBLE LWS_EXTERN int
|
|||
lws_service_adjust_timeout(struct lws_context *context, int timeout_ms, int tsi)
|
||||
{
|
||||
struct lws_context_per_thread *pt = &context->pt[tsi];
|
||||
struct allocated_headers *ah;
|
||||
|
||||
/* Figure out if we really want to wait in poll()
|
||||
* We only need to wait if really nothing already to do and we have
|
||||
|
@@ -342,62 +337,70 @@ lws_service_adjust_timeout(struct lws_context *context, int timeout_ms, int tsi)
	}
#endif

	/* 3) if any ah has pending rx, do not wait in poll */
	ah = pt->ah_list;
	while (ah) {
		if (ah->rxpos != ah->rxlen || (ah->wsi && ah->wsi->preamble_rx)) {
			if (!ah->wsi) {
				assert(0);
			}
			// lwsl_debug("ah pending force\n");
	/* 3) If there is any wsi with rxflow buffered and in a state to process
	 * it, we should not wait in poll
	 */

	lws_start_foreach_dll(struct lws_dll_lws *, d, pt->dll_head_buflist.next) {
		struct lws *wsi = lws_container_of(d, struct lws, dll_buflist);

		if (lwsi_state(wsi) != LRS_DEFERRING_ACTION)
			return 0;
		}
		ah = ah->next;
	}

	} lws_end_foreach_dll(d);

	return timeout_ms;
}
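For orientation, the per-pt list walked here is maintained elsewhere in this patch: a wsi joins it when rx is first stashed on its buflist and leaves it once that buflist is drained. The two calls, as they appear in the rxflow-cache and consume hunks of this diff:

/* on first stash of rx for this wsi */
lws_dll_lws_add_front(&wsi->dll_buflist, &pt->dll_head_buflist);

/* once the buflist has been fully consumed again */
lws_dll_lws_remove(&wsi->dll_buflist);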
|
||||
|
||||
|
||||
int
|
||||
lws_read_or_use_preamble(struct lws_context_per_thread *pt, struct lws *wsi)
|
||||
lws_buflist_aware_read(struct lws_context_per_thread *pt, struct lws *wsi,
|
||||
struct lws_tokens *ebuf)
|
||||
{
|
||||
int len;
|
||||
ebuf->len = lws_buflist_next_segment_len(&wsi->buflist,
|
||||
(uint8_t **)&ebuf->token);
|
||||
if (!ebuf->len) {
|
||||
ebuf->token = (char *)pt->serv_buf;
|
||||
ebuf->len = lws_ssl_capable_read(wsi, pt->serv_buf,
|
||||
wsi->context->pt_serv_buf_size);
|
||||
|
||||
if (wsi->preamble_rx && wsi->preamble_rx_len) {
|
||||
memcpy(pt->serv_buf, wsi->preamble_rx, wsi->preamble_rx_len);
|
||||
lws_free_set_NULL(wsi->preamble_rx);
|
||||
len = wsi->preamble_rx_len;
|
||||
lwsl_debug("bringing %d out of stash\n", wsi->preamble_rx_len);
|
||||
wsi->preamble_rx_len = 0;
|
||||
// if (ebuf->len > 0)
|
||||
// lwsl_hexdump_notice(ebuf->token, ebuf->len);
|
||||
|
||||
return len;
|
||||
return 0; /* fresh */
|
||||
}
|
||||
|
||||
/*
|
||||
* ... in the case of pipelined HTTP, this may be
|
||||
* POST data followed by next headers...
|
||||
*/
|
||||
return 1; /* buffered */
|
||||
}
|
||||
|
||||
len = lws_ssl_capable_read(wsi, pt->serv_buf,
|
||||
wsi->context->pt_serv_buf_size);
|
||||
lwsl_debug("%s: wsi %p read %d (wsistate 0x%x)\n",
|
||||
__func__, wsi, len, wsi->wsistate);
|
||||
switch (len) {
|
||||
case 0:
|
||||
lwsl_info("%s: read 0 len b\n", __func__);
|
||||
int
|
||||
lws_buflist_aware_consume(struct lws *wsi, struct lws_tokens *ebuf, int used,
|
||||
int buffered)
|
||||
{
|
||||
int m;
|
||||
|
||||
|
||||
if (used && buffered) {
|
||||
m = lws_buflist_use_segment(&wsi->buflist, used);
|
||||
lwsl_info("%s: draining rxflow: used %d, next %d\n",
|
||||
__func__, used, m);
|
||||
if (m)
|
||||
return 0;
|
||||
|
||||
lwsl_notice("%s: removed %p from dll_buflist\n", __func__, wsi);
|
||||
lws_dll_lws_remove(&wsi->dll_buflist);
|
||||
|
||||
/* fallthru */
|
||||
case LWS_SSL_CAPABLE_ERROR:
|
||||
return -1;
|
||||
case LWS_SSL_CAPABLE_MORE_SERVICE:
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (len < 0) /* coverity */
|
||||
return -1;
|
||||
/* any remainder goes on the buflist */
|
||||
|
||||
return len;
|
||||
if (used != ebuf->len &&
|
||||
lws_buflist_append_segment(&wsi->buflist, (uint8_t *)ebuf->token +
|
||||
used, ebuf->len - used) < 0)
|
||||
return 1; /* OOM */
|
||||
|
||||
return 0;
|
||||
}
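Taken together, the two helpers give every role handler the same read-then-consume shape; a condensed sketch of a POLLIN path built on them, mirroring the raw-socket handler earlier in this diff, with error handling elided:

/* condensed caller-side pattern; not a complete handler */
struct lws_tokens ebuf;
int used, buffered;

buffered = lws_buflist_aware_read(pt, wsi, &ebuf); /* 1 = came from buflist */
if (ebuf.len <= 0)
	return LWS_HPI_RET_HANDLED;	/* nothing to parse this time */

used = lws_read_h1(wsi, (uint8_t *)ebuf.token, ebuf.len);
if (used < 0)
	return LWS_HPI_RET_PLEASE_CLOSE_ME;

/* retire what was parsed, or stash the unhandled remainder */
if (lws_buflist_aware_consume(wsi, &ebuf, used, buffered))
	return LWS_HPI_RET_PLEASE_CLOSE_ME; /* OOM while stashing */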
|
||||
|
||||
void
|
||||
|
@ -405,7 +408,7 @@ lws_service_do_ripe_rxflow(struct lws_context_per_thread *pt)
|
|||
{
|
||||
struct lws_pollfd pfd;
|
||||
|
||||
if (!pt->dll_head_rxflow.next)
|
||||
if (!pt->dll_head_buflist.next)
|
||||
return;
|
||||
|
||||
/*
|
||||
|
@ -416,8 +419,8 @@ lws_service_do_ripe_rxflow(struct lws_context_per_thread *pt)
|
|||
lws_pt_lock(pt, __func__);
|
||||
|
||||
lws_start_foreach_dll_safe(struct lws_dll_lws *, d, d1,
|
||||
pt->dll_head_rxflow.next) {
|
||||
struct lws *wsi = lws_container_of(d, struct lws, dll_rxflow);
|
||||
pt->dll_head_buflist.next) {
|
||||
struct lws *wsi = lws_container_of(d, struct lws, dll_buflist);
|
||||
|
||||
pfd.events = LWS_POLLIN;
|
||||
pfd.revents = LWS_POLLIN;
|
||||
|
@ -429,7 +432,7 @@ lws_service_do_ripe_rxflow(struct lws_context_per_thread *pt)
|
|||
if (!lws_is_flowcontrolled(wsi) &&
|
||||
lwsi_state(wsi) != LRS_DEFERRING_ACTION &&
|
||||
(wsi->role_ops->handle_POLLIN)(pt, wsi, &pfd) ==
|
||||
LWS_HPI_RET_CLOSE_HANDLED)
|
||||
LWS_HPI_RET_PLEASE_CLOSE_ME)
|
||||
lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS,
|
||||
"close_and_handled");
|
||||
|
||||
|
@ -461,8 +464,8 @@ lws_service_flag_pending(struct lws_context *context, int tsi)
|
|||
* it, we should not wait in poll
|
||||
*/
|
||||
|
||||
lws_start_foreach_dll(struct lws_dll_lws *, d, pt->dll_head_rxflow.next) {
|
||||
struct lws *wsi = lws_container_of(d, struct lws, dll_rxflow);
|
||||
lws_start_foreach_dll(struct lws_dll_lws *, d, pt->dll_head_buflist.next) {
|
||||
struct lws *wsi = lws_container_of(d, struct lws, dll_buflist);
|
||||
|
||||
if (lwsi_state(wsi) != LRS_DEFERRING_ACTION) {
|
||||
forced = 1;
|
||||
|
@ -501,14 +504,6 @@ lws_service_flag_pending(struct lws_context *context, int tsi)
|
|||
}
|
||||
#endif
|
||||
|
||||
#if defined(LWS_ROLE_H1)
|
||||
forced |= role_ops_h1.service_flag_pending(context, tsi);
|
||||
#else /* they do the same thing... only need one or the other if h1 and h2 */
|
||||
#if defined(LWS_ROLE_H2)
|
||||
forced |= role_ops_h2.service_flag_pending(context, tsi);
|
||||
#endif
|
||||
#endif
|
||||
|
||||
lws_pt_unlock(pt);
|
||||
|
||||
return forced;
|
||||
|
@ -653,9 +648,8 @@ lws_service_periodic_checks(struct lws_context *context,
|
|||
#endif
|
||||
lwsl_notice("ah excessive hold: wsi %p\n"
|
||||
" peer address: %s\n"
|
||||
" ah rxpos %u, rxlen %u, pos %u\n",
|
||||
wsi, buf, ah->rxpos, ah->rxlen,
|
||||
ah->pos);
|
||||
" ah pos %u\n",
|
||||
wsi, buf, ah->pos);
|
||||
buf[0] = '\0';
|
||||
m = 0;
|
||||
do {
|
||||
|
@ -682,8 +676,6 @@ lws_service_periodic_checks(struct lws_context *context,
|
|||
} while (1);
|
||||
|
||||
/* explicitly detach the ah */
|
||||
|
||||
lws_header_table_force_to_detachable_state(wsi);
|
||||
lws_header_table_detach(wsi, 0);
|
||||
|
||||
/* ... and then drop the connection */
|
||||
|
@ -859,11 +851,11 @@ lws_service_fd_tsi(struct lws_context *context, struct lws_pollfd *pollfd,
|
|||
// wsi->wsistate);
|
||||
|
||||
switch ((wsi->role_ops->handle_POLLIN)(pt, wsi, pollfd)) {
|
||||
case LWS_HPI_RET_DIE:
|
||||
case LWS_HPI_RET_WSI_ALREADY_DIED:
|
||||
return 1;
|
||||
case LWS_HPI_RET_HANDLED:
|
||||
break;
|
||||
case LWS_HPI_RET_CLOSE_HANDLED:
|
||||
case LWS_HPI_RET_PLEASE_CLOSE_ME:
|
||||
close_and_handled:
|
||||
lwsl_debug("%p: Close and handled\n", wsi);
|
||||
lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS,
|
||||
|
|
|
@ -202,7 +202,6 @@ int main(int argc, const char **argv)
|
|||
info.port = CONTEXT_PORT_NO_LISTEN; /* we do not run any server */
|
||||
info.protocols = protocols;
|
||||
info.max_http_header_pool = 20;
|
||||
info.h2_rx_scratch_size = 4096; /* trade h2 stream rx memory for speed */
|
||||
|
||||
#if defined(LWS_WITH_MBEDTLS)
|
||||
/*
|
||||
|
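One user-visible effect of absorbing the h2 rx scratch into the buflist is that the example no longer sizes it. A minimal context setup after this change might look like the following sketch; protocols is assumed to be defined by the application, as in the example file:

/* sketch: context creation without the removed h2_rx_scratch_size member */
struct lws_context_creation_info info;
struct lws_context *cx;

memset(&info, 0, sizeof info);
info.port = CONTEXT_PORT_NO_LISTEN;	/* client-only example */
info.protocols = protocols;
info.max_http_header_pool = 20;

cx = lws_create_context(&info);
if (!cx)
	return 1;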
|
|
@ -437,6 +437,7 @@ int callback_http(struct lws *wsi, enum lws_callback_reasons reason, void *user,
|
|||
break;
|
||||
|
||||
case LWS_CALLBACK_HTTP_BODY:
|
||||
lwsl_info("LWS_CALLBACK_HTTP_BODY: len %d\n", (int)len);
|
||||
/* create the POST argument parser if not already existing */
|
||||
if (!pss->spa) {
|
||||
pss->spa = lws_spa_create(wsi, param_names,
|
||||
|
|