2010-11-08 20:20:42 +00:00
|
|
|
/*
|
|
|
|
* libwebsockets - small server side websockets and web server implementation
|
2010-11-13 10:03:47 +00:00
|
|
|
*
|
2018-04-11 13:39:42 +08:00
|
|
|
* Copyright (C) 2010-2018 Andy Green <andy@warmcat.com>
|
2010-11-08 20:20:42 +00:00
|
|
|
*
|
|
|
|
* This library is free software; you can redistribute it and/or
|
|
|
|
* modify it under the terms of the GNU Lesser General Public
|
|
|
|
* License as published by the Free Software Foundation:
|
|
|
|
* version 2.1 of the License.
|
|
|
|
*
|
|
|
|
* This library is distributed in the hope that it will be useful,
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
|
|
* Lesser General Public License for more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU Lesser General Public
|
|
|
|
* License along with this library; if not, write to the Free Software
|
|
|
|
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
|
|
|
|
* MA 02110-1301 USA
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include "private-libwebsockets.h"
|
|
|
|
|
2018-03-07 18:15:17 +08:00
|
|
|
/*
 * Token-recognition state machine table for known http header names.
 * The table content lives in the generated lextable.h — presumably
 * produced by the project's lexer generator; TODO confirm generator.
 */
static const unsigned char lextable[] = {
	#include "../lextable.h"
};

/* sentinel state value in the lextable walk meaning "cannot match any
 * known header from here" — NOTE(review): semantics inferred from the
 * name only; confirm against the parser that consumes it */
#define FAIL_CHAR 0x08
|
|
|
|
|
2017-10-06 16:07:57 +08:00
|
|
|
static struct allocated_headers *
|
|
|
|
_lws_create_ah(struct lws_context_per_thread *pt, ah_data_idx_t data_size)
|
|
|
|
{
|
|
|
|
struct allocated_headers *ah = lws_zalloc(sizeof(*ah), "ah struct");
|
|
|
|
|
|
|
|
if (!ah)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
ah->data = lws_malloc(data_size, "ah data");
|
|
|
|
if (!ah->data) {
|
|
|
|
lws_free(ah);
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
2018-04-27 15:20:56 +08:00
|
|
|
ah->next = pt->http.ah_list;
|
|
|
|
pt->http.ah_list = ah;
|
2017-10-06 16:07:57 +08:00
|
|
|
ah->data_length = data_size;
|
2018-04-27 15:20:56 +08:00
|
|
|
pt->http.ah_pool_length++;
|
2017-10-06 16:07:57 +08:00
|
|
|
|
|
|
|
lwsl_info("%s: created ah %p (size %d): pool length %d\n", __func__,
|
2018-04-27 15:20:56 +08:00
|
|
|
ah, (int)data_size, pt->http.ah_pool_length);
|
2017-10-06 16:07:57 +08:00
|
|
|
|
|
|
|
return ah;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Unlink the given ah from the pt's ah list and free both its payload
 * buffer and the ah struct itself.
 *
 * Returns 0 if the ah was found on the list and destroyed, 1 if it was
 * not on the list (nothing freed in that case).
 *
 * NOTE(review): callers presumably serialize on the pt lock since this
 * mutates pt->http.ah_list unlocked — confirm at call sites.
 */
int
_lws_destroy_ah(struct lws_context_per_thread *pt, struct allocated_headers *ah)
{
	/* walk the list via pointer-to-pointer so unlinking is uniform */
	lws_start_foreach_llp(struct allocated_headers **, a, pt->http.ah_list) {
		if ((*a) == ah) {
			/* point the previous link past us */
			*a = ah->next;
			pt->http.ah_pool_length--;
			lwsl_info("%s: freed ah %p : pool length %d\n",
				    __func__, ah, pt->http.ah_pool_length);
			if (ah->data)
				lws_free(ah->data);
			lws_free(ah);

			return 0;
		}
	} lws_end_foreach_llp(a, next);

	/* ah was not on this pt's list */
	return 1;
}
|
|
|
|
|
2017-02-09 15:25:01 +08:00
|
|
|
void
|
|
|
|
_lws_header_table_reset(struct allocated_headers *ah)
|
|
|
|
{
|
|
|
|
/* init the ah to reflect no headers or data have appeared yet */
|
|
|
|
memset(ah->frag_index, 0, sizeof(ah->frag_index));
|
2017-10-13 10:33:02 +08:00
|
|
|
memset(ah->frags, 0, sizeof(ah->frags));
|
2017-02-09 15:25:01 +08:00
|
|
|
ah->nfrag = 0;
|
|
|
|
ah->pos = 0;
|
|
|
|
ah->http_response = 0;
|
|
|
|
}
|
|
|
|
|
2016-09-29 10:31:06 +08:00
|
|
|
// Note: this does not scrub the ah rx buffer; the caller must do that itself if needed
|
|
|
|
|
ah http1.1 deal with pipelined headers properly
Connections must hold an ah for the whole time they are
processing one header set, even if eg, the headers are
fragmented and it involves network roundtrip times.
However on http1.1 / keepalive, it must drop the ah when
there are no more header sets to deal with, and reacquire
the ah later when more data appears. It's because the
time between header sets / http1.1 requests is unbounded
and the ah would be tied up forever.
But in the case that we got pipelined http1.1 requests,
even partial already buffered, we must keep the ah,
resetting it instead of dropping it. Because we store
the rx data conveniently in a per-tsi buffer since it only
does one thing at a time per thread, we cannot go back to
the event loop to await a new ah inside one service action.
But no problem since we definitely already have an ah,
let's just reuse it at http completion time if more rx is
already buffered.
NB: attack.sh makes request with echo | nc, this
accidentally sends a trailing '\n' from the echo showing
this problem. With this patch attack.sh can complete well.
Signed-off-by: Andy Green <andy.green@linaro.org>
2016-01-30 11:43:10 +08:00
|
|
|
void
|
2018-03-02 14:22:49 +08:00
|
|
|
__lws_header_table_reset(struct lws *wsi, int autoservice)
|
2016-01-26 20:56:56 +08:00
|
|
|
{
|
2018-04-27 15:20:56 +08:00
|
|
|
struct allocated_headers *ah = wsi->http.ah;
|
2016-02-27 11:42:22 +08:00
|
|
|
struct lws_context_per_thread *pt;
|
|
|
|
struct lws_pollfd *pfd;
|
ah owns rxbuf
This is intended to solve a longstanding problem with the
relationship between http/1.1 keep-alive and the service
loop.
Ah now contain an rx buffer which is used during header
processing, and the ah may not be detached from the wsi
until the rx buffer is exhausted.
Having the rx buffer in the ah means we can delay using the
rx until a later service loop.
Ah which have pending rx force POLLIN service on the wsi
they are attached to automatically, so we can interleave
general service / connections with draining each ah rx
buffer.
The possible http/1.1 situations and their dispositions are:
1) exactly one set of http headers come. After processing,
the ah is detached since no pending rx left. If more
headers come later, a fresh ah is aqcuired when available
and the rx flow control blocks the read until then.
2) more that one whole set of headers come and we remain in
http mode (no upgrade). The ah is left attached and
returns to the service loop after the first set of headers.
We will get forced service due to the ah having pending
content (respecting flowcontrol) and process the pending
rx in the ah. If we use it all up, we will detach the
ah.
3) one set of http headers come with ws traffic appended.
We service the headers, do the upgrade, and keep the ah
until the remaining ws content is used. When we
exhausted the ws traffix in the ah rx buffer, we
detach the ah.
Since there can be any amount of http/1.1 pipelining on a
connection, and each may be expensive to service, it's now
enforced there is a return to the service loop after each
header set is serviced on a connection.
When I added the forced service for ah with pending buffering,
I added support for it to the windows plat code. However this
is untested.
Signed-off-by: Andy Green <andy.green@linaro.org>
2016-02-15 12:37:04 +08:00
|
|
|
|
|
|
|
/* if we have the idea we're resetting 'our' ah, must be bound to one */
|
|
|
|
assert(ah);
|
|
|
|
/* ah also concurs with ownership */
|
|
|
|
assert(ah->wsi == wsi);
|
2016-02-09 09:15:02 +08:00
|
|
|
|
2017-02-09 15:25:01 +08:00
|
|
|
_lws_header_table_reset(ah);
|
ah owns rxbuf
This is intended to solve a longstanding problem with the
relationship between http/1.1 keep-alive and the service
loop.
Ah now contain an rx buffer which is used during header
processing, and the ah may not be detached from the wsi
until the rx buffer is exhausted.
Having the rx buffer in the ah means we can delay using the
rx until a later service loop.
Ah which have pending rx force POLLIN service on the wsi
they are attached to automatically, so we can interleave
general service / connections with draining each ah rx
buffer.
The possible http/1.1 situations and their dispositions are:
1) exactly one set of http headers come. After processing,
the ah is detached since no pending rx left. If more
headers come later, a fresh ah is aqcuired when available
and the rx flow control blocks the read until then.
2) more that one whole set of headers come and we remain in
http mode (no upgrade). The ah is left attached and
returns to the service loop after the first set of headers.
We will get forced service due to the ah having pending
content (respecting flowcontrol) and process the pending
rx in the ah. If we use it all up, we will detach the
ah.
3) one set of http headers come with ws traffic appended.
We service the headers, do the upgrade, and keep the ah
until the remaining ws content is used. When we
exhausted the ws traffix in the ah rx buffer, we
detach the ah.
Since there can be any amount of http/1.1 pipelining on a
connection, and each may be expensive to service, it's now
enforced there is a return to the service loop after each
header set is serviced on a connection.
When I added the forced service for ah with pending buffering,
I added support for it to the windows plat code. However this
is untested.
Signed-off-by: Andy Green <andy.green@linaro.org>
2016-02-15 12:37:04 +08:00
|
|
|
|
2017-11-16 11:26:00 +08:00
|
|
|
ah->parser_state = WSI_TOKEN_NAME_PART;
|
|
|
|
ah->lextable_pos = 0;
|
2017-06-27 10:07:34 +08:00
|
|
|
|
ah owns rxbuf
This is intended to solve a longstanding problem with the
relationship between http/1.1 keep-alive and the service
loop.
Ah now contain an rx buffer which is used during header
processing, and the ah may not be detached from the wsi
until the rx buffer is exhausted.
Having the rx buffer in the ah means we can delay using the
rx until a later service loop.
Ah which have pending rx force POLLIN service on the wsi
they are attached to automatically, so we can interleave
general service / connections with draining each ah rx
buffer.
The possible http/1.1 situations and their dispositions are:
1) exactly one set of http headers come. After processing,
the ah is detached since no pending rx left. If more
headers come later, a fresh ah is aqcuired when available
and the rx flow control blocks the read until then.
2) more that one whole set of headers come and we remain in
http mode (no upgrade). The ah is left attached and
returns to the service loop after the first set of headers.
We will get forced service due to the ah having pending
content (respecting flowcontrol) and process the pending
rx in the ah. If we use it all up, we will detach the
ah.
3) one set of http headers come with ws traffic appended.
We service the headers, do the upgrade, and keep the ah
until the remaining ws content is used. When we
exhausted the ws traffix in the ah rx buffer, we
detach the ah.
Since there can be any amount of http/1.1 pipelining on a
connection, and each may be expensive to service, it's now
enforced there is a return to the service loop after each
header set is serviced on a connection.
When I added the forced service for ah with pending buffering,
I added support for it to the windows plat code. However this
is untested.
Signed-off-by: Andy Green <andy.green@linaro.org>
2016-02-15 12:37:04 +08:00
|
|
|
/* since we will restart the ah, our new headers are not completed */
|
2017-06-28 09:55:34 +08:00
|
|
|
wsi->hdr_parsing_completed = 0;
|
2016-02-24 11:05:56 +08:00
|
|
|
|
2017-07-26 11:49:41 +08:00
|
|
|
/* while we hold the ah, keep a timeout on the wsi */
|
2018-03-02 14:22:49 +08:00
|
|
|
__lws_set_timeout(wsi, PENDING_TIMEOUT_HOLDING_AH,
|
2017-07-26 11:49:41 +08:00
|
|
|
wsi->vhost->timeout_secs_ah_idle);
|
|
|
|
|
2017-09-14 13:14:11 +08:00
|
|
|
time(&ah->assigned);
|
|
|
|
|
2018-04-17 15:35:15 +08:00
|
|
|
if (lws_buflist_next_segment_len(&wsi->buflist, NULL) &&
|
|
|
|
autoservice) {
|
|
|
|
lwsl_debug("%s: service on readbuf ah\n", __func__);
|
|
|
|
|
|
|
|
pt = &wsi->context->pt[(int)wsi->tsi];
|
|
|
|
/*
|
|
|
|
* Unlike a normal connect, we have the headers already
|
|
|
|
* (or the first part of them anyway)
|
|
|
|
*/
|
|
|
|
pfd = &pt->fds[wsi->position_in_fds_table];
|
|
|
|
pfd->revents |= LWS_POLLIN;
|
|
|
|
lwsl_err("%s: calling service\n", __func__);
|
|
|
|
lws_service_fd_tsi(wsi->context, pfd, wsi->tsi);
|
2016-02-24 11:05:56 +08:00
|
|
|
}
|
2016-01-26 20:56:56 +08:00
|
|
|
}
|
|
|
|
|
2018-03-02 14:22:49 +08:00
|
|
|
void
|
|
|
|
lws_header_table_reset(struct lws *wsi, int autoservice)
|
|
|
|
{
|
|
|
|
struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
|
|
|
|
|
|
|
|
lws_pt_lock(pt, __func__);
|
|
|
|
|
|
|
|
__lws_header_table_reset(wsi, autoservice);
|
|
|
|
|
|
|
|
lws_pt_unlock(pt);
|
|
|
|
}
|
|
|
|
|
2017-09-14 13:14:11 +08:00
|
|
|
static void
|
|
|
|
_lws_header_ensure_we_are_on_waiting_list(struct lws *wsi)
|
|
|
|
{
|
|
|
|
struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
|
|
|
|
struct lws_pollargs pa;
|
2018-04-27 15:20:56 +08:00
|
|
|
struct lws **pwsi = &pt->http.ah_wait_list;
|
2017-09-14 13:14:11 +08:00
|
|
|
|
|
|
|
while (*pwsi) {
|
|
|
|
if (*pwsi == wsi)
|
|
|
|
return;
|
2018-04-27 15:20:56 +08:00
|
|
|
pwsi = &(*pwsi)->http.ah_wait_list;
|
2017-09-14 13:14:11 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
lwsl_info("%s: wsi: %p\n", __func__, wsi);
|
2018-04-27 15:20:56 +08:00
|
|
|
wsi->http.ah_wait_list = pt->http.ah_wait_list;
|
|
|
|
pt->http.ah_wait_list = wsi;
|
|
|
|
pt->http.ah_wait_list_length++;
|
2017-09-14 13:14:11 +08:00
|
|
|
|
|
|
|
/* we cannot accept input then */
|
|
|
|
|
|
|
|
_lws_change_pollfd(wsi, LWS_POLLIN, 0, &pa);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
__lws_remove_from_ah_waiting_list(struct lws *wsi)
|
|
|
|
{
|
|
|
|
struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
|
2018-04-27 15:20:56 +08:00
|
|
|
struct lws **pwsi =&pt->http.ah_wait_list;
|
2017-09-14 13:14:11 +08:00
|
|
|
|
|
|
|
while (*pwsi) {
|
|
|
|
if (*pwsi == wsi) {
|
|
|
|
lwsl_info("%s: wsi %p\n", __func__, wsi);
|
|
|
|
/* point prev guy to our next */
|
2018-04-27 15:20:56 +08:00
|
|
|
*pwsi = wsi->http.ah_wait_list;
|
2017-09-14 13:14:11 +08:00
|
|
|
/* we shouldn't point anywhere now */
|
2018-04-27 15:20:56 +08:00
|
|
|
wsi->http.ah_wait_list = NULL;
|
|
|
|
pt->http.ah_wait_list_length--;
|
2017-09-14 13:14:11 +08:00
|
|
|
|
|
|
|
return 1;
|
|
|
|
}
|
2018-04-27 15:20:56 +08:00
|
|
|
pwsi = &(*pwsi)->http.ah_wait_list;
|
2017-09-14 13:14:11 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-01-21 10:57:39 +08:00
|
|
|
/*
 * Try to bind an ah (allocated_headers) to this wsi so it can parse
 * headers.
 *
 * Returns 0 if the wsi now holds a (reset) ah, 1 if none could be
 * provided right now (the wsi has been queued on the pt's ah wait list
 * and its POLLIN disabled until one frees up), or -1 if the deferred
 * client connect performed on success failed and closed the wsi.
 *
 * Takes and releases the pt lock itself; the context lock is taken
 * briefly only for the peer-limit accounting.
 */
int LWS_WARN_UNUSED_RESULT
lws_header_table_attach(struct lws *wsi, int autoservice)
{
	struct lws_context *context = wsi->context;
	struct lws_context_per_thread *pt = &context->pt[(int)wsi->tsi];
	struct lws_pollargs pa;
	int n;

	lwsl_info("%s: wsi %p: ah %p (tsi %d, count = %d) in\n", __func__,
		  (void *)wsi, (void *)wsi->http.ah, wsi->tsi,
		  pt->http.ah_count_in_use);

	lws_pt_lock(pt, __func__);

	/* if we are already bound to one, just clear it down */
	if (wsi->http.ah) {
		lwsl_info("%s: cleardown\n", __func__);
		goto reset;
	}

	/* n nonzero == we must deny the attach for now */
	n = pt->http.ah_count_in_use == context->max_http_header_pool;
#if defined(LWS_WITH_PEER_LIMITS)
	/* even if the pool has room, the peer may have hit his ah quota */
	if (!n) {
		n = lws_peer_confirm_ah_attach_ok(context, wsi->peer);
		if (n)
			lws_stats_atomic_bump(wsi->context, pt,
				LWSSTATS_C_PEER_LIMIT_AH_DENIED, 1);
	}
#endif
	if (n) {
		/*
		 * Pool is either all busy, or we don't want to give this
		 * particular guy an ah right now...
		 *
		 * Make sure we are on the waiting list, and return that we
		 * weren't able to provide the ah
		 */
		_lws_header_ensure_we_are_on_waiting_list(wsi);

		goto bail;
	}

	/* we are going to get an ah, so leave the wait list if we were on it */
	__lws_remove_from_ah_waiting_list(wsi);

	wsi->http.ah = _lws_create_ah(pt, context->max_http_header_data);
	if (!wsi->http.ah) {	/* we could not create an ah */
		_lws_header_ensure_we_are_on_waiting_list(wsi);

		goto bail;
	}

	wsi->http.ah->in_use = 1;
	wsi->http.ah->wsi = wsi; /* mark our owner */
	pt->http.ah_count_in_use++;

#if defined(LWS_WITH_PEER_LIMITS) && (defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2))
	/* peer ah accounting is protected by the context lock */
	lws_context_lock(context); /* <====================================== */
	if (wsi->peer)
		wsi->peer->http.count_ah++;
	lws_context_unlock(context); /* ====================================> */
#endif

	/* now we can buffer headers, accept input again */
	_lws_change_pollfd(wsi, 0, LWS_POLLIN, &pa);

	lwsl_info("%s: did attach wsi %p: ah %p: count %d (on exit)\n", __func__,
		  (void *)wsi, (void *)wsi->http.ah, pt->http.ah_count_in_use);

reset:
	__lws_header_table_reset(wsi, autoservice);

	lws_pt_unlock(pt);

#ifndef LWS_NO_CLIENT
	/* an unconnected client wsi was waiting on the ah to proceed */
	if (lwsi_role_client(wsi) && lwsi_state(wsi) == LRS_UNCONNECTED)
		if (!lws_client_connect_via_info2(wsi))
			/* our client connect has failed, the wsi
			 * has been closed
			 */
			return -1;
#endif

	return 0;

bail:
	lws_pt_unlock(pt);

	return 1;
}
|
|
|
|
|
2018-03-05 16:49:28 +08:00
|
|
|
int __lws_header_table_detach(struct lws *wsi, int autoservice)
|
2014-11-07 11:20:59 +08:00
|
|
|
{
|
2015-12-25 12:44:12 +08:00
|
|
|
struct lws_context *context = wsi->context;
|
2018-04-27 15:20:56 +08:00
|
|
|
struct allocated_headers *ah = wsi->http.ah;
|
2016-01-26 20:56:56 +08:00
|
|
|
struct lws_context_per_thread *pt = &context->pt[(int)wsi->tsi];
|
|
|
|
struct lws_pollargs pa;
|
2017-09-14 13:14:11 +08:00
|
|
|
struct lws **pwsi, **pwsi_eligible;
|
2016-01-26 20:56:56 +08:00
|
|
|
time_t now;
|
|
|
|
|
2017-07-19 04:06:15 +08:00
|
|
|
__lws_remove_from_ah_waiting_list(wsi);
|
|
|
|
|
2017-06-28 12:13:13 +08:00
|
|
|
if (!ah)
|
|
|
|
return 0;
|
|
|
|
|
ah owns rxbuf
This is intended to solve a longstanding problem with the
relationship between http/1.1 keep-alive and the service
loop.
Ah now contain an rx buffer which is used during header
processing, and the ah may not be detached from the wsi
until the rx buffer is exhausted.
Having the rx buffer in the ah means we can delay using the
rx until a later service loop.
Ah which have pending rx force POLLIN service on the wsi
they are attached to automatically, so we can interleave
general service / connections with draining each ah rx
buffer.
The possible http/1.1 situations and their dispositions are:
1) exactly one set of http headers come. After processing,
the ah is detached since no pending rx left. If more
headers come later, a fresh ah is aqcuired when available
and the rx flow control blocks the read until then.
2) more that one whole set of headers come and we remain in
http mode (no upgrade). The ah is left attached and
returns to the service loop after the first set of headers.
We will get forced service due to the ah having pending
content (respecting flowcontrol) and process the pending
rx in the ah. If we use it all up, we will detach the
ah.
3) one set of http headers come with ws traffic appended.
We service the headers, do the upgrade, and keep the ah
until the remaining ws content is used. When we
exhausted the ws traffix in the ah rx buffer, we
detach the ah.
Since there can be any amount of http/1.1 pipelining on a
connection, and each may be expensive to service, it's now
enforced there is a return to the service loop after each
header set is serviced on a connection.
When I added the forced service for ah with pending buffering,
I added support for it to the windows plat code. However this
is untested.
Signed-off-by: Andy Green <andy.green@linaro.org>
2016-02-15 12:37:04 +08:00
|
|
|
lwsl_info("%s: wsi %p: ah %p (tsi=%d, count = %d)\n", __func__,
|
2016-07-19 09:38:48 +08:00
|
|
|
(void *)wsi, (void *)ah, wsi->tsi,
|
2018-04-27 15:20:56 +08:00
|
|
|
pt->http.ah_count_in_use);
|
ah owns rxbuf
This is intended to solve a longstanding problem with the
relationship between http/1.1 keep-alive and the service
loop.
Ah now contain an rx buffer which is used during header
processing, and the ah may not be detached from the wsi
until the rx buffer is exhausted.
Having the rx buffer in the ah means we can delay using the
rx until a later service loop.
Ah which have pending rx force POLLIN service on the wsi
they are attached to automatically, so we can interleave
general service / connections with draining each ah rx
buffer.
The possible http/1.1 situations and their dispositions are:
1) exactly one set of http headers come. After processing,
the ah is detached since no pending rx left. If more
headers come later, a fresh ah is aqcuired when available
and the rx flow control blocks the read until then.
2) more that one whole set of headers come and we remain in
http mode (no upgrade). The ah is left attached and
returns to the service loop after the first set of headers.
We will get forced service due to the ah having pending
content (respecting flowcontrol) and process the pending
rx in the ah. If we use it all up, we will detach the
ah.
3) one set of http headers come with ws traffic appended.
We service the headers, do the upgrade, and keep the ah
until the remaining ws content is used. When we
exhausted the ws traffix in the ah rx buffer, we
detach the ah.
Since there can be any amount of http/1.1 pipelining on a
connection, and each may be expensive to service, it's now
enforced there is a return to the service loop after each
header set is serviced on a connection.
When I added the forced service for ah with pending buffering,
I added support for it to the windows plat code. However this
is untested.
Signed-off-by: Andy Green <andy.green@linaro.org>
2016-02-15 12:37:04 +08:00
|
|
|
|
2016-02-25 21:39:01 +08:00
|
|
|
/* we did have an ah attached */
|
2016-01-26 20:56:56 +08:00
|
|
|
time(&now);
|
2016-07-19 09:38:48 +08:00
|
|
|
if (ah->assigned && now - ah->assigned > 3) {
|
2016-02-19 11:47:52 +08:00
|
|
|
/*
|
|
|
|
* we're detaching the ah, but it was held an
|
|
|
|
* unreasonably long time
|
|
|
|
*/
|
2018-04-17 15:35:15 +08:00
|
|
|
lwsl_debug("%s: wsi %p: ah held %ds, role/state 0x%x 0x%x,"
|
2018-04-27 12:49:42 +08:00
|
|
|
"\n", __func__, wsi, (int)(now - ah->assigned),
|
2018-04-17 15:35:15 +08:00
|
|
|
lwsi_role(wsi), lwsi_state(wsi));
|
2016-02-19 11:47:52 +08:00
|
|
|
}
|
ah owns rxbuf
This is intended to solve a longstanding problem with the
relationship between http/1.1 keep-alive and the service
loop.
Ah now contain an rx buffer which is used during header
processing, and the ah may not be detached from the wsi
until the rx buffer is exhausted.
Having the rx buffer in the ah means we can delay using the
rx until a later service loop.
Ah which have pending rx force POLLIN service on the wsi
they are attached to automatically, so we can interleave
general service / connections with draining each ah rx
buffer.
The possible http/1.1 situations and their dispositions are:
1) exactly one set of http headers come. After processing,
the ah is detached since no pending rx left. If more
headers come later, a fresh ah is aqcuired when available
and the rx flow control blocks the read until then.
2) more that one whole set of headers come and we remain in
http mode (no upgrade). The ah is left attached and
returns to the service loop after the first set of headers.
We will get forced service due to the ah having pending
content (respecting flowcontrol) and process the pending
rx in the ah. If we use it all up, we will detach the
ah.
3) one set of http headers come with ws traffic appended.
We service the headers, do the upgrade, and keep the ah
until the remaining ws content is used. When we
exhausted the ws traffic in the ah rx buffer, we
detach the ah.
Since there can be any amount of http/1.1 pipelining on a
connection, and each may be expensive to service, it's now
enforced there is a return to the service loop after each
header set is serviced on a connection.
When I added the forced service for ah with pending buffering,
I added support for it to the windows plat code. However this
is untested.
Signed-off-by: Andy Green <andy.green@linaro.org>
2016-02-15 12:37:04 +08:00
|
|
|
|
2016-07-19 09:38:48 +08:00
|
|
|
ah->assigned = 0;
|
|
|
|
|
ah owns rxbuf
This is intended to solve a longstanding problem with the
relationship between http/1.1 keep-alive and the service
loop.
Ah now contain an rx buffer which is used during header
processing, and the ah may not be detached from the wsi
until the rx buffer is exhausted.
Having the rx buffer in the ah means we can delay using the
rx until a later service loop.
Ah which have pending rx force POLLIN service on the wsi
they are attached to automatically, so we can interleave
general service / connections with draining each ah rx
buffer.
The possible http/1.1 situations and their dispositions are:
1) exactly one set of http headers come. After processing,
the ah is detached since no pending rx left. If more
headers come later, a fresh ah is acquired when available
and the rx flow control blocks the read until then.
2) more than one whole set of headers come and we remain in
http mode (no upgrade). The ah is left attached and
returns to the service loop after the first set of headers.
We will get forced service due to the ah having pending
content (respecting flowcontrol) and process the pending
rx in the ah. If we use it all up, we will detach the
ah.
3) one set of http headers come with ws traffic appended.
We service the headers, do the upgrade, and keep the ah
until the remaining ws content is used. When we
exhausted the ws traffic in the ah rx buffer, we
detach the ah.
Since there can be any amount of http/1.1 pipelining on a
connection, and each may be expensive to service, it's now
enforced there is a return to the service loop after each
header set is serviced on a connection.
When I added the forced service for ah with pending buffering,
I added support for it to the windows plat code. However this
is untested.
Signed-off-by: Andy Green <andy.green@linaro.org>
2016-02-15 12:37:04 +08:00
|
|
|
/* if we think we're detaching one, there should be one in use */
|
2018-04-27 15:20:56 +08:00
|
|
|
assert(pt->http.ah_count_in_use > 0);
|
ah owns rxbuf
This is intended to solve a longstanding problem with the
relationship between http/1.1 keep-alive and the service
loop.
Ah now contain an rx buffer which is used during header
processing, and the ah may not be detached from the wsi
until the rx buffer is exhausted.
Having the rx buffer in the ah means we can delay using the
rx until a later service loop.
Ah which have pending rx force POLLIN service on the wsi
they are attached to automatically, so we can interleave
general service / connections with draining each ah rx
buffer.
The possible http/1.1 situations and their dispositions are:
1) exactly one set of http headers come. After processing,
the ah is detached since no pending rx left. If more
headers come later, a fresh ah is aqcuired when available
and the rx flow control blocks the read until then.
2) more that one whole set of headers come and we remain in
http mode (no upgrade). The ah is left attached and
returns to the service loop after the first set of headers.
We will get forced service due to the ah having pending
content (respecting flowcontrol) and process the pending
rx in the ah. If we use it all up, we will detach the
ah.
3) one set of http headers come with ws traffic appended.
We service the headers, do the upgrade, and keep the ah
until the remaining ws content is used. When we
exhausted the ws traffix in the ah rx buffer, we
detach the ah.
Since there can be any amount of http/1.1 pipelining on a
connection, and each may be expensive to service, it's now
enforced there is a return to the service loop after each
header set is serviced on a connection.
When I added the forced service for ah with pending buffering,
I added support for it to the windows plat code. However this
is untested.
Signed-off-by: Andy Green <andy.green@linaro.org>
2016-02-15 12:37:04 +08:00
|
|
|
/* and this specific one should have been in use */
|
2016-07-19 09:38:48 +08:00
|
|
|
assert(ah->in_use);
|
2018-04-27 15:20:56 +08:00
|
|
|
memset(&wsi->http.ah, 0, sizeof(wsi->http.ah));
|
2018-04-27 12:49:42 +08:00
|
|
|
|
2017-09-14 13:14:11 +08:00
|
|
|
#if defined(LWS_WITH_PEER_LIMITS)
|
2018-04-27 12:49:42 +08:00
|
|
|
if (ah->wsi)
|
|
|
|
lws_peer_track_ah_detach(context, wsi->peer);
|
2017-09-14 13:14:11 +08:00
|
|
|
#endif
|
2018-04-27 12:49:42 +08:00
|
|
|
ah->wsi = NULL; /* no owner */
|
2015-12-25 12:44:12 +08:00
|
|
|
|
2018-04-27 15:20:56 +08:00
|
|
|
pwsi = &pt->http.ah_wait_list;
|
2017-07-19 04:06:15 +08:00
|
|
|
|
2017-09-14 13:14:11 +08:00
|
|
|
/* oh there is nobody on the waiting list... leave the ah unattached */
|
|
|
|
if (!*pwsi)
|
|
|
|
goto nobody_usable_waiting;
|
2015-12-25 12:44:12 +08:00
|
|
|
|
2017-09-14 13:14:11 +08:00
|
|
|
/*
|
|
|
|
* at least one wsi on the same tsi is waiting, give it to oldest guy
|
|
|
|
* who is allowed to take it (if any)
|
|
|
|
*/
|
2016-01-26 20:56:56 +08:00
|
|
|
lwsl_info("pt wait list %p\n", *pwsi);
|
2017-09-14 13:14:11 +08:00
|
|
|
wsi = NULL;
|
|
|
|
pwsi_eligible = NULL;
|
|
|
|
|
|
|
|
while (*pwsi) {
|
|
|
|
#if defined(LWS_WITH_PEER_LIMITS)
|
|
|
|
/* are we willing to give this guy an ah? */
|
|
|
|
if (!lws_peer_confirm_ah_attach_ok(context, (*pwsi)->peer))
|
|
|
|
#endif
|
|
|
|
{
|
|
|
|
wsi = *pwsi;
|
|
|
|
pwsi_eligible = pwsi;
|
|
|
|
}
|
|
|
|
#if defined(LWS_WITH_PEER_LIMITS)
|
|
|
|
else
|
2018-04-27 15:20:56 +08:00
|
|
|
if (!(*pwsi)->http.ah_wait_list)
|
2017-09-23 13:27:11 +08:00
|
|
|
lws_stats_atomic_bump(context, pt,
|
2017-09-14 13:14:11 +08:00
|
|
|
LWSSTATS_C_PEER_LIMIT_AH_DENIED, 1);
|
|
|
|
#endif
|
2018-04-27 15:20:56 +08:00
|
|
|
pwsi = &(*pwsi)->http.ah_wait_list;
|
2017-09-14 13:14:11 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
if (!wsi) /* everybody waiting already has too many ah... */
|
|
|
|
goto nobody_usable_waiting;
|
2015-12-25 12:44:12 +08:00
|
|
|
|
2017-09-14 13:14:11 +08:00
|
|
|
lwsl_info("%s: last eligible wsi in wait list %p\n", __func__, wsi);
|
2016-01-26 20:56:56 +08:00
|
|
|
|
2018-04-27 15:20:56 +08:00
|
|
|
wsi->http.ah = ah;
|
ah owns rxbuf
This is intended to solve a longstanding problem with the
relationship between http/1.1 keep-alive and the service
loop.
Ah now contain an rx buffer which is used during header
processing, and the ah may not be detached from the wsi
until the rx buffer is exhausted.
Having the rx buffer in the ah means we can delay using the
rx until a later service loop.
Ah which have pending rx force POLLIN service on the wsi
they are attached to automatically, so we can interleave
general service / connections with draining each ah rx
buffer.
The possible http/1.1 situations and their dispositions are:
1) exactly one set of http headers come. After processing,
the ah is detached since no pending rx left. If more
headers come later, a fresh ah is aqcuired when available
and the rx flow control blocks the read until then.
2) more that one whole set of headers come and we remain in
http mode (no upgrade). The ah is left attached and
returns to the service loop after the first set of headers.
We will get forced service due to the ah having pending
content (respecting flowcontrol) and process the pending
rx in the ah. If we use it all up, we will detach the
ah.
3) one set of http headers come with ws traffic appended.
We service the headers, do the upgrade, and keep the ah
until the remaining ws content is used. When we
exhausted the ws traffix in the ah rx buffer, we
detach the ah.
Since there can be any amount of http/1.1 pipelining on a
connection, and each may be expensive to service, it's now
enforced there is a return to the service loop after each
header set is serviced on a connection.
When I added the forced service for ah with pending buffering,
I added support for it to the windows plat code. However this
is untested.
Signed-off-by: Andy Green <andy.green@linaro.org>
2016-02-15 12:37:04 +08:00
|
|
|
ah->wsi = wsi; /* new owner */
|
2017-09-14 13:14:11 +08:00
|
|
|
|
2018-03-02 14:22:49 +08:00
|
|
|
__lws_header_table_reset(wsi, autoservice);
|
2018-04-27 15:20:56 +08:00
|
|
|
#if defined(LWS_WITH_PEER_LIMITS) && (defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2))
|
2018-04-27 12:49:42 +08:00
|
|
|
lws_context_lock(context); /* <====================================== */
|
2017-09-14 13:14:11 +08:00
|
|
|
if (wsi->peer)
|
2018-04-27 15:20:56 +08:00
|
|
|
wsi->peer->http.count_ah++;
|
2018-04-27 12:49:42 +08:00
|
|
|
lws_context_unlock(context); /* ====================================> */
|
2017-09-14 13:14:11 +08:00
|
|
|
#endif
|
2016-01-26 20:56:56 +08:00
|
|
|
|
2016-08-07 08:33:08 +08:00
|
|
|
/* clients acquire the ah and then insert themselves in fds table... */
|
|
|
|
if (wsi->position_in_fds_table != -1) {
|
|
|
|
lwsl_info("%s: Enabling %p POLLIN\n", __func__, wsi);
|
2016-01-26 20:56:56 +08:00
|
|
|
|
2017-09-14 13:14:11 +08:00
|
|
|
/* he has been stuck waiting for an ah, but now his wait is
|
|
|
|
* over, let him progress */
|
2017-10-13 10:33:02 +08:00
|
|
|
|
2016-08-07 08:33:08 +08:00
|
|
|
_lws_change_pollfd(wsi, 0, LWS_POLLIN, &pa);
|
|
|
|
}
|
2016-01-26 20:56:56 +08:00
|
|
|
|
|
|
|
/* point prev guy to next guy in list instead */
|
2018-04-27 15:20:56 +08:00
|
|
|
*pwsi_eligible = wsi->http.ah_wait_list;
|
2016-02-25 21:39:01 +08:00
|
|
|
/* the guy who got one is out of the list */
|
2018-04-27 15:20:56 +08:00
|
|
|
wsi->http.ah_wait_list = NULL;
|
|
|
|
pt->http.ah_wait_list_length--;
|
2016-01-26 20:56:56 +08:00
|
|
|
|
2016-02-29 14:19:16 +08:00
|
|
|
#ifndef LWS_NO_CLIENT
|
2018-04-02 11:55:17 +08:00
|
|
|
if (lwsi_role_client(wsi) && lwsi_state(wsi) == LRS_UNCONNECTED) {
|
2017-06-12 13:36:24 +08:00
|
|
|
lws_pt_unlock(pt);
|
|
|
|
|
2016-05-08 16:58:18 +08:00
|
|
|
if (!lws_client_connect_via_info2(wsi)) {
|
|
|
|
/* our client connect has failed, the wsi
|
|
|
|
* has been closed
|
|
|
|
*/
|
|
|
|
|
|
|
|
return -1;
|
|
|
|
}
|
2017-06-12 13:36:24 +08:00
|
|
|
return 0;
|
|
|
|
}
|
2016-02-29 14:19:16 +08:00
|
|
|
#endif
|
|
|
|
|
2018-04-27 15:20:56 +08:00
|
|
|
assert(!!pt->http.ah_wait_list_length == !!(lws_intptr_t)pt->http.ah_wait_list);
|
2016-01-26 20:56:56 +08:00
|
|
|
bail:
|
2016-07-23 14:18:25 +08:00
|
|
|
lwsl_info("%s: wsi %p: ah %p (tsi=%d, count = %d)\n", __func__,
|
2018-04-27 15:20:56 +08:00
|
|
|
(void *)wsi, (void *)ah, pt->tid, pt->http.ah_count_in_use);
|
2017-09-14 13:14:11 +08:00
|
|
|
|
2014-12-04 23:59:35 +01:00
|
|
|
return 0;
|
2017-09-14 13:14:11 +08:00
|
|
|
|
|
|
|
nobody_usable_waiting:
|
|
|
|
lwsl_info("%s: nobody usable waiting\n", __func__);
|
2017-10-06 16:07:57 +08:00
|
|
|
_lws_destroy_ah(pt, ah);
|
2018-04-27 15:20:56 +08:00
|
|
|
pt->http.ah_count_in_use--;
|
2017-09-14 13:14:11 +08:00
|
|
|
|
|
|
|
goto bail;
|
2015-12-25 12:44:12 +08:00
|
|
|
}
|
2014-11-07 11:20:59 +08:00
|
|
|
|
2018-03-05 16:49:28 +08:00
|
|
|
int lws_header_table_detach(struct lws *wsi, int autoservice)
|
|
|
|
{
|
|
|
|
struct lws_context *context = wsi->context;
|
|
|
|
struct lws_context_per_thread *pt = &context->pt[(int)wsi->tsi];
|
|
|
|
int n;
|
|
|
|
|
|
|
|
lws_pt_lock(pt, __func__);
|
|
|
|
n = __lws_header_table_detach(wsi, autoservice);
|
|
|
|
lws_pt_unlock(pt);
|
|
|
|
|
|
|
|
return n;
|
|
|
|
}
|
|
|
|
|
2015-12-19 07:35:23 +08:00
|
|
|
LWS_VISIBLE int
|
|
|
|
lws_hdr_fragment_length(struct lws *wsi, enum lws_token_indexes h, int frag_idx)
|
|
|
|
{
|
|
|
|
int n;
|
|
|
|
|
2018-04-27 15:20:56 +08:00
|
|
|
if (!wsi->http.ah)
|
2017-07-19 04:39:14 +08:00
|
|
|
return 0;
|
|
|
|
|
2018-04-27 15:20:56 +08:00
|
|
|
n = wsi->http.ah->frag_index[h];
|
2015-12-19 07:35:23 +08:00
|
|
|
if (!n)
|
|
|
|
return 0;
|
|
|
|
do {
|
|
|
|
if (!frag_idx)
|
2018-04-27 15:20:56 +08:00
|
|
|
return wsi->http.ah->frags[n].len;
|
|
|
|
n = wsi->http.ah->frags[n].nfrag;
|
2015-12-19 07:35:23 +08:00
|
|
|
} while (frag_idx-- && n);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-12-04 11:08:32 +08:00
|
|
|
LWS_VISIBLE int lws_hdr_total_length(struct lws *wsi, enum lws_token_indexes h)
|
replace per header mallocs with single malloc 3 level struct
This big patch replaces the malloc / realloc per header
approach used until now with a single three-level struct
that gets malloc'd during the header union phase and freed
in one go when we transition to a different union phase.
It's more expensive in that we malloc a bit more than 4Kbytes,
but it's a lot cheaper in terms of malloc, frees, heap fragmentation,
no reallocs, nothing to configure. It also moves from arrays of
pointers (8 bytes on x86_64) to unsigned short offsets into the
data array, (2 bytes on all platforms).
The 3-level thing is all in one struct
- array indexed by the header enum, pointing to first "fragment" index
(ie, header type to fragment lookup, or 0 for none)
- array of fragments indexes, enough for 2 x the number of known headers
(fragment array... note that fragments can point to a "next"
fragment if the same header is spread across multiple entries)
- linear char array where the known header payload gets written
(fragments point into null-terminated strings stored in here,
only the known header content is stored)
http headers can legally be split over multiple headers of the same
name which should be concatenated. This scheme does not linearly
conatenate them but uses a linked list in the fragment structs to
link them. There are apis to get the total length and copy out a
linear, concatenated version to a buffer.
Signed-off-by: Andy Green <andy.green@linaro.org>
2013-02-10 18:02:31 +08:00
|
|
|
{
|
|
|
|
int n;
|
|
|
|
int len = 0;
|
|
|
|
|
2018-04-27 15:20:56 +08:00
|
|
|
if (!wsi->http.ah)
|
2017-07-19 04:39:14 +08:00
|
|
|
return 0;
|
|
|
|
|
2018-04-27 15:20:56 +08:00
|
|
|
n = wsi->http.ah->frag_index[h];
|
2014-04-02 19:45:42 +08:00
|
|
|
if (!n)
|
replace per header mallocs with single malloc 3 level struct
This big patch replaces the malloc / realloc per header
approach used until now with a single three-level struct
that gets malloc'd during the header union phase and freed
in one go when we transition to a different union phase.
It's more expensive in that we malloc a bit more than 4Kbytes,
but it's a lot cheaper in terms of malloc, frees, heap fragmentation,
no reallocs, nothing to configure. It also moves from arrays of
pointers (8 bytes on x86_64) to unsigned short offsets into the
data array, (2 bytes on all platforms).
The 3-level thing is all in one struct
- array indexed by the header enum, pointing to first "fragment" index
(ie, header type to fragment lookup, or 0 for none)
- array of fragments indexes, enough for 2 x the number of known headers
(fragment array... note that fragments can point to a "next"
fragment if the same header is spread across multiple entries)
- linear char array where the known header payload gets written
(fragments point into null-terminated strings stored in here,
only the known header content is stored)
http headers can legally be split over multiple headers of the same
name which should be concatenated. This scheme does not linearly
conatenate them but uses a linked list in the fragment structs to
link them. There are apis to get the total length and copy out a
linear, concatenated version to a buffer.
Signed-off-by: Andy Green <andy.green@linaro.org>
2013-02-10 18:02:31 +08:00
|
|
|
return 0;
|
|
|
|
do {
|
2018-04-27 15:20:56 +08:00
|
|
|
len += wsi->http.ah->frags[n].len;
|
|
|
|
n = wsi->http.ah->frags[n].nfrag;
|
replace per header mallocs with single malloc 3 level struct
This big patch replaces the malloc / realloc per header
approach used until now with a single three-level struct
that gets malloc'd during the header union phase and freed
in one go when we transition to a different union phase.
It's more expensive in that we malloc a bit more than 4Kbytes,
but it's a lot cheaper in terms of malloc, frees, heap fragmentation,
no reallocs, nothing to configure. It also moves from arrays of
pointers (8 bytes on x86_64) to unsigned short offsets into the
data array, (2 bytes on all platforms).
The 3-level thing is all in one struct
- array indexed by the header enum, pointing to first "fragment" index
(ie, header type to fragment lookup, or 0 for none)
- array of fragments indexes, enough for 2 x the number of known headers
(fragment array... note that fragments can point to a "next"
fragment if the same header is spread across multiple entries)
- linear char array where the known header payload gets written
(fragments point into null-terminated strings stored in here,
only the known header content is stored)
http headers can legally be split over multiple headers of the same
name which should be concatenated. This scheme does not linearly
conatenate them but uses a linked list in the fragment structs to
link them. There are apis to get the total length and copy out a
linear, concatenated version to a buffer.
Signed-off-by: Andy Green <andy.green@linaro.org>
2013-02-10 18:02:31 +08:00
|
|
|
} while (n);
|
|
|
|
|
|
|
|
return len;
|
|
|
|
}
|
|
|
|
|
2015-12-15 22:59:23 +08:00
|
|
|
LWS_VISIBLE int lws_hdr_copy_fragment(struct lws *wsi, char *dst, int len,
|
|
|
|
enum lws_token_indexes h, int frag_idx)
|
|
|
|
{
|
|
|
|
int n = 0;
|
2017-07-19 04:39:14 +08:00
|
|
|
int f;
|
|
|
|
|
2018-04-27 15:20:56 +08:00
|
|
|
if (!wsi->http.ah)
|
2017-07-19 04:39:14 +08:00
|
|
|
return -1;
|
|
|
|
|
2018-04-27 15:20:56 +08:00
|
|
|
f = wsi->http.ah->frag_index[h];
|
2015-12-15 22:59:23 +08:00
|
|
|
|
2016-01-20 09:44:04 +08:00
|
|
|
if (!f)
|
|
|
|
return -1;
|
|
|
|
|
2015-12-15 22:59:23 +08:00
|
|
|
while (n < frag_idx) {
|
2018-04-27 15:20:56 +08:00
|
|
|
f = wsi->http.ah->frags[f].nfrag;
|
2015-12-15 22:59:23 +08:00
|
|
|
if (!f)
|
|
|
|
return -1;
|
|
|
|
n++;
|
|
|
|
}
|
|
|
|
|
2018-04-27 15:20:56 +08:00
|
|
|
if (wsi->http.ah->frags[f].len >= len)
|
2015-12-15 22:59:23 +08:00
|
|
|
return -1;
|
|
|
|
|
2018-04-27 15:20:56 +08:00
|
|
|
memcpy(dst, wsi->http.ah->data + wsi->http.ah->frags[f].offset,
|
|
|
|
wsi->http.ah->frags[f].len);
|
|
|
|
dst[wsi->http.ah->frags[f].len] = '\0';
|
2015-12-15 22:59:23 +08:00
|
|
|
|
2018-04-27 15:20:56 +08:00
|
|
|
return wsi->http.ah->frags[f].len;
|
2015-12-15 22:59:23 +08:00
|
|
|
}
|
|
|
|
|
2015-12-15 21:15:58 +08:00
|
|
|
LWS_VISIBLE int lws_hdr_copy(struct lws *wsi, char *dst, int len,
|
2015-12-04 11:30:53 +08:00
|
|
|
enum lws_token_indexes h)
|
replace per header mallocs with single malloc 3 level struct
This big patch replaces the malloc / realloc per header
approach used until now with a single three-level struct
that gets malloc'd during the header union phase and freed
in one go when we transition to a different union phase.
It's more expensive in that we malloc a bit more than 4Kbytes,
but it's a lot cheaper in terms of malloc, frees, heap fragmentation,
no reallocs, nothing to configure. It also moves from arrays of
pointers (8 bytes on x86_64) to unsigned short offsets into the
data array, (2 bytes on all platforms).
The 3-level thing is all in one struct
- array indexed by the header enum, pointing to first "fragment" index
(ie, header type to fragment lookup, or 0 for none)
- array of fragments indexes, enough for 2 x the number of known headers
(fragment array... note that fragments can point to a "next"
fragment if the same header is spread across multiple entries)
- linear char array where the known header payload gets written
(fragments point into null-terminated strings stored in here,
only the known header content is stored)
http headers can legally be split over multiple headers of the same
name which should be concatenated. This scheme does not linearly
conatenate them but uses a linked list in the fragment structs to
link them. There are apis to get the total length and copy out a
linear, concatenated version to a buffer.
Signed-off-by: Andy Green <andy.green@linaro.org>
2013-02-10 18:02:31 +08:00
|
|
|
{
|
|
|
|
int toklen = lws_hdr_total_length(wsi, h);
|
|
|
|
int n;
|
|
|
|
|
|
|
|
if (toklen >= len)
|
|
|
|
return -1;
|
|
|
|
|
2018-04-27 15:20:56 +08:00
|
|
|
if (!wsi->http.ah)
|
2017-07-19 04:39:14 +08:00
|
|
|
return -1;
|
|
|
|
|
2018-04-27 15:20:56 +08:00
|
|
|
n = wsi->http.ah->frag_index[h];
|
2014-04-02 19:45:42 +08:00
|
|
|
if (!n)
|
replace per header mallocs with single malloc 3 level struct
This big patch replaces the malloc / realloc per header
approach used until now with a single three-level struct
that gets malloc'd during the header union phase and freed
in one go when we transition to a different union phase.
It's more expensive in that we malloc a bit more than 4Kbytes,
but it's a lot cheaper in terms of malloc, frees, heap fragmentation,
no reallocs, nothing to configure. It also moves from arrays of
pointers (8 bytes on x86_64) to unsigned short offsets into the
data array, (2 bytes on all platforms).
The 3-level thing is all in one struct
- array indexed by the header enum, pointing to first "fragment" index
(ie, header type to fragment lookup, or 0 for none)
- array of fragments indexes, enough for 2 x the number of known headers
(fragment array... note that fragments can point to a "next"
fragment if the same header is spread across multiple entries)
- linear char array where the known header payload gets written
(fragments point into null-terminated strings stored in here,
only the known header content is stored)
http headers can legally be split over multiple headers of the same
name which should be concatenated. This scheme does not linearly
conatenate them but uses a linked list in the fragment structs to
link them. There are apis to get the total length and copy out a
linear, concatenated version to a buffer.
Signed-off-by: Andy Green <andy.green@linaro.org>
2013-02-10 18:02:31 +08:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
do {
|
2018-04-27 15:20:56 +08:00
|
|
|
if (wsi->http.ah->frags[n].len >= len)
|
2017-11-02 08:10:41 +08:00
|
|
|
return -1;
|
2018-04-27 15:20:56 +08:00
|
|
|
strncpy(dst, &wsi->http.ah->data[wsi->http.ah->frags[n].offset],
|
|
|
|
wsi->http.ah->frags[n].len);
|
|
|
|
dst += wsi->http.ah->frags[n].len;
|
|
|
|
len -= wsi->http.ah->frags[n].len;
|
|
|
|
n = wsi->http.ah->frags[n].nfrag;
|
replace per header mallocs with single malloc 3 level struct
This big patch replaces the malloc / realloc per header
approach used until now with a single three-level struct
that gets malloc'd during the header union phase and freed
in one go when we transition to a different union phase.
It's more expensive in that we malloc a bit more than 4Kbytes,
but it's a lot cheaper in terms of malloc, frees, heap fragmentation,
no reallocs, nothing to configure. It also moves from arrays of
pointers (8 bytes on x86_64) to unsigned short offsets into the
data array, (2 bytes on all platforms).
The 3-level thing is all in one struct
- array indexed by the header enum, pointing to first "fragment" index
(ie, header type to fragment lookup, or 0 for none)
- array of fragments indexes, enough for 2 x the number of known headers
(fragment array... note that fragments can point to a "next"
fragment if the same header is spread across multiple entries)
- linear char array where the known header payload gets written
(fragments point into null-terminated strings stored in here,
only the known header content is stored)
http headers can legally be split over multiple headers of the same
name which should be concatenated. This scheme does not linearly
conatenate them but uses a linked list in the fragment structs to
link them. There are apis to get the total length and copy out a
linear, concatenated version to a buffer.
Signed-off-by: Andy Green <andy.green@linaro.org>
2013-02-10 18:02:31 +08:00
|
|
|
} while (n);
|
2017-11-02 08:10:41 +08:00
|
|
|
*dst = '\0';
|
replace per header mallocs with single malloc 3 level struct
This big patch replaces the malloc / realloc per header
approach used until now with a single three-level struct
that gets malloc'd during the header union phase and freed
in one go when we transition to a different union phase.
It's more expensive in that we malloc a bit more than 4Kbytes,
but it's a lot cheaper in terms of malloc, frees, heap fragmentation,
no reallocs, nothing to configure. It also moves from arrays of
pointers (8 bytes on x86_64) to unsigned short offsets into the
data array, (2 bytes on all platforms).
The 3-level thing is all in one struct
- array indexed by the header enum, pointing to first "fragment" index
(ie, header type to fragment lookup, or 0 for none)
- array of fragments indexes, enough for 2 x the number of known headers
(fragment array... note that fragments can point to a "next"
fragment if the same header is spread across multiple entries)
- linear char array where the known header payload gets written
(fragments point into null-terminated strings stored in here,
only the known header content is stored)
http headers can legally be split over multiple headers of the same
name which should be concatenated. This scheme does not linearly
conatenate them but uses a linked list in the fragment structs to
link them. There are apis to get the total length and copy out a
linear, concatenated version to a buffer.
Signed-off-by: Andy Green <andy.green@linaro.org>
2013-02-10 18:02:31 +08:00
|
|
|
|
|
|
|
return toklen;
|
|
|
|
}
|
|
|
|
|
2015-12-04 11:08:32 +08:00
|
|
|
char *lws_hdr_simple_ptr(struct lws *wsi, enum lws_token_indexes h)
|
replace per header mallocs with single malloc 3 level struct
This big patch replaces the malloc / realloc per header
approach used until now with a single three-level struct
that gets malloc'd during the header union phase and freed
in one go when we transition to a different union phase.
It's more expensive in that we malloc a bit more than 4Kbytes,
but it's a lot cheaper in terms of malloc, frees, heap fragmentation,
no reallocs, nothing to configure. It also moves from arrays of
pointers (8 bytes on x86_64) to unsigned short offsets into the
data array, (2 bytes on all platforms).
The 3-level thing is all in one struct
- array indexed by the header enum, pointing to first "fragment" index
(ie, header type to fragment lookup, or 0 for none)
- array of fragments indexes, enough for 2 x the number of known headers
(fragment array... note that fragments can point to a "next"
fragment if the same header is spread across multiple entries)
- linear char array where the known header payload gets written
(fragments point into null-terminated strings stored in here,
only the known header content is stored)
http headers can legally be split over multiple headers of the same
name which should be concatenated. This scheme does not linearly
conatenate them but uses a linked list in the fragment structs to
link them. There are apis to get the total length and copy out a
linear, concatenated version to a buffer.
Signed-off-by: Andy Green <andy.green@linaro.org>
2013-02-10 18:02:31 +08:00
|
|
|
{
|
|
|
|
int n;
|
|
|
|
|
2018-04-27 15:20:56 +08:00
|
|
|
n = wsi->http.ah->frag_index[h];
|
replace per header mallocs with single malloc 3 level struct
This big patch replaces the malloc / realloc per header
approach used until now with a single three-level struct
that gets malloc'd during the header union phase and freed
in one go when we transition to a different union phase.
It's more expensive in that we malloc a bit more than 4Kbytes,
but it's a lot cheaper in terms of malloc, frees, heap fragmentation,
no reallocs, nothing to configure. It also moves from arrays of
pointers (8 bytes on x86_64) to unsigned short offsets into the
data array, (2 bytes on all platforms).
The 3-level thing is all in one struct
- array indexed by the header enum, pointing to first "fragment" index
(ie, header type to fragment lookup, or 0 for none)
- array of fragments indexes, enough for 2 x the number of known headers
(fragment array... note that fragments can point to a "next"
fragment if the same header is spread across multiple entries)
- linear char array where the known header payload gets written
(fragments point into null-terminated strings stored in here,
only the known header content is stored)
http headers can legally be split over multiple headers of the same
name which should be concatenated. This scheme does not linearly
conatenate them but uses a linked list in the fragment structs to
link them. There are apis to get the total length and copy out a
linear, concatenated version to a buffer.
Signed-off-by: Andy Green <andy.green@linaro.org>
2013-02-10 18:02:31 +08:00
|
|
|
if (!n)
|
|
|
|
return NULL;
|
|
|
|
|
2018-04-27 15:20:56 +08:00
|
|
|
return wsi->http.ah->data + wsi->http.ah->frags[n].offset;
|
replace per header mallocs with single malloc 3 level struct
This big patch replaces the malloc / realloc per header
approach used until now with a single three-level struct
that gets malloc'd during the header union phase and freed
in one go when we transition to a different union phase.
It's more expensive in that we malloc a bit more than 4Kbytes,
but it's a lot cheaper in terms of malloc, frees, heap fragmentation,
no reallocs, nothing to configure. It also moves from arrays of
pointers (8 bytes on x86_64) to unsigned short offsets into the
data array, (2 bytes on all platforms).
The 3-level thing is all in one struct
- array indexed by the header enum, pointing to first "fragment" index
(ie, header type to fragment lookup, or 0 for none)
- array of fragments indexes, enough for 2 x the number of known headers
(fragment array... note that fragments can point to a "next"
fragment if the same header is spread across multiple entries)
- linear char array where the known header payload gets written
(fragments point into null-terminated strings stored in here,
only the known header content is stored)
http headers can legally be split over multiple headers of the same
name which should be concatenated. This scheme does not linearly
conatenate them but uses a linked list in the fragment structs to
link them. There are apis to get the total length and copy out a
linear, concatenated version to a buffer.
Signed-off-by: Andy Green <andy.green@linaro.org>
2013-02-10 18:02:31 +08:00
|
|
|
}
|
2013-01-18 01:55:48 +08:00
|
|
|
|
2018-03-07 18:15:17 +08:00
|
|
|
/*
 * Check the ah data store still has room at the current write position.
 *
 * Returns 0 while pos is below the configured max_http_header_data
 * limit, nonzero once the store is full.  pos exceeding (rather than
 * exactly meeting) the limit indicates a missed bounds check elsewhere
 * and trips an assert.
 */
static int LWS_WARN_UNUSED_RESULT
lws_pos_in_bounds(struct lws *wsi)
{
	/* still below the limit: plenty of room */
	if (wsi->http.ah->pos <
	    (unsigned int)wsi->context->max_http_header_data)
		return 0;

	/* exactly at the limit: legitimately out of space */
	if ((int)wsi->http.ah->pos == wsi->context->max_http_header_data) {
		lwsl_err("Ran out of header data space\n");
		return 1;
	}

	/*
	 * with these tests everywhere, it should never be able to exceed
	 * the limit, only meet it
	 */
	lwsl_err("%s: pos %d, limit %d\n", __func__, wsi->http.ah->pos,
		 wsi->context->max_http_header_data);
	assert(0);

	return 1;
}
|
|
|
|
|
|
|
|
int LWS_WARN_UNUSED_RESULT
|
|
|
|
lws_hdr_simple_create(struct lws *wsi, enum lws_token_indexes h, const char *s)
|
2013-02-11 13:04:45 +08:00
|
|
|
{
|
2018-04-27 15:20:56 +08:00
|
|
|
wsi->http.ah->nfrag++;
|
|
|
|
if (wsi->http.ah->nfrag == ARRAY_SIZE(wsi->http.ah->frags)) {
|
2013-02-11 17:13:32 +08:00
|
|
|
lwsl_warn("More hdr frags than we can deal with, dropping\n");
|
2013-02-11 13:04:45 +08:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2018-04-27 15:20:56 +08:00
|
|
|
wsi->http.ah->frag_index[h] = wsi->http.ah->nfrag;
|
2013-02-11 13:04:45 +08:00
|
|
|
|
2018-04-27 15:20:56 +08:00
|
|
|
wsi->http.ah->frags[wsi->http.ah->nfrag].offset = wsi->http.ah->pos;
|
|
|
|
wsi->http.ah->frags[wsi->http.ah->nfrag].len = 0;
|
|
|
|
wsi->http.ah->frags[wsi->http.ah->nfrag].nfrag = 0;
|
2013-02-11 13:04:45 +08:00
|
|
|
|
|
|
|
do {
|
2016-01-21 10:57:39 +08:00
|
|
|
if (lws_pos_in_bounds(wsi))
|
2013-02-11 13:04:45 +08:00
|
|
|
return -1;
|
2016-01-21 10:57:39 +08:00
|
|
|
|
2018-04-27 15:20:56 +08:00
|
|
|
wsi->http.ah->data[wsi->http.ah->pos++] = *s;
|
2013-02-11 13:04:45 +08:00
|
|
|
if (*s)
|
2018-04-27 15:20:56 +08:00
|
|
|
wsi->http.ah->frags[wsi->http.ah->nfrag].len++;
|
2013-02-11 13:04:45 +08:00
|
|
|
} while (*s++);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-01-20 16:56:06 +08:00
|
|
|
static int LWS_WARN_UNUSED_RESULT
|
|
|
|
issue_char(struct lws *wsi, unsigned char c)
|
2013-11-10 15:15:21 +08:00
|
|
|
{
|
2015-11-29 19:26:01 +08:00
|
|
|
unsigned short frag_len;
|
2015-12-15 21:15:58 +08:00
|
|
|
|
2016-01-21 10:57:39 +08:00
|
|
|
if (lws_pos_in_bounds(wsi))
|
2013-11-10 15:15:21 +08:00
|
|
|
return -1;
|
2014-06-29 00:25:19 -04:00
|
|
|
|
2018-04-27 15:20:56 +08:00
|
|
|
frag_len = wsi->http.ah->frags[wsi->http.ah->nfrag].len;
|
2015-12-15 21:15:58 +08:00
|
|
|
/*
|
|
|
|
* If we haven't hit the token limit, just copy the character into
|
|
|
|
* the header
|
|
|
|
*/
|
2018-04-27 15:20:56 +08:00
|
|
|
if (frag_len < wsi->http.ah->current_token_limit) {
|
|
|
|
wsi->http.ah->data[wsi->http.ah->pos++] = c;
|
2015-11-29 19:24:04 +08:00
|
|
|
if (c)
|
2018-04-27 15:20:56 +08:00
|
|
|
wsi->http.ah->frags[wsi->http.ah->nfrag].len++;
|
2015-11-29 19:24:04 +08:00
|
|
|
return 0;
|
|
|
|
}
|
2015-12-15 21:15:58 +08:00
|
|
|
|
|
|
|
/* Insert a null character when we *hit* the limit: */
|
2018-04-27 15:20:56 +08:00
|
|
|
if (frag_len == wsi->http.ah->current_token_limit) {
|
2016-01-21 10:57:39 +08:00
|
|
|
if (lws_pos_in_bounds(wsi))
|
2016-01-20 07:40:13 +08:00
|
|
|
return -1;
|
2017-10-13 10:33:02 +08:00
|
|
|
|
2018-04-27 15:20:56 +08:00
|
|
|
wsi->http.ah->data[wsi->http.ah->pos++] = '\0';
|
2015-12-25 12:44:12 +08:00
|
|
|
lwsl_warn("header %i exceeds limit %d\n",
|
2018-04-27 15:20:56 +08:00
|
|
|
wsi->http.ah->parser_state,
|
|
|
|
wsi->http.ah->current_token_limit);
|
2015-12-15 21:15:58 +08:00
|
|
|
}
|
|
|
|
|
2015-11-29 19:24:04 +08:00
|
|
|
return 1;
|
2013-11-10 15:15:21 +08:00
|
|
|
}
|
|
|
|
|
2017-10-13 10:33:02 +08:00
|
|
|
/*
 * Apply in-place %xx decoding and path sanitation to one incoming URI
 * byte.
 *
 * *_c is the byte under consideration; it may be rewritten (decoded
 * %xx result, '+' -> ' ', uriencoded '=' -> '_').  Two small state
 * machines persist in the ah between calls: ah->ues tracks %xx escape
 * decoding, ah->ups tracks /./ // /../ path collapsing.
 *
 * Returns one of:
 *   LPUR_CONTINUE  - caller should issue the (possibly rewritten) byte
 *   LPUR_SWALLOW   - byte consumed here, caller must not issue it
 *   LPUR_FORBID    - illegal sequence, caller should 403 the request
 *   LPUR_EXCESSIVE - ran out of header fragments
 *   -1             - NUL byte seen in the URI (invalid)
 */
int
lws_parse_urldecode(struct lws *wsi, uint8_t *_c)
{
	struct allocated_headers *ah = wsi->http.ah;
	unsigned int enc = 0;
	uint8_t c = *_c;

	// lwsl_notice("ah->ups %d\n", ah->ups);

	/*
	 * PRIORITY 1
	 * special URI processing... convert %xx
	 */
	switch (ah->ues) {
	case URIES_IDLE:
		if (c == '%') {
			ah->ues = URIES_SEEN_PERCENT;
			goto swallow;
		}
		break;
	case URIES_SEEN_PERCENT:
		if (char_to_hex(c) < 0)
			/* illegal post-% char */
			goto forbid;

		/* stash the first hex nibble until its partner arrives */
		ah->esc_stash = c;
		ah->ues = URIES_SEEN_PERCENT_H1;
		goto swallow;

	case URIES_SEEN_PERCENT_H1:
		if (char_to_hex(c) < 0)
			/* illegal post-% char */
			goto forbid;

		/* both nibbles seen: emit the decoded byte, flag it "enc" */
		*_c = (char_to_hex(ah->esc_stash) << 4) |
				char_to_hex(c);
		c = *_c;
		enc = 1;
		ah->ues = URIES_IDLE;
		break;
	}

	/*
	 * PRIORITY 2
	 * special URI processing...
	 *  convert /.. or /... or /../ etc to /
	 *  convert /./ to /
	 *  convert // or /// etc to /
	 *  leave /.dir or whatever alone
	 */

	switch (ah->ups) {
	case URIPS_IDLE:
		if (!c)
			return -1;
		/* genuine delimiter */
		if ((c == '&' || c == ';') && !enc) {
			if (issue_char(wsi, c) < 0)
				return -1;
			/* swallow the terminator */
			ah->frags[ah->nfrag].len--;
			/* link to next fragment */
			ah->frags[ah->nfrag].nfrag = ah->nfrag + 1;
			ah->nfrag++;
			if (ah->nfrag >= ARRAY_SIZE(ah->frags))
				goto excessive;
			/* start next fragment after the & */
			ah->post_literal_equal = 0;
			ah->frags[ah->nfrag].offset = ah->pos;
			ah->frags[ah->nfrag].len = 0;
			ah->frags[ah->nfrag].nfrag = 0;
			goto swallow;
		}
		/* uriencoded = in the name part, disallow */
		if (c == '=' && enc &&
		    ah->frag_index[WSI_TOKEN_HTTP_URI_ARGS] &&
		    !ah->post_literal_equal) {
			c = '_';
			*_c = c;
		}

		/* after the real =, we don't care how many = */
		if (c == '=' && !enc)
			ah->post_literal_equal = 1;

		/* + to space */
		if (c == '+' && !enc) {
			c = ' ';
			*_c = c;
		}
		/* issue the first / always (only before URI args begin) */
		if (c == '/' && !ah->frag_index[WSI_TOKEN_HTTP_URI_ARGS])
			ah->ups = URIPS_SEEN_SLASH;
		break;
	case URIPS_SEEN_SLASH:
		/* swallow subsequent slashes */
		if (c == '/')
			goto swallow;
		/* track and swallow the first . after / */
		if (c == '.') {
			ah->ups = URIPS_SEEN_SLASH_DOT;
			goto swallow;
		}
		ah->ups = URIPS_IDLE;
		break;
	case URIPS_SEEN_SLASH_DOT:
		/* swallow second . */
		if (c == '.') {
			ah->ups = URIPS_SEEN_SLASH_DOT_DOT;
			goto swallow;
		}
		/* change /./ to / */
		if (c == '/') {
			ah->ups = URIPS_SEEN_SLASH;
			goto swallow;
		}
		/* it was like /.dir ... regurgitate the . */
		ah->ups = URIPS_IDLE;
		if (issue_char(wsi, '.') < 0)
			return -1;
		break;

	case URIPS_SEEN_SLASH_DOT_DOT:

		/* /../ or /..[End of URI] --> backup to last / */
		if (c == '/' || c == '?') {
			/*
			 * back up one dir level if possible
			 * safe against header fragmentation because
			 * the method URI can only be in 1 fragment
			 */
			if (ah->frags[ah->nfrag].len > 2) {
				ah->pos--;
				ah->frags[ah->nfrag].len--;
				/* unwind until the previous '/' (or len 1) */
				do {
					ah->pos--;
					ah->frags[ah->nfrag].len--;
				} while (ah->frags[ah->nfrag].len > 1 &&
					 ah->data[ah->pos] != '/');
			}
			ah->ups = URIPS_SEEN_SLASH;
			if (ah->frags[ah->nfrag].len > 1)
				break;
			goto swallow;
		}

		/* /..[^/] ... regurgitate and allow */

		if (issue_char(wsi, '.') < 0)
			return -1;
		if (issue_char(wsi, '.') < 0)
			return -1;
		ah->ups = URIPS_IDLE;
		break;
	}

	if (c == '?' && !enc &&
	    !ah->frag_index[WSI_TOKEN_HTTP_URI_ARGS]) { /* start of URI args */
		/* must not be mid-%xx escape at the ? boundary */
		if (ah->ues != URIES_IDLE)
			goto forbid;

		/* seal off uri header */
		if (issue_char(wsi, '\0') < 0)
			return -1;

		/* move to using WSI_TOKEN_HTTP_URI_ARGS */
		ah->nfrag++;
		if (ah->nfrag >= ARRAY_SIZE(ah->frags))
			goto excessive;
		ah->frags[ah->nfrag].offset = ah->pos;
		ah->frags[ah->nfrag].len = 0;
		ah->frags[ah->nfrag].nfrag = 0;

		ah->post_literal_equal = 0;
		ah->frag_index[WSI_TOKEN_HTTP_URI_ARGS] = ah->nfrag;
		ah->ups = URIPS_IDLE;
		goto swallow;
	}

	return LPUR_CONTINUE;

swallow:
	return LPUR_SWALLOW;

forbid:
	return LPUR_FORBID;

excessive:
	return LPUR_EXCESSIVE;
}
|
|
|
|
|
|
|
|
/*
 * Header-token indexes that represent HTTP method lines; used by
 * lws_parse() to detect method tokens, reject duplicated methods and
 * decide whether an unknown first line can be valid HTTP.
 */
static const unsigned char methods[] = {
	WSI_TOKEN_GET_URI,
	WSI_TOKEN_POST_URI,
	WSI_TOKEN_OPTIONS_URI,
	WSI_TOKEN_PUT_URI,
	WSI_TOKEN_PATCH_URI,
	WSI_TOKEN_DELETE_URI,
	WSI_TOKEN_CONNECT,
	WSI_TOKEN_HEAD_URI,
};
|
|
|
|
|
2018-03-07 18:15:17 +08:00
|
|
|
/*
 * Incremental HTTP header parser: consume bytes from buf (up to *len),
 * decrementing *len as they are used, building the tokenized header
 * set in wsi->http.ah.
 *
 * possible returns: -1 fail, 0 ok, or 2 transition to raw (unknown
 * method and LWS_SERVER_OPTION_FALLBACK_TO_RAW is enabled)
 */
int LWS_WARN_UNUSED_RESULT
lws_parse(struct lws *wsi, unsigned char *buf, int *len)
{
	struct allocated_headers *ah = wsi->http.ah;
	struct lws_context *context = wsi->context;
	unsigned int n, m;
	unsigned char c;
	int r, pos;

	assert(wsi->http.ah);

	do {
		(*len)--;
		c = *buf++;

		switch (ah->parser_state) {
		default:
			/* collecting the value part of a known token */

			lwsl_parser("WSI_TOK_(%d) '%c'\n", ah->parser_state, c);

			/* collect into malloc'd buffers */
			/* optional initial space swallow */
			if (!ah->frags[ah->frag_index[ah->parser_state]].len &&
			    c == ' ')
				break;

			for (m = 0; m < ARRAY_SIZE(methods); m++)
				if (ah->parser_state == methods[m])
					break;
			if (m == ARRAY_SIZE(methods))
				/* it was not any of the methods */
				goto check_eol;

			/* special URI processing... end at space */

			if (c == ' ') {
				/* enforce starting with / */
				if (!ah->frags[ah->nfrag].len)
					if (issue_char(wsi, '/') < 0)
						return -1;

				/* a trailing /.. needs resolving now */
				if (ah->ups == URIPS_SEEN_SLASH_DOT_DOT) {
					/*
					 * back up one dir level if possible
					 * safe against header fragmentation
					 * because the method URI can only be
					 * in 1 fragment
					 */
					if (ah->frags[ah->nfrag].len > 2) {
						ah->pos--;
						ah->frags[ah->nfrag].len--;
						do {
							ah->pos--;
							ah->frags[ah->nfrag].len--;
						} while (ah->frags[ah->nfrag].len > 1 &&
							 ah->data[ah->pos] != '/');
					}
				}

				/* begin parsing HTTP version: */
				if (issue_char(wsi, '\0') < 0)
					return -1;
				ah->parser_state = WSI_TOKEN_HTTP;
				goto start_fragment;
			}

			r = lws_parse_urldecode(wsi, &c);
			switch (r) {
			case LPUR_CONTINUE:
				break;
			case LPUR_SWALLOW:
				goto swallow;
			case LPUR_FORBID:
				goto forbid;
			case LPUR_EXCESSIVE:
				goto excessive;
			default:
				return -1;
			}
check_eol:
			/* bail at EOL */
			if (ah->parser_state != WSI_TOKEN_CHALLENGE &&
			    c == '\x0d') {
				/* CR while mid-%xx escape is forbidden */
				if (ah->ues != URIES_IDLE)
					goto forbid;

				c = '\0';
				ah->parser_state = WSI_TOKEN_SKIPPING_SAW_CR;
				lwsl_parser("*\n");
			}

			/* issue_char: -1 = no space, 1 = token limit met */
			n = issue_char(wsi, c);
			if ((int)n < 0)
				return -1;
			if (n > 0)
				ah->parser_state = WSI_TOKEN_SKIPPING;

swallow:
			/* per-protocol end of headers management */

			if (ah->parser_state == WSI_TOKEN_CHALLENGE)
				goto set_parsing_complete;
			break;

		/* collecting and checking a name part */
		case WSI_TOKEN_NAME_PART:
			lwsl_parser("WSI_TOKEN_NAME_PART '%c' 0x%02X (role=0x%x) "
				    "wsi->lextable_pos=%d\n", c, c, lwsi_role(wsi),
				    ah->lextable_pos);

			/* header names are matched case-insensitively */
			if (c >= 'A' && c <= 'Z')
				c += 'a' - 'A';

			/* walk the generated lextable state machine */
			pos = ah->lextable_pos;

			while (1) {
				if (lextable[pos] & (1 << 7)) { /* 1-byte, fail on mismatch */
					if ((lextable[pos] & 0x7f) != c) {
nope:
						ah->lextable_pos = -1;
						break;
					}
					/* fall thru */
					pos++;
					if (lextable[pos] == FAIL_CHAR)
						goto nope;

					ah->lextable_pos = pos;
					break;
				}

				if (lextable[pos] == FAIL_CHAR)
					goto nope;

				/* b7 = 0, end or 3-byte */
				if (lextable[pos] < FAIL_CHAR) { /* terminal marker */
					ah->lextable_pos = pos;
					break;
				}

				if (lextable[pos] == c) { /* goto */
					ah->lextable_pos = pos + (lextable[pos + 1]) +
							(lextable[pos + 2] << 8);
					break;
				}

				/* fall thru goto */
				pos += 3;
				/* continue */
			}

			/*
			 * If it's h1, server needs to look out for unknown
			 * methods...
			 */
			if (ah->lextable_pos < 0 && lwsi_role_h1(wsi) &&
			    lwsi_role_server(wsi)) {
				/* this is not a header we know about */
				for (m = 0; m < ARRAY_SIZE(methods); m++)
					if (ah->frag_index[methods[m]]) {
						/*
						 * already had the method, no idea what
						 * this crap from the client is, ignore
						 */
						ah->parser_state = WSI_TOKEN_SKIPPING;
						break;
					}
				/*
				 * hm it's an unknown http method from a client in fact,
				 * it cannot be valid http
				 */
				if (m == ARRAY_SIZE(methods)) {
					/*
					 * are we set up to accept raw in these cases?
					 */
					if (lws_check_opt(wsi->vhost->options,
						   LWS_SERVER_OPTION_FALLBACK_TO_RAW))
						return 2; /* transition to raw */

					lwsl_info("Unknown method - dropping\n");
					goto forbid;
				}
				break;
			}
			/*
			 * ...otherwise for a client, let him ignore unknown headers
			 * coming from the server
			 */
			if (ah->lextable_pos < 0) {
				ah->parser_state = WSI_TOKEN_SKIPPING;
				break;
			}

			if (lextable[ah->lextable_pos] < FAIL_CHAR) {
				/* terminal state: 2 bytes encode the token index */

				n = ((unsigned int)lextable[ah->lextable_pos] << 8) |
						lextable[ah->lextable_pos + 1];

				lwsl_parser("known hdr %d\n", n);
				for (m = 0; m < ARRAY_SIZE(methods); m++)
					if (n == methods[m] &&
					    ah->frag_index[methods[m]]) {
						lwsl_warn("Duplicated method\n");
						return -1;
					}

				/*
				 * WSORIGIN is protocol equiv to ORIGIN,
				 * JWebSocket likes to send it, map to ORIGIN
				 */
				if (n == WSI_TOKEN_SWORIGIN)
					n = WSI_TOKEN_ORIGIN;

				ah->parser_state = (enum lws_token_indexes)
							(WSI_TOKEN_GET_URI + n);
				ah->ups = URIPS_IDLE;

				/* per-token limit from config, else global cap */
				if (context->token_limits)
					ah->current_token_limit = context->
						token_limits->token_limit[
							      ah->parser_state];
				else
					ah->current_token_limit =
						wsi->context->max_http_header_data;

				if (ah->parser_state == WSI_TOKEN_CHALLENGE)
					goto set_parsing_complete;

				goto start_fragment;
			}
			break;

start_fragment:
			ah->nfrag++;
excessive:
			if (ah->nfrag == ARRAY_SIZE(ah->frags)) {
				lwsl_warn("More hdr frags than we can deal with\n");
				return -1;
			}

			ah->frags[ah->nfrag].offset = ah->pos;
			ah->frags[ah->nfrag].len = 0;
			ah->frags[ah->nfrag].nfrag = 0;
			ah->frags[ah->nfrag].flags = 2;

			n = ah->frag_index[ah->parser_state];
			if (!n) { /* first fragment */
				ah->frag_index[ah->parser_state] = ah->nfrag;
				ah->hdr_token_idx = ah->parser_state;
				break;
			}
			/* continuation: append to end of the fragment chain */
			while (ah->frags[n].nfrag)
				n = ah->frags[n].nfrag;
			ah->frags[n].nfrag = ah->nfrag;

			if (issue_char(wsi, ' ') < 0)
				return -1;
			break;

		/* skipping arg part of a name we didn't recognize */
		case WSI_TOKEN_SKIPPING:
			lwsl_parser("WSI_TOKEN_SKIPPING '%c'\n", c);

			if (c == '\x0d')
				ah->parser_state = WSI_TOKEN_SKIPPING_SAW_CR;
			break;

		case WSI_TOKEN_SKIPPING_SAW_CR:
			lwsl_parser("WSI_TOKEN_SKIPPING_SAW_CR '%c'\n", c);
			if (ah->ues != URIES_IDLE)
				goto forbid;
			if (c == '\x0a') {
				/* CRLF: next line starts a new header name */
				ah->parser_state = WSI_TOKEN_NAME_PART;
				ah->lextable_pos = 0;
			} else
				ah->parser_state = WSI_TOKEN_SKIPPING;
			break;
		/* we're done, ignore anything else */

		case WSI_PARSING_COMPLETE:
			lwsl_parser("WSI_PARSING_COMPLETE '%c'\n", c);
			break;
		}

	} while (*len);

	return 0;

set_parsing_complete:
	if (ah->ues != URIES_IDLE)
		goto forbid;
	if (lws_hdr_total_length(wsi, WSI_TOKEN_UPGRADE)) {
		if (lws_hdr_total_length(wsi, WSI_TOKEN_VERSION))
			wsi->rx_frame_type = /* temp for ws version index */
			       atoi(lws_hdr_simple_ptr(wsi, WSI_TOKEN_VERSION));

		lwsl_parser("v%02d hdrs done\n", wsi->rx_frame_type);
	}
	ah->parser_state = WSI_PARSING_COMPLETE;
	wsi->hdr_parsing_completed = 1;

	return 0;

forbid:
	lwsl_notice(" forbidding on uri sanitation\n");
	lws_return_http_status(wsi, HTTP_STATUS_FORBIDDEN, NULL);

	return -1;
}
|
|
|
|
|