2010-11-08 20:20:42 +00:00
|
|
|
/*
|
|
|
|
* libwebsockets - small server side websockets and web server implementation
|
2010-11-13 10:03:47 +00:00
|
|
|
*
|
2013-01-18 11:43:21 +08:00
|
|
|
* Copyright (C) 2010-2013 Andy Green <andy@warmcat.com>
|
2010-11-08 20:20:42 +00:00
|
|
|
*
|
|
|
|
* This library is free software; you can redistribute it and/or
|
|
|
|
* modify it under the terms of the GNU Lesser General Public
|
|
|
|
* License as published by the Free Software Foundation:
|
|
|
|
* version 2.1 of the License.
|
|
|
|
*
|
|
|
|
* This library is distributed in the hope that it will be useful,
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
|
|
* Lesser General Public License for more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU Lesser General Public
|
|
|
|
* License along with this library; if not, write to the Free Software
|
|
|
|
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
|
|
|
|
* MA 02110-1301 USA
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include "private-libwebsockets.h"
|
|
|
|
|
2013-01-18 01:55:48 +08:00
|
|
|
/*
 * Byte-coded state machine used to recognize known http header names.
 * The table contents are generated offline by the minilex tool and
 * emitted as lextable.h; both minilex and lws include the same header
 * so the encoder and this decoder always agree on the encoding.
 */
unsigned char lextable[] = {
	#include "lextable.h"
};
|
|
|
|
|
2014-03-09 11:49:21 +08:00
|
|
|
#define FAIL_CHAR 0x08
|
|
|
|
|
2016-01-26 20:56:56 +08:00
|
|
|
/*
 * Advance the minilex header-name state machine by one input character.
 *
 * pos: current offset into lextable[]
 * c:   next input character; ASCII uppercase is folded to lowercase
 *      before matching, so header-name matching is case-insensitive
 *
 * Returns the new lextable[] offset to continue matching from, or -1 if
 * no known header name can be extended by c from this state.
 *
 * Table encoding (as consumed below): an entry with b7 set is a 1-byte
 * "match this char or fail" cell; an entry equal to FAIL_CHAR is a dead
 * end; an entry below FAIL_CHAR is a terminal marker; otherwise the cell
 * is a 3-byte (char, offset-lo, offset-hi) conditional jump.
 */
int LWS_WARN_UNUSED_RESULT
lextable_decode(int pos, char c)
{
	/* case-insensitive match: fold uppercase to lowercase */
	if (c >= 'A' && c <= 'Z')
		c += 'a' - 'A';

	while (1) {
		if (lextable[pos] & (1 << 7)) { /* 1-byte, fail on mismatch */
			if ((lextable[pos] & 0x7f) != c)
				return -1;
			/* fall thru */
			pos++;
			if (lextable[pos] == FAIL_CHAR)
				return -1;
			return pos;
		}

		if (lextable[pos] == FAIL_CHAR)
			return -1;

		/* b7 = 0, end or 3-byte */
		if (lextable[pos] < FAIL_CHAR) /* terminal marker */
			return pos;

		if (lextable[pos] == c) /* goto */
			return pos + (lextable[pos + 1]) +
					(lextable[pos + 2] << 8);

		/* fall thru goto */
		pos += 3;
		/* continue */
	}
}
|
|
|
|
|
ah http1.1 deal with pipelined headers properly
Connections must hold an ah for the whole time they are
processing one header set, even if eg, the headers are
fragmented and it involves network roundtrip times.
However on http1.1 / keepalive, it must drop the ah when
there are no more header sets to deal with, and reacquire
the ah later when more data appears. It's because the
time between header sets / http1.1 requests is unbounded
and the ah would be tied up forever.
But in the case that we got pipelined http1.1 requests,
even partial already buffered, we must keep the ah,
resetting it instead of dropping it. Because we store
the rx data conveniently in a per-tsi buffer since it only
does one thing at a time per thread, we cannot go back to
the event loop to await a new ah inside one service action.
But no problem since we definitely already have an ah,
let's just reuse it at http completion time if more rx is
already buffered.
NB: attack.sh makes request with echo | nc, this
accidentally sends a trailing '\n' from the echo showing
this problem. With this patch attack.sh can complete well.
Signed-off-by: Andy Green <andy.green@linaro.org>
2016-01-30 11:43:10 +08:00
|
|
|
void
|
ah owns rxbuf
This is intended to solve a longstanding problem with the
relationship between http/1.1 keep-alive and the service
loop.
Ah now contain an rx buffer which is used during header
processing, and the ah may not be detached from the wsi
until the rx buffer is exhausted.
Having the rx buffer in the ah means we can delay using the
rx until a later service loop.
Ah which have pending rx force POLLIN service on the wsi
they are attached to automatically, so we can interleave
general service / connections with draining each ah rx
buffer.
The possible http/1.1 situations and their dispositions are:
1) exactly one set of http headers come. After processing,
the ah is detached since no pending rx left. If more
headers come later, a fresh ah is aqcuired when available
and the rx flow control blocks the read until then.
2) more that one whole set of headers come and we remain in
http mode (no upgrade). The ah is left attached and
returns to the service loop after the first set of headers.
We will get forced service due to the ah having pending
content (respecting flowcontrol) and process the pending
rx in the ah. If we use it all up, we will detach the
ah.
3) one set of http headers come with ws traffic appended.
We service the headers, do the upgrade, and keep the ah
until the remaining ws content is used. When we
exhausted the ws traffix in the ah rx buffer, we
detach the ah.
Since there can be any amount of http/1.1 pipelining on a
connection, and each may be expensive to service, it's now
enforced there is a return to the service loop after each
header set is serviced on a connection.
When I added the forced service for ah with pending buffering,
I added support for it to the windows plat code. However this
is untested.
Signed-off-by: Andy Green <andy.green@linaro.org>
2016-02-15 12:37:04 +08:00
|
|
|
lws_header_table_reset(struct lws *wsi)
|
2016-01-26 20:56:56 +08:00
|
|
|
{
|
ah owns rxbuf
This is intended to solve a longstanding problem with the
relationship between http/1.1 keep-alive and the service
loop.
Ah now contain an rx buffer which is used during header
processing, and the ah may not be detached from the wsi
until the rx buffer is exhausted.
Having the rx buffer in the ah means we can delay using the
rx until a later service loop.
Ah which have pending rx force POLLIN service on the wsi
they are attached to automatically, so we can interleave
general service / connections with draining each ah rx
buffer.
The possible http/1.1 situations and their dispositions are:
1) exactly one set of http headers come. After processing,
the ah is detached since no pending rx left. If more
headers come later, a fresh ah is aqcuired when available
and the rx flow control blocks the read until then.
2) more that one whole set of headers come and we remain in
http mode (no upgrade). The ah is left attached and
returns to the service loop after the first set of headers.
We will get forced service due to the ah having pending
content (respecting flowcontrol) and process the pending
rx in the ah. If we use it all up, we will detach the
ah.
3) one set of http headers come with ws traffic appended.
We service the headers, do the upgrade, and keep the ah
until the remaining ws content is used. When we
exhausted the ws traffix in the ah rx buffer, we
detach the ah.
Since there can be any amount of http/1.1 pipelining on a
connection, and each may be expensive to service, it's now
enforced there is a return to the service loop after each
header set is serviced on a connection.
When I added the forced service for ah with pending buffering,
I added support for it to the windows plat code. However this
is untested.
Signed-off-by: Andy Green <andy.green@linaro.org>
2016-02-15 12:37:04 +08:00
|
|
|
struct allocated_headers *ah = wsi->u.hdr.ah;
|
|
|
|
|
|
|
|
/* if we have the idea we're resetting 'our' ah, must be bound to one */
|
|
|
|
assert(ah);
|
|
|
|
/* ah also concurs with ownership */
|
|
|
|
assert(ah->wsi == wsi);
|
2016-02-09 09:15:02 +08:00
|
|
|
|
2016-01-26 20:56:56 +08:00
|
|
|
/* init the ah to reflect no headers or data have appeared yet */
|
ah owns rxbuf
This is intended to solve a longstanding problem with the
relationship between http/1.1 keep-alive and the service
loop.
Ah now contain an rx buffer which is used during header
processing, and the ah may not be detached from the wsi
until the rx buffer is exhausted.
Having the rx buffer in the ah means we can delay using the
rx until a later service loop.
Ah which have pending rx force POLLIN service on the wsi
they are attached to automatically, so we can interleave
general service / connections with draining each ah rx
buffer.
The possible http/1.1 situations and their dispositions are:
1) exactly one set of http headers come. After processing,
the ah is detached since no pending rx left. If more
headers come later, a fresh ah is aqcuired when available
and the rx flow control blocks the read until then.
2) more that one whole set of headers come and we remain in
http mode (no upgrade). The ah is left attached and
returns to the service loop after the first set of headers.
We will get forced service due to the ah having pending
content (respecting flowcontrol) and process the pending
rx in the ah. If we use it all up, we will detach the
ah.
3) one set of http headers come with ws traffic appended.
We service the headers, do the upgrade, and keep the ah
until the remaining ws content is used. When we
exhausted the ws traffix in the ah rx buffer, we
detach the ah.
Since there can be any amount of http/1.1 pipelining on a
connection, and each may be expensive to service, it's now
enforced there is a return to the service loop after each
header set is serviced on a connection.
When I added the forced service for ah with pending buffering,
I added support for it to the windows plat code. However this
is untested.
Signed-off-by: Andy Green <andy.green@linaro.org>
2016-02-15 12:37:04 +08:00
|
|
|
memset(ah->frag_index, 0, sizeof(ah->frag_index));
|
|
|
|
ah->nfrag = 0;
|
|
|
|
ah->pos = 0;
|
|
|
|
|
|
|
|
/* and reset the rx state */
|
|
|
|
ah->rxpos = 0;
|
|
|
|
ah->rxlen = 0;
|
|
|
|
|
|
|
|
/* since we will restart the ah, our new headers are not completed */
|
|
|
|
wsi->hdr_parsing_completed = 0;
|
2016-01-26 20:56:56 +08:00
|
|
|
}
|
|
|
|
|
2016-01-21 10:57:39 +08:00
|
|
|
/*
 * Bind wsi to an allocated_headers (ah) from the per-thread pool.
 *
 * If wsi already owns an ah, it is simply reset and reused.  If the pool
 * is exhausted, wsi is placed on (or kept on) the pt ah waiting list and
 * its POLLIN is disabled so no more rx arrives until an ah frees up.
 *
 * Returns 0 when an ah is attached (freshly or already owned), 1 when
 * none was available and wsi is parked on the waiting list.
 *
 * Locking: pt lock is held across the pool / wait-list manipulation and
 * released on both the success path (before "reset:") and at "bail:".
 * The early "goto reset" for an already-owned ah never takes the lock.
 */
int LWS_WARN_UNUSED_RESULT
lws_header_table_attach(struct lws *wsi)
{
	struct lws_context *context = wsi->context;
	struct lws_context_per_thread *pt = &context->pt[(int)wsi->tsi];
	struct lws_pollargs pa;
	struct lws **pwsi;
	int n;

	lwsl_info("%s: wsi %p: ah %p (tsi %d)\n", __func__, (void *)wsi,
		  (void *)wsi->u.hdr.ah, wsi->tsi);

	/* if we are already bound to one, just clear it down */
	if (wsi->u.hdr.ah) {
		lwsl_info("cleardown\n");
		goto reset;
	}

	lws_pt_lock(pt);

	/*
	 * walk the singly-linked wait list: if we are already queued and an
	 * ah has become free, unlink ourselves so we can claim it below
	 */
	pwsi = &pt->ah_wait_list;
	while (*pwsi) {
		if (*pwsi == wsi) {
			/* if already waiting on list, if no new ah just ret */
			if (pt->ah_count_in_use ==
			    context->max_http_header_pool) {
				lwsl_err("ah wl denied\n");
				goto bail;
			}
			/* new ah.... remove ourselves from waiting list */
			*pwsi = wsi->u.hdr.ah_wait_list; /* set our prev to our next */
			wsi->u.hdr.ah_wait_list = NULL; /* no next any more */
			pt->ah_wait_list_length--;
			break;
		}
		pwsi = &(*pwsi)->u.hdr.ah_wait_list;
	}

	/*
	 * pool is all busy... add us to waiting list and return that we
	 * weren't able to deliver it right now
	 */
	if (pt->ah_count_in_use == context->max_http_header_pool) {
		lwsl_info("%s: adding %p to ah waiting list\n", __func__, wsi);
		wsi->u.hdr.ah_wait_list = pt->ah_wait_list;
		pt->ah_wait_list = wsi;
		pt->ah_wait_list_length++;

		/* we cannot accept input then */

		_lws_change_pollfd(wsi, LWS_POLLIN, 0, &pa);

		goto bail;
	}

	/* linear scan for a free pool slot */
	for (n = 0; n < context->max_http_header_pool; n++)
		if (!pt->ah_pool[n].in_use)
			break;

	/* if the count of in use said something free... */
	assert(n != context->max_http_header_pool);

	/* claim the slot and re-enable rx now we can buffer headers */
	wsi->u.hdr.ah = &pt->ah_pool[n];
	wsi->u.hdr.ah->in_use = 1;
	pt->ah_pool[n].wsi = wsi; /* mark our owner */
	pt->ah_count_in_use++;

	_lws_change_pollfd(wsi, 0, LWS_POLLIN, &pa);

	lwsl_info("%s: wsi %p: ah %p: count %d (on exit)\n", __func__,
		  (void *)wsi, (void *)wsi->u.hdr.ah, pt->ah_count_in_use);

	lws_pt_unlock(pt);

reset:
	/* start the (new or reused) ah from a clean state */
	lws_header_table_reset(wsi);
	/* timestamp the assignment so stale ah holders can be found */
	time(&wsi->u.hdr.ah->assigned);

	return 0;

bail:
	lws_pt_unlock(pt);

	return 1;
}
|
|
|
|
|
ah owns rxbuf
This is intended to solve a longstanding problem with the
relationship between http/1.1 keep-alive and the service
loop.
Ah now contain an rx buffer which is used during header
processing, and the ah may not be detached from the wsi
until the rx buffer is exhausted.
Having the rx buffer in the ah means we can delay using the
rx until a later service loop.
Ah which have pending rx force POLLIN service on the wsi
they are attached to automatically, so we can interleave
general service / connections with draining each ah rx
buffer.
The possible http/1.1 situations and their dispositions are:
1) exactly one set of http headers come. After processing,
the ah is detached since no pending rx left. If more
headers come later, a fresh ah is aqcuired when available
and the rx flow control blocks the read until then.
2) more that one whole set of headers come and we remain in
http mode (no upgrade). The ah is left attached and
returns to the service loop after the first set of headers.
We will get forced service due to the ah having pending
content (respecting flowcontrol) and process the pending
rx in the ah. If we use it all up, we will detach the
ah.
3) one set of http headers come with ws traffic appended.
We service the headers, do the upgrade, and keep the ah
until the remaining ws content is used. When we
exhausted the ws traffix in the ah rx buffer, we
detach the ah.
Since there can be any amount of http/1.1 pipelining on a
connection, and each may be expensive to service, it's now
enforced there is a return to the service loop after each
header set is serviced on a connection.
When I added the forced service for ah with pending buffering,
I added support for it to the windows plat code. However this
is untested.
Signed-off-by: Andy Green <andy.green@linaro.org>
2016-02-15 12:37:04 +08:00
|
|
|
/*
 * Detach the allocated_headers (ah) from wsi, or — if wsi had none
 * attached — remove wsi from this service thread's ah wait list.
 *
 * If another wsi on the same service thread (tsi) is waiting for an ah,
 * the detached ah is handed directly to the oldest waiter (tail of the
 * wait list) and POLLIN is forced on him so his stalled connection can
 * progress; otherwise the ah is returned to the free pool.
 *
 * The ah may not be detached while it still holds unprocessed rx
 * (asserted below).  All wait-list / pool manipulation happens under
 * the per-thread lock.
 *
 * Returns 0 always.
 */
int lws_header_table_detach(struct lws *wsi)
{
	struct lws_context *context = wsi->context;
	struct allocated_headers *ah = wsi->u.hdr.ah;
	struct lws_context_per_thread *pt = &context->pt[(int)wsi->tsi];
	struct lws_pollargs pa;
	struct lws **pwsi;
	time_t now;

	lwsl_info("%s: wsi %p: ah %p (tsi=%d, count = %d)\n", __func__,
		  (void *)wsi, (void *)wsi->u.hdr.ah, wsi->tsi,
		  pt->ah_count_in_use);

	/* may not be detached while he still has unprocessed rx */
	if (ah && ah->rxpos != ah->rxlen) {
		lwsl_err("%s: %p: rxpos:%d, rxlen:%d\n", __func__, wsi,
			 ah->rxpos, ah->rxlen);
		assert(ah->rxpos == ah->rxlen);
	}

	/* serialize wait-list and ah pool manipulation for this thread */
	lws_pt_lock(pt);

	pwsi = &pt->ah_wait_list;
	if (!ah) { /* remove from wait list if none attached */
		while (*pwsi) {
			if (*pwsi == wsi) {
				lwsl_info("%s: wsi %p, remv wait\n",
					  __func__, wsi);
				/* unlink wsi from the singly-linked wait list */
				*pwsi = wsi->u.hdr.ah_wait_list;
				wsi->u.hdr.ah_wait_list = NULL;
				pt->ah_wait_list_length--;
				goto bail;
			}
			pwsi = &(*pwsi)->u.hdr.ah_wait_list;
		}
		/* no ah, not on list... no more business here */
		goto bail;
	}
	/* we did have an ah attached */
	time(&now);
	/* > 3s of ah occupancy is treated as unreasonably long and logged */
	if (now - wsi->u.hdr.ah->assigned > 3) {
		/*
		 * we're detaching the ah, but it was held an
		 * unreasonably long time
		 */
		lwsl_notice("%s: wsi %p: ah held %ds, "
			    "ah.rxpos %d, ah.rxlen %d, mode/state %d %d,"
			    "wsi->more_rx_waiting %d\n", __func__, wsi,
			    (int)(now - wsi->u.hdr.ah->assigned),
			    ah->rxpos, ah->rxlen, wsi->mode, wsi->state,
			    wsi->more_rx_waiting);
	}

	/* if we think we're detaching one, there should be one in use */
	assert(pt->ah_count_in_use > 0);
	/* and this specific one should have been in use */
	assert(wsi->u.hdr.ah->in_use);
	wsi->u.hdr.ah = NULL;
	ah->wsi = NULL; /* no owner */

	/* oh there is nobody on the waiting list... leave it at that then */
	if (!*pwsi) {
		ah->in_use = 0;
		pt->ah_count_in_use--;

		goto bail;
	}

	/* somebody else on same tsi is waiting, give it to oldest guy */

	lwsl_info("pt wait list %p\n", *pwsi);
	/* the oldest waiter is at the tail of the wait list */
	while ((*pwsi)->u.hdr.ah_wait_list)
		pwsi = &(*pwsi)->u.hdr.ah_wait_list;

	/* NOTE: wsi is rebound here to the recipient of the ah */
	wsi = *pwsi;
	lwsl_info("last wsi in wait list %p\n", wsi);

	wsi->u.hdr.ah = ah;
	ah->wsi = wsi; /* new owner */
	/* recipient starts with clean header-parsing state */
	lws_header_table_reset(wsi);
	time(&wsi->u.hdr.ah->assigned);

	assert(wsi->position_in_fds_table != -1);

	lwsl_info("%s: Enabling %p POLLIN\n", __func__, wsi);

	/* he has been stuck waiting for an ah, but now his wait is over,
	 * let him progress
	 */
	_lws_change_pollfd(wsi, 0, LWS_POLLIN, &pa);

	/* point prev guy to next guy in list instead */
	*pwsi = wsi->u.hdr.ah_wait_list;
	/* the guy who got one is out of the list */
	wsi->u.hdr.ah_wait_list = NULL;
	pt->ah_wait_list_length--;

	/* list length counter and head pointer must agree about emptiness */
	assert(!!pt->ah_wait_list_length == !!(int)(long)pt->ah_wait_list);

bail:
	lws_pt_unlock(pt);

	return 0;
}
|
2014-11-07 11:20:59 +08:00
|
|
|
|
2015-12-30 14:53:50 +08:00
|
|
|
/**
|
|
|
|
* lws_hdr_fragment_length: report length of a single fragment of a header
|
|
|
|
* The returned length does not include the space for a
|
|
|
|
* terminating '\0'
|
|
|
|
*
|
|
|
|
* @wsi: websocket connection
|
|
|
|
* @h: which header index we are interested in
|
|
|
|
* @frag_idx: which fragment of @h we want to get the length of
|
|
|
|
*/
|
|
|
|
|
2015-12-19 07:35:23 +08:00
|
|
|
LWS_VISIBLE int
|
|
|
|
lws_hdr_fragment_length(struct lws *wsi, enum lws_token_indexes h, int frag_idx)
|
|
|
|
{
|
|
|
|
int n;
|
|
|
|
|
|
|
|
n = wsi->u.hdr.ah->frag_index[h];
|
|
|
|
if (!n)
|
|
|
|
return 0;
|
|
|
|
do {
|
|
|
|
if (!frag_idx)
|
|
|
|
return wsi->u.hdr.ah->frags[n].len;
|
|
|
|
n = wsi->u.hdr.ah->frags[n].nfrag;
|
|
|
|
} while (frag_idx-- && n);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-12-30 14:53:50 +08:00
|
|
|
/**
|
|
|
|
* lws_hdr_total_length: report length of all fragments of a header totalled up
|
|
|
|
* The returned length does not include the space for a
|
|
|
|
* terminating '\0'
|
|
|
|
*
|
|
|
|
* @wsi: websocket connection
|
|
|
|
* @h: which header index we are interested in
|
|
|
|
*/
|
|
|
|
|
2015-12-04 11:08:32 +08:00
|
|
|
LWS_VISIBLE int lws_hdr_total_length(struct lws *wsi, enum lws_token_indexes h)
|
replace per header mallocs with single malloc 3 level struct
This big patch replaces the malloc / realloc per header
approach used until now with a single three-level struct
that gets malloc'd during the header union phase and freed
in one go when we transition to a different union phase.
It's more expensive in that we malloc a bit more than 4Kbytes,
but it's a lot cheaper in terms of malloc, frees, heap fragmentation,
no reallocs, nothing to configure. It also moves from arrays of
pointers (8 bytes on x86_64) to unsigned short offsets into the
data array, (2 bytes on all platforms).
The 3-level thing is all in one struct
- array indexed by the header enum, pointing to first "fragment" index
(ie, header type to fragment lookup, or 0 for none)
- array of fragments indexes, enough for 2 x the number of known headers
(fragment array... note that fragments can point to a "next"
fragment if the same header is spread across multiple entries)
- linear char array where the known header payload gets written
(fragments point into null-terminated strings stored in here,
only the known header content is stored)
http headers can legally be split over multiple headers of the same
name which should be concatenated. This scheme does not linearly
conatenate them but uses a linked list in the fragment structs to
link them. There are apis to get the total length and copy out a
linear, concatenated version to a buffer.
Signed-off-by: Andy Green <andy.green@linaro.org>
2013-02-10 18:02:31 +08:00
|
|
|
{
|
|
|
|
int n;
|
|
|
|
int len = 0;
|
|
|
|
|
|
|
|
n = wsi->u.hdr.ah->frag_index[h];
|
2014-04-02 19:45:42 +08:00
|
|
|
if (!n)
|
replace per header mallocs with single malloc 3 level struct
This big patch replaces the malloc / realloc per header
approach used until now with a single three-level struct
that gets malloc'd during the header union phase and freed
in one go when we transition to a different union phase.
It's more expensive in that we malloc a bit more than 4Kbytes,
but it's a lot cheaper in terms of malloc, frees, heap fragmentation,
no reallocs, nothing to configure. It also moves from arrays of
pointers (8 bytes on x86_64) to unsigned short offsets into the
data array, (2 bytes on all platforms).
The 3-level thing is all in one struct
- array indexed by the header enum, pointing to first "fragment" index
(ie, header type to fragment lookup, or 0 for none)
- array of fragments indexes, enough for 2 x the number of known headers
(fragment array... note that fragments can point to a "next"
fragment if the same header is spread across multiple entries)
- linear char array where the known header payload gets written
(fragments point into null-terminated strings stored in here,
only the known header content is stored)
http headers can legally be split over multiple headers of the same
name which should be concatenated. This scheme does not linearly
conatenate them but uses a linked list in the fragment structs to
link them. There are apis to get the total length and copy out a
linear, concatenated version to a buffer.
Signed-off-by: Andy Green <andy.green@linaro.org>
2013-02-10 18:02:31 +08:00
|
|
|
return 0;
|
|
|
|
do {
|
|
|
|
len += wsi->u.hdr.ah->frags[n].len;
|
2015-12-15 21:15:58 +08:00
|
|
|
n = wsi->u.hdr.ah->frags[n].nfrag;
|
replace per header mallocs with single malloc 3 level struct
This big patch replaces the malloc / realloc per header
approach used until now with a single three-level struct
that gets malloc'd during the header union phase and freed
in one go when we transition to a different union phase.
It's more expensive in that we malloc a bit more than 4Kbytes,
but it's a lot cheaper in terms of malloc, frees, heap fragmentation,
no reallocs, nothing to configure. It also moves from arrays of
pointers (8 bytes on x86_64) to unsigned short offsets into the
data array, (2 bytes on all platforms).
The 3-level thing is all in one struct
- array indexed by the header enum, pointing to first "fragment" index
(ie, header type to fragment lookup, or 0 for none)
- array of fragments indexes, enough for 2 x the number of known headers
(fragment array... note that fragments can point to a "next"
fragment if the same header is spread across multiple entries)
- linear char array where the known header payload gets written
(fragments point into null-terminated strings stored in here,
only the known header content is stored)
http headers can legally be split over multiple headers of the same
name which should be concatenated. This scheme does not linearly
conatenate them but uses a linked list in the fragment structs to
link them. There are apis to get the total length and copy out a
linear, concatenated version to a buffer.
Signed-off-by: Andy Green <andy.green@linaro.org>
2013-02-10 18:02:31 +08:00
|
|
|
} while (n);
|
|
|
|
|
|
|
|
return len;
|
|
|
|
}
|
|
|
|
|
2015-12-30 14:53:50 +08:00
|
|
|
/**
|
|
|
|
* lws_hdr_copy_fragment: copy a single fragment of the given header to a buffer
|
|
|
|
* The buffer length @len must include space for an additional
|
|
|
|
* terminating '\0', or it will fail returning -1.
|
|
|
|
* If the requested fragment index is not present, it fails
|
|
|
|
* returning -1.
|
|
|
|
*
|
|
|
|
* @wsi: websocket connection
|
|
|
|
* @dst: destination buffer
|
|
|
|
* @len: length of destination buffer
|
|
|
|
* @h: which header index we are interested in
|
|
|
|
* @frag_index: which fragment of @h we want to copy
|
|
|
|
*/
|
|
|
|
|
2015-12-15 22:59:23 +08:00
|
|
|
LWS_VISIBLE int lws_hdr_copy_fragment(struct lws *wsi, char *dst, int len,
|
|
|
|
enum lws_token_indexes h, int frag_idx)
|
|
|
|
{
|
|
|
|
int n = 0;
|
|
|
|
int f = wsi->u.hdr.ah->frag_index[h];
|
|
|
|
|
2016-01-20 09:44:04 +08:00
|
|
|
if (!f)
|
|
|
|
return -1;
|
|
|
|
|
2015-12-15 22:59:23 +08:00
|
|
|
while (n < frag_idx) {
|
|
|
|
f = wsi->u.hdr.ah->frags[f].nfrag;
|
|
|
|
if (!f)
|
|
|
|
return -1;
|
|
|
|
n++;
|
|
|
|
}
|
|
|
|
|
2015-12-18 16:40:02 +08:00
|
|
|
if (wsi->u.hdr.ah->frags[f].len >= len)
|
2015-12-15 22:59:23 +08:00
|
|
|
return -1;
|
|
|
|
|
2015-12-25 12:44:12 +08:00
|
|
|
memcpy(dst, wsi->u.hdr.ah->data + wsi->u.hdr.ah->frags[f].offset,
|
2015-12-15 22:59:23 +08:00
|
|
|
wsi->u.hdr.ah->frags[f].len);
|
|
|
|
dst[wsi->u.hdr.ah->frags[f].len] = '\0';
|
|
|
|
|
|
|
|
return wsi->u.hdr.ah->frags[f].len;
|
|
|
|
}
|
|
|
|
|
2015-12-30 14:53:50 +08:00
|
|
|
/**
|
|
|
|
* lws_hdr_copy: copy a single fragment of the given header to a buffer
|
|
|
|
* The buffer length @len must include space for an additional
|
|
|
|
* terminating '\0', or it will fail returning -1.
|
|
|
|
*
|
|
|
|
* @wsi: websocket connection
|
|
|
|
* @dst: destination buffer
|
|
|
|
* @len: length of destination buffer
|
|
|
|
* @h: which header index we are interested in
|
|
|
|
*/
|
|
|
|
|
2015-12-15 21:15:58 +08:00
|
|
|
LWS_VISIBLE int lws_hdr_copy(struct lws *wsi, char *dst, int len,
|
2015-12-04 11:30:53 +08:00
|
|
|
enum lws_token_indexes h)
|
replace per header mallocs with single malloc 3 level struct
This big patch replaces the malloc / realloc per header
approach used until now with a single three-level struct
that gets malloc'd during the header union phase and freed
in one go when we transition to a different union phase.
It's more expensive in that we malloc a bit more than 4Kbytes,
but it's a lot cheaper in terms of malloc, frees, heap fragmentation,
no reallocs, nothing to configure. It also moves from arrays of
pointers (8 bytes on x86_64) to unsigned short offsets into the
data array, (2 bytes on all platforms).
The 3-level thing is all in one struct
- array indexed by the header enum, pointing to first "fragment" index
(ie, header type to fragment lookup, or 0 for none)
- array of fragments indexes, enough for 2 x the number of known headers
(fragment array... note that fragments can point to a "next"
fragment if the same header is spread across multiple entries)
- linear char array where the known header payload gets written
(fragments point into null-terminated strings stored in here,
only the known header content is stored)
http headers can legally be split over multiple headers of the same
name which should be concatenated. This scheme does not linearly
conatenate them but uses a linked list in the fragment structs to
link them. There are apis to get the total length and copy out a
linear, concatenated version to a buffer.
Signed-off-by: Andy Green <andy.green@linaro.org>
2013-02-10 18:02:31 +08:00
|
|
|
{
|
|
|
|
int toklen = lws_hdr_total_length(wsi, h);
|
|
|
|
int n;
|
|
|
|
|
|
|
|
if (toklen >= len)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
n = wsi->u.hdr.ah->frag_index[h];
|
2014-04-02 19:45:42 +08:00
|
|
|
if (!n)
|
replace per header mallocs with single malloc 3 level struct
This big patch replaces the malloc / realloc per header
approach used until now with a single three-level struct
that gets malloc'd during the header union phase and freed
in one go when we transition to a different union phase.
It's more expensive in that we malloc a bit more than 4Kbytes,
but it's a lot cheaper in terms of malloc, frees, heap fragmentation,
no reallocs, nothing to configure. It also moves from arrays of
pointers (8 bytes on x86_64) to unsigned short offsets into the
data array, (2 bytes on all platforms).
The 3-level thing is all in one struct
- array indexed by the header enum, pointing to first "fragment" index
(ie, header type to fragment lookup, or 0 for none)
- array of fragments indexes, enough for 2 x the number of known headers
(fragment array... note that fragments can point to a "next"
fragment if the same header is spread across multiple entries)
- linear char array where the known header payload gets written
(fragments point into null-terminated strings stored in here,
only the known header content is stored)
http headers can legally be split over multiple headers of the same
name which should be concatenated. This scheme does not linearly
conatenate them but uses a linked list in the fragment structs to
link them. There are apis to get the total length and copy out a
linear, concatenated version to a buffer.
Signed-off-by: Andy Green <andy.green@linaro.org>
2013-02-10 18:02:31 +08:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
do {
|
2015-12-15 21:15:58 +08:00
|
|
|
strcpy(dst, &wsi->u.hdr.ah->data[wsi->u.hdr.ah->frags[n].offset]);
|
|
|
|
dst += wsi->u.hdr.ah->frags[n].len;
|
|
|
|
n = wsi->u.hdr.ah->frags[n].nfrag;
|
replace per header mallocs with single malloc 3 level struct
This big patch replaces the malloc / realloc per header
approach used until now with a single three-level struct
that gets malloc'd during the header union phase and freed
in one go when we transition to a different union phase.
It's more expensive in that we malloc a bit more than 4Kbytes,
but it's a lot cheaper in terms of malloc, frees, heap fragmentation,
no reallocs, nothing to configure. It also moves from arrays of
pointers (8 bytes on x86_64) to unsigned short offsets into the
data array, (2 bytes on all platforms).
The 3-level thing is all in one struct
- array indexed by the header enum, pointing to first "fragment" index
(ie, header type to fragment lookup, or 0 for none)
- array of fragments indexes, enough for 2 x the number of known headers
(fragment array... note that fragments can point to a "next"
fragment if the same header is spread across multiple entries)
- linear char array where the known header payload gets written
(fragments point into null-terminated strings stored in here,
only the known header content is stored)
http headers can legally be split over multiple headers of the same
name which should be concatenated. This scheme does not linearly
conatenate them but uses a linked list in the fragment structs to
link them. There are apis to get the total length and copy out a
linear, concatenated version to a buffer.
Signed-off-by: Andy Green <andy.green@linaro.org>
2013-02-10 18:02:31 +08:00
|
|
|
} while (n);
|
|
|
|
|
|
|
|
return toklen;
|
|
|
|
}
|
|
|
|
|
2015-12-04 11:08:32 +08:00
|
|
|
char *lws_hdr_simple_ptr(struct lws *wsi, enum lws_token_indexes h)
|
replace per header mallocs with single malloc 3 level struct
This big patch replaces the malloc / realloc per header
approach used until now with a single three-level struct
that gets malloc'd during the header union phase and freed
in one go when we transition to a different union phase.
It's more expensive in that we malloc a bit more than 4Kbytes,
but it's a lot cheaper in terms of malloc, frees, heap fragmentation,
no reallocs, nothing to configure. It also moves from arrays of
pointers (8 bytes on x86_64) to unsigned short offsets into the
data array, (2 bytes on all platforms).
The 3-level thing is all in one struct
- array indexed by the header enum, pointing to first "fragment" index
(ie, header type to fragment lookup, or 0 for none)
- array of fragments indexes, enough for 2 x the number of known headers
(fragment array... note that fragments can point to a "next"
fragment if the same header is spread across multiple entries)
- linear char array where the known header payload gets written
(fragments point into null-terminated strings stored in here,
only the known header content is stored)
http headers can legally be split over multiple headers of the same
name which should be concatenated. This scheme does not linearly
conatenate them but uses a linked list in the fragment structs to
link them. There are apis to get the total length and copy out a
linear, concatenated version to a buffer.
Signed-off-by: Andy Green <andy.green@linaro.org>
2013-02-10 18:02:31 +08:00
|
|
|
{
|
|
|
|
int n;
|
|
|
|
|
|
|
|
n = wsi->u.hdr.ah->frag_index[h];
|
|
|
|
if (!n)
|
|
|
|
return NULL;
|
|
|
|
|
2015-12-25 12:44:12 +08:00
|
|
|
return wsi->u.hdr.ah->data + wsi->u.hdr.ah->frags[n].offset;
|
replace per header mallocs with single malloc 3 level struct
This big patch replaces the malloc / realloc per header
approach used until now with a single three-level struct
that gets malloc'd during the header union phase and freed
in one go when we transition to a different union phase.
It's more expensive in that we malloc a bit more than 4Kbytes,
but it's a lot cheaper in terms of malloc, frees, heap fragmentation,
no reallocs, nothing to configure. It also moves from arrays of
pointers (8 bytes on x86_64) to unsigned short offsets into the
data array, (2 bytes on all platforms).
The 3-level thing is all in one struct
- array indexed by the header enum, pointing to first "fragment" index
(ie, header type to fragment lookup, or 0 for none)
- array of fragments indexes, enough for 2 x the number of known headers
(fragment array... note that fragments can point to a "next"
fragment if the same header is spread across multiple entries)
- linear char array where the known header payload gets written
(fragments point into null-terminated strings stored in here,
only the known header content is stored)
http headers can legally be split over multiple headers of the same
name which should be concatenated. This scheme does not linearly
conatenate them but uses a linked list in the fragment structs to
link them. There are apis to get the total length and copy out a
linear, concatenated version to a buffer.
Signed-off-by: Andy Green <andy.green@linaro.org>
2013-02-10 18:02:31 +08:00
|
|
|
}
|
2013-01-18 01:55:48 +08:00
|
|
|
|
2016-01-21 10:57:39 +08:00
|
|
|
int LWS_WARN_UNUSED_RESULT
|
|
|
|
lws_pos_in_bounds(struct lws *wsi)
|
|
|
|
{
|
|
|
|
if (wsi->u.hdr.ah->pos < wsi->context->max_http_header_data)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (wsi->u.hdr.ah->pos == wsi->context->max_http_header_data) {
|
|
|
|
lwsl_err("Ran out of header data space\n");
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* with these tests everywhere, it should never be able to exceed
|
|
|
|
* the limit, only meet the limit
|
|
|
|
*/
|
|
|
|
|
|
|
|
lwsl_err("%s: pos %d, limit %d\n", __func__, wsi->u.hdr.ah->pos,
|
|
|
|
wsi->context->max_http_header_data);
|
|
|
|
assert(0);
|
|
|
|
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
int LWS_WARN_UNUSED_RESULT
|
|
|
|
lws_hdr_simple_create(struct lws *wsi, enum lws_token_indexes h, const char *s)
|
2013-02-11 13:04:45 +08:00
|
|
|
{
|
2015-12-15 21:15:58 +08:00
|
|
|
wsi->u.hdr.ah->nfrag++;
|
|
|
|
if (wsi->u.hdr.ah->nfrag == ARRAY_SIZE(wsi->u.hdr.ah->frags)) {
|
2013-02-11 17:13:32 +08:00
|
|
|
lwsl_warn("More hdr frags than we can deal with, dropping\n");
|
2013-02-11 13:04:45 +08:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2015-12-15 21:15:58 +08:00
|
|
|
wsi->u.hdr.ah->frag_index[h] = wsi->u.hdr.ah->nfrag;
|
2013-02-11 13:04:45 +08:00
|
|
|
|
2015-12-15 21:15:58 +08:00
|
|
|
wsi->u.hdr.ah->frags[wsi->u.hdr.ah->nfrag].offset = wsi->u.hdr.ah->pos;
|
|
|
|
wsi->u.hdr.ah->frags[wsi->u.hdr.ah->nfrag].len = 0;
|
|
|
|
wsi->u.hdr.ah->frags[wsi->u.hdr.ah->nfrag].nfrag = 0;
|
2013-02-11 13:04:45 +08:00
|
|
|
|
|
|
|
do {
|
2016-01-21 10:57:39 +08:00
|
|
|
if (lws_pos_in_bounds(wsi))
|
2013-02-11 13:04:45 +08:00
|
|
|
return -1;
|
2016-01-21 10:57:39 +08:00
|
|
|
|
2013-02-11 13:04:45 +08:00
|
|
|
wsi->u.hdr.ah->data[wsi->u.hdr.ah->pos++] = *s;
|
|
|
|
if (*s)
|
2015-12-15 21:15:58 +08:00
|
|
|
wsi->u.hdr.ah->frags[wsi->u.hdr.ah->nfrag].len++;
|
2013-02-11 13:04:45 +08:00
|
|
|
} while (*s++);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-03-30 18:56:52 +08:00
|
|
|
/* Map an ASCII hex digit to its value 0..15, or -1 for any other char. */
static signed char char_to_hex(const char c)
{
	if (c >= 'a' && c <= 'f')
		return c - 'a' + 10;

	if (c >= 'A' && c <= 'F')
		return c - 'A' + 10;

	if (c >= '0' && c <= '9')
		return c - '0';

	return -1;
}
|
|
|
|
|
2016-01-20 16:56:06 +08:00
|
|
|
static int LWS_WARN_UNUSED_RESULT
|
|
|
|
issue_char(struct lws *wsi, unsigned char c)
|
2013-11-10 15:15:21 +08:00
|
|
|
{
|
2015-11-29 19:26:01 +08:00
|
|
|
unsigned short frag_len;
|
2015-12-15 21:15:58 +08:00
|
|
|
|
2016-01-21 10:57:39 +08:00
|
|
|
if (lws_pos_in_bounds(wsi))
|
2013-11-10 15:15:21 +08:00
|
|
|
return -1;
|
2014-06-29 00:25:19 -04:00
|
|
|
|
2015-12-15 21:15:58 +08:00
|
|
|
frag_len = wsi->u.hdr.ah->frags[wsi->u.hdr.ah->nfrag].len;
|
|
|
|
/*
|
|
|
|
* If we haven't hit the token limit, just copy the character into
|
|
|
|
* the header
|
|
|
|
*/
|
|
|
|
if (frag_len < wsi->u.hdr.current_token_limit) {
|
2015-11-29 19:24:04 +08:00
|
|
|
wsi->u.hdr.ah->data[wsi->u.hdr.ah->pos++] = c;
|
|
|
|
if (c)
|
2015-12-15 21:15:58 +08:00
|
|
|
wsi->u.hdr.ah->frags[wsi->u.hdr.ah->nfrag].len++;
|
2015-11-29 19:24:04 +08:00
|
|
|
return 0;
|
|
|
|
}
|
2015-12-15 21:15:58 +08:00
|
|
|
|
|
|
|
/* Insert a null character when we *hit* the limit: */
|
|
|
|
if (frag_len == wsi->u.hdr.current_token_limit) {
|
2016-01-21 10:57:39 +08:00
|
|
|
if (lws_pos_in_bounds(wsi))
|
2016-01-20 07:40:13 +08:00
|
|
|
return -1;
|
2015-12-15 21:15:58 +08:00
|
|
|
wsi->u.hdr.ah->data[wsi->u.hdr.ah->pos++] = '\0';
|
2015-12-25 12:44:12 +08:00
|
|
|
lwsl_warn("header %i exceeds limit %d\n",
|
ah owns rxbuf
This is intended to solve a longstanding problem with the
relationship between http/1.1 keep-alive and the service
loop.
Ah now contain an rx buffer which is used during header
processing, and the ah may not be detached from the wsi
until the rx buffer is exhausted.
Having the rx buffer in the ah means we can delay using the
rx until a later service loop.
Ah which have pending rx force POLLIN service on the wsi
they are attached to automatically, so we can interleave
general service / connections with draining each ah rx
buffer.
The possible http/1.1 situations and their dispositions are:
1) exactly one set of http headers come. After processing,
the ah is detached since no pending rx left. If more
headers come later, a fresh ah is aqcuired when available
and the rx flow control blocks the read until then.
2) more that one whole set of headers come and we remain in
http mode (no upgrade). The ah is left attached and
returns to the service loop after the first set of headers.
We will get forced service due to the ah having pending
content (respecting flowcontrol) and process the pending
rx in the ah. If we use it all up, we will detach the
ah.
3) one set of http headers come with ws traffic appended.
We service the headers, do the upgrade, and keep the ah
until the remaining ws content is used. When we
exhausted the ws traffix in the ah rx buffer, we
detach the ah.
Since there can be any amount of http/1.1 pipelining on a
connection, and each may be expensive to service, it's now
enforced there is a return to the service loop after each
header set is serviced on a connection.
When I added the forced service for ah with pending buffering,
I added support for it to the windows plat code. However this
is untested.
Signed-off-by: Andy Green <andy.green@linaro.org>
2016-02-15 12:37:04 +08:00
|
|
|
wsi->u.hdr.parser_state,
|
|
|
|
wsi->u.hdr.current_token_limit);
|
2015-12-15 21:15:58 +08:00
|
|
|
}
|
|
|
|
|
2015-11-29 19:24:04 +08:00
|
|
|
return 1;
|
2013-11-10 15:15:21 +08:00
|
|
|
}
|
|
|
|
|
2016-01-20 16:56:06 +08:00
|
|
|
int LWS_WARN_UNUSED_RESULT
|
|
|
|
lws_parse(struct lws *wsi, unsigned char c)
|
2010-11-08 20:20:42 +00:00
|
|
|
{
|
2015-01-10 19:01:52 -08:00
|
|
|
static const unsigned char methods[] = {
|
|
|
|
WSI_TOKEN_GET_URI,
|
|
|
|
WSI_TOKEN_POST_URI,
|
|
|
|
WSI_TOKEN_OPTIONS_URI,
|
|
|
|
WSI_TOKEN_PUT_URI,
|
|
|
|
WSI_TOKEN_PATCH_URI,
|
|
|
|
WSI_TOKEN_DELETE_URI,
|
|
|
|
};
|
2015-12-15 21:15:58 +08:00
|
|
|
struct allocated_headers *ah = wsi->u.hdr.ah;
|
|
|
|
struct lws_context *context = wsi->context;
|
2015-12-14 19:42:26 +08:00
|
|
|
unsigned int n, m, enc = 0;
|
2010-11-08 20:20:42 +00:00
|
|
|
|
2016-01-26 20:56:56 +08:00
|
|
|
assert(wsi->u.hdr.ah);
|
|
|
|
|
2013-01-21 11:04:23 +08:00
|
|
|
switch (wsi->u.hdr.parser_state) {
|
2014-09-30 16:33:56 +08:00
|
|
|
default:
|
2011-05-23 10:00:03 +01:00
|
|
|
|
2013-02-11 17:13:32 +08:00
|
|
|
lwsl_parser("WSI_TOK_(%d) '%c'\n", wsi->u.hdr.parser_state, c);
|
2010-11-08 20:20:42 +00:00
|
|
|
|
|
|
|
/* collect into malloc'd buffers */
|
replace per header mallocs with single malloc 3 level struct
This big patch replaces the malloc / realloc per header
approach used until now with a single three-level struct
that gets malloc'd during the header union phase and freed
in one go when we transition to a different union phase.
It's more expensive in that we malloc a bit more than 4Kbytes,
but it's a lot cheaper in terms of malloc, frees, heap fragmentation,
no reallocs, nothing to configure. It also moves from arrays of
pointers (8 bytes on x86_64) to unsigned short offsets into the
data array, (2 bytes on all platforms).
The 3-level thing is all in one struct
- array indexed by the header enum, pointing to first "fragment" index
(ie, header type to fragment lookup, or 0 for none)
- array of fragments indexes, enough for 2 x the number of known headers
(fragment array... note that fragments can point to a "next"
fragment if the same header is spread across multiple entries)
- linear char array where the known header payload gets written
(fragments point into null-terminated strings stored in here,
only the known header content is stored)
http headers can legally be split over multiple headers of the same
name which should be concatenated. This scheme does not linearly
conatenate them but uses a linked list in the fragment structs to
link them. There are apis to get the total length and copy out a
linear, concatenated version to a buffer.
Signed-off-by: Andy Green <andy.green@linaro.org>
2013-02-10 18:02:31 +08:00
|
|
|
/* optional initial space swallow */
|
2016-01-21 10:57:39 +08:00
|
|
|
if (!ah->frags[ah->frag_index[wsi->u.hdr.parser_state]].len &&
|
|
|
|
c == ' ')
|
2010-11-08 20:20:42 +00:00
|
|
|
break;
|
2010-11-13 10:03:47 +00:00
|
|
|
|
2015-01-10 19:01:52 -08:00
|
|
|
for (m = 0; m < ARRAY_SIZE(methods); m++)
|
|
|
|
if (wsi->u.hdr.parser_state == methods[m])
|
|
|
|
break;
|
|
|
|
if (m == ARRAY_SIZE(methods))
|
|
|
|
/* it was not any of the methods */
|
2013-11-10 15:15:21 +08:00
|
|
|
goto check_eol;
|
|
|
|
|
|
|
|
/* special URI processing... end at space */
|
|
|
|
|
|
|
|
if (c == ' ') {
|
|
|
|
/* enforce starting with / */
|
2015-12-15 21:15:58 +08:00
|
|
|
if (!ah->frags[ah->nfrag].len)
|
2014-06-29 01:34:24 -04:00
|
|
|
if (issue_char(wsi, '/') < 0)
|
2013-11-10 15:15:21 +08:00
|
|
|
return -1;
|
HTTP Version, Keep-alive support, No-copy POST
This is a squashed commit from https://github.com/andrew-canaday/libwebsockets,
dev/http_keepalive branch (strategies changed a few times, so the commit
history is clutteread). This branch is submitted for clarity, but the other
can be used as a reference or alternative.
* added **enum http_version** to track HTTP/1.0 vs HTTP/1.1 requests
* added **enum http_connection_type** to track keep-alive vs close
* replaced content_length_seen and body_index with **content_remain**
* removed **post_buffer** (see handshake.c modifications)
* removed post_buffer free
* switch state to WSI_TOKEN_SKIPPING after URI is complete to store version
* delete *spill* label (unused)
* add vars to track HTTP version and connection type
* HTTP version defaults to 1.0
* connection type defaults to 'close' for 1.0, keep-alive for 1.1
* additional checks in **cleanup:** label:
* if HTTP version string is present and valid, set enum val appropriately
* override connection default with the "Connection:" header, if present
* set state to WSI_STATE_HTTP_BODY if content_length > 0
* return 0 on HTTP requests, unless LWS_CALLBACK_HTTP indicates otherwise
* add vars to track remaining content_length and body chunk size
* re-arrange switch case order to facilitate creation of jump-table
* added new labels:
* **read_ok**: normal location reach on break from switch; just return 0
* **http_complete**: check for keep-alive + init state, mode, hdr table
* **http_new**: jump location for keep-alive when http_complete sees len>0
* after libwebsocket_parse, jump to one of those labels based on state
* POST body handling:
* don't bother iterating over input byte-by-byte or using memcpy
* just pass the relevant portion of the context->service_buffer to callback
2014-07-13 01:07:36 -04:00
|
|
|
|
2016-04-07 18:57:28 +08:00
|
|
|
if (wsi->u.hdr.ups == URIPS_SEEN_SLASH_DOT_DOT) {
|
|
|
|
/*
|
|
|
|
* back up one dir level if possible
|
|
|
|
* safe against header fragmentation because
|
|
|
|
* the method URI can only be in 1 fragment
|
|
|
|
*/
|
|
|
|
if (ah->frags[ah->nfrag].len > 2) {
|
|
|
|
ah->pos--;
|
|
|
|
ah->frags[ah->nfrag].len--;
|
|
|
|
do {
|
|
|
|
ah->pos--;
|
|
|
|
ah->frags[ah->nfrag].len--;
|
|
|
|
} while (ah->frags[ah->nfrag].len > 1 &&
|
|
|
|
ah->data[ah->pos] != '/');
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
HTTP Version, Keep-alive support, No-copy POST
This is a squashed commit from https://github.com/andrew-canaday/libwebsockets,
dev/http_keepalive branch (strategies changed a few times, so the commit
history is clutteread). This branch is submitted for clarity, but the other
can be used as a reference or alternative.
* added **enum http_version** to track HTTP/1.0 vs HTTP/1.1 requests
* added **enum http_connection_type** to track keep-alive vs close
* replaced content_length_seen and body_index with **content_remain**
* removed **post_buffer** (see handshake.c modifications)
* removed post_buffer free
* switch state to WSI_TOKEN_SKIPPING after URI is complete to store version
* delete *spill* label (unused)
* add vars to track HTTP version and connection type
* HTTP version defaults to 1.0
* connection type defaults to 'close' for 1.0, keep-alive for 1.1
* additional checks in **cleanup:** label:
* if HTTP version string is present and valid, set enum val appropriately
* override connection default with the "Connection:" header, if present
* set state to WSI_STATE_HTTP_BODY if content_length > 0
* return 0 on HTTP requests, unless LWS_CALLBACK_HTTP indicates otherwise
* add vars to track remaining content_length and body chunk size
* re-arrange switch case order to facilitate creation of jump-table
* added new labels:
* **read_ok**: normal location reach on break from switch; just return 0
* **http_complete**: check for keep-alive + init state, mode, hdr table
* **http_new**: jump location for keep-alive when http_complete sees len>0
* after libwebsocket_parse, jump to one of those labels based on state
* POST body handling:
* don't bother iterating over input byte-by-byte or using memcpy
* just pass the relevant portion of the context->service_buffer to callback
2014-07-13 01:07:36 -04:00
|
|
|
/* begin parsing HTTP version: */
|
|
|
|
if (issue_char(wsi, '\0') < 0)
|
|
|
|
return -1;
|
|
|
|
wsi->u.hdr.parser_state = WSI_TOKEN_HTTP;
|
|
|
|
goto start_fragment;
|
2010-11-08 20:20:42 +00:00
|
|
|
}
|
|
|
|
|
2016-04-07 18:57:28 +08:00
|
|
|
/*
|
|
|
|
* PRIORITY 1
|
|
|
|
* special URI processing... convert %xx
|
|
|
|
*/
|
2013-11-10 15:15:21 +08:00
|
|
|
|
|
|
|
switch (wsi->u.hdr.ues) {
|
|
|
|
case URIES_IDLE:
|
|
|
|
if (c == '%') {
|
|
|
|
wsi->u.hdr.ues = URIES_SEEN_PERCENT;
|
|
|
|
goto swallow;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case URIES_SEEN_PERCENT:
|
2016-04-07 18:57:28 +08:00
|
|
|
if (char_to_hex(c) < 0)
|
|
|
|
/* illegal post-% char */
|
|
|
|
goto forbid;
|
|
|
|
|
2013-11-10 15:15:21 +08:00
|
|
|
wsi->u.hdr.esc_stash = c;
|
|
|
|
wsi->u.hdr.ues = URIES_SEEN_PERCENT_H1;
|
2013-11-11 06:14:52 +08:00
|
|
|
goto swallow;
|
2015-12-14 08:52:03 +08:00
|
|
|
|
2013-11-10 15:15:21 +08:00
|
|
|
case URIES_SEEN_PERCENT_H1:
|
2016-04-07 18:57:28 +08:00
|
|
|
if (char_to_hex(c) < 0)
|
|
|
|
/* illegal post-% char */
|
|
|
|
goto forbid;
|
|
|
|
|
2013-11-10 15:15:21 +08:00
|
|
|
c = (char_to_hex(wsi->u.hdr.esc_stash) << 4) |
|
|
|
|
char_to_hex(c);
|
2015-12-14 19:42:26 +08:00
|
|
|
enc = 1;
|
2013-11-11 06:14:52 +08:00
|
|
|
wsi->u.hdr.ues = URIES_IDLE;
|
2013-11-10 15:15:21 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2016-04-07 18:57:28 +08:00
|
|
|
* PRIORITY 2
|
2015-12-14 08:52:03 +08:00
|
|
|
* special URI processing...
|
2013-11-11 06:14:52 +08:00
|
|
|
* convert /.. or /... or /../ etc to /
|
|
|
|
* convert /./ to /
|
2013-11-10 15:15:21 +08:00
|
|
|
* convert // or /// etc to /
|
|
|
|
* leave /.dir or whatever alone
|
|
|
|
*/
|
|
|
|
|
|
|
|
switch (wsi->u.hdr.ups) {
|
|
|
|
case URIPS_IDLE:
|
2016-01-20 07:40:13 +08:00
|
|
|
if (!c)
|
|
|
|
return -1;
|
2015-12-15 22:57:19 +08:00
|
|
|
/* genuine delimiter */
|
2015-12-18 15:20:09 +08:00
|
|
|
if ((c == '&' || c == ';') && !enc) {
|
2016-01-20 07:40:13 +08:00
|
|
|
if (issue_char(wsi, c) < 0)
|
|
|
|
return -1;
|
2015-12-15 22:57:19 +08:00
|
|
|
/* swallow the terminator */
|
|
|
|
ah->frags[ah->nfrag].len--;
|
|
|
|
/* link to next fragment */
|
|
|
|
ah->frags[ah->nfrag].nfrag = ah->nfrag + 1;
|
|
|
|
ah->nfrag++;
|
|
|
|
if (ah->nfrag >= ARRAY_SIZE(ah->frags))
|
|
|
|
goto excessive;
|
|
|
|
/* start next fragment after the & */
|
2015-12-18 15:40:03 +08:00
|
|
|
wsi->u.hdr.post_literal_equal = 0;
|
2015-12-15 22:57:19 +08:00
|
|
|
ah->frags[ah->nfrag].offset = ah->pos;
|
|
|
|
ah->frags[ah->nfrag].len = 0;
|
|
|
|
ah->frags[ah->nfrag].nfrag = 0;
|
|
|
|
goto swallow;
|
|
|
|
}
|
2015-12-18 15:40:03 +08:00
|
|
|
/* uriencoded = in the name part, disallow */
|
2016-05-07 08:33:07 +08:00
|
|
|
if (c == '=' && enc &&
|
|
|
|
ah->frag_index[WSI_TOKEN_HTTP_URI_ARGS] &&
|
|
|
|
!wsi->u.hdr.post_literal_equal)
|
2015-12-18 15:40:03 +08:00
|
|
|
c = '_';
|
|
|
|
|
|
|
|
/* after the real =, we don't care how many = */
|
|
|
|
if (c == '=' && !enc)
|
|
|
|
wsi->u.hdr.post_literal_equal = 1;
|
|
|
|
|
2015-12-18 15:23:31 +08:00
|
|
|
/* + to space */
|
|
|
|
if (c == '+' && !enc)
|
|
|
|
c = ' ';
|
2013-11-10 15:15:21 +08:00
|
|
|
/* issue the first / always */
|
2015-12-15 22:57:19 +08:00
|
|
|
if (c == '/' && !ah->frag_index[WSI_TOKEN_HTTP_URI_ARGS])
|
2013-11-10 15:15:21 +08:00
|
|
|
wsi->u.hdr.ups = URIPS_SEEN_SLASH;
|
|
|
|
break;
|
|
|
|
case URIPS_SEEN_SLASH:
|
|
|
|
/* swallow subsequent slashes */
|
|
|
|
if (c == '/')
|
|
|
|
goto swallow;
|
|
|
|
/* track and swallow the first . after / */
|
|
|
|
if (c == '.') {
|
|
|
|
wsi->u.hdr.ups = URIPS_SEEN_SLASH_DOT;
|
|
|
|
goto swallow;
|
2014-08-19 18:34:31 +08:00
|
|
|
}
|
|
|
|
wsi->u.hdr.ups = URIPS_IDLE;
|
2013-11-10 15:15:21 +08:00
|
|
|
break;
|
|
|
|
case URIPS_SEEN_SLASH_DOT:
|
|
|
|
/* swallow second . */
|
|
|
|
if (c == '.') {
|
2016-04-07 18:57:28 +08:00
|
|
|
wsi->u.hdr.ups = URIPS_SEEN_SLASH_DOT_DOT;
|
|
|
|
goto swallow;
|
|
|
|
}
|
|
|
|
/* change /./ to / */
|
|
|
|
if (c == '/') {
|
|
|
|
wsi->u.hdr.ups = URIPS_SEEN_SLASH;
|
|
|
|
goto swallow;
|
|
|
|
}
|
|
|
|
/* it was like /.dir ... regurgitate the . */
|
|
|
|
wsi->u.hdr.ups = URIPS_IDLE;
|
|
|
|
if (issue_char(wsi, '.') < 0)
|
|
|
|
return -1;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case URIPS_SEEN_SLASH_DOT_DOT:
|
|
|
|
|
|
|
|
/* /../ or /..[End of URI] --> backup to last / */
|
|
|
|
if (c == '/' || c == '?') {
|
2015-12-14 08:52:03 +08:00
|
|
|
/*
|
2013-11-13 06:53:21 +08:00
|
|
|
* back up one dir level if possible
|
|
|
|
* safe against header fragmentation because
|
|
|
|
* the method URI can only be in 1 fragment
|
|
|
|
*/
|
2015-12-15 21:15:58 +08:00
|
|
|
if (ah->frags[ah->nfrag].len > 2) {
|
|
|
|
ah->pos--;
|
|
|
|
ah->frags[ah->nfrag].len--;
|
2013-11-13 06:53:21 +08:00
|
|
|
do {
|
2015-12-15 21:15:58 +08:00
|
|
|
ah->pos--;
|
|
|
|
ah->frags[ah->nfrag].len--;
|
|
|
|
} while (ah->frags[ah->nfrag].len > 1 &&
|
|
|
|
ah->data[ah->pos] != '/');
|
2013-11-13 06:53:21 +08:00
|
|
|
}
|
2013-11-11 06:14:52 +08:00
|
|
|
wsi->u.hdr.ups = URIPS_SEEN_SLASH;
|
2016-04-07 18:57:28 +08:00
|
|
|
if (ah->frags[ah->nfrag].len > 1)
|
|
|
|
break;
|
2013-11-11 06:14:52 +08:00
|
|
|
goto swallow;
|
|
|
|
}
|
2016-04-07 18:57:28 +08:00
|
|
|
|
|
|
|
/* /..[^/] ... regurgitate and allow */
|
|
|
|
|
2015-12-15 22:57:19 +08:00
|
|
|
if (issue_char(wsi, '.') < 0)
|
|
|
|
return -1;
|
2016-04-07 18:57:28 +08:00
|
|
|
if (issue_char(wsi, '.') < 0)
|
|
|
|
return -1;
|
|
|
|
wsi->u.hdr.ups = URIPS_IDLE;
|
2013-11-10 15:15:21 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2015-12-15 22:57:19 +08:00
|
|
|
if (c == '?' && !enc &&
|
|
|
|
!ah->frag_index[WSI_TOKEN_HTTP_URI_ARGS]) { /* start of URI arguments */
|
2016-04-07 18:57:28 +08:00
|
|
|
if (wsi->u.hdr.ues != URIES_IDLE)
|
|
|
|
goto forbid;
|
|
|
|
|
2013-11-13 07:45:17 +08:00
|
|
|
/* seal off uri header */
|
2016-01-20 07:40:13 +08:00
|
|
|
if (issue_char(wsi, '\0') < 0)
|
|
|
|
return -1;
|
2013-11-13 07:45:17 +08:00
|
|
|
|
|
|
|
/* move to using WSI_TOKEN_HTTP_URI_ARGS */
|
2015-12-15 21:15:58 +08:00
|
|
|
ah->nfrag++;
|
2015-12-15 22:57:19 +08:00
|
|
|
if (ah->nfrag >= ARRAY_SIZE(ah->frags))
|
|
|
|
goto excessive;
|
2015-12-15 21:15:58 +08:00
|
|
|
ah->frags[ah->nfrag].offset = ah->pos;
|
|
|
|
ah->frags[ah->nfrag].len = 0;
|
|
|
|
ah->frags[ah->nfrag].nfrag = 0;
|
|
|
|
|
2015-12-18 15:40:03 +08:00
|
|
|
wsi->u.hdr.post_literal_equal = 0;
|
2015-12-15 21:15:58 +08:00
|
|
|
ah->frag_index[WSI_TOKEN_HTTP_URI_ARGS] = ah->nfrag;
|
2015-12-15 23:05:23 +08:00
|
|
|
wsi->u.hdr.ups = URIPS_IDLE;
|
2013-11-13 07:45:17 +08:00
|
|
|
goto swallow;
|
|
|
|
}
|
|
|
|
|
2014-07-19 06:58:53 +08:00
|
|
|
check_eol:
|
|
|
|
/* bail at EOL */
|
|
|
|
if (wsi->u.hdr.parser_state != WSI_TOKEN_CHALLENGE &&
|
ah owns rxbuf
This is intended to solve a longstanding problem with the
relationship between http/1.1 keep-alive and the service
loop.
Ah now contain an rx buffer which is used during header
processing, and the ah may not be detached from the wsi
until the rx buffer is exhausted.
Having the rx buffer in the ah means we can delay using the
rx until a later service loop.
Ah which have pending rx force POLLIN service on the wsi
they are attached to automatically, so we can interleave
general service / connections with draining each ah rx
buffer.
The possible http/1.1 situations and their dispositions are:
1) exactly one set of http headers come. After processing,
the ah is detached since no pending rx left. If more
headers come later, a fresh ah is aqcuired when available
and the rx flow control blocks the read until then.
2) more that one whole set of headers come and we remain in
http mode (no upgrade). The ah is left attached and
returns to the service loop after the first set of headers.
We will get forced service due to the ah having pending
content (respecting flowcontrol) and process the pending
rx in the ah. If we use it all up, we will detach the
ah.
3) one set of http headers come with ws traffic appended.
We service the headers, do the upgrade, and keep the ah
until the remaining ws content is used. When we
exhausted the ws traffix in the ah rx buffer, we
detach the ah.
Since there can be any amount of http/1.1 pipelining on a
connection, and each may be expensive to service, it's now
enforced there is a return to the service loop after each
header set is serviced on a connection.
When I added the forced service for ah with pending buffering,
I added support for it to the windows plat code. However this
is untested.
Signed-off-by: Andy Green <andy.green@linaro.org>
2016-02-15 12:37:04 +08:00
|
|
|
c == '\x0d') {
|
2016-04-07 18:57:28 +08:00
|
|
|
if (wsi->u.hdr.ues != URIES_IDLE)
|
|
|
|
goto forbid;
|
|
|
|
|
2014-07-19 06:58:53 +08:00
|
|
|
c = '\0';
|
|
|
|
wsi->u.hdr.parser_state = WSI_TOKEN_SKIPPING_SAW_CR;
|
|
|
|
lwsl_parser("*\n");
|
|
|
|
}
|
|
|
|
|
2014-08-19 18:34:31 +08:00
|
|
|
n = issue_char(wsi, c);
|
2015-11-02 13:10:33 +08:00
|
|
|
if ((int)n < 0)
|
2014-08-19 18:34:31 +08:00
|
|
|
return -1;
|
|
|
|
if (n > 0)
|
|
|
|
wsi->u.hdr.parser_state = WSI_TOKEN_SKIPPING;
|
|
|
|
|
2013-11-10 15:15:21 +08:00
|
|
|
swallow:
|
2011-01-18 15:39:02 +00:00
|
|
|
/* per-protocol end of headers management */
|
|
|
|
|
replace per header mallocs with single malloc 3 level struct
This big patch replaces the malloc / realloc per header
approach used until now with a single three-level struct
that gets malloc'd during the header union phase and freed
in one go when we transition to a different union phase.
It's more expensive in that we malloc a bit more than 4Kbytes,
but it's a lot cheaper in terms of malloc, frees, heap fragmentation,
no reallocs, nothing to configure. It also moves from arrays of
pointers (8 bytes on x86_64) to unsigned short offsets into the
data array, (2 bytes on all platforms).
The 3-level thing is all in one struct
- array indexed by the header enum, pointing to first "fragment" index
(ie, header type to fragment lookup, or 0 for none)
- array of fragments indexes, enough for 2 x the number of known headers
(fragment array... note that fragments can point to a "next"
fragment if the same header is spread across multiple entries)
- linear char array where the known header payload gets written
(fragments point into null-terminated strings stored in here,
only the known header content is stored)
http headers can legally be split over multiple headers of the same
name which should be concatenated. This scheme does not linearly
conatenate them but uses a linked list in the fragment structs to
link them. There are apis to get the total length and copy out a
linear, concatenated version to a buffer.
Signed-off-by: Andy Green <andy.green@linaro.org>
2013-02-10 18:02:31 +08:00
|
|
|
if (wsi->u.hdr.parser_state == WSI_TOKEN_CHALLENGE)
|
|
|
|
goto set_parsing_complete;
|
2011-05-23 10:00:03 +01:00
|
|
|
break;
|
|
|
|
|
2010-11-08 20:20:42 +00:00
|
|
|
/* collecting and checking a name part */
|
|
|
|
case WSI_TOKEN_NAME_PART:
|
2015-04-07 08:19:30 +08:00
|
|
|
lwsl_parser("WSI_TOKEN_NAME_PART '%c' (mode=%d)\n", c, wsi->mode);
|
2010-11-08 20:20:42 +00:00
|
|
|
|
2013-02-11 17:13:32 +08:00
|
|
|
wsi->u.hdr.lextable_pos =
|
|
|
|
lextable_decode(wsi->u.hdr.lextable_pos, c);
|
2015-04-07 08:19:30 +08:00
|
|
|
/*
|
|
|
|
* Server needs to look out for unknown methods...
|
|
|
|
*/
|
|
|
|
if (wsi->u.hdr.lextable_pos < 0 &&
|
2015-12-17 17:03:59 +08:00
|
|
|
wsi->mode == LWSCM_HTTP_SERVING) {
|
2013-02-04 09:24:18 +08:00
|
|
|
/* this is not a header we know about */
|
2015-01-10 19:01:52 -08:00
|
|
|
for (m = 0; m < ARRAY_SIZE(methods); m++)
|
2015-12-15 21:15:58 +08:00
|
|
|
if (ah->frag_index[methods[m]]) {
|
2015-01-10 19:01:52 -08:00
|
|
|
/*
|
|
|
|
* already had the method, no idea what
|
2015-04-07 08:19:30 +08:00
|
|
|
* this crap from the client is, ignore
|
2015-01-10 19:01:52 -08:00
|
|
|
*/
|
|
|
|
wsi->u.hdr.parser_state = WSI_TOKEN_SKIPPING;
|
|
|
|
break;
|
|
|
|
}
|
2013-02-12 12:52:39 +08:00
|
|
|
/*
|
2015-04-07 08:19:30 +08:00
|
|
|
* hm it's an unknown http method from a client in fact,
|
2013-02-12 12:52:39 +08:00
|
|
|
* treat as dangerous
|
|
|
|
*/
|
2015-01-30 12:04:43 +00:00
|
|
|
if (m == ARRAY_SIZE(methods)) {
|
|
|
|
lwsl_info("Unknown method - dropping\n");
|
2016-04-07 18:57:28 +08:00
|
|
|
goto forbid;
|
2015-01-30 12:04:43 +00:00
|
|
|
}
|
|
|
|
break;
|
2013-02-04 09:24:18 +08:00
|
|
|
}
|
2015-04-07 08:19:30 +08:00
|
|
|
/*
|
|
|
|
* ...otherwise for a client, let him ignore unknown headers
|
|
|
|
* coming from the server
|
|
|
|
*/
|
|
|
|
if (wsi->u.hdr.lextable_pos < 0) {
|
|
|
|
wsi->u.hdr.parser_state = WSI_TOKEN_SKIPPING;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2014-03-09 11:49:21 +08:00
|
|
|
if (lextable[wsi->u.hdr.lextable_pos] < FAIL_CHAR) {
|
2013-02-04 09:24:18 +08:00
|
|
|
/* terminal state */
|
|
|
|
|
2014-11-30 12:36:09 +08:00
|
|
|
n = ((unsigned int)lextable[wsi->u.hdr.lextable_pos] << 8) |
|
|
|
|
lextable[wsi->u.hdr.lextable_pos + 1];
|
2013-01-18 01:55:48 +08:00
|
|
|
|
2013-02-18 10:22:42 +08:00
|
|
|
lwsl_parser("known hdr %d\n", n);
|
2015-01-10 19:01:52 -08:00
|
|
|
for (m = 0; m < ARRAY_SIZE(methods); m++)
|
|
|
|
if (n == methods[m] &&
|
2016-01-26 20:56:56 +08:00
|
|
|
ah->frag_index[methods[m]]) {
|
2015-01-10 19:01:52 -08:00
|
|
|
lwsl_warn("Duplicated method\n");
|
|
|
|
return -1;
|
|
|
|
}
|
2013-02-12 13:10:19 +08:00
|
|
|
|
2012-04-03 17:02:20 +02:00
|
|
|
/*
|
|
|
|
* WSORIGIN is protocol equiv to ORIGIN,
|
|
|
|
* JWebSocket likes to send it, map to ORIGIN
|
|
|
|
*/
|
|
|
|
if (n == WSI_TOKEN_SWORIGIN)
|
|
|
|
n = WSI_TOKEN_ORIGIN;
|
|
|
|
|
replace per header mallocs with single malloc 3 level struct
This big patch replaces the malloc / realloc per header
approach used until now with a single three-level struct
that gets malloc'd during the header union phase and freed
in one go when we transition to a different union phase.
It's more expensive in that we malloc a bit more than 4Kbytes,
but it's a lot cheaper in terms of malloc, frees, heap fragmentation,
no reallocs, nothing to configure. It also moves from arrays of
pointers (8 bytes on x86_64) to unsigned short offsets into the
data array, (2 bytes on all platforms).
The 3-level thing is all in one struct
- array indexed by the header enum, pointing to first "fragment" index
(ie, header type to fragment lookup, or 0 for none)
- array of fragments indexes, enough for 2 x the number of known headers
(fragment array... note that fragments can point to a "next"
fragment if the same header is spread across multiple entries)
- linear char array where the known header payload gets written
(fragments point into null-terminated strings stored in here,
only the known header content is stored)
http headers can legally be split over multiple headers of the same
name which should be concatenated. This scheme does not linearly
conatenate them but uses a linked list in the fragment structs to
link them. There are apis to get the total length and copy out a
linear, concatenated version to a buffer.
Signed-off-by: Andy Green <andy.green@linaro.org>
2013-02-10 18:02:31 +08:00
|
|
|
wsi->u.hdr.parser_state = (enum lws_token_indexes)
|
|
|
|
(WSI_TOKEN_GET_URI + n);
|
2014-06-29 01:34:24 -04:00
|
|
|
|
2014-08-22 19:38:17 +08:00
|
|
|
if (context->token_limits)
|
|
|
|
wsi->u.hdr.current_token_limit =
|
2015-12-15 21:15:58 +08:00
|
|
|
context->token_limits->token_limit[
|
|
|
|
wsi->u.hdr.parser_state];
|
2014-08-22 19:38:17 +08:00
|
|
|
else
|
2015-12-25 12:44:12 +08:00
|
|
|
wsi->u.hdr.current_token_limit =
|
|
|
|
wsi->context->max_http_header_data;
|
2014-06-29 01:34:24 -04:00
|
|
|
|
replace per header mallocs with single malloc 3 level struct
This big patch replaces the malloc / realloc per header
approach used until now with a single three-level struct
that gets malloc'd during the header union phase and freed
in one go when we transition to a different union phase.
It's more expensive in that we malloc a bit more than 4Kbytes,
but it's a lot cheaper in terms of malloc, frees, heap fragmentation,
no reallocs, nothing to configure. It also moves from arrays of
pointers (8 bytes on x86_64) to unsigned short offsets into the
data array, (2 bytes on all platforms).
The 3-level thing is all in one struct
- array indexed by the header enum, pointing to first "fragment" index
(ie, header type to fragment lookup, or 0 for none)
- array of fragments indexes, enough for 2 x the number of known headers
(fragment array... note that fragments can point to a "next"
fragment if the same header is spread across multiple entries)
- linear char array where the known header payload gets written
(fragments point into null-terminated strings stored in here,
only the known header content is stored)
http headers can legally be split over multiple headers of the same
name which should be concatenated. This scheme does not linearly
conatenate them but uses a linked list in the fragment structs to
link them. There are apis to get the total length and copy out a
linear, concatenated version to a buffer.
Signed-off-by: Andy Green <andy.green@linaro.org>
2013-02-10 18:02:31 +08:00
|
|
|
if (wsi->u.hdr.parser_state == WSI_TOKEN_CHALLENGE)
|
|
|
|
goto set_parsing_complete;
|
2011-01-23 16:50:33 +00:00
|
|
|
|
replace per header mallocs with single malloc 3 level struct
This big patch replaces the malloc / realloc per header
approach used until now with a single three-level struct
that gets malloc'd during the header union phase and freed
in one go when we transition to a different union phase.
It's more expensive in that we malloc a bit more than 4Kbytes,
but it's a lot cheaper in terms of malloc, frees, heap fragmentation,
no reallocs, nothing to configure. It also moves from arrays of
pointers (8 bytes on x86_64) to unsigned short offsets into the
data array, (2 bytes on all platforms).
The 3-level thing is all in one struct
- array indexed by the header enum, pointing to first "fragment" index
(ie, header type to fragment lookup, or 0 for none)
- array of fragments indexes, enough for 2 x the number of known headers
(fragment array... note that fragments can point to a "next"
fragment if the same header is spread across multiple entries)
- linear char array where the known header payload gets written
(fragments point into null-terminated strings stored in here,
only the known header content is stored)
http headers can legally be split over multiple headers of the same
name which should be concatenated. This scheme does not linearly
conatenate them but uses a linked list in the fragment structs to
link them. There are apis to get the total length and copy out a
linear, concatenated version to a buffer.
Signed-off-by: Andy Green <andy.green@linaro.org>
2013-02-10 18:02:31 +08:00
|
|
|
goto start_fragment;
|
|
|
|
}
|
|
|
|
break;
|
2012-04-05 10:31:48 +08:00
|
|
|
|
replace per header mallocs with single malloc 3 level struct
This big patch replaces the malloc / realloc per header
approach used until now with a single three-level struct
that gets malloc'd during the header union phase and freed
in one go when we transition to a different union phase.
It's more expensive in that we malloc a bit more than 4Kbytes,
but it's a lot cheaper in terms of malloc, frees, heap fragmentation,
no reallocs, nothing to configure. It also moves from arrays of
pointers (8 bytes on x86_64) to unsigned short offsets into the
data array, (2 bytes on all platforms).
The 3-level thing is all in one struct
- array indexed by the header enum, pointing to first "fragment" index
(ie, header type to fragment lookup, or 0 for none)
- array of fragments indexes, enough for 2 x the number of known headers
(fragment array... note that fragments can point to a "next"
fragment if the same header is spread across multiple entries)
- linear char array where the known header payload gets written
(fragments point into null-terminated strings stored in here,
only the known header content is stored)
http headers can legally be split over multiple headers of the same
name which should be concatenated. This scheme does not linearly
conatenate them but uses a linked list in the fragment structs to
link them. There are apis to get the total length and copy out a
linear, concatenated version to a buffer.
Signed-off-by: Andy Green <andy.green@linaro.org>
2013-02-10 18:02:31 +08:00
|
|
|
start_fragment:
|
2015-12-15 21:15:58 +08:00
|
|
|
ah->nfrag++;
|
2015-12-15 22:57:19 +08:00
|
|
|
excessive:
|
2015-12-15 21:15:58 +08:00
|
|
|
if (ah->nfrag == ARRAY_SIZE(ah->frags)) {
|
2013-02-11 17:13:32 +08:00
|
|
|
lwsl_warn("More hdr frags than we can deal with\n");
|
replace per header mallocs with single malloc 3 level struct
This big patch replaces the malloc / realloc per header
approach used until now with a single three-level struct
that gets malloc'd during the header union phase and freed
in one go when we transition to a different union phase.
It's more expensive in that we malloc a bit more than 4Kbytes,
but it's a lot cheaper in terms of malloc, frees, heap fragmentation,
no reallocs, nothing to configure. It also moves from arrays of
pointers (8 bytes on x86_64) to unsigned short offsets into the
data array, (2 bytes on all platforms).
The 3-level thing is all in one struct
- array indexed by the header enum, pointing to first "fragment" index
(ie, header type to fragment lookup, or 0 for none)
- array of fragments indexes, enough for 2 x the number of known headers
(fragment array... note that fragments can point to a "next"
fragment if the same header is spread across multiple entries)
- linear char array where the known header payload gets written
(fragments point into null-terminated strings stored in here,
only the known header content is stored)
http headers can legally be split over multiple headers of the same
name which should be concatenated. This scheme does not linearly
conatenate them but uses a linked list in the fragment structs to
link them. There are apis to get the total length and copy out a
linear, concatenated version to a buffer.
Signed-off-by: Andy Green <andy.green@linaro.org>
2013-02-10 18:02:31 +08:00
|
|
|
return -1;
|
2010-11-08 20:20:42 +00:00
|
|
|
}
|
|
|
|
|
2015-12-15 21:15:58 +08:00
|
|
|
ah->frags[ah->nfrag].offset = ah->pos;
|
|
|
|
ah->frags[ah->nfrag].len = 0;
|
2016-01-21 10:57:39 +08:00
|
|
|
ah->frags[ah->nfrag].nfrag = 0;
|
replace per header mallocs with single malloc 3 level struct
This big patch replaces the malloc / realloc per header
approach used until now with a single three-level struct
that gets malloc'd during the header union phase and freed
in one go when we transition to a different union phase.
It's more expensive in that we malloc a bit more than 4Kbytes,
but it's a lot cheaper in terms of malloc, frees, heap fragmentation,
no reallocs, nothing to configure. It also moves from arrays of
pointers (8 bytes on x86_64) to unsigned short offsets into the
data array, (2 bytes on all platforms).
The 3-level thing is all in one struct
- array indexed by the header enum, pointing to first "fragment" index
(ie, header type to fragment lookup, or 0 for none)
- array of fragments indexes, enough for 2 x the number of known headers
(fragment array... note that fragments can point to a "next"
fragment if the same header is spread across multiple entries)
- linear char array where the known header payload gets written
(fragments point into null-terminated strings stored in here,
only the known header content is stored)
http headers can legally be split over multiple headers of the same
name which should be concatenated. This scheme does not linearly
conatenate them but uses a linked list in the fragment structs to
link them. There are apis to get the total length and copy out a
linear, concatenated version to a buffer.
Signed-off-by: Andy Green <andy.green@linaro.org>
2013-02-10 18:02:31 +08:00
|
|
|
|
2015-12-15 21:15:58 +08:00
|
|
|
n = ah->frag_index[wsi->u.hdr.parser_state];
|
replace per header mallocs with single malloc 3 level struct
This big patch replaces the malloc / realloc per header
approach used until now with a single three-level struct
that gets malloc'd during the header union phase and freed
in one go when we transition to a different union phase.
It's more expensive in that we malloc a bit more than 4Kbytes,
but it's a lot cheaper in terms of malloc, frees, heap fragmentation,
no reallocs, nothing to configure. It also moves from arrays of
pointers (8 bytes on x86_64) to unsigned short offsets into the
data array, (2 bytes on all platforms).
The 3-level thing is all in one struct
- array indexed by the header enum, pointing to first "fragment" index
(ie, header type to fragment lookup, or 0 for none)
- array of fragments indexes, enough for 2 x the number of known headers
(fragment array... note that fragments can point to a "next"
fragment if the same header is spread across multiple entries)
- linear char array where the known header payload gets written
(fragments point into null-terminated strings stored in here,
only the known header content is stored)
http headers can legally be split over multiple headers of the same
name which should be concatenated. This scheme does not linearly
conatenate them but uses a linked list in the fragment structs to
link them. There are apis to get the total length and copy out a
linear, concatenated version to a buffer.
Signed-off-by: Andy Green <andy.green@linaro.org>
2013-02-10 18:02:31 +08:00
|
|
|
if (!n) { /* first fragment */
|
2015-12-15 21:15:58 +08:00
|
|
|
ah->frag_index[wsi->u.hdr.parser_state] = ah->nfrag;
|
improve minilex use external parsing header
Clean up minilex
Move the header output to stdout
Introduce lexfile.h as the header output
Use lexfile.h in both minilex itself and lws
Add the following header support
"Accept:",
"If-Modified-Since:",
"Accept-Encoding:",
"Accept-Language:",
"Pragma:",
"Cache-Control:",
"Authorization:",
"Cookie:",
"Content-Type:",
"Date:",
"Range:",
"Referer:"
Signed-off-by: Andy Green <andy.green@linaro.org>
2013-11-09 10:09:09 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
/* continuation */
|
2015-12-15 21:15:58 +08:00
|
|
|
while (ah->frags[n].nfrag)
|
2016-01-21 10:57:39 +08:00
|
|
|
n = ah->frags[n].nfrag;
|
2015-12-15 21:15:58 +08:00
|
|
|
ah->frags[n].nfrag = ah->nfrag;
|
replace per header mallocs with single malloc 3 level struct
This big patch replaces the malloc / realloc per header
approach used until now with a single three-level struct
that gets malloc'd during the header union phase and freed
in one go when we transition to a different union phase.
It's more expensive in that we malloc a bit more than 4Kbytes,
but it's a lot cheaper in terms of malloc, frees, heap fragmentation,
no reallocs, nothing to configure. It also moves from arrays of
pointers (8 bytes on x86_64) to unsigned short offsets into the
data array, (2 bytes on all platforms).
The 3-level thing is all in one struct
- array indexed by the header enum, pointing to first "fragment" index
(ie, header type to fragment lookup, or 0 for none)
- array of fragments indexes, enough for 2 x the number of known headers
(fragment array... note that fragments can point to a "next"
fragment if the same header is spread across multiple entries)
- linear char array where the known header payload gets written
(fragments point into null-terminated strings stored in here,
only the known header content is stored)
http headers can legally be split over multiple headers of the same
name which should be concatenated. This scheme does not linearly
conatenate them but uses a linked list in the fragment structs to
link them. There are apis to get the total length and copy out a
linear, concatenated version to a buffer.
Signed-off-by: Andy Green <andy.green@linaro.org>
2013-02-10 18:02:31 +08:00
|
|
|
|
2016-01-20 07:40:13 +08:00
|
|
|
if (issue_char(wsi, ' ') < 0)
|
improve minilex use external parsing header
Clean up minilex
Move the header output to stdout
Introduce lexfile.h as the header output
Use lexfile.h in both minilex itself and lws
Add the following header support
"Accept:",
"If-Modified-Since:",
"Accept-Encoding:",
"Accept-Language:",
"Pragma:",
"Cache-Control:",
"Authorization:",
"Cookie:",
"Content-Type:",
"Date:",
"Range:",
"Referer:"
Signed-off-by: Andy Green <andy.green@linaro.org>
2013-11-09 10:09:09 +08:00
|
|
|
return -1;
|
2013-02-04 09:24:18 +08:00
|
|
|
break;
|
2010-11-13 10:03:47 +00:00
|
|
|
|
2010-11-08 20:20:42 +00:00
|
|
|
/* skipping arg part of a name we didn't recognize */
|
|
|
|
case WSI_TOKEN_SKIPPING:
|
2013-01-10 19:50:35 +08:00
|
|
|
lwsl_parser("WSI_TOKEN_SKIPPING '%c'\n", c);
|
2013-11-19 13:38:16 +01:00
|
|
|
|
2010-11-08 20:20:42 +00:00
|
|
|
if (c == '\x0d')
|
2013-01-21 11:04:23 +08:00
|
|
|
wsi->u.hdr.parser_state = WSI_TOKEN_SKIPPING_SAW_CR;
|
2010-11-08 20:20:42 +00:00
|
|
|
break;
|
2013-02-04 09:09:19 +08:00
|
|
|
|
2010-11-08 20:20:42 +00:00
|
|
|
case WSI_TOKEN_SKIPPING_SAW_CR:
|
2013-01-10 19:50:35 +08:00
|
|
|
lwsl_parser("WSI_TOKEN_SKIPPING_SAW_CR '%c'\n", c);
|
2016-04-07 18:57:28 +08:00
|
|
|
if (wsi->u.hdr.ues != URIES_IDLE)
|
|
|
|
goto forbid;
|
2013-01-18 01:55:48 +08:00
|
|
|
if (c == '\x0a') {
|
2013-01-21 11:04:23 +08:00
|
|
|
wsi->u.hdr.parser_state = WSI_TOKEN_NAME_PART;
|
|
|
|
wsi->u.hdr.lextable_pos = 0;
|
2013-01-18 01:55:48 +08:00
|
|
|
} else
|
2013-01-21 11:04:23 +08:00
|
|
|
wsi->u.hdr.parser_state = WSI_TOKEN_SKIPPING;
|
2010-11-08 20:20:42 +00:00
|
|
|
break;
|
|
|
|
/* we're done, ignore anything else */
|
2013-11-19 13:38:16 +01:00
|
|
|
|
2010-11-08 20:20:42 +00:00
|
|
|
case WSI_PARSING_COMPLETE:
|
2013-01-10 19:50:35 +08:00
|
|
|
lwsl_parser("WSI_PARSING_COMPLETE '%c'\n", c);
|
2010-11-08 20:20:42 +00:00
|
|
|
break;
|
|
|
|
}
|
2010-11-13 10:03:47 +00:00
|
|
|
|
2013-02-04 09:09:19 +08:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
set_parsing_complete:
|
2016-04-07 18:57:28 +08:00
|
|
|
if (wsi->u.hdr.ues != URIES_IDLE)
|
|
|
|
goto forbid;
|
replace per header mallocs with single malloc 3 level struct
This big patch replaces the malloc / realloc per header
approach used until now with a single three-level struct
that gets malloc'd during the header union phase and freed
in one go when we transition to a different union phase.
It's more expensive in that we malloc a bit more than 4Kbytes,
but it's a lot cheaper in terms of malloc, frees, heap fragmentation,
no reallocs, nothing to configure. It also moves from arrays of
pointers (8 bytes on x86_64) to unsigned short offsets into the
data array, (2 bytes on all platforms).
The 3-level thing is all in one struct
- array indexed by the header enum, pointing to first "fragment" index
(ie, header type to fragment lookup, or 0 for none)
- array of fragments indexes, enough for 2 x the number of known headers
(fragment array... note that fragments can point to a "next"
fragment if the same header is spread across multiple entries)
- linear char array where the known header payload gets written
(fragments point into null-terminated strings stored in here,
only the known header content is stored)
http headers can legally be split over multiple headers of the same
name which should be concatenated. This scheme does not linearly
conatenate them but uses a linked list in the fragment structs to
link them. There are apis to get the total length and copy out a
linear, concatenated version to a buffer.
Signed-off-by: Andy Green <andy.green@linaro.org>
2013-02-10 18:02:31 +08:00
|
|
|
if (lws_hdr_total_length(wsi, WSI_TOKEN_UPGRADE)) {
|
2013-02-11 17:13:32 +08:00
|
|
|
if (lws_hdr_total_length(wsi, WSI_TOKEN_VERSION))
|
2013-02-06 15:15:25 +09:00
|
|
|
wsi->ietf_spec_revision =
|
replace per header mallocs with single malloc 3 level struct
This big patch replaces the malloc / realloc per header
approach used until now with a single three-level struct
that gets malloc'd during the header union phase and freed
in one go when we transition to a different union phase.
It's more expensive in that we malloc a bit more than 4Kbytes,
but it's a lot cheaper in terms of malloc, frees, heap fragmentation,
no reallocs, nothing to configure. It also moves from arrays of
pointers (8 bytes on x86_64) to unsigned short offsets into the
data array, (2 bytes on all platforms).
The 3-level thing is all in one struct
- array indexed by the header enum, pointing to first "fragment" index
(ie, header type to fragment lookup, or 0 for none)
- array of fragments indexes, enough for 2 x the number of known headers
(fragment array... note that fragments can point to a "next"
fragment if the same header is spread across multiple entries)
- linear char array where the known header payload gets written
(fragments point into null-terminated strings stored in here,
only the known header content is stored)
http headers can legally be split over multiple headers of the same
name which should be concatenated. This scheme does not linearly
conatenate them but uses a linked list in the fragment structs to
link them. There are apis to get the total length and copy out a
linear, concatenated version to a buffer.
Signed-off-by: Andy Green <andy.green@linaro.org>
2013-02-10 18:02:31 +08:00
|
|
|
atoi(lws_hdr_simple_ptr(wsi, WSI_TOKEN_VERSION));
|
2013-02-04 09:09:19 +08:00
|
|
|
|
2013-02-11 17:13:32 +08:00
|
|
|
lwsl_parser("v%02d hdrs completed\n", wsi->ietf_spec_revision);
|
2013-02-06 15:15:25 +09:00
|
|
|
}
|
2013-02-04 09:09:19 +08:00
|
|
|
wsi->u.hdr.parser_state = WSI_PARSING_COMPLETE;
|
2013-02-11 21:43:41 +08:00
|
|
|
wsi->hdr_parsing_completed = 1;
|
2013-02-04 09:09:19 +08:00
|
|
|
|
2010-11-08 20:20:42 +00:00
|
|
|
return 0;
|
2016-04-07 18:57:28 +08:00
|
|
|
|
|
|
|
forbid:
|
|
|
|
lwsl_notice(" forbidding on uri sanitation\n");
|
|
|
|
lws_return_http_status(wsi, HTTP_STATUS_FORBIDDEN, NULL);
|
|
|
|
return -1;
|
2010-11-08 20:20:42 +00:00
|
|
|
}
|
|
|
|
|
2011-02-09 08:49:14 +00:00
|
|
|
|
2013-01-18 09:49:20 +08:00
|
|
|
/**
|
|
|
|
* lws_frame_is_binary: true if the current frame was sent in binary mode
|
|
|
|
*
|
|
|
|
* @wsi: the connection we are inquiring about
|
|
|
|
*
|
|
|
|
* This is intended to be called from the LWS_CALLBACK_RECEIVE callback if
|
|
|
|
* it's interested to see if the frame it's dealing with was sent in binary
|
|
|
|
* mode.
|
|
|
|
*/
|
|
|
|
|
2015-12-04 11:08:32 +08:00
|
|
|
LWS_VISIBLE int lws_frame_is_binary(struct lws *wsi)
|
2013-01-18 09:49:20 +08:00
|
|
|
{
|
2013-01-21 11:04:23 +08:00
|
|
|
return wsi->u.ws.frame_is_binary;
|
2013-01-18 09:49:20 +08:00
|
|
|
}
|
2011-02-09 08:49:14 +00:00
|
|
|
|
2011-05-23 10:00:03 +01:00
|
|
|
int
|
2015-12-04 11:08:32 +08:00
|
|
|
lws_rx_sm(struct lws *wsi, unsigned char c)
|
2010-11-08 20:20:42 +00:00
|
|
|
{
|
2016-01-19 03:34:24 +08:00
|
|
|
struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
|
2015-10-05 11:35:52 +01:00
|
|
|
int callback_action = LWS_CALLBACK_RECEIVE;
|
2016-01-29 21:18:54 +08:00
|
|
|
int ret = 0, n, rx_draining_ext = 0;
|
|
|
|
struct lws_tokens eff_buf;
|
|
|
|
|
2016-01-11 11:34:01 +08:00
|
|
|
if (wsi->socket_is_permanently_unusable)
|
|
|
|
return -1;
|
2010-11-08 20:20:42 +00:00
|
|
|
|
|
|
|
switch (wsi->lws_rx_parse_state) {
|
|
|
|
case LWS_RXPS_NEW:
|
2016-01-11 11:34:01 +08:00
|
|
|
if (wsi->u.ws.rx_draining_ext) {
|
2016-01-19 03:34:24 +08:00
|
|
|
struct lws **w = &pt->rx_draining_ext_list;
|
2016-01-11 11:34:01 +08:00
|
|
|
|
|
|
|
eff_buf.token = NULL;
|
|
|
|
eff_buf.token_len = 0;
|
|
|
|
wsi->u.ws.rx_draining_ext = 0;
|
|
|
|
/* remove us from context draining ext list */
|
|
|
|
while (*w) {
|
|
|
|
if (*w == wsi) {
|
|
|
|
*w = wsi->u.ws.rx_draining_ext_list;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
w = &((*w)->u.ws.rx_draining_ext_list);
|
|
|
|
}
|
|
|
|
wsi->u.ws.rx_draining_ext_list = NULL;
|
|
|
|
rx_draining_ext = 1;
|
|
|
|
lwsl_err("%s: doing draining flow\n", __func__);
|
2010-11-13 10:03:47 +00:00
|
|
|
|
2016-01-11 11:34:01 +08:00
|
|
|
goto drain_extension;
|
|
|
|
}
|
2010-11-08 20:20:42 +00:00
|
|
|
switch (wsi->ietf_spec_revision) {
|
2011-09-25 09:32:54 +01:00
|
|
|
case 13:
|
2011-04-24 05:46:23 +01:00
|
|
|
/*
|
|
|
|
* no prepended frame key any more
|
|
|
|
*/
|
2013-01-21 11:04:23 +08:00
|
|
|
wsi->u.ws.all_zero_nonce = 1;
|
2011-04-24 05:46:23 +01:00
|
|
|
goto handle_first;
|
|
|
|
|
2011-02-09 08:49:14 +00:00
|
|
|
default:
|
2013-02-11 17:13:32 +08:00
|
|
|
lwsl_warn("lws_rx_sm: unknown spec version %d\n",
|
|
|
|
wsi->ietf_spec_revision);
|
2011-02-09 08:49:14 +00:00
|
|
|
break;
|
2010-11-08 20:20:42 +00:00
|
|
|
}
|
2010-11-11 09:22:22 +00:00
|
|
|
break;
|
2016-01-11 11:34:01 +08:00
|
|
|
case LWS_RXPS_04_mask_1:
|
|
|
|
wsi->u.ws.mask[1] = c;
|
2011-02-10 09:22:35 +00:00
|
|
|
if (c)
|
2013-01-21 11:04:23 +08:00
|
|
|
wsi->u.ws.all_zero_nonce = 0;
|
2016-01-11 11:34:01 +08:00
|
|
|
wsi->lws_rx_parse_state = LWS_RXPS_04_mask_2;
|
2011-01-18 18:14:26 +00:00
|
|
|
break;
|
2016-01-11 11:34:01 +08:00
|
|
|
case LWS_RXPS_04_mask_2:
|
|
|
|
wsi->u.ws.mask[2] = c;
|
2011-02-10 09:22:35 +00:00
|
|
|
if (c)
|
2013-01-21 11:04:23 +08:00
|
|
|
wsi->u.ws.all_zero_nonce = 0;
|
2016-01-11 11:34:01 +08:00
|
|
|
wsi->lws_rx_parse_state = LWS_RXPS_04_mask_3;
|
2011-01-18 18:14:26 +00:00
|
|
|
break;
|
2016-01-11 11:34:01 +08:00
|
|
|
case LWS_RXPS_04_mask_3:
|
|
|
|
wsi->u.ws.mask[3] = c;
|
2011-02-10 09:22:35 +00:00
|
|
|
if (c)
|
2013-01-21 11:04:23 +08:00
|
|
|
wsi->u.ws.all_zero_nonce = 0;
|
2011-01-18 18:14:26 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* start from the zero'th byte in the XOR key buffer since
|
|
|
|
* this is the start of a frame with a new key
|
|
|
|
*/
|
|
|
|
|
2016-01-11 11:34:01 +08:00
|
|
|
wsi->u.ws.mask_idx = 0;
|
2011-01-23 16:50:33 +00:00
|
|
|
|
2011-01-18 18:14:26 +00:00
|
|
|
wsi->lws_rx_parse_state = LWS_RXPS_04_FRAME_HDR_1;
|
|
|
|
break;
|
2011-01-19 12:20:27 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* 04 logical framing from the spec (all this is masked when incoming
|
|
|
|
* and has to be unmasked)
|
|
|
|
*
|
|
|
|
* We ignore the possibility of extension data because we don't
|
|
|
|
* negotiate any extensions at the moment.
|
2011-01-23 16:50:33 +00:00
|
|
|
*
|
2011-01-19 12:20:27 +00:00
|
|
|
* 0 1 2 3
|
|
|
|
* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
|
|
|
|
* +-+-+-+-+-------+-+-------------+-------------------------------+
|
|
|
|
* |F|R|R|R| opcode|R| Payload len | Extended payload length |
|
|
|
|
* |I|S|S|S| (4) |S| (7) | (16/63) |
|
|
|
|
* |N|V|V|V| |V| | (if payload len==126/127) |
|
|
|
|
* | |1|2|3| |4| | |
|
|
|
|
* +-+-+-+-+-------+-+-------------+ - - - - - - - - - - - - - - - +
|
|
|
|
* | Extended payload length continued, if payload len == 127 |
|
|
|
|
* + - - - - - - - - - - - - - - - +-------------------------------+
|
|
|
|
* | | Extension data |
|
|
|
|
* +-------------------------------+ - - - - - - - - - - - - - - - +
|
|
|
|
* : :
|
|
|
|
* +---------------------------------------------------------------+
|
|
|
|
* : Application data :
|
|
|
|
* +---------------------------------------------------------------+
|
|
|
|
*
|
|
|
|
* We pass payload through to userland as soon as we get it, ignoring
|
|
|
|
* FIN. It's up to userland to buffer it up if it wants to see a
|
|
|
|
* whole unfragmented block of the original size (which may be up to
|
|
|
|
* 2^63 long!)
|
|
|
|
*/
|
|
|
|
|
|
|
|
case LWS_RXPS_04_FRAME_HDR_1:
|
2011-04-24 05:46:23 +01:00
|
|
|
handle_first:
|
|
|
|
|
2013-01-21 11:04:23 +08:00
|
|
|
wsi->u.ws.opcode = c & 0xf;
|
|
|
|
wsi->u.ws.rsv = c & 0x70;
|
|
|
|
wsi->u.ws.final = !!((c >> 7) & 1);
|
2013-02-28 17:11:29 +08:00
|
|
|
|
2013-01-21 11:04:23 +08:00
|
|
|
switch (wsi->u.ws.opcode) {
|
2015-12-17 17:03:59 +08:00
|
|
|
case LWSWSOPC_TEXT_FRAME:
|
|
|
|
case LWSWSOPC_BINARY_FRAME:
|
2016-01-11 11:34:01 +08:00
|
|
|
wsi->u.ws.rsv_first_msg = (c & 0x70);
|
2013-02-11 17:13:32 +08:00
|
|
|
wsi->u.ws.frame_is_binary =
|
2015-12-17 17:03:59 +08:00
|
|
|
wsi->u.ws.opcode == LWSWSOPC_BINARY_FRAME;
|
2013-01-19 10:39:35 +08:00
|
|
|
break;
|
2016-01-11 11:34:01 +08:00
|
|
|
case 3:
|
|
|
|
case 4:
|
|
|
|
case 5:
|
|
|
|
case 6:
|
|
|
|
case 7:
|
|
|
|
case 0xb:
|
|
|
|
case 0xc:
|
|
|
|
case 0xd:
|
|
|
|
case 0xe:
|
|
|
|
case 0xf:
|
|
|
|
lwsl_info("illegal opcode\n");
|
|
|
|
return -1;
|
2013-01-19 10:39:35 +08:00
|
|
|
}
|
2011-01-19 12:20:27 +00:00
|
|
|
wsi->lws_rx_parse_state = LWS_RXPS_04_FRAME_HDR_LEN;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case LWS_RXPS_04_FRAME_HDR_LEN:
|
|
|
|
|
2013-01-21 11:04:23 +08:00
|
|
|
wsi->u.ws.this_frame_masked = !!(c & 0x80);
|
2011-04-24 05:46:23 +01:00
|
|
|
|
2011-09-25 10:46:31 +01:00
|
|
|
switch (c & 0x7f) {
|
2011-01-19 12:20:27 +00:00
|
|
|
case 126:
|
|
|
|
/* control frames are not allowed to have big lengths */
|
2013-01-21 11:04:23 +08:00
|
|
|
if (wsi->u.ws.opcode & 8)
|
2011-04-24 06:19:22 +01:00
|
|
|
goto illegal_ctl_length;
|
|
|
|
|
2011-01-19 12:20:27 +00:00
|
|
|
wsi->lws_rx_parse_state = LWS_RXPS_04_FRAME_HDR_LEN16_2;
|
|
|
|
break;
|
|
|
|
case 127:
|
|
|
|
/* control frames are not allowed to have big lengths */
|
2013-01-21 11:04:23 +08:00
|
|
|
if (wsi->u.ws.opcode & 8)
|
2011-04-24 06:19:22 +01:00
|
|
|
goto illegal_ctl_length;
|
|
|
|
|
2011-01-19 12:20:27 +00:00
|
|
|
wsi->lws_rx_parse_state = LWS_RXPS_04_FRAME_HDR_LEN64_8;
|
|
|
|
break;
|
|
|
|
default:
|
2013-01-21 11:04:23 +08:00
|
|
|
wsi->u.ws.rx_packet_length = c & 0x7f;
|
|
|
|
if (wsi->u.ws.this_frame_masked)
|
2011-04-24 05:46:23 +01:00
|
|
|
wsi->lws_rx_parse_state =
|
|
|
|
LWS_RXPS_07_COLLECT_FRAME_KEY_1;
|
|
|
|
else
|
2014-02-15 16:00:37 +08:00
|
|
|
if (wsi->u.ws.rx_packet_length)
|
|
|
|
wsi->lws_rx_parse_state =
|
2011-01-19 12:20:27 +00:00
|
|
|
LWS_RXPS_PAYLOAD_UNTIL_LENGTH_EXHAUSTED;
|
2014-02-15 16:00:37 +08:00
|
|
|
else {
|
|
|
|
wsi->lws_rx_parse_state = LWS_RXPS_NEW;
|
|
|
|
goto spill;
|
|
|
|
}
|
2011-01-19 12:20:27 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
|
|
|
case LWS_RXPS_04_FRAME_HDR_LEN16_2:
|
2013-01-21 11:04:23 +08:00
|
|
|
wsi->u.ws.rx_packet_length = c << 8;
|
2011-01-19 12:20:27 +00:00
|
|
|
wsi->lws_rx_parse_state = LWS_RXPS_04_FRAME_HDR_LEN16_1;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case LWS_RXPS_04_FRAME_HDR_LEN16_1:
|
2013-01-21 11:04:23 +08:00
|
|
|
wsi->u.ws.rx_packet_length |= c;
|
|
|
|
if (wsi->u.ws.this_frame_masked)
|
2011-04-24 05:46:23 +01:00
|
|
|
wsi->lws_rx_parse_state =
|
|
|
|
LWS_RXPS_07_COLLECT_FRAME_KEY_1;
|
|
|
|
else
|
|
|
|
wsi->lws_rx_parse_state =
|
|
|
|
LWS_RXPS_PAYLOAD_UNTIL_LENGTH_EXHAUSTED;
|
2011-01-19 12:20:27 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case LWS_RXPS_04_FRAME_HDR_LEN64_8:
|
|
|
|
if (c & 0x80) {
|
2013-01-10 19:50:35 +08:00
|
|
|
lwsl_warn("b63 of length must be zero\n");
|
2011-01-19 12:20:27 +00:00
|
|
|
/* kill the connection */
|
|
|
|
return -1;
|
|
|
|
}
|
2011-01-27 06:45:53 +00:00
|
|
|
#if defined __LP64__
|
2013-01-21 11:04:23 +08:00
|
|
|
wsi->u.ws.rx_packet_length = ((size_t)c) << 56;
|
2011-01-27 06:45:53 +00:00
|
|
|
#else
|
2013-01-21 11:04:23 +08:00
|
|
|
wsi->u.ws.rx_packet_length = 0;
|
2011-01-27 06:45:53 +00:00
|
|
|
#endif
|
2011-01-19 12:20:27 +00:00
|
|
|
wsi->lws_rx_parse_state = LWS_RXPS_04_FRAME_HDR_LEN64_7;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case LWS_RXPS_04_FRAME_HDR_LEN64_7:
|
2011-01-27 06:45:53 +00:00
|
|
|
#if defined __LP64__
|
2013-01-21 11:04:23 +08:00
|
|
|
wsi->u.ws.rx_packet_length |= ((size_t)c) << 48;
|
2011-01-27 06:45:53 +00:00
|
|
|
#endif
|
2011-01-19 12:20:27 +00:00
|
|
|
wsi->lws_rx_parse_state = LWS_RXPS_04_FRAME_HDR_LEN64_6;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case LWS_RXPS_04_FRAME_HDR_LEN64_6:
|
2011-01-27 06:45:53 +00:00
|
|
|
#if defined __LP64__
|
2013-01-21 11:04:23 +08:00
|
|
|
wsi->u.ws.rx_packet_length |= ((size_t)c) << 40;
|
2011-01-27 06:45:53 +00:00
|
|
|
#endif
|
2011-01-19 12:20:27 +00:00
|
|
|
wsi->lws_rx_parse_state = LWS_RXPS_04_FRAME_HDR_LEN64_5;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case LWS_RXPS_04_FRAME_HDR_LEN64_5:
|
2011-01-27 06:45:53 +00:00
|
|
|
#if defined __LP64__
|
2013-01-21 11:04:23 +08:00
|
|
|
wsi->u.ws.rx_packet_length |= ((size_t)c) << 32;
|
2011-01-27 06:45:53 +00:00
|
|
|
#endif
|
2011-01-19 12:20:27 +00:00
|
|
|
wsi->lws_rx_parse_state = LWS_RXPS_04_FRAME_HDR_LEN64_4;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case LWS_RXPS_04_FRAME_HDR_LEN64_4:
|
2013-01-21 11:04:23 +08:00
|
|
|
wsi->u.ws.rx_packet_length |= ((size_t)c) << 24;
|
2011-01-19 12:20:27 +00:00
|
|
|
wsi->lws_rx_parse_state = LWS_RXPS_04_FRAME_HDR_LEN64_3;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case LWS_RXPS_04_FRAME_HDR_LEN64_3:
|
2013-01-21 11:04:23 +08:00
|
|
|
wsi->u.ws.rx_packet_length |= ((size_t)c) << 16;
|
2011-01-19 12:20:27 +00:00
|
|
|
wsi->lws_rx_parse_state = LWS_RXPS_04_FRAME_HDR_LEN64_2;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case LWS_RXPS_04_FRAME_HDR_LEN64_2:
|
2013-01-21 11:04:23 +08:00
|
|
|
wsi->u.ws.rx_packet_length |= ((size_t)c) << 8;
|
2011-01-19 12:20:27 +00:00
|
|
|
wsi->lws_rx_parse_state = LWS_RXPS_04_FRAME_HDR_LEN64_1;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case LWS_RXPS_04_FRAME_HDR_LEN64_1:
|
2013-01-21 11:04:23 +08:00
|
|
|
wsi->u.ws.rx_packet_length |= ((size_t)c);
|
|
|
|
if (wsi->u.ws.this_frame_masked)
|
2011-04-24 05:46:23 +01:00
|
|
|
wsi->lws_rx_parse_state =
|
|
|
|
LWS_RXPS_07_COLLECT_FRAME_KEY_1;
|
|
|
|
else
|
|
|
|
wsi->lws_rx_parse_state =
|
|
|
|
LWS_RXPS_PAYLOAD_UNTIL_LENGTH_EXHAUSTED;
|
2011-01-19 12:20:27 +00:00
|
|
|
break;
|
|
|
|
|
2011-04-24 05:46:23 +01:00
|
|
|
case LWS_RXPS_07_COLLECT_FRAME_KEY_1:
|
2016-01-11 11:34:01 +08:00
|
|
|
wsi->u.ws.mask[0] = c;
|
2011-04-24 05:46:23 +01:00
|
|
|
if (c)
|
2013-01-21 11:04:23 +08:00
|
|
|
wsi->u.ws.all_zero_nonce = 0;
|
2011-04-24 05:46:23 +01:00
|
|
|
wsi->lws_rx_parse_state = LWS_RXPS_07_COLLECT_FRAME_KEY_2;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case LWS_RXPS_07_COLLECT_FRAME_KEY_2:
|
2016-01-11 11:34:01 +08:00
|
|
|
wsi->u.ws.mask[1] = c;
|
2011-04-24 05:46:23 +01:00
|
|
|
if (c)
|
2013-01-21 11:04:23 +08:00
|
|
|
wsi->u.ws.all_zero_nonce = 0;
|
2011-04-24 05:46:23 +01:00
|
|
|
wsi->lws_rx_parse_state = LWS_RXPS_07_COLLECT_FRAME_KEY_3;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case LWS_RXPS_07_COLLECT_FRAME_KEY_3:
|
2016-01-11 11:34:01 +08:00
|
|
|
wsi->u.ws.mask[2] = c;
|
2011-04-24 05:46:23 +01:00
|
|
|
if (c)
|
2013-01-21 11:04:23 +08:00
|
|
|
wsi->u.ws.all_zero_nonce = 0;
|
2011-04-24 05:46:23 +01:00
|
|
|
wsi->lws_rx_parse_state = LWS_RXPS_07_COLLECT_FRAME_KEY_4;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case LWS_RXPS_07_COLLECT_FRAME_KEY_4:
|
2016-01-11 11:34:01 +08:00
|
|
|
wsi->u.ws.mask[3] = c;
|
2011-04-24 05:46:23 +01:00
|
|
|
if (c)
|
2013-01-21 11:04:23 +08:00
|
|
|
wsi->u.ws.all_zero_nonce = 0;
|
2011-04-24 05:46:23 +01:00
|
|
|
wsi->lws_rx_parse_state =
|
|
|
|
LWS_RXPS_PAYLOAD_UNTIL_LENGTH_EXHAUSTED;
|
2016-01-11 11:34:01 +08:00
|
|
|
wsi->u.ws.mask_idx = 0;
|
2014-02-15 16:00:37 +08:00
|
|
|
if (wsi->u.ws.rx_packet_length == 0) {
|
|
|
|
wsi->lws_rx_parse_state = LWS_RXPS_NEW;
|
2013-02-28 17:11:29 +08:00
|
|
|
goto spill;
|
2014-02-15 16:00:37 +08:00
|
|
|
}
|
2011-04-24 05:46:23 +01:00
|
|
|
break;
|
|
|
|
|
|
|
|
|
2010-11-08 20:20:42 +00:00
|
|
|
case LWS_RXPS_PAYLOAD_UNTIL_LENGTH_EXHAUSTED:
|
2016-01-11 11:34:01 +08:00
|
|
|
assert(wsi->u.ws.rx_ubuf);
|
2013-02-06 21:10:16 +09:00
|
|
|
|
2016-01-19 03:34:24 +08:00
|
|
|
if (wsi->u.ws.rx_ubuf_head + LWS_PRE >=
|
|
|
|
wsi->u.ws.rx_ubuf_alloc) {
|
|
|
|
lwsl_err("Attempted overflow \n");
|
|
|
|
return -1;
|
|
|
|
}
|
2013-01-21 11:04:23 +08:00
|
|
|
if (wsi->u.ws.all_zero_nonce)
|
2016-01-11 11:34:01 +08:00
|
|
|
wsi->u.ws.rx_ubuf[LWS_PRE +
|
2016-01-19 03:34:24 +08:00
|
|
|
(wsi->u.ws.rx_ubuf_head++)] = c;
|
2011-02-10 09:22:35 +00:00
|
|
|
else
|
2016-01-11 11:34:01 +08:00
|
|
|
wsi->u.ws.rx_ubuf[LWS_PRE +
|
|
|
|
(wsi->u.ws.rx_ubuf_head++)] =
|
|
|
|
c ^ wsi->u.ws.mask[
|
|
|
|
(wsi->u.ws.mask_idx++) & 3];
|
2011-01-22 12:51:57 +00:00
|
|
|
|
2013-01-21 11:04:23 +08:00
|
|
|
if (--wsi->u.ws.rx_packet_length == 0) {
|
2013-02-14 11:25:44 +08:00
|
|
|
/* spill because we have the whole frame */
|
2011-01-19 12:20:27 +00:00
|
|
|
wsi->lws_rx_parse_state = LWS_RXPS_NEW;
|
|
|
|
goto spill;
|
|
|
|
}
|
2013-02-14 11:25:44 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* if there's no protocol max frame size given, we are
|
|
|
|
* supposed to default to LWS_MAX_SOCKET_IO_BUF
|
|
|
|
*/
|
|
|
|
|
|
|
|
if (!wsi->protocol->rx_buffer_size &&
|
2016-01-11 11:34:01 +08:00
|
|
|
wsi->u.ws.rx_ubuf_head !=
|
2013-02-14 11:25:44 +08:00
|
|
|
LWS_MAX_SOCKET_IO_BUF)
|
2011-01-19 12:20:27 +00:00
|
|
|
break;
|
2013-02-14 11:25:44 +08:00
|
|
|
else
|
|
|
|
if (wsi->protocol->rx_buffer_size &&
|
2016-01-11 11:34:01 +08:00
|
|
|
wsi->u.ws.rx_ubuf_head !=
|
2013-02-14 11:25:44 +08:00
|
|
|
wsi->protocol->rx_buffer_size)
|
|
|
|
break;
|
|
|
|
|
|
|
|
/* spill because we filled our rx buffer */
|
2011-01-19 12:20:27 +00:00
|
|
|
spill:
|
|
|
|
/*
|
|
|
|
* is this frame a control packet we should take care of at this
|
|
|
|
* layer? If so service it and hide it from the user callback
|
|
|
|
*/
|
|
|
|
|
2013-01-10 19:50:35 +08:00
|
|
|
lwsl_parser("spill on %s\n", wsi->protocol->name);
|
2011-05-23 10:00:03 +01:00
|
|
|
|
2013-01-21 11:04:23 +08:00
|
|
|
switch (wsi->u.ws.opcode) {
|
2015-12-17 17:03:59 +08:00
|
|
|
case LWSWSOPC_CLOSE:
|
2015-12-26 15:47:06 +08:00
|
|
|
|
2011-03-07 07:08:12 +00:00
|
|
|
/* is this an acknowledgement of our close? */
|
2015-12-17 17:03:59 +08:00
|
|
|
if (wsi->state == LWSS_AWAITING_CLOSE_ACK) {
|
2011-03-07 07:08:12 +00:00
|
|
|
/*
|
|
|
|
* fine he has told us he is closing too, let's
|
|
|
|
* finish our close
|
|
|
|
*/
|
2013-01-10 19:50:35 +08:00
|
|
|
lwsl_parser("seen client close ack\n");
|
2011-03-07 07:08:12 +00:00
|
|
|
return -1;
|
|
|
|
}
|
2015-12-17 17:03:59 +08:00
|
|
|
if (wsi->state == LWSS_RETURNED_CLOSE_ALREADY)
|
2015-04-17 20:29:58 +08:00
|
|
|
/* if he sends us 2 CLOSE, kill him */
|
|
|
|
return -1;
|
|
|
|
|
2015-12-26 15:47:06 +08:00
|
|
|
if (user_callback_handle_rxflow(
|
|
|
|
wsi->protocol->callback, wsi,
|
|
|
|
LWS_CALLBACK_WS_PEER_INITIATED_CLOSE,
|
|
|
|
wsi->user_space,
|
2016-01-19 03:34:24 +08:00
|
|
|
&wsi->u.ws.rx_ubuf[LWS_PRE],
|
2016-01-11 11:34:01 +08:00
|
|
|
wsi->u.ws.rx_ubuf_head))
|
2015-12-26 15:47:06 +08:00
|
|
|
return -1;
|
|
|
|
|
2013-01-10 19:50:35 +08:00
|
|
|
lwsl_parser("server sees client close packet\n");
|
2015-12-17 17:03:59 +08:00
|
|
|
wsi->state = LWSS_RETURNED_CLOSE_ALREADY;
|
2015-04-17 20:29:58 +08:00
|
|
|
/* deal with the close packet contents as a PONG */
|
|
|
|
wsi->u.ws.payload_is_close = 1;
|
|
|
|
goto process_as_ping;
|
2011-01-19 12:20:27 +00:00
|
|
|
|
2015-12-17 17:03:59 +08:00
|
|
|
case LWSWSOPC_PING:
|
2013-02-11 17:13:32 +08:00
|
|
|
lwsl_info("received %d byte ping, sending pong\n",
|
2016-01-11 11:34:01 +08:00
|
|
|
wsi->u.ws.rx_ubuf_head);
|
2014-08-24 14:39:19 +08:00
|
|
|
|
2015-03-24 21:07:01 +08:00
|
|
|
if (wsi->u.ws.ping_pending_flag) {
|
2015-12-14 08:52:03 +08:00
|
|
|
/*
|
2014-08-24 14:39:19 +08:00
|
|
|
* there is already a pending ping payload
|
|
|
|
* we should just log and drop
|
|
|
|
*/
|
|
|
|
lwsl_parser("DROP PING since one pending\n");
|
|
|
|
goto ping_drop;
|
|
|
|
}
|
2015-04-17 20:29:58 +08:00
|
|
|
process_as_ping:
|
2014-08-24 14:39:19 +08:00
|
|
|
/* control packets can only be < 128 bytes long */
|
2016-01-11 11:34:01 +08:00
|
|
|
if (wsi->u.ws.rx_ubuf_head > 128 - 3) {
|
2014-08-24 14:39:19 +08:00
|
|
|
lwsl_parser("DROP PING payload too large\n");
|
|
|
|
goto ping_drop;
|
|
|
|
}
|
2015-12-14 08:52:03 +08:00
|
|
|
|
2014-08-24 14:39:19 +08:00
|
|
|
/* stash the pong payload */
|
2016-01-11 11:34:01 +08:00
|
|
|
memcpy(wsi->u.ws.ping_payload_buf + LWS_PRE,
|
|
|
|
&wsi->u.ws.rx_ubuf[LWS_PRE],
|
|
|
|
wsi->u.ws.rx_ubuf_head);
|
2015-12-14 08:52:03 +08:00
|
|
|
|
2016-01-11 11:34:01 +08:00
|
|
|
wsi->u.ws.ping_payload_len = wsi->u.ws.rx_ubuf_head;
|
2015-03-24 21:07:01 +08:00
|
|
|
wsi->u.ws.ping_pending_flag = 1;
|
2015-12-14 08:52:03 +08:00
|
|
|
|
2014-08-24 14:39:19 +08:00
|
|
|
/* get it sent as soon as possible */
|
2015-12-16 18:19:08 +08:00
|
|
|
lws_callback_on_writable(wsi);
|
2014-08-24 14:39:19 +08:00
|
|
|
ping_drop:
|
2016-01-11 11:34:01 +08:00
|
|
|
wsi->u.ws.rx_ubuf_head = 0;
|
2011-01-27 20:06:03 +00:00
|
|
|
return 0;
|
2011-01-19 12:20:27 +00:00
|
|
|
|
2015-12-17 17:03:59 +08:00
|
|
|
case LWSWSOPC_PONG:
|
2015-10-05 11:35:52 +01:00
|
|
|
lwsl_info("received pong\n");
|
2016-01-11 11:34:01 +08:00
|
|
|
lwsl_hexdump(&wsi->u.ws.rx_ubuf[LWS_PRE],
|
|
|
|
wsi->u.ws.rx_ubuf_head);
|
2015-10-05 11:35:52 +01:00
|
|
|
|
|
|
|
/* issue it */
|
|
|
|
callback_action = LWS_CALLBACK_RECEIVE_PONG;
|
|
|
|
break;
|
2011-01-19 12:20:27 +00:00
|
|
|
|
2015-12-17 17:03:59 +08:00
|
|
|
case LWSWSOPC_TEXT_FRAME:
|
|
|
|
case LWSWSOPC_BINARY_FRAME:
|
|
|
|
case LWSWSOPC_CONTINUATION:
|
2011-01-19 12:20:27 +00:00
|
|
|
break;
|
2011-05-23 10:00:03 +01:00
|
|
|
|
|
|
|
default:
|
2013-02-11 17:13:32 +08:00
|
|
|
lwsl_parser("passing opc %x up to exts\n",
|
2016-01-19 03:34:24 +08:00
|
|
|
wsi->u.ws.opcode);
|
2011-05-23 10:00:03 +01:00
|
|
|
/*
|
|
|
|
* It's something special we can't understand here.
|
|
|
|
* Pass the payload up to the extension's parsing
|
|
|
|
* state machine.
|
|
|
|
*/
|
|
|
|
|
2016-01-11 11:34:01 +08:00
|
|
|
eff_buf.token = &wsi->u.ws.rx_ubuf[LWS_PRE];
|
|
|
|
eff_buf.token_len = wsi->u.ws.rx_ubuf_head;
|
2011-05-23 10:00:03 +01:00
|
|
|
|
2016-01-11 11:34:01 +08:00
|
|
|
if (lws_ext_cb_active(wsi, LWS_EXT_CB_EXTENDED_PAYLOAD_RX,
|
2014-04-02 19:45:42 +08:00
|
|
|
&eff_buf, 0) <= 0) /* not handle or fail */
|
2013-02-11 17:13:32 +08:00
|
|
|
lwsl_ext("ext opc opcode 0x%x unknown\n",
|
|
|
|
wsi->u.ws.opcode);
|
2011-05-23 10:00:03 +01:00
|
|
|
|
2016-01-11 11:34:01 +08:00
|
|
|
wsi->u.ws.rx_ubuf_head = 0;
|
2011-05-23 10:00:03 +01:00
|
|
|
return 0;
|
2011-01-19 12:20:27 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* No it's real payload, pass it up to the user callback.
|
|
|
|
* It's nicely buffered with the pre-padding taken care of
|
2015-12-04 08:43:54 +08:00
|
|
|
* so it can be sent straight out again using lws_write
|
2011-01-19 12:20:27 +00:00
|
|
|
*/
|
|
|
|
|
2016-01-11 11:34:01 +08:00
|
|
|
eff_buf.token = &wsi->u.ws.rx_ubuf[LWS_PRE];
|
|
|
|
eff_buf.token_len = wsi->u.ws.rx_ubuf_head;
|
2015-12-14 08:52:03 +08:00
|
|
|
|
2016-01-11 11:34:01 +08:00
|
|
|
drain_extension:
|
|
|
|
lwsl_ext("%s: passing %d to ext\n", __func__, eff_buf.token_len);
|
|
|
|
|
|
|
|
if (wsi->state == LWSS_RETURNED_CLOSE_ALREADY ||
|
|
|
|
wsi->state == LWSS_AWAITING_CLOSE_ACK)
|
|
|
|
goto already_done;
|
|
|
|
|
|
|
|
n = lws_ext_cb_active(wsi, LWS_EXT_CB_PAYLOAD_RX, &eff_buf, 0);
|
|
|
|
if (n < 0) {
|
|
|
|
/*
|
|
|
|
* we may rely on this to get RX, just drop connection
|
|
|
|
*/
|
|
|
|
wsi->socket_is_permanently_unusable = 1;
|
2014-04-02 19:45:42 +08:00
|
|
|
return -1;
|
2016-01-11 11:34:01 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
if (rx_draining_ext && eff_buf.token_len == 0)
|
|
|
|
goto already_done;
|
|
|
|
|
|
|
|
if (n && eff_buf.token_len) {
|
|
|
|
/* extension had more... main loop will come back */
|
|
|
|
wsi->u.ws.rx_draining_ext = 1;
|
2016-01-19 03:34:24 +08:00
|
|
|
wsi->u.ws.rx_draining_ext_list = pt->rx_draining_ext_list;
|
|
|
|
pt->rx_draining_ext_list = wsi;
|
2016-01-11 11:34:01 +08:00
|
|
|
}
|
2014-04-02 19:45:42 +08:00
|
|
|
|
2015-12-01 14:44:33 +02:00
|
|
|
if (eff_buf.token_len > 0 ||
|
|
|
|
callback_action == LWS_CALLBACK_RECEIVE_PONG) {
|
2013-02-10 21:21:24 +08:00
|
|
|
eff_buf.token[eff_buf.token_len] = '\0';
|
2013-01-09 18:06:55 +08:00
|
|
|
|
2015-10-05 11:35:52 +01:00
|
|
|
if (wsi->protocol->callback) {
|
|
|
|
|
|
|
|
if (callback_action == LWS_CALLBACK_RECEIVE_PONG)
|
2016-01-11 11:34:01 +08:00
|
|
|
lwsl_info("Doing pong callback\n");
|
2015-10-05 11:35:52 +01:00
|
|
|
|
2013-02-11 17:13:32 +08:00
|
|
|
ret = user_callback_handle_rxflow(
|
|
|
|
wsi->protocol->callback,
|
2015-10-05 11:35:52 +01:00
|
|
|
wsi,
|
2015-12-04 11:08:32 +08:00
|
|
|
(enum lws_callback_reasons)callback_action,
|
2013-02-11 17:13:32 +08:00
|
|
|
wsi->user_space,
|
|
|
|
eff_buf.token,
|
|
|
|
eff_buf.token_len);
|
2015-10-05 11:35:52 +01:00
|
|
|
}
|
|
|
|
else
|
|
|
|
lwsl_err("No callback on payload spill!\n");
|
2013-01-09 18:06:55 +08:00
|
|
|
}
|
2011-05-23 10:00:03 +01:00
|
|
|
|
2016-01-11 11:34:01 +08:00
|
|
|
already_done:
|
|
|
|
wsi->u.ws.rx_ubuf_head = 0;
|
2011-01-22 12:51:57 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2013-02-10 21:21:24 +08:00
|
|
|
return ret;
|
2011-04-24 06:19:22 +01:00
|
|
|
|
|
|
|
illegal_ctl_length:
|
|
|
|
|
2013-02-11 17:13:32 +08:00
|
|
|
lwsl_warn("Control frame with xtended length is illegal\n");
|
2011-04-24 06:19:22 +01:00
|
|
|
/* kill the connection */
|
|
|
|
return -1;
|
2011-01-22 12:51:57 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-01-19 12:20:27 +00:00
|
|
|
/**
|
2015-12-04 08:43:54 +08:00
|
|
|
* lws_remaining_packet_payload() - Bytes to come before "overall"
|
2011-01-23 16:50:33 +00:00
|
|
|
* rx packet is complete
|
2011-01-19 12:20:27 +00:00
|
|
|
* @wsi: Websocket instance (available from user callback)
|
|
|
|
*
|
|
|
|
* This function is intended to be called from the callback if the
|
|
|
|
* user code is interested in "complete packets" from the client.
|
|
|
|
* libwebsockets just passes through payload as it comes and issues a buffer
|
|
|
|
* additionally when it hits a built-in limit. The LWS_CALLBACK_RECEIVE
|
|
|
|
* callback handler can use this API to find out if the buffer it has just
|
|
|
|
* been given is the last piece of a "complete packet" from the client --
|
2015-12-04 08:43:54 +08:00
|
|
|
* when that is the case lws_remaining_packet_payload() will return
|
2011-01-19 12:20:27 +00:00
|
|
|
* 0.
|
|
|
|
*
|
|
|
|
 * Many protocols won't care because their packets are always small.
|
|
|
|
*/
|
|
|
|
|
2013-03-30 09:52:21 +08:00
|
|
|
LWS_VISIBLE size_t
lws_remaining_packet_payload(struct lws *wsi)
{
	/*
	 * rx_packet_length is decremented by the rx state machine as payload
	 * bytes are consumed, so this is simply "bytes still to come" for the
	 * frame currently being parsed (0 == the buffer just delivered to the
	 * LWS_CALLBACK_RECEIVE callback completed the frame).
	 */
	return wsi->u.ws.rx_packet_length;
}
|
2016-03-16 23:56:23 +01:00
|
|
|
|
|
|
|
/* Once we reach LWS_RXPS_PAYLOAD_UNTIL_LENGTH_EXHAUSTED, we know how much
|
|
|
|
* to expect in that state and can deal with it in bulk more efficiently.
|
|
|
|
*/
|
|
|
|
|
|
|
|
void
|
|
|
|
lws_payload_until_length_exhausted(struct lws *wsi, unsigned char **buf,
|
|
|
|
size_t *len)
|
|
|
|
{
|
|
|
|
unsigned char *buffer = *buf, mask[4];
|
|
|
|
int buffer_size, avail, n;
|
|
|
|
char *rx_ubuf;
|
|
|
|
|
|
|
|
if (wsi->protocol->rx_buffer_size)
|
|
|
|
buffer_size = wsi->protocol->rx_buffer_size;
|
|
|
|
else
|
|
|
|
buffer_size = LWS_MAX_SOCKET_IO_BUF;
|
|
|
|
avail = buffer_size - wsi->u.ws.rx_ubuf_head;
|
|
|
|
|
|
|
|
/* do not consume more than we should */
|
|
|
|
if (avail > wsi->u.ws.rx_packet_length)
|
|
|
|
avail = wsi->u.ws.rx_packet_length;
|
|
|
|
|
|
|
|
/* do not consume more than what is in the buffer */
|
|
|
|
if (avail > *len)
|
|
|
|
avail = *len;
|
|
|
|
|
|
|
|
/* we want to leave 1 byte for the parser to handle properly */
|
|
|
|
if (avail <= 1)
|
|
|
|
return;
|
|
|
|
|
|
|
|
avail--;
|
|
|
|
rx_ubuf = wsi->u.ws.rx_ubuf + LWS_PRE + wsi->u.ws.rx_ubuf_head;
|
|
|
|
if (wsi->u.ws.all_zero_nonce)
|
|
|
|
memcpy(rx_ubuf, buffer, avail);
|
|
|
|
else {
|
|
|
|
|
|
|
|
for (n = 0; n < 4; n++)
|
|
|
|
mask[n] = wsi->u.ws.mask[(wsi->u.ws.mask_idx + n) & 3];
|
|
|
|
|
|
|
|
/* deal with 4-byte chunks using unwrapped loop */
|
|
|
|
n = avail >> 2;
|
|
|
|
while (n--) {
|
|
|
|
*(rx_ubuf++) = *(buffer++) ^ mask[0];
|
|
|
|
*(rx_ubuf++) = *(buffer++) ^ mask[1];
|
|
|
|
*(rx_ubuf++) = *(buffer++) ^ mask[2];
|
|
|
|
*(rx_ubuf++) = *(buffer++) ^ mask[3];
|
|
|
|
}
|
|
|
|
/* and the remaining bytes bytewise */
|
|
|
|
for (n = 0; n < (avail & 3); n++)
|
|
|
|
*(rx_ubuf++) = *(buffer++) ^ mask[n];
|
|
|
|
|
|
|
|
wsi->u.ws.mask_idx = (wsi->u.ws.mask_idx + avail) & 3;
|
|
|
|
}
|
|
|
|
|
|
|
|
(*buf) += avail;
|
|
|
|
wsi->u.ws.rx_ubuf_head += avail;
|
|
|
|
wsi->u.ws.rx_packet_length -= avail;
|
|
|
|
*len -= avail;
|
|
|
|
}
|