/* lib/roles/http/server/parsers.c */
/*
* libwebsockets - small server side websockets and web server implementation
*
* Copyright (C) 2010-2018 Andy Green <andy@warmcat.com>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation:
* version 2.1 of the License.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301 USA
*/
#include "private-libwebsockets.h"
/*
 * State table for the header-name lexer; the byte content is pulled in
 * from lextable.h (presumably machine-generated -- see that header).
 */
static const unsigned char lextable[] = {
#include "../lextable.h"
};

/* lexer output byte meaning "no match possible from this state" */
#define FAIL_CHAR 0x08
/*
 * Allocate a fresh allocated_headers struct plus its data buffer of
 * data_size bytes, and push it on the head of the per-thread ah list.
 *
 * Returns the new ah, or NULL on allocation failure (nothing is linked
 * or leaked in that case).
 */
static struct allocated_headers *
_lws_create_ah(struct lws_context_per_thread *pt, ah_data_idx_t data_size)
{
	struct allocated_headers *ah;

	ah = lws_zalloc(sizeof(*ah), "ah struct");
	if (!ah)
		return NULL;

	ah->data = lws_malloc(data_size, "ah data");
	if (!ah->data) {
		/* give back the struct as well, we can't use it */
		lws_free(ah);

		return NULL;
	}

	/* link at the head of the pt list and account for it */
	ah->next = pt->http.ah_list;
	pt->http.ah_list = ah;
	ah->data_length = data_size;
	pt->http.ah_pool_length++;

	lwsl_info("%s: created ah %p (size %d): pool length %d\n", __func__,
		  ah, (int)data_size, pt->http.ah_pool_length);

	return ah;
}
int
_lws_destroy_ah(struct lws_context_per_thread *pt, struct allocated_headers *ah)
{
lws_start_foreach_llp(struct allocated_headers **, a, pt->http.ah_list) {
if ((*a) == ah) {
*a = ah->next;
pt->http.ah_pool_length--;
lwsl_info("%s: freed ah %p : pool length %d\n",
__func__, ah, pt->http.ah_pool_length);
if (ah->data)
lws_free(ah->data);
lws_free(ah);
return 0;
}
} lws_end_foreach_llp(a, next);
return 1;
}
/*
 * Return the ah to the "no headers or body data seen yet" state,
 * without touching its rx buffer or ownership.
 */
void
_lws_header_table_reset(struct allocated_headers *ah)
{
	ah->nfrag = 0;
	ah->pos = 0;
	ah->http_response = 0;
	memset(ah->frag_index, 0, sizeof(ah->frag_index));
	memset(ah->frags, 0, sizeof(ah->frags));
}
// doesn't scrub the ah rxbuffer by default, parent must do if needed
void
2018-03-02 14:22:49 +08:00
__lws_header_table_reset(struct lws *wsi, int autoservice)
{
struct allocated_headers *ah = wsi->http.ah;
struct lws_context_per_thread *pt;
struct lws_pollfd *pfd;
ah owns rxbuf This is intended to solve a longstanding problem with the relationship between http/1.1 keep-alive and the service loop. Ah now contain an rx buffer which is used during header processing, and the ah may not be detached from the wsi until the rx buffer is exhausted. Having the rx buffer in the ah means we can delay using the rx until a later service loop. Ah which have pending rx force POLLIN service on the wsi they are attached to automatically, so we can interleave general service / connections with draining each ah rx buffer. The possible http/1.1 situations and their dispositions are: 1) exactly one set of http headers come. After processing, the ah is detached since no pending rx left. If more headers come later, a fresh ah is aqcuired when available and the rx flow control blocks the read until then. 2) more that one whole set of headers come and we remain in http mode (no upgrade). The ah is left attached and returns to the service loop after the first set of headers. We will get forced service due to the ah having pending content (respecting flowcontrol) and process the pending rx in the ah. If we use it all up, we will detach the ah. 3) one set of http headers come with ws traffic appended. We service the headers, do the upgrade, and keep the ah until the remaining ws content is used. When we exhausted the ws traffix in the ah rx buffer, we detach the ah. Since there can be any amount of http/1.1 pipelining on a connection, and each may be expensive to service, it's now enforced there is a return to the service loop after each header set is serviced on a connection. When I added the forced service for ah with pending buffering, I added support for it to the windows plat code. However this is untested. Signed-off-by: Andy Green <andy.green@linaro.org>
2016-02-15 12:37:04 +08:00
/* if we have the idea we're resetting 'our' ah, must be bound to one */
assert(ah);
/* ah also concurs with ownership */
assert(ah->wsi == wsi);
_lws_header_table_reset(ah);
ah owns rxbuf This is intended to solve a longstanding problem with the relationship between http/1.1 keep-alive and the service loop. Ah now contain an rx buffer which is used during header processing, and the ah may not be detached from the wsi until the rx buffer is exhausted. Having the rx buffer in the ah means we can delay using the rx until a later service loop. Ah which have pending rx force POLLIN service on the wsi they are attached to automatically, so we can interleave general service / connections with draining each ah rx buffer. The possible http/1.1 situations and their dispositions are: 1) exactly one set of http headers come. After processing, the ah is detached since no pending rx left. If more headers come later, a fresh ah is aqcuired when available and the rx flow control blocks the read until then. 2) more that one whole set of headers come and we remain in http mode (no upgrade). The ah is left attached and returns to the service loop after the first set of headers. We will get forced service due to the ah having pending content (respecting flowcontrol) and process the pending rx in the ah. If we use it all up, we will detach the ah. 3) one set of http headers come with ws traffic appended. We service the headers, do the upgrade, and keep the ah until the remaining ws content is used. When we exhausted the ws traffix in the ah rx buffer, we detach the ah. Since there can be any amount of http/1.1 pipelining on a connection, and each may be expensive to service, it's now enforced there is a return to the service loop after each header set is serviced on a connection. When I added the forced service for ah with pending buffering, I added support for it to the windows plat code. However this is untested. Signed-off-by: Andy Green <andy.green@linaro.org>
2016-02-15 12:37:04 +08:00
ah->parser_state = WSI_TOKEN_NAME_PART;
ah->lextable_pos = 0;
ah owns rxbuf This is intended to solve a longstanding problem with the relationship between http/1.1 keep-alive and the service loop. Ah now contain an rx buffer which is used during header processing, and the ah may not be detached from the wsi until the rx buffer is exhausted. Having the rx buffer in the ah means we can delay using the rx until a later service loop. Ah which have pending rx force POLLIN service on the wsi they are attached to automatically, so we can interleave general service / connections with draining each ah rx buffer. The possible http/1.1 situations and their dispositions are: 1) exactly one set of http headers come. After processing, the ah is detached since no pending rx left. If more headers come later, a fresh ah is aqcuired when available and the rx flow control blocks the read until then. 2) more that one whole set of headers come and we remain in http mode (no upgrade). The ah is left attached and returns to the service loop after the first set of headers. We will get forced service due to the ah having pending content (respecting flowcontrol) and process the pending rx in the ah. If we use it all up, we will detach the ah. 3) one set of http headers come with ws traffic appended. We service the headers, do the upgrade, and keep the ah until the remaining ws content is used. When we exhausted the ws traffix in the ah rx buffer, we detach the ah. Since there can be any amount of http/1.1 pipelining on a connection, and each may be expensive to service, it's now enforced there is a return to the service loop after each header set is serviced on a connection. When I added the forced service for ah with pending buffering, I added support for it to the windows plat code. However this is untested. Signed-off-by: Andy Green <andy.green@linaro.org>
2016-02-15 12:37:04 +08:00
/* since we will restart the ah, our new headers are not completed */
wsi->hdr_parsing_completed = 0;
2017-07-26 11:49:41 +08:00
/* while we hold the ah, keep a timeout on the wsi */
2018-03-02 14:22:49 +08:00
__lws_set_timeout(wsi, PENDING_TIMEOUT_HOLDING_AH,
2017-07-26 11:49:41 +08:00
wsi->vhost->timeout_secs_ah_idle);
time(&ah->assigned);
if (lws_buflist_next_segment_len(&wsi->buflist, NULL) &&
autoservice) {
lwsl_debug("%s: service on readbuf ah\n", __func__);
pt = &wsi->context->pt[(int)wsi->tsi];
/*
* Unlike a normal connect, we have the headers already
* (or the first part of them anyway)
*/
pfd = &pt->fds[wsi->position_in_fds_table];
pfd->revents |= LWS_POLLIN;
lwsl_err("%s: calling service\n", __func__);
lws_service_fd_tsi(wsi->context, pfd, wsi->tsi);
}
}
/*
 * Public wrapper: reset the wsi's attached ah, taking the per-thread
 * lock around the locked-context __lws_header_table_reset().
 */
void
lws_header_table_reset(struct lws *wsi, int autoservice)
{
	struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];

	lws_pt_lock(pt, __func__);
	__lws_header_table_reset(wsi, autoservice);
	lws_pt_unlock(pt);
}
/*
 * Make sure wsi is on the per-thread ah wait list (idempotent), and
 * disable POLLIN on him, since he can't accept input until he gets an
 * ah.
 */
static void
_lws_header_ensure_we_are_on_waiting_list(struct lws *wsi)
{
	struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
	struct lws_pollargs pa;
	struct lws **w;

	/* if we are already on the list, nothing to do */
	for (w = &pt->http.ah_wait_list; *w; w = &(*w)->http.ah_wait_list)
		if (*w == wsi)
			return;

	lwsl_info("%s: wsi: %p\n", __func__, wsi);

	/* push on the head of the wait list */
	wsi->http.ah_wait_list = pt->http.ah_wait_list;
	pt->http.ah_wait_list = wsi;
	pt->http.ah_wait_list_length++;

	/* we cannot accept input then */
	_lws_change_pollfd(wsi, LWS_POLLIN, 0, &pa);
}
/*
 * Take wsi off the per-thread ah wait list if he is on it.
 * Returns 1 if he was removed, 0 if he wasn't on the list.
 */
static int
__lws_remove_from_ah_waiting_list(struct lws *wsi)
{
	struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
	struct lws **w;

	for (w = &pt->http.ah_wait_list; *w; w = &(*w)->http.ah_wait_list) {
		if (*w != wsi)
			continue;

		lwsl_info("%s: wsi %p\n", __func__, wsi);

		/* point prev guy to our next */
		*w = wsi->http.ah_wait_list;
		/* we shouldn't point anywhere now */
		wsi->http.ah_wait_list = NULL;
		pt->http.ah_wait_list_length--;

		return 1;
	}

	return 0;
}
/*
 * Try to bind an ah to the wsi so he can parse headers.
 *
 * Returns 0 if an ah is attached (freshly created, or the existing one
 * reset), 1 if no ah could be given right now (wsi was put on the wait
 * list, POLLIN disabled), -1 if a client connect retry failed and the
 * wsi was closed.
 */
int LWS_WARN_UNUSED_RESULT
lws_header_table_attach(struct lws *wsi, int autoservice)
{
	struct lws_context *context = wsi->context;
	struct lws_context_per_thread *pt = &context->pt[(int)wsi->tsi];
	struct lws_pollargs pa;
	int n;

	lwsl_info("%s: wsi %p: ah %p (tsi %d, count = %d) in\n", __func__,
		  (void *)wsi, (void *)wsi->http.ah, wsi->tsi,
		  pt->http.ah_count_in_use);

	lws_pt_lock(pt, __func__);

	/* if we are already bound to one, just clear it down */
	if (wsi->http.ah) {
		lwsl_info("%s: cleardown\n", __func__);
		goto reset;
	}

	/* nonzero if the pool is exhausted... */
	n = pt->http.ah_count_in_use == context->max_http_header_pool;
#if defined(LWS_WITH_PEER_LIMITS)
	/* ...or this peer has hit his ah limit */
	if (!n) {
		n = lws_peer_confirm_ah_attach_ok(context, wsi->peer);
		if (n)
			lws_stats_atomic_bump(wsi->context, pt,
					LWSSTATS_C_PEER_LIMIT_AH_DENIED, 1);
	}
#endif
	if (n) {
		/*
		 * Pool is either all busy, or we don't want to give this
		 * particular guy an ah right now...
		 *
		 * Make sure we are on the waiting list, and return that we
		 * weren't able to provide the ah
		 */
		_lws_header_ensure_we_are_on_waiting_list(wsi);

		goto bail;
	}

	__lws_remove_from_ah_waiting_list(wsi);

	wsi->http.ah = _lws_create_ah(pt, context->max_http_header_data);
	if (!wsi->http.ah) {
		/* we could not create an ah */
		_lws_header_ensure_we_are_on_waiting_list(wsi);

		goto bail;
	}

	wsi->http.ah->in_use = 1;
	wsi->http.ah->wsi = wsi; /* mark our owner */
	pt->http.ah_count_in_use++;

#if defined(LWS_WITH_PEER_LIMITS) && (defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2))
	lws_context_lock(context); /* <====================================== */
	if (wsi->peer)
		wsi->peer->http.count_ah++;
	lws_context_unlock(context); /* ====================================> */
#endif

	/* we can take input again now we have an ah */
	_lws_change_pollfd(wsi, 0, LWS_POLLIN, &pa);

	lwsl_info("%s: did attach wsi %p: ah %p: count %d (on exit)\n", __func__,
		  (void *)wsi, (void *)wsi->http.ah, pt->http.ah_count_in_use);

reset:
	__lws_header_table_reset(wsi, autoservice);

	lws_pt_unlock(pt);

#ifndef LWS_NO_CLIENT
	if (lwsi_role_client(wsi) && lwsi_state(wsi) == LRS_UNCONNECTED) {
		if (!lws_client_connect_via_info2(wsi)) {
			/*
			 * our client connect has failed, the wsi
			 * has been closed
			 */
			return -1;
		}
	}
#endif

	return 0;

bail:
	lws_pt_unlock(pt);

	return 1;
}
2018-03-05 16:49:28 +08:00
int __lws_header_table_detach(struct lws *wsi, int autoservice)
{
struct lws_context *context = wsi->context;
struct allocated_headers *ah = wsi->http.ah;
struct lws_context_per_thread *pt = &context->pt[(int)wsi->tsi];
struct lws_pollargs pa;
struct lws **pwsi, **pwsi_eligible;
time_t now;
2017-07-19 04:06:15 +08:00
__lws_remove_from_ah_waiting_list(wsi);
if (!ah)
return 0;
ah owns rxbuf This is intended to solve a longstanding problem with the relationship between http/1.1 keep-alive and the service loop. Ah now contain an rx buffer which is used during header processing, and the ah may not be detached from the wsi until the rx buffer is exhausted. Having the rx buffer in the ah means we can delay using the rx until a later service loop. Ah which have pending rx force POLLIN service on the wsi they are attached to automatically, so we can interleave general service / connections with draining each ah rx buffer. The possible http/1.1 situations and their dispositions are: 1) exactly one set of http headers come. After processing, the ah is detached since no pending rx left. If more headers come later, a fresh ah is aqcuired when available and the rx flow control blocks the read until then. 2) more that one whole set of headers come and we remain in http mode (no upgrade). The ah is left attached and returns to the service loop after the first set of headers. We will get forced service due to the ah having pending content (respecting flowcontrol) and process the pending rx in the ah. If we use it all up, we will detach the ah. 3) one set of http headers come with ws traffic appended. We service the headers, do the upgrade, and keep the ah until the remaining ws content is used. When we exhausted the ws traffix in the ah rx buffer, we detach the ah. Since there can be any amount of http/1.1 pipelining on a connection, and each may be expensive to service, it's now enforced there is a return to the service loop after each header set is serviced on a connection. When I added the forced service for ah with pending buffering, I added support for it to the windows plat code. However this is untested. Signed-off-by: Andy Green <andy.green@linaro.org>
2016-02-15 12:37:04 +08:00
lwsl_info("%s: wsi %p: ah %p (tsi=%d, count = %d)\n", __func__,
(void *)wsi, (void *)ah, wsi->tsi,
pt->http.ah_count_in_use);
ah owns rxbuf This is intended to solve a longstanding problem with the relationship between http/1.1 keep-alive and the service loop. Ah now contain an rx buffer which is used during header processing, and the ah may not be detached from the wsi until the rx buffer is exhausted. Having the rx buffer in the ah means we can delay using the rx until a later service loop. Ah which have pending rx force POLLIN service on the wsi they are attached to automatically, so we can interleave general service / connections with draining each ah rx buffer. The possible http/1.1 situations and their dispositions are: 1) exactly one set of http headers come. After processing, the ah is detached since no pending rx left. If more headers come later, a fresh ah is aqcuired when available and the rx flow control blocks the read until then. 2) more that one whole set of headers come and we remain in http mode (no upgrade). The ah is left attached and returns to the service loop after the first set of headers. We will get forced service due to the ah having pending content (respecting flowcontrol) and process the pending rx in the ah. If we use it all up, we will detach the ah. 3) one set of http headers come with ws traffic appended. We service the headers, do the upgrade, and keep the ah until the remaining ws content is used. When we exhausted the ws traffix in the ah rx buffer, we detach the ah. Since there can be any amount of http/1.1 pipelining on a connection, and each may be expensive to service, it's now enforced there is a return to the service loop after each header set is serviced on a connection. When I added the forced service for ah with pending buffering, I added support for it to the windows plat code. However this is untested. Signed-off-by: Andy Green <andy.green@linaro.org>
2016-02-15 12:37:04 +08:00
/* we did have an ah attached */
time(&now);
if (ah->assigned && now - ah->assigned > 3) {
/*
* we're detaching the ah, but it was held an
* unreasonably long time
*/
lwsl_debug("%s: wsi %p: ah held %ds, role/state 0x%x 0x%x,"
"\n", __func__, wsi, (int)(now - ah->assigned),
lwsi_role(wsi), lwsi_state(wsi));
}
ah owns rxbuf This is intended to solve a longstanding problem with the relationship between http/1.1 keep-alive and the service loop. Ah now contain an rx buffer which is used during header processing, and the ah may not be detached from the wsi until the rx buffer is exhausted. Having the rx buffer in the ah means we can delay using the rx until a later service loop. Ah which have pending rx force POLLIN service on the wsi they are attached to automatically, so we can interleave general service / connections with draining each ah rx buffer. The possible http/1.1 situations and their dispositions are: 1) exactly one set of http headers come. After processing, the ah is detached since no pending rx left. If more headers come later, a fresh ah is aqcuired when available and the rx flow control blocks the read until then. 2) more that one whole set of headers come and we remain in http mode (no upgrade). The ah is left attached and returns to the service loop after the first set of headers. We will get forced service due to the ah having pending content (respecting flowcontrol) and process the pending rx in the ah. If we use it all up, we will detach the ah. 3) one set of http headers come with ws traffic appended. We service the headers, do the upgrade, and keep the ah until the remaining ws content is used. When we exhausted the ws traffix in the ah rx buffer, we detach the ah. Since there can be any amount of http/1.1 pipelining on a connection, and each may be expensive to service, it's now enforced there is a return to the service loop after each header set is serviced on a connection. When I added the forced service for ah with pending buffering, I added support for it to the windows plat code. However this is untested. Signed-off-by: Andy Green <andy.green@linaro.org>
2016-02-15 12:37:04 +08:00
ah->assigned = 0;
ah owns rxbuf This is intended to solve a longstanding problem with the relationship between http/1.1 keep-alive and the service loop. Ah now contain an rx buffer which is used during header processing, and the ah may not be detached from the wsi until the rx buffer is exhausted. Having the rx buffer in the ah means we can delay using the rx until a later service loop. Ah which have pending rx force POLLIN service on the wsi they are attached to automatically, so we can interleave general service / connections with draining each ah rx buffer. The possible http/1.1 situations and their dispositions are: 1) exactly one set of http headers come. After processing, the ah is detached since no pending rx left. If more headers come later, a fresh ah is aqcuired when available and the rx flow control blocks the read until then. 2) more that one whole set of headers come and we remain in http mode (no upgrade). The ah is left attached and returns to the service loop after the first set of headers. We will get forced service due to the ah having pending content (respecting flowcontrol) and process the pending rx in the ah. If we use it all up, we will detach the ah. 3) one set of http headers come with ws traffic appended. We service the headers, do the upgrade, and keep the ah until the remaining ws content is used. When we exhausted the ws traffix in the ah rx buffer, we detach the ah. Since there can be any amount of http/1.1 pipelining on a connection, and each may be expensive to service, it's now enforced there is a return to the service loop after each header set is serviced on a connection. When I added the forced service for ah with pending buffering, I added support for it to the windows plat code. However this is untested. Signed-off-by: Andy Green <andy.green@linaro.org>
2016-02-15 12:37:04 +08:00
/* if we think we're detaching one, there should be one in use */
assert(pt->http.ah_count_in_use > 0);
ah owns rxbuf This is intended to solve a longstanding problem with the relationship between http/1.1 keep-alive and the service loop. Ah now contain an rx buffer which is used during header processing, and the ah may not be detached from the wsi until the rx buffer is exhausted. Having the rx buffer in the ah means we can delay using the rx until a later service loop. Ah which have pending rx force POLLIN service on the wsi they are attached to automatically, so we can interleave general service / connections with draining each ah rx buffer. The possible http/1.1 situations and their dispositions are: 1) exactly one set of http headers come. After processing, the ah is detached since no pending rx left. If more headers come later, a fresh ah is aqcuired when available and the rx flow control blocks the read until then. 2) more that one whole set of headers come and we remain in http mode (no upgrade). The ah is left attached and returns to the service loop after the first set of headers. We will get forced service due to the ah having pending content (respecting flowcontrol) and process the pending rx in the ah. If we use it all up, we will detach the ah. 3) one set of http headers come with ws traffic appended. We service the headers, do the upgrade, and keep the ah until the remaining ws content is used. When we exhausted the ws traffix in the ah rx buffer, we detach the ah. Since there can be any amount of http/1.1 pipelining on a connection, and each may be expensive to service, it's now enforced there is a return to the service loop after each header set is serviced on a connection. When I added the forced service for ah with pending buffering, I added support for it to the windows plat code. However this is untested. Signed-off-by: Andy Green <andy.green@linaro.org>
2016-02-15 12:37:04 +08:00
/* and this specific one should have been in use */
assert(ah->in_use);
memset(&wsi->http.ah, 0, sizeof(wsi->http.ah));
#if defined(LWS_WITH_PEER_LIMITS)
if (ah->wsi)
lws_peer_track_ah_detach(context, wsi->peer);
#endif
ah->wsi = NULL; /* no owner */
pwsi = &pt->http.ah_wait_list;
2017-07-19 04:06:15 +08:00
/* oh there is nobody on the waiting list... leave the ah unattached */
if (!*pwsi)
goto nobody_usable_waiting;
/*
* at least one wsi on the same tsi is waiting, give it to oldest guy
* who is allowed to take it (if any)
*/
lwsl_info("pt wait list %p\n", *pwsi);
wsi = NULL;
pwsi_eligible = NULL;
while (*pwsi) {
#if defined(LWS_WITH_PEER_LIMITS)
/* are we willing to give this guy an ah? */
if (!lws_peer_confirm_ah_attach_ok(context, (*pwsi)->peer))
#endif
{
wsi = *pwsi;
pwsi_eligible = pwsi;
}
#if defined(LWS_WITH_PEER_LIMITS)
else
if (!(*pwsi)->http.ah_wait_list)
2017-09-23 13:27:11 +08:00
lws_stats_atomic_bump(context, pt,
LWSSTATS_C_PEER_LIMIT_AH_DENIED, 1);
#endif
pwsi = &(*pwsi)->http.ah_wait_list;
}
if (!wsi) /* everybody waiting already has too many ah... */
goto nobody_usable_waiting;
lwsl_info("%s: last eligible wsi in wait list %p\n", __func__, wsi);
wsi->http.ah = ah;
ah owns rxbuf This is intended to solve a longstanding problem with the relationship between http/1.1 keep-alive and the service loop. Ah now contain an rx buffer which is used during header processing, and the ah may not be detached from the wsi until the rx buffer is exhausted. Having the rx buffer in the ah means we can delay using the rx until a later service loop. Ah which have pending rx force POLLIN service on the wsi they are attached to automatically, so we can interleave general service / connections with draining each ah rx buffer. The possible http/1.1 situations and their dispositions are: 1) exactly one set of http headers come. After processing, the ah is detached since no pending rx left. If more headers come later, a fresh ah is aqcuired when available and the rx flow control blocks the read until then. 2) more that one whole set of headers come and we remain in http mode (no upgrade). The ah is left attached and returns to the service loop after the first set of headers. We will get forced service due to the ah having pending content (respecting flowcontrol) and process the pending rx in the ah. If we use it all up, we will detach the ah. 3) one set of http headers come with ws traffic appended. We service the headers, do the upgrade, and keep the ah until the remaining ws content is used. When we exhausted the ws traffix in the ah rx buffer, we detach the ah. Since there can be any amount of http/1.1 pipelining on a connection, and each may be expensive to service, it's now enforced there is a return to the service loop after each header set is serviced on a connection. When I added the forced service for ah with pending buffering, I added support for it to the windows plat code. However this is untested. Signed-off-by: Andy Green <andy.green@linaro.org>
2016-02-15 12:37:04 +08:00
ah->wsi = wsi; /* new owner */
2018-03-02 14:22:49 +08:00
__lws_header_table_reset(wsi, autoservice);
#if defined(LWS_WITH_PEER_LIMITS) && (defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2))
lws_context_lock(context); /* <====================================== */
if (wsi->peer)
wsi->peer->http.count_ah++;
lws_context_unlock(context); /* ====================================> */
#endif
2016-08-07 08:33:08 +08:00
/* clients acquire the ah and then insert themselves in fds table... */
if (wsi->position_in_fds_table != -1) {
lwsl_info("%s: Enabling %p POLLIN\n", __func__, wsi);
/* he has been stuck waiting for an ah, but now his wait is
* over, let him progress */
2016-08-07 08:33:08 +08:00
_lws_change_pollfd(wsi, 0, LWS_POLLIN, &pa);
}
/* point prev guy to next guy in list instead */
*pwsi_eligible = wsi->http.ah_wait_list;
/* the guy who got one is out of the list */
wsi->http.ah_wait_list = NULL;
pt->http.ah_wait_list_length--;
#ifndef LWS_NO_CLIENT
if (lwsi_role_client(wsi) && lwsi_state(wsi) == LRS_UNCONNECTED) {
2017-06-12 13:36:24 +08:00
lws_pt_unlock(pt);
if (!lws_client_connect_via_info2(wsi)) {
/* our client connect has failed, the wsi
* has been closed
*/
return -1;
}
2017-06-12 13:36:24 +08:00
return 0;
}
#endif
assert(!!pt->http.ah_wait_list_length == !!(lws_intptr_t)pt->http.ah_wait_list);
bail:
2016-07-23 14:18:25 +08:00
lwsl_info("%s: wsi %p: ah %p (tsi=%d, count = %d)\n", __func__,
(void *)wsi, (void *)ah, pt->tid, pt->http.ah_count_in_use);
return 0;
nobody_usable_waiting:
lwsl_info("%s: nobody usable waiting\n", __func__);
_lws_destroy_ah(pt, ah);
pt->http.ah_count_in_use--;
goto bail;
}
2018-03-05 16:49:28 +08:00
int lws_header_table_detach(struct lws *wsi, int autoservice)
{
struct lws_context *context = wsi->context;
struct lws_context_per_thread *pt = &context->pt[(int)wsi->tsi];
int n;
lws_pt_lock(pt, __func__);
n = __lws_header_table_detach(wsi, autoservice);
lws_pt_unlock(pt);
return n;
}
LWS_VISIBLE int
lws_hdr_fragment_length(struct lws *wsi, enum lws_token_indexes h, int frag_idx)
{
int n;
if (!wsi->http.ah)
2017-07-19 04:39:14 +08:00
return 0;
n = wsi->http.ah->frag_index[h];
if (!n)
return 0;
do {
if (!frag_idx)
return wsi->http.ah->frags[n].len;
n = wsi->http.ah->frags[n].nfrag;
} while (frag_idx-- && n);
return 0;
}
LWS_VISIBLE int lws_hdr_total_length(struct lws *wsi, enum lws_token_indexes h)
2013-02-10 18:02:31 +08:00
{
int n;
int len = 0;
if (!wsi->http.ah)
2017-07-19 04:39:14 +08:00
return 0;
n = wsi->http.ah->frag_index[h];
if (!n)
2013-02-10 18:02:31 +08:00
return 0;
do {
len += wsi->http.ah->frags[n].len;
n = wsi->http.ah->frags[n].nfrag;
2013-02-10 18:02:31 +08:00
} while (n);
return len;
}
LWS_VISIBLE int lws_hdr_copy_fragment(struct lws *wsi, char *dst, int len,
enum lws_token_indexes h, int frag_idx)
{
int n = 0;
2017-07-19 04:39:14 +08:00
int f;
if (!wsi->http.ah)
2017-07-19 04:39:14 +08:00
return -1;
f = wsi->http.ah->frag_index[h];
if (!f)
return -1;
while (n < frag_idx) {
f = wsi->http.ah->frags[f].nfrag;
if (!f)
return -1;
n++;
}
if (wsi->http.ah->frags[f].len >= len)
return -1;
memcpy(dst, wsi->http.ah->data + wsi->http.ah->frags[f].offset,
wsi->http.ah->frags[f].len);
dst[wsi->http.ah->frags[f].len] = '\0';
return wsi->http.ah->frags[f].len;
}
LWS_VISIBLE int lws_hdr_copy(struct lws *wsi, char *dst, int len,
enum lws_token_indexes h)
2013-02-10 18:02:31 +08:00
{
int toklen = lws_hdr_total_length(wsi, h);
int n;
if (toklen >= len)
return -1;
if (!wsi->http.ah)
2017-07-19 04:39:14 +08:00
return -1;
n = wsi->http.ah->frag_index[h];
if (!n)
2013-02-10 18:02:31 +08:00
return 0;
do {
if (wsi->http.ah->frags[n].len >= len)
2017-11-02 08:10:41 +08:00
return -1;
strncpy(dst, &wsi->http.ah->data[wsi->http.ah->frags[n].offset],
wsi->http.ah->frags[n].len);
dst += wsi->http.ah->frags[n].len;
len -= wsi->http.ah->frags[n].len;
n = wsi->http.ah->frags[n].nfrag;
2013-02-10 18:02:31 +08:00
} while (n);
2017-11-02 08:10:41 +08:00
*dst = '\0';
2013-02-10 18:02:31 +08:00
return toklen;
}
char *lws_hdr_simple_ptr(struct lws *wsi, enum lws_token_indexes h)
2013-02-10 18:02:31 +08:00
{
int n;
n = wsi->http.ah->frag_index[h];
2013-02-10 18:02:31 +08:00
if (!n)
return NULL;
return wsi->http.ah->data + wsi->http.ah->frags[n].offset;
2013-02-10 18:02:31 +08:00
}
2018-03-07 18:15:17 +08:00
static int LWS_WARN_UNUSED_RESULT
lws_pos_in_bounds(struct lws *wsi)
{
if (wsi->http.ah->pos <
2017-09-23 12:55:21 +08:00
(unsigned int)wsi->context->max_http_header_data)
return 0;
if ((int)wsi->http.ah->pos == wsi->context->max_http_header_data) {
lwsl_err("Ran out of header data space\n");
return 1;
}
/*
* with these tests everywhere, it should never be able to exceed
2017-09-23 12:55:21 +08:00
* the limit, only meet it
*/
lwsl_err("%s: pos %d, limit %d\n", __func__, wsi->http.ah->pos,
wsi->context->max_http_header_data);
assert(0);
return 1;
}
/*
 * Synthesize header h with value s (NUL-terminated) as a fresh
 * fragment in the ah.  Returns 0 on success, -1 if out of fragments
 * or header data space.
 */
int LWS_WARN_UNUSED_RESULT
lws_hdr_simple_create(struct lws *wsi, enum lws_token_indexes h, const char *s)
{
	struct allocated_headers *ah = wsi->http.ah;

	ah->nfrag++;
	if (ah->nfrag == ARRAY_SIZE(ah->frags)) {
		lwsl_warn("More hdr frags than we can deal with, dropping\n");

		return -1;
	}

	/* this fragment becomes the head of header h's chain */
	ah->frag_index[h] = ah->nfrag;

	ah->frags[ah->nfrag].offset = ah->pos;
	ah->frags[ah->nfrag].len = 0;
	ah->frags[ah->nfrag].nfrag = 0;

	/* copy the value including its terminating NUL into the data area */
	do {
		if (lws_pos_in_bounds(wsi))
			return -1;

		ah->data[ah->pos++] = *s;
		if (*s)
			ah->frags[ah->nfrag].len++;
	} while (*s++);

	return 0;
}
/*
 * Append one byte of the current token into the ah data area,
 * respecting the per-token length limit.
 * Returns 0 if the byte was stored, 1 if it was discarded because the
 * limit was reached, -1 if out of header data space.
 */
static int LWS_WARN_UNUSED_RESULT
issue_char(struct lws *wsi, unsigned char c)
{
	struct allocated_headers *ah = wsi->http.ah;
	unsigned short frag_len;

	if (lws_pos_in_bounds(wsi))
		return -1;

	frag_len = ah->frags[ah->nfrag].len;

	/*
	 * If we haven't hit the token limit, just copy the character into
	 * the header
	 */
	if (frag_len < ah->current_token_limit) {
		ah->data[ah->pos++] = c;
		if (c)
			ah->frags[ah->nfrag].len++;

		return 0;
	}

	/* Insert a null character when we *hit* the limit: */
	if (frag_len == ah->current_token_limit) {
		if (lws_pos_in_bounds(wsi))
			return -1;

		ah->data[ah->pos++] = '\0';
		lwsl_warn("header %i exceeds limit %d\n",
			  ah->parser_state, ah->current_token_limit);
	}

	return 1;
}
/*
 * Process one URI character through two stacked state machines:
 *
 *  1) ah->ues: %xx urlencoding decode (two hex digits after '%'),
 *  2) ah->ups: path sanitation — collapse //, /./ and resolve /../
 *     back one directory level, and split query args into the
 *     WSI_TOKEN_HTTP_URI_ARGS token at the first '?'.
 *
 * On return *_c may have been rewritten (decoded %xx, '+' -> ' ',
 * uriencoded '=' in a name -> '_').
 *
 * Returns LPUR_CONTINUE (caller should issue the char), LPUR_SWALLOW
 * (char consumed here), LPUR_FORBID (illegal sequence, reject the
 * request) or LPUR_EXCESSIVE (out of header fragments).
 */
int
lws_parse_urldecode(struct lws *wsi, uint8_t *_c)
{
	struct allocated_headers *ah = wsi->http.ah;
	unsigned int enc = 0;	/* set when c came from a decoded %xx pair */
	uint8_t c = *_c;

	// lwsl_notice("ah->ups %d\n", ah->ups);

	/*
	 * PRIORITY 1
	 * special URI processing... convert %xx
	 */
	switch (ah->ues) {
	case URIES_IDLE:
		if (c == '%') {
			ah->ues = URIES_SEEN_PERCENT;
			goto swallow;
		}
		break;
	case URIES_SEEN_PERCENT:
		if (char_to_hex(c) < 0)
			/* illegal post-% char */
			goto forbid;
		/* stash the first hex digit until the second arrives */
		ah->esc_stash = c;
		ah->ues = URIES_SEEN_PERCENT_H1;
		goto swallow;

	case URIES_SEEN_PERCENT_H1:
		if (char_to_hex(c) < 0)
			/* illegal post-% char */
			goto forbid;
		/* combine the two hex digits into the decoded byte */
		*_c = (char_to_hex(ah->esc_stash) << 4) |
		      char_to_hex(c);
		c = *_c;
		enc = 1;
		ah->ues = URIES_IDLE;
		break;
	}

	/*
	 * PRIORITY 2
	 * special URI processing...
	 *  convert /.. or /... or /../ etc to /
	 *  convert /./ to /
	 *  convert // or /// etc to /
	 *  leave /.dir or whatever alone
	 */
	switch (ah->ups) {
	case URIPS_IDLE:
		/* a raw NUL in the URI is never legal */
		if (!c)
			return -1;
		/* genuine delimiter */
		if ((c == '&' || c == ';') && !enc) {
			if (issue_char(wsi, c) < 0)
				return -1;
			/* swallow the terminator */
			ah->frags[ah->nfrag].len--;
			/* link to next fragment */
			ah->frags[ah->nfrag].nfrag = ah->nfrag + 1;
			ah->nfrag++;
			if (ah->nfrag >= ARRAY_SIZE(ah->frags))
				goto excessive;
			/* start next fragment after the & */
			ah->post_literal_equal = 0;
			ah->frags[ah->nfrag].offset = ah->pos;
			ah->frags[ah->nfrag].len = 0;
			ah->frags[ah->nfrag].nfrag = 0;
			goto swallow;
		}
		/* uriencoded = in the name part, disallow */
		if (c == '=' && enc &&
		    ah->frag_index[WSI_TOKEN_HTTP_URI_ARGS] &&
		    !ah->post_literal_equal) {
			c = '_';
			*_c =c;
		}

		/* after the real =, we don't care how many = */
		if (c == '=' && !enc)
			ah->post_literal_equal = 1;

		/* + to space */
		if (c == '+' && !enc) {
			c = ' ';
			*_c = c;
		}
		/* issue the first / always */
		if (c == '/' && !ah->frag_index[WSI_TOKEN_HTTP_URI_ARGS])
			ah->ups = URIPS_SEEN_SLASH;
		break;
	case URIPS_SEEN_SLASH:
		/* swallow subsequent slashes */
		if (c == '/')
			goto swallow;
		/* track and swallow the first . after / */
		if (c == '.') {
			ah->ups = URIPS_SEEN_SLASH_DOT;
			goto swallow;
		}
		ah->ups = URIPS_IDLE;
		break;
	case URIPS_SEEN_SLASH_DOT:
		/* swallow second . */
		if (c == '.') {
			ah->ups = URIPS_SEEN_SLASH_DOT_DOT;
			goto swallow;
		}
		/* change /./ to / */
		if (c == '/') {
			ah->ups = URIPS_SEEN_SLASH;
			goto swallow;
		}
		/* it was like /.dir ... regurgitate the . */
		ah->ups = URIPS_IDLE;
		if (issue_char(wsi, '.') < 0)
			return -1;
		break;

	case URIPS_SEEN_SLASH_DOT_DOT:

		/* /../ or /..[End of URI] --> backup to last / */
		if (c == '/' || c == '?') {
			/*
			 * back up one dir level if possible
			 * safe against header fragmentation because
			 * the method URI can only be in 1 fragment
			 */
			if (ah->frags[ah->nfrag].len > 2) {
				ah->pos--;
				ah->frags[ah->nfrag].len--;
				do {
					ah->pos--;
					ah->frags[ah->nfrag].len--;
				} while (ah->frags[ah->nfrag].len > 1 &&
					 ah->data[ah->pos] != '/');
			}
			ah->ups = URIPS_SEEN_SLASH;
			if (ah->frags[ah->nfrag].len > 1)
				break;
			goto swallow;
		}

		/* /..[^/] ... regurgitate and allow */

		if (issue_char(wsi, '.') < 0)
			return -1;
		if (issue_char(wsi, '.') < 0)
			return -1;
		ah->ups = URIPS_IDLE;
		break;
	}

	if (c == '?' && !enc &&
	    !ah->frag_index[WSI_TOKEN_HTTP_URI_ARGS]) { /* start of URI args */
		if (ah->ues != URIES_IDLE)
			goto forbid;

		/* seal off uri header */
		if (issue_char(wsi, '\0') < 0)
			return -1;

		/* move to using WSI_TOKEN_HTTP_URI_ARGS */
		ah->nfrag++;
		if (ah->nfrag >= ARRAY_SIZE(ah->frags))
			goto excessive;
		ah->frags[ah->nfrag].offset = ah->pos;
		ah->frags[ah->nfrag].len = 0;
		ah->frags[ah->nfrag].nfrag = 0;

		ah->post_literal_equal = 0;
		ah->frag_index[WSI_TOKEN_HTTP_URI_ARGS] = ah->nfrag;
		ah->ups = URIPS_IDLE;
		goto swallow;
	}

	return LPUR_CONTINUE;

swallow:
	return LPUR_SWALLOW;

forbid:
	return LPUR_FORBID;

excessive:
	return LPUR_EXCESSIVE;
}
/*
 * Header tokens that represent HTTP request methods; the parser uses
 * this table to give the method's URI special sanitation treatment and
 * to detect duplicated / unknown methods.
 */
static const unsigned char methods[] = {
	WSI_TOKEN_GET_URI,
	WSI_TOKEN_POST_URI,
	WSI_TOKEN_OPTIONS_URI,
	WSI_TOKEN_PUT_URI,
	WSI_TOKEN_PATCH_URI,
	WSI_TOKEN_DELETE_URI,
	WSI_TOKEN_CONNECT,
	WSI_TOKEN_HEAD_URI,
};
2018-03-07 18:15:17 +08:00
/*
* possible returns:, -1 fail, 0 ok or 2, transition to raw
*/
int LWS_WARN_UNUSED_RESULT
2018-03-07 18:15:17 +08:00
lws_parse(struct lws *wsi, unsigned char *buf, int *len)
{
struct allocated_headers *ah = wsi->http.ah;
struct lws_context *context = wsi->context;
unsigned int n, m;
2018-03-07 18:15:17 +08:00
unsigned char c;
int r, pos;
assert(wsi->http.ah);
2018-03-07 18:15:17 +08:00
do {
(*len)--;
c = *buf++;
2018-03-07 18:15:17 +08:00
switch (ah->parser_state) {
default:
2018-03-07 18:15:17 +08:00
lwsl_parser("WSI_TOK_(%d) '%c'\n", ah->parser_state, c);
2018-03-07 18:15:17 +08:00
/* collect into malloc'd buffers */
/* optional initial space swallow */
if (!ah->frags[ah->frag_index[ah->parser_state]].len &&
c == ' ')
break;
2018-03-07 18:15:17 +08:00
for (m = 0; m < ARRAY_SIZE(methods); m++)
if (ah->parser_state == methods[m])
break;
if (m == ARRAY_SIZE(methods))
/* it was not any of the methods */
goto check_eol;
2018-03-07 18:15:17 +08:00
/* special URI processing... end at space */
HTTP Version, Keep-alive support, No-copy POST This is a squashed commit from https://github.com/andrew-canaday/libwebsockets, dev/http_keepalive branch (strategies changed a few times, so the commit history is clutteread). This branch is submitted for clarity, but the other can be used as a reference or alternative. * added **enum http_version** to track HTTP/1.0 vs HTTP/1.1 requests * added **enum http_connection_type** to track keep-alive vs close * replaced content_length_seen and body_index with **content_remain** * removed **post_buffer** (see handshake.c modifications) * removed post_buffer free * switch state to WSI_TOKEN_SKIPPING after URI is complete to store version * delete *spill* label (unused) * add vars to track HTTP version and connection type * HTTP version defaults to 1.0 * connection type defaults to 'close' for 1.0, keep-alive for 1.1 * additional checks in **cleanup:** label: * if HTTP version string is present and valid, set enum val appropriately * override connection default with the "Connection:" header, if present * set state to WSI_STATE_HTTP_BODY if content_length > 0 * return 0 on HTTP requests, unless LWS_CALLBACK_HTTP indicates otherwise * add vars to track remaining content_length and body chunk size * re-arrange switch case order to facilitate creation of jump-table * added new labels: * **read_ok**: normal location reach on break from switch; just return 0 * **http_complete**: check for keep-alive + init state, mode, hdr table * **http_new**: jump location for keep-alive when http_complete sees len>0 * after libwebsocket_parse, jump to one of those labels based on state * POST body handling: * don't bother iterating over input byte-by-byte or using memcpy * just pass the relevant portion of the context->service_buffer to callback
2014-07-13 01:07:36 -04:00
2018-03-07 18:15:17 +08:00
if (c == ' ') {
/* enforce starting with / */
if (!ah->frags[ah->nfrag].len)
if (issue_char(wsi, '/') < 0)
return -1;
if (ah->ups == URIPS_SEEN_SLASH_DOT_DOT) {
/*
* back up one dir level if possible
* safe against header fragmentation because
* the method URI can only be in 1 fragment
*/
if (ah->frags[ah->nfrag].len > 2) {
ah->pos--;
ah->frags[ah->nfrag].len--;
2018-03-07 18:15:17 +08:00
do {
ah->pos--;
ah->frags[ah->nfrag].len--;
} while (ah->frags[ah->nfrag].len > 1 &&
ah->data[ah->pos] != '/');
}
}
2018-03-07 18:15:17 +08:00
/* begin parsing HTTP version: */
if (issue_char(wsi, '\0') < 0)
return -1;
ah->parser_state = WSI_TOKEN_HTTP;
goto start_fragment;
}
2018-03-07 18:15:17 +08:00
r = lws_parse_urldecode(wsi, &c);
switch (r) {
case LPUR_CONTINUE:
break;
case LPUR_SWALLOW:
goto swallow;
case LPUR_FORBID:
goto forbid;
case LPUR_EXCESSIVE:
goto excessive;
default:
HTTP Version, Keep-alive support, No-copy POST This is a squashed commit from https://github.com/andrew-canaday/libwebsockets, dev/http_keepalive branch (strategies changed a few times, so the commit history is clutteread). This branch is submitted for clarity, but the other can be used as a reference or alternative. * added **enum http_version** to track HTTP/1.0 vs HTTP/1.1 requests * added **enum http_connection_type** to track keep-alive vs close * replaced content_length_seen and body_index with **content_remain** * removed **post_buffer** (see handshake.c modifications) * removed post_buffer free * switch state to WSI_TOKEN_SKIPPING after URI is complete to store version * delete *spill* label (unused) * add vars to track HTTP version and connection type * HTTP version defaults to 1.0 * connection type defaults to 'close' for 1.0, keep-alive for 1.1 * additional checks in **cleanup:** label: * if HTTP version string is present and valid, set enum val appropriately * override connection default with the "Connection:" header, if present * set state to WSI_STATE_HTTP_BODY if content_length > 0 * return 0 on HTTP requests, unless LWS_CALLBACK_HTTP indicates otherwise * add vars to track remaining content_length and body chunk size * re-arrange switch case order to facilitate creation of jump-table * added new labels: * **read_ok**: normal location reach on break from switch; just return 0 * **http_complete**: check for keep-alive + init state, mode, hdr table * **http_new**: jump location for keep-alive when http_complete sees len>0 * after libwebsocket_parse, jump to one of those labels based on state * POST body handling: * don't bother iterating over input byte-by-byte or using memcpy * just pass the relevant portion of the context->service_buffer to callback
2014-07-13 01:07:36 -04:00
return -1;
2018-03-07 18:15:17 +08:00
}
check_eol:
2018-03-07 18:15:17 +08:00
/* bail at EOL */
if (ah->parser_state != WSI_TOKEN_CHALLENGE &&
c == '\x0d') {
if (ah->ues != URIES_IDLE)
goto forbid;
c = '\0';
ah->parser_state = WSI_TOKEN_SKIPPING_SAW_CR;
lwsl_parser("*\n");
}
2018-03-07 18:15:17 +08:00
n = issue_char(wsi, c);
if ((int)n < 0)
return -1;
if (n > 0)
ah->parser_state = WSI_TOKEN_SKIPPING;
swallow:
2018-03-07 18:15:17 +08:00
/* per-protocol end of headers management */
2018-03-07 18:15:17 +08:00
if (ah->parser_state == WSI_TOKEN_CHALLENGE)
goto set_parsing_complete;
break;
2018-03-07 18:15:17 +08:00
/* collecting and checking a name part */
case WSI_TOKEN_NAME_PART:
lwsl_parser("WSI_TOKEN_NAME_PART '%c' 0x%02X (role=0x%x) "
"wsi->lextable_pos=%d\n", c, c, lwsi_role(wsi),
2018-03-07 18:15:17 +08:00
ah->lextable_pos);
if (c >= 'A' && c <= 'Z')
c += 'a' - 'A';
pos = ah->lextable_pos;
while (1) {
if (lextable[pos] & (1 << 7)) { /* 1-byte, fail on mismatch */
if ((lextable[pos] & 0x7f) != c) {
nope:
ah->lextable_pos = -1;
break;
}
/* fall thru */
pos++;
if (lextable[pos] == FAIL_CHAR)
goto nope;
ah->lextable_pos = pos;
break;
}
2018-03-07 18:15:17 +08:00
if (lextable[pos] == FAIL_CHAR)
goto nope;
/* b7 = 0, end or 3-byte */
if (lextable[pos] < FAIL_CHAR) { /* terminal marker */
ah->lextable_pos = pos;
break;
}
2018-03-07 18:15:17 +08:00
if (lextable[pos] == c) { /* goto */
ah->lextable_pos = pos + (lextable[pos + 1]) +
(lextable[pos + 2] << 8);
break;
}
/* fall thru goto */
pos += 3;
/* continue */
}
/*
* If it's h1, server needs to look out for unknown
* methods...
*/
if (ah->lextable_pos < 0 && lwsi_role_h1(wsi) &&
lwsi_role_server(wsi)) {
2018-03-07 18:15:17 +08:00
/* this is not a header we know about */
for (m = 0; m < ARRAY_SIZE(methods); m++)
if (ah->frag_index[methods[m]]) {
/*
* already had the method, no idea what
* this crap from the client is, ignore
*/
ah->parser_state = WSI_TOKEN_SKIPPING;
break;
}
/*
2018-03-07 18:15:17 +08:00
* hm it's an unknown http method from a client in fact,
* it cannot be valid http
*/
2018-03-07 18:15:17 +08:00
if (m == ARRAY_SIZE(methods)) {
/*
* are we set up to accept raw in these cases?
*/
if (lws_check_opt(wsi->vhost->options,
LWS_SERVER_OPTION_FALLBACK_TO_RAW))
return 2; /* transition to raw */
2018-03-07 18:15:17 +08:00
lwsl_info("Unknown method - dropping\n");
goto forbid;
}
break;
}
/*
* ...otherwise for a client, let him ignore unknown headers
* coming from the server
*/
if (ah->lextable_pos < 0) {
ah->parser_state = WSI_TOKEN_SKIPPING;
break;
}
2018-03-07 18:15:17 +08:00
if (lextable[ah->lextable_pos] < FAIL_CHAR) {
/* terminal state */
2018-03-07 18:15:17 +08:00
n = ((unsigned int)lextable[ah->lextable_pos] << 8) |
lextable[ah->lextable_pos + 1];
2018-03-07 18:15:17 +08:00
lwsl_parser("known hdr %d\n", n);
for (m = 0; m < ARRAY_SIZE(methods); m++)
if (n == methods[m] &&
ah->frag_index[methods[m]]) {
lwsl_warn("Duplicated method\n");
return -1;
}
2018-03-07 18:15:17 +08:00
/*
* WSORIGIN is protocol equiv to ORIGIN,
* JWebSocket likes to send it, map to ORIGIN
*/
if (n == WSI_TOKEN_SWORIGIN)
n = WSI_TOKEN_ORIGIN;
2018-03-07 18:15:17 +08:00
ah->parser_state = (enum lws_token_indexes)
(WSI_TOKEN_GET_URI + n);
ah->ups = URIPS_IDLE;
2018-03-07 18:15:17 +08:00
if (context->token_limits)
ah->current_token_limit = context->
token_limits->token_limit[
ah->parser_state];
else
ah->current_token_limit =
wsi->context->max_http_header_data;
2018-03-07 18:15:17 +08:00
if (ah->parser_state == WSI_TOKEN_CHALLENGE)
goto set_parsing_complete;
2018-03-07 18:15:17 +08:00
goto start_fragment;
}
break;
2013-02-10 18:02:31 +08:00
start_fragment:
2018-03-07 18:15:17 +08:00
ah->nfrag++;
excessive:
2018-03-07 18:15:17 +08:00
if (ah->nfrag == ARRAY_SIZE(ah->frags)) {
lwsl_warn("More hdr frags than we can deal with\n");
return -1;
}
2018-03-07 18:15:17 +08:00
ah->frags[ah->nfrag].offset = ah->pos;
ah->frags[ah->nfrag].len = 0;
ah->frags[ah->nfrag].nfrag = 0;
ah->frags[ah->nfrag].flags = 2;
n = ah->frag_index[ah->parser_state];
if (!n) { /* first fragment */
ah->frag_index[ah->parser_state] = ah->nfrag;
ah->hdr_token_idx = ah->parser_state;
break;
}
/* continuation */
while (ah->frags[n].nfrag)
n = ah->frags[n].nfrag;
ah->frags[n].nfrag = ah->nfrag;
2013-02-10 18:02:31 +08:00
2018-03-07 18:15:17 +08:00
if (issue_char(wsi, ' ') < 0)
return -1;
break;
2013-02-10 18:02:31 +08:00
2018-03-07 18:15:17 +08:00
/* skipping arg part of a name we didn't recognize */
case WSI_TOKEN_SKIPPING:
lwsl_parser("WSI_TOKEN_SKIPPING '%c'\n", c);
2018-03-07 18:15:17 +08:00
if (c == '\x0d')
ah->parser_state = WSI_TOKEN_SKIPPING_SAW_CR;
break;
2018-03-07 18:15:17 +08:00
case WSI_TOKEN_SKIPPING_SAW_CR:
lwsl_parser("WSI_TOKEN_SKIPPING_SAW_CR '%c'\n", c);
if (ah->ues != URIES_IDLE)
goto forbid;
if (c == '\x0a') {
ah->parser_state = WSI_TOKEN_NAME_PART;
ah->lextable_pos = 0;
} else
ah->parser_state = WSI_TOKEN_SKIPPING;
break;
/* we're done, ignore anything else */
2018-03-07 18:15:17 +08:00
case WSI_PARSING_COMPLETE:
lwsl_parser("WSI_PARSING_COMPLETE '%c'\n", c);
break;
}
2018-03-07 18:15:17 +08:00
} while (*len);
return 0;
set_parsing_complete:
if (ah->ues != URIES_IDLE)
goto forbid;
2013-02-10 18:02:31 +08:00
if (lws_hdr_total_length(wsi, WSI_TOKEN_UPGRADE)) {
if (lws_hdr_total_length(wsi, WSI_TOKEN_VERSION))
wsi->rx_frame_type = /* temp for ws version index */
2013-02-10 18:02:31 +08:00
atoi(lws_hdr_simple_ptr(wsi, WSI_TOKEN_VERSION));
lwsl_parser("v%02d hdrs done\n", wsi->rx_frame_type);
}
ah->parser_state = WSI_PARSING_COMPLETE;
wsi->hdr_parsing_completed = 1;
return 0;
forbid:
lwsl_notice(" forbidding on uri sanitation\n");
lws_return_http_status(wsi, HTTP_STATUS_FORBIDDEN, NULL);
return -1;
}