client: http1.1 pipelining
This commit is contained in:
parent 3647cd8968, commit 04e1661411

8 changed files with 452 additions and 86 deletions
@@ -1080,7 +1080,29 @@ prepare the client SSL context for the vhost after creating the vhost, since
this is not normally done if the vhost was set up to listen / serve. Call
the api lws_init_vhost_client_ssl() to also allow client SSL on the vhost.

@section clipipe Pipelining Client Requests to same host

If you are opening more client requests to the same host and port, you
can give the flag LCCSCF_PIPELINE on `info.ssl_connection` to indicate
you wish to pipeline them.

Without the flag, the client connections occur concurrently, each one
opening its own socket and, if requested, its own tls wrapper. That is
fast, but resource-intensive.

With the flag, lws will queue subsequent client connections on the first
connection to the same host and port. Once the first connection has
confirmed that the server supports pipelining / keep-alive, it lets the
queued client connections send their headers ahead of time to create a
pipeline of requests on the server side.

In this way only one tcp connection and tls wrapper is required to
transfer all the transactions sequentially. It takes a little longer,
but it can make a significant difference to resources on both sides.

If lws learns from the first response header that keepalive is not
possible, it marks itself with that information and detaches any queued
clients so that they fall back to making their own individual
connections.
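
As an illustration of the API surface this describes, a minimal connect-loop
sketch might look like the following; it assumes a `context` already created
with client support, and the server name and repeat count are placeholders:

```c
/* sketch only: queue three GETs so lws can pipeline them */
struct lws_client_connect_info i;
int m;

memset(&i, 0, sizeof i);
i.context = context;           /* from lws_create_context() */
i.address = "example.com";     /* placeholder server */
i.port = 443;
i.path = "/";
i.host = i.address;
i.origin = i.address;
i.method = "GET";
i.ssl_connection = LCCSCF_USE_SSL | LCCSCF_PIPELINE;

for (m = 0; m < 3; m++)
    if (!lws_client_connect_via_info(&i))
        lwsl_err("client connect %d failed\n", m);
```

The second and third connections queue on the first; if the server turns out
not to support keep-alive, they detach and connect individually as described
above.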

@section vhosts Using lws vhosts

@@ -28,15 +28,16 @@ lws_getaddrinfo46(struct lws *wsi, const char *ads, struct addrinfo **result)
struct lws *
lws_client_connect_2(struct lws *wsi)
{
    sockaddr46 sa46;
    struct addrinfo *result;
    struct lws_context *context = wsi->context;
    struct lws_context_per_thread *pt = &context->pt[(int)wsi->tsi];
    const char *cce = "", *iface, *adsin, *meth;
    struct lws *wsi_piggy = NULL;
    struct addrinfo *result;
    struct lws_pollfd pfd;
    const char *cce = "", *iface;
    int n, port;
    ssize_t plen = 0;
    const char *ads;
    sockaddr46 sa46;
    int n, port;
#ifdef LWS_WITH_IPV6
    char ipv6only = lws_check_opt(wsi->vhost->options,
                    LWS_SERVER_OPTION_IPV6_V6ONLY_MODIFY |
@@ -55,6 +56,61 @@ lws_client_connect_2(struct lws *wsi)
        goto oom4;
    }

    /* we can only piggyback GET */

    meth = lws_hdr_simple_ptr(wsi, _WSI_TOKEN_CLIENT_METHOD);
    if (meth && strcmp(meth, "GET"))
        goto create_new_conn;

    /* we only pipeline connections that said it was okay */

    if (!wsi->client_pipeline)
        goto create_new_conn;

    /*
     * let's take a look first and see if there are any already-active
     * client connections we can piggy-back on.
     */

    adsin = lws_hdr_simple_ptr(wsi, _WSI_TOKEN_CLIENT_PEER_ADDRESS);
    lws_vhost_lock(wsi->vhost);
    lws_start_foreach_dll_safe(struct lws_dll_lws *, d, d1,
                   wsi->vhost->dll_active_client_conns.next) {
        struct lws *w = lws_container_of(d, struct lws,
                         dll_active_client_conns);

        if (w->ah && !strcmp(adsin, lws_hdr_simple_ptr(w,
                    _WSI_TOKEN_CLIENT_PEER_ADDRESS)) &&
            wsi->c_port == w->c_port) {
            /* someone else is already connected to the right guy */

            /* do we know for a fact pipelining won't fly? */
            if (w->keepalive_rejected) {
                lwsl_info("defeating pipelining due to no KA on server\n");
                goto create_new_conn;
            }
            /*
             * ...let's add ourselves to his transaction queue...
             */
            lws_dll_lws_add_front(&wsi->dll_client_transaction_queue,
                      &w->dll_client_transaction_queue_head);
            /*
             * pipeline our headers out on him, and wait for our
             * turn at client transaction_complete to take over
             * parsing the rx.
             */

            wsi_piggy = w;

            lws_vhost_unlock(wsi->vhost);
            goto send_hs;
        }

    } lws_end_foreach_dll_safe(d, d1);
    lws_vhost_unlock(wsi->vhost);

create_new_conn:

    /*
     * start off allowing ipv6 on connection if vhost allows it
     */
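
Condensed, the eligibility test in the block above amounts to the following;
`can_piggyback()` is a hypothetical helper written only to summarize the
checks, not a function this commit adds:

```c
/* hypothetical summary of the checks above: may wsi join w's pipeline? */
static int
can_piggyback(struct lws *wsi, struct lws *w, const char *meth,
              const char *adsin)
{
    return meth && !strcmp(meth, "GET") &&    /* only GET is pipelined */
           wsi->client_pipeline &&            /* user gave LCCSCF_PIPELINE */
           w->ah &&                           /* candidate still active */
           !strcmp(adsin, lws_hdr_simple_ptr(w,
                        _WSI_TOKEN_CLIENT_PEER_ADDRESS)) &&
           wsi->c_port == w->c_port &&        /* same host and port */
           !w->keepalive_rejected;            /* server does keep-alive */
}
```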
@@ -378,31 +434,71 @@ lws_client_connect_2(struct lws *wsi)
    }
#endif

send_hs:
    if (!lws_dll_is_null(&wsi->dll_client_transaction_queue)) {
        /*
         * We are pipelining on an already-established connection...
         * we can skip tls establishment.
         */
        wsi->mode = LWSCM_WSCL_ISSUE_HANDSHAKE2;

        /*
         * we can't send our headers directly, because they have to
         * be sent when the parent is writeable. The parent will check
         * for anybody on his client transaction queue that is in
         * LWSCM_WSCL_ISSUE_HANDSHAKE2, and let them write.
         *
         * If we are trying to do this too early, before the master
         * connection has written his own headers, we simply wait on
         * the queue until it is our turn to write them.
         */
        lws_callback_on_writable(wsi_piggy);
        lwsl_debug("wsi %p: waiting to send headers\n", wsi);
    } else {
        /* we are making our own connection */
        wsi->mode = LWSCM_WSCL_ISSUE_HANDSHAKE;

        /*
         * provoke service to issue the handshake directly.
         *
         * we need to do it this way because in the proxy case, this is
         * the next state and executed only if and when we get a good
         * proxy response inside the state machine... but notice in
         * SSL case this may not have sent anything yet with 0 return,
         * and won't until many retries from main loop. To stop that
         * becoming endless, cover with a timeout.
         */

        lws_set_timeout(wsi, PENDING_TIMEOUT_SENT_CLIENT_HANDSHAKE,
                AWAITING_TIMEOUT);

        pfd.fd = wsi->desc.sockfd;
        pfd.events = LWS_POLLIN;
        pfd.revents = LWS_POLLIN;

        n = lws_service_fd(context, &pfd);
        if (n < 0) {
            cce = "first service failed";
            goto failed;
        }
        if (n) /* returns 1 on failure after closing wsi */
            return NULL;
    }

    /*
     * provoke service to issue the handshake directly
     * we need to do it this way because in the proxy case, this is the
     * next state and executed only if and when we get a good proxy
     * response inside the state machine... but notice in SSL case this
     * may not have sent anything yet with 0 return, and won't until some
     * many retries from main loop. To stop that becoming endless,
     * cover with a timeout.
     * If we made our own connection, and we're doing a method that can take
     * a pipeline, we are an "active client connection".
     *
     * Add ourselves to the vhost list of those so that others can
     * piggyback on our transaction queue
     */

    lws_set_timeout(wsi, PENDING_TIMEOUT_SENT_CLIENT_HANDSHAKE,
            AWAITING_TIMEOUT);

    wsi->mode = LWSCM_WSCL_ISSUE_HANDSHAKE;
    pfd.fd = wsi->desc.sockfd;
    pfd.events = LWS_POLLIN;
    pfd.revents = LWS_POLLIN;

    n = lws_service_fd(context, &pfd);
    if (n < 0) {
        cce = "first service failed";
        goto failed;
    if (meth && !strcmp(meth, "GET") &&
        lws_dll_is_null(&wsi->dll_client_transaction_queue)) {
        lws_vhost_lock(wsi->vhost);
        lws_dll_lws_add_front(&wsi->dll_active_client_conns,
                      &wsi->vhost->dll_active_client_conns);
        lws_vhost_unlock(wsi->vhost);
    }
    if (n) /* returns 1 on failure after closing wsi */
        return NULL;

    return wsi;
@@ -452,7 +548,8 @@ LWS_VISIBLE struct lws *
lws_client_reset(struct lws **pwsi, int ssl, const char *address, int port,
         const char *path, const char *host)
{
    char origin[300] = "", protocol[300] = "", method[32] = "", iface[16] = "", *p;
    char origin[300] = "", protocol[300] = "", method[32] = "",
         iface[16] = "", *p;
    struct lws *wsi = *pwsi;

    if (wsi->redirects == 3) {
@@ -778,6 +875,7 @@ lws_client_connect_via_info(struct lws_client_connect_info *i)
    }

    wsi->protocol = &wsi->vhost->protocols[0];
    wsi->client_pipeline = !!(i->ssl_connection & LCCSCF_PIPELINE);

    /*
     * 1) for http[s] connection, allow protocol selection by name
@@ -77,12 +77,67 @@ lws_client_http_body_pending(struct lws *wsi, int something_left_to_send)
    wsi->client_http_body_pending = !!something_left_to_send;
}

int
lws_client_socket_service(struct lws_context *context, struct lws *wsi,
              struct lws_pollfd *pollfd)
/*
 * return self, or queued client wsi we are acting on behalf of
 */

struct lws *
lws_client_wsi_effective(struct lws *wsi)
{
    struct lws *wsi_eff = wsi;

    if (!wsi->transaction_from_pipeline_queue ||
        !wsi->dll_client_transaction_queue_head.next)
        return wsi;

    /*
     * The head is the last queued transaction... so
     * the guy we are fulfilling here is the tail
     */

    lws_vhost_lock(wsi->vhost);
    lws_start_foreach_dll_safe(struct lws_dll_lws *, d, d1,
                   wsi->dll_client_transaction_queue_head.next) {
        if (d->next == NULL)
            wsi_eff = lws_container_of(d, struct lws,
                           dll_client_transaction_queue);
    } lws_end_foreach_dll_safe(d, d1);
    lws_vhost_unlock(wsi->vhost);

    return wsi_eff;
}

/*
 * return self or the guy we are queued under
 */

struct lws *
lws_client_wsi_master(struct lws *wsi)
{
    struct lws *wsi_eff = wsi;
    struct lws_dll_lws *d;

    lws_vhost_lock(wsi->vhost);
    d = wsi->dll_client_transaction_queue.prev;
    while (d) {
        wsi_eff = lws_container_of(d, struct lws,
                       dll_client_transaction_queue_head);

        d = d->prev;
    }
    lws_vhost_unlock(wsi->vhost);

    return wsi_eff;
}

int
lws_client_socket_service(struct lws *wsi, struct lws_pollfd *pollfd,
              struct lws *wsi_conn)
{
    struct lws_context *context = wsi->context;
    struct lws_context_per_thread *pt = &context->pt[(int)wsi->tsi];
    char *p = (char *)&pt->serv_buf[0];
    struct lws *w;
#if defined(LWS_OPENSSL_SUPPORT)
    char ebuf[128];
#endif
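
The relationship these two helpers walk can be pictured roughly like this
(an editorial sketch, not part of the commit):

```c
/*
 * master wsi (on the vhost's dll_active_client_conns list)
 *   .dll_client_transaction_queue_head
 *         |
 *         v   newest first, added with lws_dll_lws_add_front()
 *   wsi C --> wsi B --> wsi A --> NULL
 *   (last queued)       (tail: transaction currently being fulfilled,
 *                        what lws_client_wsi_effective() returns)
 *
 * lws_client_wsi_master() goes the other way: from a queued wsi it
 * follows .prev links back up to the wsi that owns the queue head.
 */
```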
@@ -95,6 +150,35 @@ lws_client_socket_service(struct lws_context *context, struct lws *wsi,
    char conn_mode = 0, pending_timeout = 0;
#endif

    if ((pollfd->revents & LWS_POLLOUT) &&
        wsi->keepalive_active &&
        wsi->dll_client_transaction_queue_head.next) {

        lwsl_debug("%s: pollout HANDSHAKE2\n", __func__);

        /* we have a transaction queue that wants to pipeline */
        lws_vhost_lock(wsi->vhost);
        lws_start_foreach_dll_safe(struct lws_dll_lws *, d, d1,
                   wsi->dll_client_transaction_queue_head.next) {
            struct lws *w = lws_container_of(d, struct lws,
                         dll_client_transaction_queue);

            if (w->mode == LWSCM_WSCL_ISSUE_HANDSHAKE2) {
                /*
                 * pollfd has the master sockfd in it... we
                 * need to use that in HANDSHAKE2 to understand
                 * which wsi to actually write on
                 */
                lws_client_socket_service(w, pollfd, wsi);
                lws_callback_on_writable(wsi);
                break;
            }
        } lws_end_foreach_dll_safe(d, d1);
        lws_vhost_unlock(wsi->vhost);

        return 0;
    }

    switch (wsi->mode) {

    case LWSCM_WSCL_WAITING_CONNECT:
@@ -320,7 +404,11 @@ start_ws_handshake:
        /* send our request to the server */
        lws_latency_pre(context, wsi);

        n = lws_ssl_capable_write(wsi, (unsigned char *)sb, (int)(p - sb));
        w = lws_client_wsi_master(wsi);
        lwsl_debug("%s: HANDSHAKE2: %p: sending headers on %p\n",
               __func__, wsi, w);

        n = lws_ssl_capable_write(w, (unsigned char *)sb, (int)(p - sb));
        lws_latency(context, wsi, "send lws_issue_raw", n,
                n == p - sb);
        switch (n) {
@@ -342,6 +430,8 @@ start_ws_handshake:
            break;
        }

        lws_callback_on_writable(w);

        goto client_http_body_sent;

    case LWSCM_WSCL_ISSUE_HTTP_BODY:
@@ -353,6 +443,7 @@ start_ws_handshake:
            break;
        }
client_http_body_sent:
        /* prepare ourselves to do the parsing */
        wsi->ah->parser_state = WSI_TOKEN_NAME_PART;
        wsi->ah->lextable_pos = 0;
        wsi->mode = LWSCM_WSCL_WAITING_SERVER_REPLY;
@@ -477,41 +568,89 @@ strtolower(char *s)
int LWS_WARN_UNUSED_RESULT
lws_http_transaction_completed_client(struct lws *wsi)
{
    lwsl_debug("%s: wsi %p\n", __func__, wsi);
    /* if we can't go back to accept new headers, drop the connection */
    if (wsi->http.connection_type != HTTP_CONNECTION_KEEP_ALIVE) {
        lwsl_info("%s: %p: close connection\n", __func__, wsi);
        return 1;
    struct lws *wsi_eff = lws_client_wsi_effective(wsi);

    lwsl_info("%s: wsi: %p, wsi_eff: %p\n", __func__, wsi, wsi_eff);

    if (user_callback_handle_rxflow(wsi_eff->protocol->callback,
            wsi_eff, LWS_CALLBACK_COMPLETED_CLIENT_HTTP,
            wsi_eff->user_space, NULL, 0)) {
        lwsl_debug("%s: Completed call returned nonzero (mode %d)\n",
               __func__, wsi_eff->mode);
        return -1;
    }

    /* we don't support chained client connections yet */
    return 1;
#if 0
    /*
     * Are we constitutionally capable of having a queue, ie, we are on
     * the "active client connections" list?
     *
     * If not, that's it for us.
     */

    if (lws_dll_is_null(&wsi->dll_active_client_conns))
        return -1;

    /* if this was a queued guy, close him and remove from queue */

    if (wsi->transaction_from_pipeline_queue) {
        lwsl_debug("closing queued wsi %p\n", wsi_eff);
        /* so the close doesn't trigger a CCE */
        wsi_eff->already_did_cce = 1;
        __lws_close_free_wsi(wsi_eff,
                     LWS_CLOSE_STATUS_CLIENT_TRANSACTION_DONE,
                     "queued client done");
    }

    /* after the first one, they can only be coming from the queue */
    wsi->transaction_from_pipeline_queue = 1;

    /* is there a new tail after removing that one? */
    wsi_eff = lws_client_wsi_effective(wsi);

    /*
     * Do we have something pipelined waiting?
     * it's OK if he hasn't managed to send his headers yet... he's next
     * in line to do that...
     */
    if (wsi_eff == wsi) {
        /*
         * Nothing pipelined... we should hang around a bit
         * in case something turns up...
         */
        lwsl_info("%s: nothing pipelined waiting\n", __func__);
        if (wsi->ah) {
            lws_header_table_force_to_detachable_state(wsi);
            lws_header_table_detach(wsi, 0);
        }
        lws_set_timeout(wsi, PENDING_TIMEOUT_CLIENT_CONN_IDLE, 5);

        return 0;
    }

    /*
     * H1: we can serialize the queued guys into the same ah
     * (H2: everybody needs their own ah until STREAM_END)
     */

    /* otherwise set ourselves up ready to go again */
    wsi->state = LWSS_CLIENT_HTTP_ESTABLISHED;
    wsi->mode = LWSCM_HTTP_CLIENT_ACCEPTED;
    wsi->http.rx_content_length = 0;
    wsi->hdr_parsing_completed = 0;

    /* He asked for it to stay alive indefinitely */
    lws_set_timeout(wsi, NO_PENDING_TIMEOUT, 0);
    wsi->ah->parser_state = WSI_TOKEN_NAME_PART;
    wsi->ah->lextable_pos = 0;
    wsi->mode = LWSCM_WSCL_WAITING_SERVER_REPLY;

    /*
     * As client, nothing new is going to come until we ask for it
     * we can drop the ah, if any
     */
    if (wsi->ah) {
        lws_header_table_force_to_detachable_state(wsi);
        lws_header_table_detach(wsi, 0);
    }
    lws_set_timeout(wsi, PENDING_TIMEOUT_AWAITING_SERVER_RESPONSE,
            wsi->context->timeout_secs);

    /* If we're (re)starting on headers, need other implied init */
    wsi->ues = URIES_IDLE;
    wsi->ah->ues = URIES_IDLE;

    lwsl_info("%s: %p: keep-alive await new transaction\n", __func__, wsi);
    lwsl_info("%s: %p: new queued transaction as %p\n", __func__, wsi, wsi_eff);
    lws_callback_on_writable(wsi);

    return 0;
#endif
}

LWS_VISIBLE LWS_EXTERN unsigned int
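
From the application's point of view, each queued transaction still receives
its own LWS_CALLBACK_COMPLETED_CLIENT_HTTP against the effective wsi, so a
handler can count completions the same way whether or not pipelining
happened. A sketch mirroring the minimal example further down (`completed`
and `COUNT` are assumed to be defined by the application):

```c
/* sketch: per-transaction completion accounting; completed and COUNT
 * are application-defined, as in the minimal example below */
static int
callback_http(struct lws *wsi, enum lws_callback_reasons reason,
              void *user, void *in, size_t len)
{
    switch (reason) {
    case LWS_CALLBACK_COMPLETED_CLIENT_HTTP:
        /* fires once per transaction, pipelined or not */
        if (++completed == COUNT)
            lws_cancel_service(lws_get_context(wsi));
        break;
    default:
        break;
    }

    return 0;
}
```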
@@ -546,6 +685,7 @@ lws_client_interpret_server_handshake(struct lws *wsi)
    struct lws_context *context = wsi->context;
    const char *pc, *prot, *ads = NULL, *path, *cce = NULL;
    struct allocated_headers *ah = NULL;
    struct lws *w = lws_client_wsi_effective(wsi);
    char *p, *q;
    char new_path[300];
#if !defined(LWS_WITHOUT_EXTENSIONS)
@@ -683,6 +823,48 @@ lws_client_interpret_server_handshake(struct lws *wsi)

    if (!wsi->do_ws) {

        /* if keepalive is allowed, enable the queued pipeline guys */

        if (w == wsi) { /* ie, coming to this for the first time */
            if (wsi->http.connection_type == HTTP_CONNECTION_KEEP_ALIVE)
                wsi->keepalive_active = 1;
            else {
                /*
                 * Ugh... now the main http connection has seen
                 * both sides, we learn the server doesn't
                 * support keepalive.
                 *
                 * That means any guys queued on us are going
                 * to have to be restarted from connect2 with
                 * their own connections.
                 */

                /*
                 * stick around telling any new guys they can't
                 * pipeline to this server
                 */
                wsi->keepalive_rejected = 1;

                lws_vhost_lock(wsi->vhost);
                lws_start_foreach_dll_safe(struct lws_dll_lws *, d, d1,
                        wsi->dll_client_transaction_queue_head.next) {
                    struct lws *ww = lws_container_of(d, struct lws,
                            dll_client_transaction_queue);

                    /* remove him from our queue */
                    lws_dll_lws_remove(&ww->dll_client_transaction_queue);
                    /* give up on pipelining */
                    ww->client_pipeline = 0;

                    /* go back to "trying to connect" state */
                    lws_union_transition(ww, LWSCM_HTTP_CLIENT);
                    ww->user_space = NULL;
                    ww->state = LWSS_CLIENT_UNCONNECTED;
                } lws_end_foreach_dll_safe(d, d1);
                lws_vhost_unlock(wsi->vhost);
            }
        }

#ifdef LWS_WITH_HTTP_PROXY
        wsi->perform_rewrite = 0;
        if (lws_hdr_total_length(wsi, WSI_TOKEN_HTTP_CONTENT_TYPE)) {
@@ -716,7 +898,7 @@ lws_client_interpret_server_handshake(struct lws *wsi)
            wsi->http.rx_content_length =
                    atoll(lws_hdr_simple_ptr(wsi,
                        WSI_TOKEN_HTTP_CONTENT_LENGTH));
            lwsl_notice("%s: incoming content length %llu\n",
            lwsl_info("%s: incoming content length %llu\n",
                   __func__, (unsigned long long)
                    wsi->http.rx_content_length);
            wsi->http.rx_content_remain =
@@ -751,10 +933,18 @@ lws_client_interpret_server_handshake(struct lws *wsi)
            goto bail3;
        }

        /* free up his parsing allocations */
        lws_header_table_detach(wsi, 0);
        /*
         * for pipelining, master needs to keep his ah... guys who
         * queued on him can drop it now though.
         */

        lwsl_notice("%s: client connection up\n", __func__);
        if (w != wsi) {
            /* free up parsing allocations for queued guy */
            lws_header_table_force_to_detachable_state(w);
            lws_header_table_detach(w, 0);
        }

        lwsl_info("%s: client connection up\n", __func__);

        return 0;
    }
@@ -539,6 +539,35 @@ __lws_close_free_wsi(struct lws *wsi, enum lws_close_status reason, const char *
    pt = &context->pt[(int)wsi->tsi];
    lws_stats_atomic_bump(wsi->context, pt, LWSSTATS_C_API_CLOSE, 1);

#if !defined(LWS_NO_CLIENT)
    /* we are no longer an active client connection that can piggyback */
    lws_dll_lws_remove(&wsi->dll_active_client_conns);

    /*
     * if we have wsi in our transaction queue and we are closing, we
     * must go through and close all those first
     */
    lws_vhost_lock(wsi->vhost);
    lws_start_foreach_dll_safe(struct lws_dll_lws *, d, d1,
                   wsi->dll_client_transaction_queue_head.next) {
        struct lws *w = lws_container_of(d, struct lws,
                         dll_client_transaction_queue);

        __lws_close_free_wsi(w, reason, "trans q leader closing");
    } lws_end_foreach_dll_safe(d, d1);

    /*
     * !!! If we are closing, but we have pending pipelined transaction
     * results we already sent headers for, that's going to destroy sync
     * for HTTP/1 and leave H2 stream with no live swsi.
     *
     * However this is normal if we are being closed because the transaction
     * queue leader is closing.
     */
    lws_dll_lws_remove(&wsi->dll_client_transaction_queue);
    lws_vhost_unlock(wsi->vhost);
#endif

    /* if we have children, close them first */
    if (wsi->child_list) {
        wsi2 = wsi->child_list;
@@ -823,12 +852,11 @@ just_kill_connection:
    }

    if ((wsi->mode == LWSCM_WSCL_WAITING_SERVER_REPLY ||
         wsi->mode == LWSCM_WSCL_WAITING_CONNECT) &&
        !wsi->already_did_cce) {
        wsi->protocol->callback(wsi,
                LWS_CALLBACK_CLIENT_CONNECTION_ERROR,
         wsi->mode == LWSCM_WSCL_WAITING_CONNECT) &&
        !wsi->already_did_cce)
        wsi->protocol->callback(wsi,
                LWS_CALLBACK_CLIENT_CONNECTION_ERROR,
                wsi->user_space, NULL, 0);
    }

    if (wsi->mode & LWSCM_FLAG_IMPLIES_CALLBACK_CLOSED_CLIENT_HTTP) {
        const struct lws_protocols *pro = wsi->protocol;
@@ -800,6 +800,8 @@ enum lws_close_status {
    connection was closed due to a failure to perform a TLS handshake
    (e.g., the server certificate can't be verified). */

    LWS_CLOSE_STATUS_CLIENT_TRANSACTION_DONE = 2000,

    /****** add new things just above ---^ ******/

    LWS_CLOSE_STATUS_NOSTATUS_CONTEXT_DESTROY = 9999,
@@ -3266,7 +3268,11 @@ enum lws_client_connect_ssl_connection_flags {
    LCCSCF_USE_SSL = (1 << 0),
    LCCSCF_ALLOW_SELFSIGNED = (1 << 1),
    LCCSCF_SKIP_SERVER_CERT_HOSTNAME_CHECK = (1 << 2),
    LCCSCF_ALLOW_EXPIRED = (1 << 3)
    LCCSCF_ALLOW_EXPIRED = (1 << 3),

    LCCSCF_PIPELINE = (1 << 16),
    /**< Serialize / pipeline multiple client connections
     * on a single connection where possible. */
};

/** struct lws_client_connect_info - parameters to connect with when using
@@ -3280,7 +3286,7 @@ struct lws_client_connect_info {
    int port;
    /**< remote port to connect to */
    int ssl_connection;
    /**< nonzero for ssl */
    /**< 0, or a combination of LCCSCF_ flags */
    const char *path;
    /**< uri path */
    const char *host;
@@ -4556,6 +4562,7 @@ enum pending_timeout {
    PENDING_TIMEOUT_CLOSE_SEND = 24,
    PENDING_TIMEOUT_HOLDING_AH = 25,
    PENDING_TIMEOUT_UDP_IDLE = 26,
    PENDING_TIMEOUT_CLIENT_CONN_IDLE = 27,

    /****** add new things just above ---^ ******/
@@ -5474,6 +5481,8 @@ struct lws_dll_lws { /* typed as struct lws * */
    struct lws_dll_lws *next;
};

#define lws_dll_is_null(___dll) (!(___dll)->prev && !(___dll)->next)

static inline void
lws_dll_lws_add_front(struct lws_dll_lws *_a, struct lws_dll_lws *_head)
{
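
As a rough usage sketch of this list type with the iteration macros used
throughout the diff (the `item`/`owner` structs and field names here are
hypothetical):

```c
/* hypothetical structs, only to show the list pattern */
struct item {
    struct lws_dll_lws list;    /* our link in the owner's list */
    int payload;
};

struct owner {
    struct lws_dll_lws head;    /* list head; newest entry first */
};

static void
example(struct owner *o, struct item *it)
{
    /* push at the front of the list */
    lws_dll_lws_add_front(&it->list, &o->head);

    /* walk safely: removal during iteration is allowed */
    lws_start_foreach_dll_safe(struct lws_dll_lws *, d, d1,
                   o->head.next) {
        struct item *i = lws_container_of(d, struct item, list);

        if (i->payload < 0)
            lws_dll_lws_remove(&i->list);
    } lws_end_foreach_dll_safe(d, d1);
}
```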
@@ -971,6 +971,9 @@ struct lws_vhost {
    const struct lws_protocol_vhost_options *pvo;
    const struct lws_protocol_vhost_options *headers;
    struct lws **same_vh_protocol_list;
#if !defined(LWS_NO_CLIENT)
    struct lws_dll_lws dll_active_client_conns;
#endif
    const char *error_document_404;
#ifdef LWS_OPENSSL_SUPPORT
    lws_tls_ctx *ssl_ctx;
@@ -1875,7 +1878,7 @@ struct lws {
#endif
    const struct lws_protocols *protocol;
    struct lws **same_vh_protocol_prev, *same_vh_protocol_next;
    /* we get on the list if either the timeout or the timer is valid */

    struct lws_dll_lws dll_timeout;
    struct lws_dll_lws dll_hrtimer;
#if defined(LWS_WITH_PEER_LIMITS)
@@ -1887,6 +1890,9 @@ struct lws {
    unsigned char *preamble_rx;
#ifndef LWS_NO_CLIENT
    struct client_info_stash *stash;
    struct lws_dll_lws dll_active_client_conns;
    struct lws_dll_lws dll_client_transaction_queue_head;
    struct lws_dll_lws dll_client_transaction_queue;
#endif
    void *user_space;
    void *opaque_parent_data;
@@ -1962,7 +1968,7 @@ struct lws {
    unsigned int rxflow_will_be_applied:1;
    unsigned int event_pipe:1;
    unsigned int on_same_vh_list:1;
    unsigned int handling_404;
    unsigned int handling_404:1;

    unsigned int could_have_pending:1; /* detect back-to-back writes */
    unsigned int outer_will_close:1;
@@ -1975,6 +1981,10 @@ struct lws {
    unsigned int chunked:1; /* if the clientside connection is chunked */
    unsigned int client_rx_avail:1;
    unsigned int client_http_body_pending:1;
    unsigned int transaction_from_pipeline_queue:1;
    unsigned int keepalive_active:1;
    unsigned int keepalive_rejected:1;
    unsigned int client_pipeline:1;
#endif
#ifdef LWS_WITH_HTTP_PROXY
    unsigned int perform_rewrite:1;
@@ -2596,9 +2606,11 @@ lws_rewrite_parse(struct lws_rewrite *r, const unsigned char *in, int in_len);
#endif

#ifndef LWS_NO_CLIENT
LWS_EXTERN int lws_client_socket_service(struct lws_context *context,
                     struct lws *wsi,
                     struct lws_pollfd *pollfd);
LWS_EXTERN int lws_client_socket_service(struct lws *wsi,
                     struct lws_pollfd *pollfd,
                     struct lws *wsi_conn);
LWS_EXTERN struct lws *
lws_client_wsi_effective(struct lws *wsi);
LWS_EXTERN int LWS_WARN_UNUSED_RESULT
lws_http_transaction_completed_client(struct lws *wsi);
#ifdef LWS_OPENSSL_SUPPORT
@@ -1018,14 +1018,18 @@ spin_chunks:
            lws_rewrite_parse(wsi->rw, (unsigned char *)*buf, n);
        else
#endif
        if (user_callback_handle_rxflow(wsi->protocol->callback,
                wsi, LWS_CALLBACK_RECEIVE_CLIENT_HTTP_READ,
                wsi->user_space, *buf, n)) {
        {
            struct lws *wsi_eff = lws_client_wsi_effective(wsi);

            if (user_callback_handle_rxflow(wsi_eff->protocol->callback,
                    wsi_eff, LWS_CALLBACK_RECEIVE_CLIENT_HTTP_READ,
                    wsi_eff->user_space, *buf, n)) {
                lwsl_debug("%s: RECEIVE_CLIENT_HTTP_READ returned -1\n",
                       __func__);

                return -1;
            }
        }

        if (wsi->chunked && wsi->chunk_remaining) {
            (*buf) += n;
@@ -1050,12 +1054,6 @@ spin_chunks:
    return 0;

completed:
    if (user_callback_handle_rxflow(wsi->protocol->callback,
            wsi, LWS_CALLBACK_COMPLETED_CLIENT_HTTP,
            wsi->user_space, NULL, 0)) {
        lwsl_debug("%s: Completed call returned nonzero (mode %d)\n", __func__, wsi->mode);
        return -1;
    }

    if (lws_http_transaction_completed_client(wsi)) {
        lwsl_notice("%s: transaction completed says -1\n", __func__);
@@ -1884,7 +1882,7 @@ drain:
            goto close_and_handled;
        }

        n = lws_client_socket_service(context, wsi, pollfd);
        n = lws_client_socket_service(wsi, pollfd, NULL);
        if (n)
            return 1;
        goto handled;
@@ -12,6 +12,19 @@
 * Currently that takes the form of 8 individual simultaneous tcp and
 * tls connections, which happen concurrently. Notice that the ordering
 * of the returned payload may be intermingled for the various connections.
 *
 * By default the connections happen all together at the beginning and operate
 * concurrently, which is fast. However this is resource-intensive: there are
 * 8 tcp connections and 8 tls tunnels on both the client and server. You can
 * instead opt to have the connections happen one after the other inside a
 * single tcp connection and tls tunnel, using HTTP/1.1 pipelining. To be
 * eligible to be pipelined on another existing connection to the same server,
 * the client connection must have the LCCSCF_PIPELINE flag on its
 * info.ssl_connection member (this is independent of whether the connection
 * is in ssl mode or not).
 *
 * Pipelined connections are slower (2.3s vs 1.6s for 8 connections), since the
 * transfers are serialized, but much less resource-intensive.
 */

#include <libwebsockets.h>
@@ -84,7 +97,7 @@ callback_http(struct lws *wsi, enum lws_callback_reasons reason,
        if (++completed == COUNT) {
            lwsl_user("Done: failed: %d\n", failed);
            interrupted = 1;
            /* so we exit without the poll wait */
            /* so we exit immediately */
            lws_cancel_service(lws_get_context(wsi));
        }
        break;
@@ -97,12 +110,7 @@ callback_http(struct lws *wsi, enum lws_callback_reasons reason,
}

static const struct lws_protocols protocols[] = {
    {
        "http",
        callback_http,
        0,
        0,
    },
    { "http", callback_http, 0, 0, },
    { NULL, NULL, 0, 0 }
};
@@ -159,7 +167,8 @@ int main(int argc, char **argv)
    i.path = "/";
    i.host = i.address;
    i.origin = i.address;
    i.ssl_connection = 1;
    i.ssl_connection = LCCSCF_PIPELINE /* enables http1.1 pipelining */ |
               LCCSCF_USE_SSL;
    i.method = "GET";

    i.protocol = protocols[0].name;