1
0
Fork 0
mirror of https://github.com/warmcat/libwebsockets.git synced 2025-03-09 00:00:04 +01:00

fix minimal-server-tls

This commit is contained in:
Andy Green 2018-04-27 09:13:23 +08:00
parent c9fb42bb8f
commit 82adc07c0a
5 changed files with 46 additions and 41 deletions

View file

@@ -556,25 +556,27 @@ __lws_close_free_wsi(struct lws *wsi, enum lws_close_status reason, const char *
* if we have wsi in our transaction queue, if we are closing we
* must go through and close all those first
*/
lws_vhost_lock(wsi->vhost);
lws_start_foreach_dll_safe(struct lws_dll_lws *, d, d1,
wsi->dll_client_transaction_queue_head.next) {
struct lws *w = lws_container_of(d, struct lws,
dll_client_transaction_queue);
if (wsi->vhost) {
lws_vhost_lock(wsi->vhost);
lws_start_foreach_dll_safe(struct lws_dll_lws *, d, d1,
wsi->dll_client_transaction_queue_head.next) {
struct lws *w = lws_container_of(d, struct lws,
dll_client_transaction_queue);
__lws_close_free_wsi(w, reason, "trans q leader closing");
} lws_end_foreach_dll_safe(d, d1);
__lws_close_free_wsi(w, reason, "trans q leader closing");
} lws_end_foreach_dll_safe(d, d1);
/*
* !!! If we are closing, but we have pending pipelined transaction
* results we already sent headers for, that's going to destroy sync
* for HTTP/1 and leave H2 stream with no live swsi.
*
* However this is normal if we are being closed because the transaction
* queue leader is closing.
*/
lws_dll_lws_remove(&wsi->dll_client_transaction_queue);
lws_vhost_unlock(wsi->vhost);
/*
* !!! If we are closing, but we have pending pipelined transaction
* results we already sent headers for, that's going to destroy sync
* for HTTP/1 and leave H2 stream with no live swsi.
*
* However this is normal if we are being closed because the transaction
* queue leader is closing.
*/
lws_dll_lws_remove(&wsi->dll_client_transaction_queue);
lws_vhost_unlock(wsi->vhost);
}
#endif
/* if we have children, close them first */

View file

@@ -328,24 +328,27 @@ __remove_wsi_socket_from_fds(struct lws *wsi)
__func__, wsi, wsi->desc.sockfd, wsi->position_in_fds_table,
pt->fds_count, pt->fds[pt->fds_count].fd);
/* have the last guy take up the now vacant slot */
pt->fds[m] = pt->fds[pt->fds_count - 1];
/* this decrements pt->fds_count */
lws_plat_delete_socket_from_fds(context, wsi, m);
v = (int) pt->fds[m].fd;
/* end guy's "position in fds table" is now the deletion guy's old one */
end_wsi = wsi_from_fd(context, v);
if (!end_wsi) {
lwsl_err("no wsi found for fd %d at pos %d, pt->fds_count=%d\n",
(int)pt->fds[m].fd, m, pt->fds_count);
assert(0);
} else
end_wsi->position_in_fds_table = m;
if (m != LWS_SOCK_INVALID) {
/* deletion guy's lws_lookup entry needs nuking */
delete_from_fd(context, wsi->desc.sockfd);
/* removed wsi has no position any more */
wsi->position_in_fds_table = -1;
/* have the last guy take up the now vacant slot */
pt->fds[m] = pt->fds[pt->fds_count - 1];
/* this decrements pt->fds_count */
lws_plat_delete_socket_from_fds(context, wsi, m);
v = (int) pt->fds[m].fd;
/* end guy's "position in fds table" is now the deletion guy's old one */
end_wsi = wsi_from_fd(context, v);
if (!end_wsi) {
lwsl_err("no wsi found for fd %d at pos %d, pt->fds_count=%d\n",
(int)pt->fds[m].fd, m, pt->fds_count);
assert(0);
} else
end_wsi->position_in_fds_table = m;
/* deletion guy's lws_lookup entry needs nuking */
delete_from_fd(context, wsi->desc.sockfd);
/* removed wsi has no position any more */
wsi->position_in_fds_table = -1;
}
/* remove also from external POLL support via protocol 0 */
if (lws_socket_is_valid(wsi->desc.sockfd) && wsi->vhost &&

View file

@@ -117,7 +117,7 @@ lws_read_h1(struct lws *wsi, unsigned char *buf, lws_filepos_t len)
case LRS_BODY:
http_postbody:
lwsl_notice("%s: http post body: remain %d\n", __func__,
lwsl_debug("%s: http post body: remain %d\n", __func__,
(int)wsi->http.rx_content_remain);
while (len && wsi->http.rx_content_remain) {
/* Copy as much as possible, up to the limit of:

View file

@@ -51,15 +51,16 @@ lws_client_wsi_effective(struct lws *wsi)
/*
* return self or the guy we are queued under
*
* REQUIRES VHOST LOCK HELD
*/
struct lws *
lws_client_wsi_master(struct lws *wsi)
static struct lws *
_lws_client_wsi_master(struct lws *wsi)
{
struct lws *wsi_eff = wsi;
struct lws_dll_lws *d;
lws_vhost_lock(wsi->vhost);
d = wsi->dll_client_transaction_queue.prev;
while (d) {
wsi_eff = lws_container_of(d, struct lws,
@@ -67,7 +68,6 @@ lws_client_wsi_master(struct lws *wsi)
d = d->prev;
}
lws_vhost_unlock(wsi->vhost);
return wsi_eff;
}
@@ -372,7 +372,7 @@ start_ws_handshake:
/* send our request to the server */
lws_latency_pre(context, wsi);
w = lws_client_wsi_master(wsi);
w = _lws_client_wsi_master(wsi);
lwsl_info("%s: HANDSHAKE2: %p: sending headers on %p (wsistate 0x%x 0x%x)\n",
__func__, wsi, w, wsi->wsistate, w->wsistate);

View file

@@ -162,7 +162,7 @@ file_upload_cb(void *data, const char *name, const char *filename,
return 1;
n = write((int)(long long)pss->post_fd, buf, len);
lwsl_notice("%s: write %d says %d\n", __func__, len, n);
lwsl_info("%s: write %d says %d\n", __func__, len, n);
}
if (state == LWS_UFS_CONTENT)
break;