/*
 * libwebsockets - small server side websockets and web server implementation
 *
 * Copyright (C) 2010 - 2019 Andy Green <andy@warmcat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "private-lib-core.h"

/*
 * notice this returns number of bytes consumed, or -1
 */
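/*
 * Partial sends are handled here: whatever could not be written to the
 * socket is stashed on wsi->buflist_out and spilled, ahead of fresh data,
 * next time the socket is writable.
 */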
int
lws_issue_raw(struct lws *wsi, unsigned char *buf, size_t len)
{
	struct lws_context *context = lws_get_context(wsi);
	size_t real_len = len;
	unsigned int n, m;

	/*
	 * If you're looking to dump data being sent down the tls tunnel, see
	 * lws_ssl_capable_write() in lib/tls/mbedtls/mbedtls-ssl.c or
	 * lib/tls/openssl/openssl-ssl.c.
	 *
	 * There's also a corresponding lws_ssl_capable_read() in those files
	 * where you can enable a dump of decrypted data as soon as it was
	 * read.
	 */

	/* just ignore sends after we cleared the truncation buffer */
	if (lwsi_state(wsi) == LRS_FLUSHING_BEFORE_CLOSE &&
	    !lws_has_buffered_out(wsi)
#if defined(LWS_WITH_HTTP_STREAM_COMPRESSION)
	    && !wsi->http.comp_ctx.may_have_more
#endif
	    )
		return (int)len;

	if (buf && lws_has_buffered_out(wsi)) {
		lwsl_wsi_info(wsi, "** prot: %s, incr buflist_out by %lu",
			      wsi->a.protocol->name, (unsigned long)len);

		/*
		 * already buflist ahead of this, add it on the tail of the
		 * buflist, then ignore it for now and act like we're flushing
		 * the buflist...
		 */

		if (lws_buflist_append_segment(&wsi->buflist_out, buf, len))
			return -1;

		buf = NULL;
		len = 0;
	}

	if (wsi->buflist_out) {
		/* we have to drain the earliest buflist_out stuff first */

		len = lws_buflist_next_segment_len(&wsi->buflist_out, &buf);
		real_len = len;

		lwsl_wsi_debug(wsi, "draining %d", (int)len);
	}

	if (!len || !buf)
		return 0;

	if (!wsi->mux_substream && !lws_socket_is_valid(wsi->desc.sockfd))
		lwsl_wsi_err(wsi, "invalid sock");

	/* limit sending */
	if (wsi->a.protocol->tx_packet_size)
		n = (unsigned int)wsi->a.protocol->tx_packet_size;
	else {
		n = (unsigned int)wsi->a.protocol->rx_buffer_size;
		if (!n)
			n = context->pt_serv_buf_size;
	}
	n += LWS_PRE + 4;
	if (n > len)
		n = (unsigned int)len;
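
	/*
	 * n is now the most we will pass to the socket this time; anything
	 * beyond that is handled as a partial send below
	 */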

	/* nope, send it on the socket directly */
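
	/*
	 * lws_fi() is the lws fault injection hook: if a "sendfail" fault is
	 * armed on this wsi, behave as though the send failed so the error
	 * path can be exercised under test
	 */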
	if (lws_fi(&wsi->fic, "sendfail"))
		m = (unsigned int)LWS_SSL_CAPABLE_ERROR;
	else
		m = (unsigned int)lws_ssl_capable_write(wsi, buf, n);

	lwsl_wsi_info(wsi, "ssl_capable_write (%d) says %d", n, m);

	/* something got written, it may have been truncated now */
	wsi->could_have_pending = 1;

	switch ((int)m) {
	case LWS_SSL_CAPABLE_ERROR:
		/* we're going to close, let close know sends aren't possible */
		wsi->socket_is_permanently_unusable = 1;
		return -1;
	case LWS_SSL_CAPABLE_MORE_SERVICE:
		/*
		 * nothing got sent, not fatal... treat it as a truncated send
		 * so the whole thing gets retried later
		 */
		m = 0;
		break;
	}
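
	/* treat any other negative result as "nothing was sent" */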
	if ((int)m < 0)
		m = 0;

	/*
	 * we were sending this from buflist_out?  Then not sending everything
	 * is a small matter of advancing ourselves only by the amount we did
	 * send in the buflist.
	 */
	if (lws_has_buffered_out(wsi)) {
		if (m) {
			lwsl_wsi_info(wsi, "partial adv %d (vs %ld)",
				      m, (long)real_len);
			lws_buflist_use_segment(&wsi->buflist_out, m);
		}

		if (!lws_has_buffered_out(wsi)) {
			lwsl_wsi_info(wsi, "buflist_out flushed");

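			/*
			 * the stashed partial(s) have fully drained: report
			 * the whole segment length as sent
			 */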
			m = (unsigned int)real_len;
			if (lwsi_state(wsi) == LRS_FLUSHING_BEFORE_CLOSE) {
				lwsl_wsi_info(wsi, "*signalling to close now");
				return -1; /* retry closing now */
			}

			if (wsi->close_when_buffered_out_drained) {
				wsi->close_when_buffered_out_drained = 0;
				return -1;
			}

#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
#if defined(LWS_WITH_SERVER)
			if (wsi->http.deferred_transaction_completed) {
				lwsl_wsi_notice(wsi, "partial completed, doing "
						     "deferred transaction completed");
				wsi->http.deferred_transaction_completed = 0;
				return lws_http_transaction_completed(wsi) ?
							-1 : (int)real_len;
			}
#endif
#endif
#if defined(LWS_ROLE_WS)
			/* since buflist_out flushed, we're not inside a frame any more */
			if (wsi->ws)
				wsi->ws->inside_frame = 0;
#endif
		}

		/* always callback on writeable */
		lws_callback_on_writable(wsi);

		return (int)m;
	}
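
	/*
	 * if the compression transform may still be holding buffered output,
	 * ask for another writable callback so it can be spilled
	 */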
#if defined(LWS_WITH_HTTP_STREAM_COMPRESSION)
	if (wsi->http.comp_ctx.may_have_more)
		lws_callback_on_writable(wsi);
#endif

	if (m == real_len)
		/* what we just sent went out cleanly */
		return (int)m;

	/*
	 * We were not able to send everything... and we were not sending from
	 * an existing buflist_out.  So we are starting a fresh buflist_out, by
	 * buffering the unsent remainder on it.
	 * (it will get first priority next time the socket is writable).
	 */
	lwsl_wsi_debug(wsi, "new partial sent %d from %lu total",
		       m, (unsigned long)real_len);

	if (lws_buflist_append_segment(&wsi->buflist_out, buf + m,
				       real_len - m) < 0)
		return -1;

#if defined(LWS_WITH_UDP)
	if (lws_wsi_is_udp(wsi))
		/* stash original destination for fulfilling UDP partials */
		wsi->udp->sa46_pending = wsi->udp->sa46;
#endif

	/* since something buffered, force it to get another chance to send */
	lws_callback_on_writable(wsi);

	return (int)real_len;
}

int
lws_write(struct lws *wsi, unsigned char *buf, size_t len,
	  enum lws_write_protocol wp)
{
	int m;

	if ((int)len < 0) {
		lwsl_wsi_err(wsi, "suspicious len int %d, ulong %lu",
			     (int)len, (unsigned long)len);
		return -1;
	}

#ifdef LWS_WITH_ACCESS_LOG
	wsi->http.access_log.sent += len;
#endif

	assert(wsi->role_ops);
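
	/*
	 * roles that do their own framing (eg, ws, h2) register a
	 * write_role_protocol op that wraps the payload; otherwise the
	 * buffer is issued to the socket as-is
	 */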
	if (!lws_rops_fidx(wsi->role_ops, LWS_ROPS_write_role_protocol))
		m = lws_issue_raw(wsi, buf, len);
	else
		m = lws_rops_func_fidx(wsi->role_ops, LWS_ROPS_write_role_protocol).
					write_role_protocol(wsi, buf, len, &wp);

#if defined(LWS_WITH_SYS_METRICS)
	if (wsi->a.vhost)
		lws_metric_event(wsi->a.vhost->mt_traffic_tx, (char)
				 (m < 0 ? METRES_NOGO : METRES_GO), len);
#endif

	return m;
}

int
lws_ssl_capable_read_no_ssl(struct lws *wsi, unsigned char *buf, size_t len)
{
	int n = 0, en;

	errno = 0;
#if defined(LWS_WITH_UDP)
	if (lws_wsi_is_udp(wsi)) {
		socklen_t slt = sizeof(wsi->udp->sa46);

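		/*
		 * recvfrom() fills wsi->udp->sa46 with the sender's address,
		 * which the write path later uses to reply to the same peer
		 */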
		n = (int)recvfrom(wsi->desc.sockfd, (char *)buf,
#if defined(WIN32)
				  (int)
#endif
				  len, 0,
				  sa46_sockaddr(&wsi->udp->sa46), &slt);
	} else
#endif
		n = (int)recv(wsi->desc.sockfd, (char *)buf,
#if defined(WIN32)
			      (int)
#endif
			      len, 0);
	en = LWS_ERRNO;

	if (n >= 0) {

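		/*
		 * a read of zero is treated as the peer having hung up,
		 * whatever the socket type
		 */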
		if (!n && wsi->unix_skt)
			goto do_err;

		/*
		 * See https://libwebsockets.org/
		 * pipermail/libwebsockets/2019-March/007857.html
		 */
		if (!n && !wsi->unix_skt)
			goto do_err;

#if defined(LWS_WITH_SYS_METRICS) && defined(LWS_WITH_SERVER)
		if (wsi->a.vhost)
			lws_metric_event(wsi->a.vhost->mt_traffic_rx,
					 METRES_GO /* rx */, (unsigned int)n);
#endif

		return n;
	}

	if (en == LWS_EAGAIN ||
	    en == LWS_EWOULDBLOCK ||
	    en == LWS_EINTR)
		return LWS_SSL_CAPABLE_MORE_SERVICE;

do_err:
#if defined(LWS_WITH_SYS_METRICS) && defined(LWS_WITH_SERVER)
	if (wsi->a.vhost)
		lws_metric_event(wsi->a.vhost->mt_traffic_rx, METRES_NOGO, 0u);
#endif

	lwsl_wsi_info(wsi, "error on reading from skt : %d, errno %d", n, en);

	return LWS_SSL_CAPABLE_ERROR;
}

int
lws_ssl_capable_write_no_ssl(struct lws *wsi, unsigned char *buf, size_t len)
{
	int n = 0;
#if defined(LWS_PLAT_OPTEE)
	ssize_t send(int sockfd, const void *buf, size_t len, int flags);
#endif

#if defined(LWS_WITH_UDP)
	if (lws_wsi_is_udp(wsi)) {

		if (lws_fi(&wsi->fic, "udp_tx_loss")) {
			/* pretend it was sent */
			n = (int)(ssize_t)len;
			goto post_send;
		}
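
		/*
		 * when spilling a buffered partial, send to the destination
		 * stashed in sa46_pending so the datagram still goes to the
		 * original peer
		 */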
		if (lws_has_buffered_out(wsi))
			n = (int)sendto(wsi->desc.sockfd, (const char *)buf,
#if defined(WIN32)
					(int)
#endif
					len, 0, sa46_sockaddr(&wsi->udp->sa46_pending),
					sa46_socklen(&wsi->udp->sa46_pending));
		else
			n = (int)sendto(wsi->desc.sockfd, (const char *)buf,
#if defined(WIN32)
					(int)
#endif
					len, 0, sa46_sockaddr(&wsi->udp->sa46),
					sa46_socklen(&wsi->udp->sa46));
	} else
#endif
	if (wsi->role_ops->file_handle)
		n = (int)write((int)(lws_intptr_t)wsi->desc.filefd, buf,
#if defined(WIN32)
			       (int)
#endif
			       len);
	else
		n = (int)send(wsi->desc.sockfd, (char *)buf,
#if defined(WIN32)
			      (int)
#endif
			      len, MSG_NOSIGNAL);
	// lwsl_info("%s: sent len %d result %d", __func__, len, n);

#if defined(LWS_WITH_UDP)
post_send:
#endif
	if (n >= 0)
		return n;

	if (LWS_ERRNO == LWS_EAGAIN ||
	    LWS_ERRNO == LWS_EWOULDBLOCK ||
	    LWS_ERRNO == LWS_EINTR) {
		if (LWS_ERRNO == LWS_EWOULDBLOCK) {
			lws_set_blocking_send(wsi);
		}

		return LWS_SSL_CAPABLE_MORE_SERVICE;
	}

	lwsl_wsi_debug(wsi, "ERROR writing len %d to skt fd %d err %d / errno %d",
		       (int)(ssize_t)len, wsi->desc.sockfd, n, LWS_ERRNO);

	return LWS_SSL_CAPABLE_ERROR;
}

int
lws_ssl_pending_no_ssl(struct lws *wsi)
{
	(void)wsi;
#if defined(LWS_PLAT_FREERTOS)
	return 100;
#else
	return 0;
#endif
}