mirror of
https://github.com/warmcat/libwebsockets.git
synced 2025-03-23 00:00:06 +01:00

Add generic http compression layer enabled at cmake with LWS_WITH_HTTP_STREAM_COMPRESSION.

This is wholly a feature of the HTTP role (used by the h1 and h2 roles) and doesn't exist outside that context. Currently it provides 'deflate' and 'br' compression methods for the server side only. 'br' also requires -DLWS_WITH_HTTP_BROTLI=1 at cmake plus the brotli library (available in your distro already) and its dev package. Other compression methods can be added cleanly using an ops struct.

The built-in file serving code will use this if the client indicates it can handle it, and the mimetype of the file either starts with "text/" (html and css etc) or is the mimetype of Javascript.

zlib allocates quite a bit while in use, around 256KiB per stream, so this is only useful on relatively strong servers with lots of memory. However, for use cases where you are serving a lot of css and js assets, it's a nice help.

The patch performs special treatment for http/1.1 pipelining: since the compression is performed on the fly, the compressed content-length is not known until the end. So for h1 only, chunked transfer-encoding is automatically added so pipelining can continue on the connection. For h2 the chunking is neither supported nor required, so it "just works".

User code can also request to add a compression transform before the reply headers are sent, using the new api

LWS_VISIBLE int
lws_http_compression_apply(struct lws *wsi, const char *name,
			   unsigned char **p, unsigned char *end,
			   char decomp);

... this allows transparent compression of dynamically generated HTTP. The requested compression (eg, "deflate") is only applied if the client headers indicated it was supported, otherwise it's a NOP. Name may be NULL, in which case the first compression method in the internal table at stream.c that the client lists as acceptable will be used.

NOTE: the compression translation, same as h2 support, relies on the user code using LWS_WRITE_HTTP and then LWS_WRITE_HTTP_FINAL on the last part written. The internal lws fileserving code already does this.
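As a rough illustrative sketch only (it is not part of this file or patch): a dynamic-HTTP protocol callback that requests the transform before finalizing its reply headers. The callback name, buffer size and body text are invented for the example; the header-building and write helpers are the usual lws APIs, and lws_http_compression_apply() is used as described above with name == NULL so the first method the client offered is picked.

#include <libwebsockets.h>

static int
callback_dynamic_http(struct lws *wsi, enum lws_callback_reasons reason,
                      void *user, void *in, size_t len)
{
        unsigned char buf[LWS_PRE + 2048], *start = &buf[LWS_PRE],
                      *p = start, *end = &buf[sizeof(buf) - 1];

        switch (reason) {
        case LWS_CALLBACK_HTTP:
                /* dynamically generated reply: content-length unknown */
                if (lws_add_http_common_headers(wsi, HTTP_STATUS_OK,
                                                "text/html",
                                                LWS_ILLEGAL_HTTP_CONTENT_LEN,
                                                &p, end))
                        return 1;

                /*
                 * request a compression transform before the headers are
                 * finalized; NULL name means "first method the client
                 * advertised", and it is a NOP if the client offered none
                 */
                lws_http_compression_apply(wsi, NULL, &p, end, 0);

                if (lws_finalize_write_http_header(wsi, start, &p, end))
                        return 1;

                lws_callback_on_writable(wsi);
                return 0;

        case LWS_CALLBACK_HTTP_WRITEABLE:
                p += lws_snprintf((char *)p, lws_ptr_diff(end, p),
                                  "<html><body>hello</body></html>");

                /*
                 * LWS_WRITE_HTTP_FINAL on the last part lets the compression
                 * (and h2) layer flush and close out the stream
                 */
                if (lws_write(wsi, start, lws_ptr_diff(p, start),
                              LWS_WRITE_HTTP_FINAL) != lws_ptr_diff(p, start))
                        return 1;

                if (lws_http_transaction_completed(wsi))
                        return -1;
                return 0;

        default:
                break;
        }

        return lws_callback_http_dummy(wsi, reason, user, in, len);
}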
368 lines
6.7 KiB
C
#include "core/private.h"
|
|
|
|
/*
|
|
* included from libwebsockets.c for OPTEE builds
|
|
*/
|
|
|
|
int lws_plat_apply_FD_CLOEXEC(int n)
|
|
{
|
|
return 0;
|
|
}
|
|
|
|
int
|
|
lws_plat_pipe_create(struct lws *wsi)
|
|
{
|
|
return 1;
|
|
}
|
|
|
|
int
|
|
lws_plat_pipe_signal(struct lws *wsi)
|
|
{
|
|
return 1;
|
|
}
|
|
|
|
void
|
|
lws_plat_pipe_close(struct lws *wsi)
|
|
{
|
|
}
|
|
|
|
void TEE_GenerateRandom(void *randomBuffer, uint32_t randomBufferLen);
|
|
|
|
unsigned long long time_in_microseconds(void)
|
|
{
|
|
return ((unsigned long long)time(NULL)) * 1000000;
|
|
}
|
|
#if 0
|
|
int
|
|
lws_get_random(struct lws_context *context, void *buf, int len)
|
|
{
|
|
TEE_GenerateRandom(buf, len);
|
|
|
|
return len;
|
|
}
|
|
#endif
|
|
LWS_VISIBLE int
|
|
lws_send_pipe_choked(struct lws *wsi)
|
|
{
|
|
struct lws *wsi_eff = wsi;
|
|
|
|
#if defined(LWS_WITH_HTTP2)
|
|
wsi_eff = lws_get_network_wsi(wsi);
|
|
#endif
|
|
|
|
/* the fact we checked implies we avoided back-to-back writes */
|
|
wsi_eff->could_have_pending = 0;
|
|
|
|
/* treat the fact we got a truncated send pending as if we're choked */
|
|
if (lws_has_buffered_out(wsi_eff)
|
|
#if defined(LWS_WITH_HTTP_STREAM_COMPRESSION)
|
|
|| wsi->http.comp_ctx.buflist_comp ||
|
|
wsi->http.comp_ctx.may_have_more
|
|
#endif
|
|
)
|
|
return 1;
|
|
|
|
#if 0
|
|
struct lws_pollfd fds;
|
|
|
|
/* treat the fact we got a truncated send pending as if we're choked */
|
|
if (lws_has_buffered_out(wsi))
|
|
return 1;
|
|
|
|
fds.fd = wsi->desc.sockfd;
|
|
fds.events = POLLOUT;
|
|
fds.revents = 0;
|
|
|
|
if (poll(&fds, 1, 0) != 1)
|
|
return 1;
|
|
|
|
if ((fds.revents & POLLOUT) == 0)
|
|
return 1;
|
|
#endif
|
|
/* okay to send another packet without blocking */
|
|
|
|
return 0;
|
|
}
|
|
|
|
int
|
|
lws_poll_listen_fd(struct lws_pollfd *fd)
|
|
{
|
|
// return poll(fd, 1, 0);
|
|
|
|
return 0;
|
|
}
|
|
|
|
#if 0
|
|
void lwsl_emit_syslog(int level, const char *line)
|
|
{
|
|
IMSG("%d: %s\n", level, line);
|
|
}
|
|
#endif
|
|
|
|
LWS_EXTERN int
|
|
_lws_plat_service_tsi(struct lws_context *context, int timeout_ms, int tsi)
|
|
{
|
|
struct lws_context_per_thread *pt;
|
|
int n = -1, m, c;
|
|
//char buf;
|
|
|
|
/* stay dead once we are dead */
|
|
|
|
if (!context || !context->vhost_list)
|
|
return 1;
|
|
|
|
pt = &context->pt[tsi];
|
|
|
|
if (timeout_ms < 0)
|
|
goto faked_service;
|
|
|
|
if (!pt->service_tid_detected) {
|
|
struct lws _lws;
|
|
|
|
memset(&_lws, 0, sizeof(_lws));
|
|
_lws.context = context;
|
|
|
|
pt->service_tid = context->vhost_list->protocols[0].callback(
|
|
&_lws, LWS_CALLBACK_GET_THREAD_ID, NULL, NULL, 0);
|
|
pt->service_tid_detected = 1;
|
|
}
|
|
|
|
/*
|
|
* is there anybody with pending stuff that needs service forcing?
|
|
*/
|
|
if (!lws_service_adjust_timeout(context, 1, tsi)) {
|
|
lwsl_notice("%s: doing forced service\n", __func__);
|
|
/* -1 timeout means just do forced service */
|
|
_lws_plat_service_tsi(context, -1, pt->tid);
|
|
/* still somebody left who wants forced service? */
|
|
if (!lws_service_adjust_timeout(context, 1, pt->tid))
|
|
/* yes... come back again quickly */
|
|
timeout_ms = 0;
|
|
}
|
|
|
|
n = poll(pt->fds, pt->fds_count, timeout_ms);
|
|
|
|
m = 0;
|
|
|
|
if (pt->context->tls_ops &&
|
|
pt->context->tls_ops->fake_POLLIN_for_buffered)
|
|
m = pt->context->tls_ops->fake_POLLIN_for_buffered(pt);
|
|
|
|
if (/*!pt->ws.rx_draining_ext_list && */!m && !n) { /* nothing to do */
|
|
lws_service_fd_tsi(context, NULL, tsi);
|
|
return 0;
|
|
}
|
|
|
|
faked_service:
|
|
m = lws_service_flag_pending(context, tsi);
|
|
if (m)
|
|
c = -1; /* unknown limit */
|
|
else
|
|
if (n < 0) {
|
|
if (LWS_ERRNO != LWS_EINTR)
|
|
return -1;
|
|
return 0;
|
|
} else
|
|
c = n;
|
|
|
|
/* any socket with events to service? */
|
|
for (n = 0; n < (int)pt->fds_count && c; n++) {
|
|
if (!pt->fds[n].revents)
|
|
continue;
|
|
|
|
c--;
|
|
#if 0
|
|
if (pt->fds[n].fd == pt->dummy_pipe_fds[0]) {
|
|
if (read(pt->fds[n].fd, &buf, 1) != 1)
|
|
lwsl_err("Cannot read from dummy pipe.");
|
|
continue;
|
|
}
|
|
#endif
|
|
m = lws_service_fd_tsi(context, &pt->fds[n], tsi);
|
|
if (m < 0)
|
|
return -1;
|
|
/* if something closed, retry this slot */
|
|
if (m)
|
|
n--;
|
|
}
|
|
|
|
return 0;
|
|
}
|
|
|
|
int
|
|
lws_plat_check_connection_error(struct lws *wsi)
|
|
{
|
|
return 0;
|
|
}
|
|
|
|
int
|
|
lws_plat_service(struct lws_context *context, int timeout_ms)
|
|
{
|
|
return _lws_plat_service_tsi(context, timeout_ms, 0);
|
|
}
|
|
|
|
int
|
|
lws_plat_set_socket_options(struct lws_vhost *vhost, int fd)
|
|
{
|
|
return 0;
|
|
}
|
|
|
|
void
|
|
lws_plat_drop_app_privileges(const struct lws_context_creation_info *info)
|
|
{
|
|
}
|
|
|
|
int
|
|
lws_plat_context_early_init(void)
|
|
{
|
|
return 0;
|
|
}
|
|
|
|
void
|
|
lws_plat_context_early_destroy(struct lws_context *context)
|
|
{
|
|
}
|
|
|
|
void
|
|
lws_plat_context_late_destroy(struct lws_context *context)
|
|
{
|
|
if (context->lws_lookup)
|
|
lws_free(context->lws_lookup);
|
|
}
|
|
|
|
/* cast a struct sockaddr_in6 * into addr for ipv6 */
|
|
|
|
int
|
|
lws_interface_to_sa(int ipv6, const char *ifname, struct sockaddr_in *addr,
|
|
size_t addrlen)
|
|
{
|
|
return -1;
|
|
}
|
|
|
|
void
|
|
lws_plat_insert_socket_into_fds(struct lws_context *context, struct lws *wsi)
|
|
{
|
|
struct lws_context_per_thread *pt = &context->pt[(int)wsi->tsi];
|
|
|
|
pt->fds[pt->fds_count++].revents = 0;
|
|
}
|
|
|
|
void
|
|
lws_plat_delete_socket_from_fds(struct lws_context *context,
|
|
struct lws *wsi, int m)
|
|
{
|
|
struct lws_context_per_thread *pt = &context->pt[(int)wsi->tsi];
|
|
|
|
pt->fds_count--;
|
|
}
|
|
|
|
void
|
|
lws_plat_service_periodic(struct lws_context *context)
|
|
{
|
|
}
|
|
|
|
int
|
|
lws_plat_change_pollfd(struct lws_context *context,
|
|
struct lws *wsi, struct lws_pollfd *pfd)
|
|
{
|
|
return 0;
|
|
}
|
|
|
|
const char *
|
|
lws_plat_inet_ntop(int af, const void *src, char *dst, int cnt)
|
|
{
|
|
//return inet_ntop(af, src, dst, cnt);
|
|
return "lws_plat_inet_ntop";
|
|
}
|
|
|
|
int
|
|
lws_plat_inet_pton(int af, const char *src, void *dst)
|
|
{
|
|
//return inet_pton(af, src, dst);
|
|
return 1;
|
|
}
|
|
|
|
lws_fop_fd_t
|
|
_lws_plat_file_open(const struct lws_plat_file_ops *fops,
|
|
const char *filename, const char *vpath, lws_fop_flags_t *flags)
|
|
{
|
|
return NULL;
|
|
}
|
|
|
|
int
|
|
_lws_plat_file_close(lws_fop_fd_t *fop_fd)
|
|
{
|
|
return 0;
|
|
}
|
|
|
|
lws_fileofs_t
|
|
_lws_plat_file_seek_cur(lws_fop_fd_t fop_fd, lws_fileofs_t offset)
|
|
{
|
|
return 0;
|
|
}
|
|
|
|
int
|
|
_lws_plat_file_read(lws_fop_fd_t fop_fd, lws_filepos_t *amount,
|
|
uint8_t *buf, lws_filepos_t len)
|
|
{
|
|
|
|
return 0;
|
|
}
|
|
|
|
int
|
|
_lws_plat_file_write(lws_fop_fd_t fop_fd, lws_filepos_t *amount,
|
|
uint8_t *buf, lws_filepos_t len)
|
|
{
|
|
|
|
return 0;
|
|
}
|
|
|
|
|
|
int
|
|
lws_plat_init(struct lws_context *context,
|
|
const struct lws_context_creation_info *info)
|
|
{
|
|
/* master context has the global fd lookup array */
|
|
context->lws_lookup = lws_zalloc(sizeof(struct lws *) *
|
|
context->max_fds, "lws_lookup");
|
|
if (context->lws_lookup == NULL) {
|
|
lwsl_err("OOM on lws_lookup array for %d connections\n",
|
|
context->max_fds);
|
|
return 1;
|
|
}
|
|
|
|
lwsl_notice(" mem: platform fd map: %5lu bytes\n",
|
|
(long)sizeof(struct lws *) * context->max_fds);
|
|
|
|
#ifdef LWS_WITH_PLUGINS
|
|
if (info->plugin_dirs)
|
|
lws_plat_plugins_init(context, info->plugin_dirs);
|
|
#endif
|
|
|
|
return 0;
|
|
}
|
|
|
|
int
|
|
lws_plat_write_cert(struct lws_vhost *vhost, int is_key, int fd, void *buf,
|
|
int len)
|
|
{
|
|
return 1;
|
|
}
|
|
|
|
int
|
|
lws_plat_write_file(const char *filename, void *buf, int len)
|
|
{
|
|
return 1;
|
|
}
|
|
|
|
int
|
|
lws_plat_read_file(const char *filename, void *buf, int len)
|
|
{
|
|
return -1;
|
|
}
|
|
|
|
int
|
|
lws_plat_recommended_rsa_bits(void)
|
|
{
|
|
return 4096;
|
|
}
|