1
0
Fork 0
mirror of https://github.com/warmcat/libwebsockets.git synced 2025-03-09 00:00:04 +01:00

lws_cache_ttl

This commit is contained in:
Andy Green 2021-05-26 09:13:03 +01:00
parent cb5b1f720b
commit b25079c4b4
20 changed files with 3048 additions and 4 deletions

View file

@ -279,6 +279,9 @@
"nologs": {
"cmake": "-DLWS_WITH_NO_LOGS=ON"
},
"cookiejar": {
"cmake": "-DLWS_WITH_CACHE_NSCOOKIEJAR=ON"
},
"smp": {
"cmake": "-DLWS_MAX_SMP=32 -DLWS_WITH_MINIMAL_EXAMPLES=1"
},

View file

@ -257,7 +257,7 @@ option(LWS_REPRODUCIBLE "Build libwebsockets reproducible. It removes the build
option(LWS_WITH_MINIMAL_EXAMPLES "Also build the normally standalone minimal examples, for QA" OFF)
option(LWS_WITH_LWSAC "lwsac Chunk Allocation api" ON)
option(LWS_WITH_CUSTOM_HEADERS "Store and allow querying custom HTTP headers (H1 only)" ON)
option(LWS_WITH_DISKCACHE "Hashed cache directory with lazy LRU deletion to size limit" OFF)
option(LWS_WITH_DISKCACHE "Hashed cache directory with lazy LRU deletion to size limit (unrelated to lws_cache_ttl)" OFF)
option(LWS_WITH_ASAN "Build with gcc runtime sanitizer options enabled (needs libasan)" OFF)
option(LWS_WITH_LEJP_CONF "With LEJP configuration parser as used by lwsws" OFF)
option(LWS_WITH_ZLIB "Include zlib support (required for extensions)" OFF)
@ -278,6 +278,7 @@ option(LWS_WITH_SUL_DEBUGGING "Enable zombie lws_sul checking on object deletion
option(LWS_WITH_PLUGINS_API "Build generic lws_plugins apis (see LWS_WITH_PLUGINS to also build protocol plugins)" OFF)
option(LWS_WITH_CONMON "Collect introspectable connection latency stats on individual client connections" ON)
option(LWS_WITHOUT_EVENTFD "Force using pipe instead of eventfd" OFF)
option(LWS_WITH_CACHE_NSCOOKIEJAR "Build file-backed lws-cache-ttl that uses netscape cookie jar format (linux-only)" OFF)
if (${CMAKE_SYSTEM_NAME} MATCHES "Linux")
option(LWS_WITH_NETLINK "Monitor Netlink for Routing Table changes" ON)

View file

@ -262,6 +262,9 @@ interpreted by your shell.
|context||`ctx_createfail_ss_pol1`|Fail context creation due to ss policy parse start failed (if policy enabled)|
|context||`ctx_createfail_ss_pol2`|Fail context creation due to ss policy parse failed (if policy enabled)|
|context||`ctx_createfail_ss_pol3`|Fail context creation due to ss policy set failed (if policy enabled)|
|context||`cache_createfail`|Fail `lws_cache` creation due to OOM|
|context||`cache_lookup_oom`|Fail `lws_cache` lookup due to OOM|
|vhost|`vh`|`vh_create_oom`|Fail vh creation on vh object alloc OOM|
|vhost|`vh`|`vh_create_oom`|Fail vh creation on vh object alloc OOM|
|vhost|`vh`|`vh_create_pcols_oom`|Fail vh creation at protocols alloc OOM|
|vhost|`vh`|`vh_create_access_log_open_fail`|Fail vh creation due to unable to open access log (LWS_WITH_ACCESS_LOG)|
@ -294,7 +297,6 @@ interpreted by your shell.
|ssproxy|`ss`|`ssproxy_onward_conn_fail`|Act as if proxy onward client connection failed immediately|
|ssproxy|`ss`|`ssproxy_dsh_c2p_pay_oom`|Cause proxy's DSH alloc for C->P payload to fail|
## Well-known namespace targets
Namespaces can be used to target these more precisely, for example even though

160
READMEs/README.lws_cache.md Normal file
View file

@ -0,0 +1,160 @@
# lws_cache: Flexible single and multilevel caching
lws_cache implements a single- or multi-level cache for generic payload items
that are **keyed by a unique string**.
![lws_cache overview](../doc-assets/lws_cache-1.png)
L1 cache is always stored on heap, but it may be hooked up to additional levels
of cache objects with different backing storage. The last level always contains
a complete set of cached items, earlier levels may be empty or contain a partial
set of objects.
User code can define its own subclassed lws_cache objects with custom storage
formats and media, while being able to take advantage of a suitably-sized L1
heap cache to minimize the cost of repeated access.
![lws_cache overview](../doc-assets/lws_cache-2.png)
You can find examples of how to create, use and destroy single and multilevel
caches in `minimal-examples/api-tests/api-test-lws_cache`
## Cache size restriction, LRU and TTL
The max heap footprint of its items and max number of items can be capped. LRU
tracking is performed so the least recently relevant items are evicted first.
It's also possible to limit the maximum size of any single payload.
Time To Live (TTL) tracking is also performed automatically, so cached items
auto-expire if a non-zero TTL is provided when the object is created. A user
callback can be defined to get called when an item is about to be removed from
a particular cache level, in case any housekeeping is needed.
## Atomicity
Items in L1 can be accessed in heap casually and reliably if the following is
borne in mind:
- Any return to the event loop may perform removal of cache items due to TTL
expiry
- Any operation that writes new items may evict items from non-last
cache levels which have limits to the footprint or item count to make room for
it, using LRU ordering.
In short process cache results before returning to the event loop or writing
or removing items in the cache.
## Cache creation
Caches are created using an info struct `struct lws_cache_creation_info`
that should be zeroed down. Most members are optional and can be left at zero,
a pointer to the lws_context and a short cache name are mandatory.
```
struct lws_cache_ttl_lru *
lws_cache_create(const struct lws_cache_creation_info *info);
```
How caches work is defined by an "ops struct" that the cache is bound to at
creation time. `lws_cache_ops_heap` ops struct is provided by lws, you can
define your own to implement your own specialized cache level. See
`./include/libwebsockets/lws-cache-ttl.h` for the definition.
## Cache destruction
Created cache levels should be destroyed when you are finished with them.
```
void
lws_cache_destroy(struct lws_cache_ttl_lru **cache);
```
For L1, in heap, this frees any allocations. For other levels, eg, with file
storage for the items, this would close the file and leave any entries as they
are.
## Writethrough
```
int
lws_cache_write_through(struct lws_cache_ttl_lru *cache,
const char *specific_key, const uint8_t *source,
size_t size, lws_usec_t expiry, void **ppay);
```
The combined caches are always accessed via the L1 cache, writing new items is
done at L1 and writes through to each cache layer immediately, so new items go
into the backing store without delay, but are available from heap for read.
If existing keys are rewritten, the previous item of the same key is deleted
from all levels of the cache before writing the new one.
## Removal
Removal also is performed at all cache levels at once.
```
int
lws_cache_item_remove(struct lws_cache_ttl_lru *cache, const char *wildcard_key);
```
internally earlier cache levels can evict cached items just at their level, but
this is triggered automatically and not by api.
A wildcard key is supported, removing all items matching, eg "myitem*".
## Get by key
```
int
lws_cache_item_get(struct lws_cache_ttl_lru *cache, const char *specific_key,
const void **pdata, size_t *psize);
```
Apis are provided to get the blob related to a specific key, if it exists at
any cache layer. Again this should use L1, it will bring a copy of the item
into L1 if one is not already there, so it can be accessed from heap.
## Lookup with wildcards
```
int
lws_cache_lookup(struct lws_cache_ttl_lru *cache, const char *wildcard_key,
const void **pdata, size_t *psize);
```
lws_cache also supports **lookup** queries that contain wildcards or otherwise match
on multiple keys according to cache-specific rules. These queries do not return
a single item, instead they return lists of keys that match, in a blob of its
own that is also cached in L1.
The user can walk the lookup results blob using a provided helper api
```
int
lws_cache_results_walk(lws_cache_results_t *walk_ctx);
```
After recovering each result key this way, the user code can use the _get api
to access the blob for each individually.
The lookup results themselves are cached in L1, any new key that matches the
wildcard lookup in any cached results, or any deletion of items with keys
matching the cached wildcard lookup invalidate the affected cached lookup
results so they will be regenerated next time.
In the typical case after a lookup, at least for a while the lookup results blob
and all items mentioned in the lookup results will already be in L1 and cheaply
accessible.
## Expunging
An api is also provided to "expunge" or completely empty all cache levels and
corresponding backing stores.
```
int
lws_cache_expunge(struct lws_cache_ttl_lru *cache);
```

View file

@ -175,6 +175,7 @@
#cmakedefine LWS_WITH_NETLINK
#cmakedefine LWS_WITH_NETWORK
#cmakedefine LWS_WITH_NO_LOGS
#cmakedefine LWS_WITH_CACHE_NSCOOKIEJAR
#cmakedefine LWS_WITH_CLIENT
#cmakedefine LWS_WITHOUT_EXTENSIONS
#cmakedefine LWS_WITH_SERVER

BIN
doc-assets/lws_cache-1.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 73 KiB

BIN
doc-assets/lws_cache-2.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 118 KiB

View file

@ -568,6 +568,7 @@ struct lws;
#include <libwebsockets/lws-dll2.h>
#include <libwebsockets/lws-fault-injection.h>
#include <libwebsockets/lws-timeout-timer.h>
#include <libwebsockets/lws-cache-ttl.h>
#if defined(LWS_WITH_SYS_SMD)
#include <libwebsockets/lws-smd.h>
#endif

View file

@ -0,0 +1,348 @@
/*
* libwebsockets - small server side websockets and web server implementation
*
* Copyright (C) 2010 - 2021 Andy Green <andy@warmcat.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
/** \defgroup lws_cache_ttl Cache supporting expiry
* ##Cache supporting expiry
*
* These apis let you quickly and reliably implement caches of named objects,
* that have a "destroy-by date" and cache limits that will be observed.
*
* You can instantiate as many caches as you need. The first one must be an
* L1 / heap cache type, it can have parents and grandparents of other types
 * which are accessible while writing / looking up and getting from the L1 cache.
* The outer "cache" layer may persistently store items to a backing store.
*
* Allocated object memory is entirely for the use of user code, up to the
* requested size.
*
* The key name for the listed objects may be any string chosen by the user,
* there is no special length limit as it is also allocated.
*
* Both expiry and LRU orderings are kept so it is easy to find out usage
 * ordering and when the next object will expire.
*
* Cached objects may be destroyed any time you go around the event loop, when
* you allocate new objects (to keep the whole cache under the specified limit),
* or when their expiry time arrives. So you shouldn't keep copies of pointers
* to cached objects after returning to the event loop.
*/
///@{
struct lws_cache_ttl_lru;
/**
* lws_cache_write_through() - add a new cache item object in all layers
*
* \param cache: the existing cache to allocate the object in
* \param specific_key: a key string that identifies the item in the cache
* \param source: optional payload for the cached item, NULL means caller will
* write the payload
* \param size: the size of the object to allocate
* \param expiry: the usec time that the object will autodestroy
* \param ppay: NULL, or a pointer to a void * to be set to the L1 payload
*
* If an item with the key already exists, it is destroyed before allocating a
* new one.
*
* Returns 0 if successful. The written entry will be scheduled to be auto-
* destroyed when \p expiry occurs.
*
* Adding or removing cache items may cause invalidation of cached queries.
*/
LWS_VISIBLE LWS_EXTERN int /* only valid until return to event loop */
lws_cache_write_through(struct lws_cache_ttl_lru *cache,
const char *specific_key, const uint8_t *source,
size_t size, lws_usec_t expiry, void **ppay);
typedef struct lws_cache_match {
lws_dll2_t list;
lws_usec_t expiry;
/* earliest expiry amongst results */
size_t payload_size;
/**< the payload is not attached here. This is a hint about what
* (*get)() will return for this tag name.
*/
size_t tag_size;
/* tag name + NUL is overcommitted */
} lws_cache_match_t;
/**
* lws_cache_heap_lookup() - get a list of matching items
*
* \param cache: the cache to search for the key
* \param wildcard_key: the item key string, may contain wildcards
* \param pdata: pointer to pointer to be set to the serialized result list
* \param psize: pointer to size_t to receive length of serialized result list
*
* This finds all unique items in the final cache that match search_key, which
* may contain wildcards. It does not return the payloads for matching items,
 * just a list of specific tags in the cache that match.
*
* If successful, results are provided in a serialized list format, in no
* particular order, each result has the following fields
*
* - BE32: payload size in bytes (payload itself is not included)
* - BE32: specific tag name length in bytes
* - chars: tag name with terminating NUL
*
* These serialized results are themselves cached in L1 cache (only) and the
* result pointers are set pointing into that. If the results are still in L1
* cache next time this api is called, the results will be returned directly
* from that without repeating the expensive lookup on the backup store. That
* is why the results are provided in serialized form.
*
* The cached results list expiry is set to the earliest expiry of any listed
* item. Additionally any cached results are invalidated on addition or
* deletion (update is done as addition + deletion) of any item that would
* match the results' original wildcard_key. For the typical case new items
* are rare compared to lookups, this is efficient.
*
* Lookup matching does not itself affect LRU or cache status of the result
 * items.  Typically user code will get the lookup results, and then perform
* get operations on each item in its desired order, that will bring the items
* to the head of the LRU list and occupy L1 cache.
*
* Returns 0 if proceeded alright, or nonzero if error. If there was an error,
* any partial results set has been deallocated cleanly before returning.
*/
LWS_VISIBLE LWS_EXTERN int
lws_cache_lookup(struct lws_cache_ttl_lru *cache, const char *wildcard_key,
const void **pdata, size_t *psize);
/**
* lws_cache_item_get() - bring a specific item into L1 and get payload info
*
* \param cache: the cache to search for the key
* \param specific_key: the key string of the item to get
* \param pdata: pointer to a void * to be set to the payload in L1 cache
* \param psize: pointer to a size_t to be set to the payload size
*
* If the cache still has an item matching the key string, it will be destroyed.
*
* Adding or removing cache items may cause invalidation of cached queries.
*
* Notice the cache payload is a blob of the given size. If you are storing
* strings, there is no NUL termination unless you stored them with it.
*
* Returns 0 if successful.
*/
LWS_VISIBLE LWS_EXTERN int
lws_cache_item_get(struct lws_cache_ttl_lru *cache, const char *specific_key,
const void **pdata, size_t *psize);
/**
* lws_cache_item_remove() - remove item from all cache levels
*
* \param cache: the cache to search for the key
* \param wildcard_key: the item key string
*
* Removes any copy of any item matching the \p wildcard_key from any cache
* level in one step.
*
* Adding or removing cache items may cause invalidation of cached queries
* that could refer to the removed item.
*/
LWS_VISIBLE LWS_EXTERN int
lws_cache_item_remove(struct lws_cache_ttl_lru *cache, const char *wildcard_key);
/**
* lws_cache_footprint() - query the amount of storage used by the cache layer
*
* \param cache: cache to query
*
* Returns number of payload bytes stored in cache currently
*/
LWS_VISIBLE LWS_EXTERN uint64_t
lws_cache_footprint(struct lws_cache_ttl_lru *cache);
/**
* lws_cache_debug_dump() - if built in debug mode dump cache contents to log
*
* \param cache: cache to dump
*
* If lws was built in debug mode, dump cache to log, otherwise a NOP.
*/
LWS_VISIBLE LWS_EXTERN void
lws_cache_debug_dump(struct lws_cache_ttl_lru *cache);
typedef struct lws_cache_results {
const uint8_t *ptr; /* set before using walk api */
size_t size; /* set before using walk api */
size_t payload_len;
size_t tag_len;
const uint8_t *tag;
} lws_cache_results_t;
/**
* lws_cache_results_walk() - parse next result
*
* \param walk_ctx: the context of the results blob to walk
*
* Caller must initialize \p walk_ctx.ptr and \p walk_ctx.size before calling.
* These are set to the results returned from a _lookup api call.
*
* The call returns 0 if the struct elements have been set to a result, or 1
 * if there were no more results in the blob to walk.
*
* If successful, after the call \p payload_len is set to the length of the
* payload related to this result key (the payload itself is not present),
* \p tag_len is set to the length of the result key name, and \p tag is set
* to the result tag name, with a terminating NUL.
*/
LWS_VISIBLE LWS_EXTERN int
lws_cache_results_walk(lws_cache_results_t *walk_ctx);
typedef void (*lws_cache_item_destroy_cb)(void *item, size_t size);
struct lws_cache_creation_info {
	struct lws_context *cx;
	/**< Mandatory: the lws_context */
	const char *name;
	/**< Mandatory: short cache name */
	lws_cache_item_destroy_cb cb;
	/**< NULL, or a callback that can hook cache item destroy */
	struct lws_cache_ttl_lru *parent;
	/**< NULL, or next cache level */
	const struct lws_cache_ops *ops;
	/**< NULL for default, heap-based ops, else custom cache storage and
	 * query implementation */

	union {
		struct {
			const char *filepath;
			/**< the filepath to store items in */
		} nscookiejar;
	} u;
	/**< these are extra configuration for specific cache types */

	size_t max_footprint;
	/**< 0, or the max heap allocation allowed before destroying
	 * lru items to keep it under the limit */
	size_t max_items;
	/**< 0, or the max number of items allowed in the cache before
	 * destroying lru items to keep it under the limit */
	size_t max_payload;
	/**< 0, or the max allowed payload size for one item */
	int tsi;
	/**< 0 unless using SMP, then tsi to bind sul to */
};
struct lws_cache_ops {
struct lws_cache_ttl_lru *
(*create)(const struct lws_cache_creation_info *info);
/**< create an instance of the cache type specified in info */
void
(*destroy)(struct lws_cache_ttl_lru **_cache);
/**< destroy the logical cache instance pointed to by *_cache, doesn't
* affect any NV backing storage */
int
(*expunge)(struct lws_cache_ttl_lru *cache);
/**< completely delete any backing storage related to the cache
* instance, eg, delete the backing file */
int
(*write)(struct lws_cache_ttl_lru *cache, const char *specific_key,
const uint8_t *source, size_t size, lws_usec_t expiry,
void **ppvoid);
/**< create an entry in the cache level according to the given info */
int
(*tag_match)(struct lws_cache_ttl_lru *cache, const char *wc,
const char *tag, char lookup_rules);
/**< Just tell us if tag would match wildcard, using whatever special
* rules the backing store might use for tag matching. 0 indicates
* it is a match on wildcard, nonzero means does not match.
*/
int
(*lookup)(struct lws_cache_ttl_lru *cache, const char *wildcard_key,
lws_dll2_owner_t *results_owner);
/**+ add keys for search_key matches not already listed in the results
* owner */
int
(*invalidate)(struct lws_cache_ttl_lru *cache, const char *wildcard_key);
/**< remove matching item(s) from cache level */
int
(*get)(struct lws_cache_ttl_lru *cache, const char *specific_key,
const void **pdata, size_t *psize);
/**< if it has the item, fills L1 with item. updates LRU, and returns
* pointer to payload in L1 */
void
(*debug_dump)(struct lws_cache_ttl_lru *cache);
/**< Helper to dump the whole cache contents to log, useful for debug */
};
/**
* lws_cache_create() - create an empty cache you can allocate items in
*
* \param info: a struct describing the cache to create
*
* Create an empty cache you can allocate items in. The cache will be kept
* below the max_footprint and max_items limits if they are nonzero, by
* destroying least-recently-used items until it remains below the limits.
*
* Items will auto-destroy when their expiry time is reached.
*
* When items are destroyed from the cache, if \p cb is non-NULL, it will be
* called back with the item pointer after it has been removed from the cache,
* but before it is deallocated and destroyed.
*
* context and tsi are used when scheduling expiry callbacks
*/
LWS_VISIBLE LWS_EXTERN struct lws_cache_ttl_lru *
lws_cache_create(const struct lws_cache_creation_info *info);
/**
* lws_cache_destroy() - destroy a previously created cache
*
* \param cache: pointer to the cache
*
* Everything in the cache is destroyed, then the cache itself is destroyed,
* and *cache set to NULL.
*/
LWS_VISIBLE LWS_EXTERN void
lws_cache_destroy(struct lws_cache_ttl_lru **cache);
/**
* lws_cache_expunge() - destroy all items in cache and parents
*
* \param cache: pointer to the cache
*
* Everything in the cache and parents is destroyed, leaving it empty.
* If the cache has a backing store, it is deleted.
*
* Returns 0 if no problems reported at any cache layer, else nonzero.
*/
LWS_VISIBLE LWS_EXTERN int
lws_cache_expunge(struct lws_cache_ttl_lru *cache);
LWS_VISIBLE extern const struct lws_cache_ops lws_cache_ops_heap,
lws_cache_ops_nscookiejar;
///@}

View file

@ -238,7 +238,6 @@ struct client_info_stash {
#define LWS_H2_FRAME_HEADER_LENGTH 9
lws_usec_t
__lws_sul_service_ripe(lws_dll2_owner_t *own, int num_own, lws_usec_t usnow);

View file

@ -300,10 +300,16 @@ lws_service_adjust_timeout(struct lws_context *context, int timeout_ms, int tsi)
pt = &context->pt[tsi];
if (pt->evlib_pt) {
lws_usec_t u = __lws_sul_service_ripe(pt->pt_sul_owner,
lws_usec_t u;
lws_pt_lock(pt, __func__); /* -------------- pt { */
u = __lws_sul_service_ripe(pt->pt_sul_owner,
LWS_COUNT_PT_SUL_OWNERS, lws_now_usecs());
if (u < (lws_usec_t)timeout_ms * (lws_usec_t)1000)
timeout_ms = (int)(u / 1000);
lws_pt_unlock(pt);
}
/*

View file

@ -36,12 +36,27 @@ list(APPEND SOURCES
misc/prng.c
misc/lws-ring.c)
if (LWS_WITH_NETWORK)
list(APPEND SOURCES
misc/cache-ttl/lws-cache-ttl.c
misc/cache-ttl/heap.c
)
if (LWS_WITH_CACHE_NSCOOKIEJAR)
list(APPEND SOURCES
misc/cache-ttl/file.c)
endif()
endif()
if (LWS_WITH_FTS)
list(APPEND SOURCES
misc/fts/trie.c
misc/fts/trie-fd.c)
endif()
# this is an older, standalone hashed disk cache
# implementation unrelated to lws-cache-ttl
if (LWS_WITH_DISKCACHE)
list(APPEND SOURCES
misc/diskcache.c)

942
lib/misc/cache-ttl/file.c Normal file
View file

@ -0,0 +1,942 @@
/*
* libwebsockets - small server side websockets and web server implementation
*
* Copyright (C) 2010 - 2021 Andy Green <andy@warmcat.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Implements a cache backing store compatible with netscape cookies.txt format
* There is one entry per "line", and fields are tab-delimited
*
* We need to know the format here, because while the unique cookie tag consists
* of "hostname|urlpath|cookiename", that does not appear like that in the file;
* we have to go parse the fields and synthesize the corresponding tag.
*
* We rely on all the fields except the cookie value fitting in a 256 byte
 * buffer, and allow eating multiple buffers to get huge cookie values.
*
* Because the cookie file is a device-wide asset, although lws will change it
* from the lws thread without conflict, there may be other processes that will
* change it by removal and regenerating the file asynchronously. For that
* reason, file handles are opened fresh each time we want to use the file, so
* we always get the latest version.
*
* When updating the file ourselves, we use a lockfile to ensure our process
* has exclusive access.
*
*
* Tag Matching rules
*
* There are three kinds of tag matching rules
*
 * 1) specific - tag strings must be the same
 * 2) wildcard - tags matched using optional wildcards
* 3) wildcard + lookup - wildcard, but path part matches using cookie scope rules
*
*/
#include <private-lib-core.h>
#include "private-lib-misc-cache-ttl.h"
typedef enum nsc_iterator_ret {
NIR_CONTINUE = 0,
NIR_FINISH_OK = 1,
NIR_FINISH_ERROR = -1
} nsc_iterator_ret_t;
typedef enum cbreason {
LCN_SOL = (1 << 0),
LCN_EOL = (1 << 1)
} cbreason_t;
typedef int (*nsc_cb_t)(lws_cache_nscookiejar_t *cache, void *opaque, int flags,
const char *buf, size_t size);
static void
expiry_cb(lws_sorted_usec_list_t *sul);
/*
 * Open the cookie jar backing file, first taking an exclusive advisory lock
 * on it via a separate "<filepath>.LCK" lockfile.
 *
 * The lockfile is created with O_CREAT | O_EXCL, so creation fails while any
 * other holder exists; we poll for it at 100ms intervals up to 50 times
 * (~5s) before giving up.  \p mode is OR'd into the open flags for the
 * backing file itself (eg, read-only vs read-write).  \p par is only used
 * to label debug logging with the caller's purpose.
 *
 * Returns the fd of the opened backing file, or LWS_INVALID_FILE on failure.
 * On success the caller owns both the fd and the lock, and must release both
 * via nsc_backing_close_unlock().  If the backing file itself could not be
 * opened, the lockfile we created is removed before returning.
 */
static int
nsc_backing_open_lock(lws_cache_nscookiejar_t *cache, int mode, const char *par)
{
	int sanity = 50; /* max retries before declaring the lock stuck */
	char lock[128];
	int fd_lock, fd;

	lwsl_debug("%s: %s\n", __func__, par);

	lws_snprintf(lock, sizeof(lock), "%s.LCK",
		     cache->cache.info.u.nscookiejar.filepath);

	do {
		/* O_EXCL: atomically fail if somebody else holds the lock */
		fd_lock = open(lock, LWS_O_CREAT | O_EXCL, 0600);
		if (fd_lock != LWS_INVALID_FILE) {
			/* the lockfile's existence is the lock; fd not needed */
			close(fd_lock);
			break;
		}

		if (!sanity--) {
			lwsl_warn("%s: unable to lock %s: errno %d\n", __func__,
				  lock, errno);
			return LWS_INVALID_FILE;
		}

		/* wait 100ms and retry */
		usleep(100000);
	} while (1);

	/* we hold the lock... open (or create) the actual cookie file */
	fd = open(cache->cache.info.u.nscookiejar.filepath,
		  LWS_O_CREAT | mode, 0600);

	if (fd == LWS_INVALID_FILE) {
		lwsl_warn("%s: unable to open or create %s\n", __func__,
			  cache->cache.info.u.nscookiejar.filepath);
		/* don't leave the lock held on failure */
		unlink(lock);
	}

	return fd;
}
/*
 * Release the backing file fd and the advisory lockfile taken by
 * nsc_backing_open_lock().
 */
static void
nsc_backing_close_unlock(lws_cache_nscookiejar_t *cache, int fd)
{
	char lockpath[128];

	lwsl_debug("%s\n", __func__);

	/* the fd and the lockfile are independent; drop the fd first */
	close(fd);

	lws_snprintf(lockpath, sizeof(lockpath), "%s.LCK",
		     cache->cache.info.u.nscookiejar.filepath);

	/* removing the lockfile is what releases the lock */
	unlink(lockpath);
}
/*
* We're going to call the callback with chunks of the file with flags
* indicating we're giving it the start of a line and / or giving it the end
* of a line.
*
* It's like this because the cookie value may be huge (and to a lesser extent
* the path may also be big).
*
* If it's the start of a line (flags on the cb has LCN_SOL), then the buffer
* contains up to the first 256 chars of the line, it's enough to match with.
*
* We cannot hold the file open inbetweentimes, since other processes may
* regenerate it, so we need to bind to a new inode. We open it with an
* exclusive flock() so other processes can't replace conflicting changes
* while we also write changes, without having to wait and see our changes.
*/
/*
 * Walk the whole backing file from the start, presenting its contents to
 * \p cb in chunks of up to sizeof(temp) (256) bytes.
 *
 * Flags passed to the cb carry LCN_SOL when the chunk begins a line and
 * LCN_EOL when it ends one; a line short enough to fit in the buffer gets
 * both at once.  Lines whose first byte is '#' are treated as comments and
 * are not passed to the cb at all.
 *
 * Returns 0 on success, or the first nonzero return from \p cb, which
 * aborts the iteration (eg, on OOM in the cb).
 */
static int
nscookiejar_iterate(lws_cache_nscookiejar_t *cache, int fd,
		    nsc_cb_t cb, void *opaque)
{
	/*
	 * m: parse position within temp, n: valid bytes in temp,
	 * r: flags to OR into the next cb (starts at line start),
	 * ignore: we are inside a comment line that overflowed the buffer
	 */
	int m = 0, n = 0, e, r = LCN_SOL, ignore = 0, ret = 0;
	char temp[256], eof = 0;

	/* always iterate from the start of the (freshly-opened) file */
	lseek(fd, 0, SEEK_SET);

	do { /* for as many buffers in the file */
		int n1;

		lwsl_debug("%s: n %d, m %d\n", __func__, n, m);

		/* top up temp after whatever is left from last time */
		n1 = (int)read(fd, temp + m, sizeof(temp) - (size_t)m);

		lwsl_debug("%s: n1 %d\n", __func__, n1);

		if (n1 <= 0) {
			eof = 1;
			if (m == n)
				/* nothing buffered either: we are done */
				continue;
		} else
			n += n1;

		/* emit every complete line currently held in temp */
		while (m < n) {
			m++;
			if (temp[m - 1] != '\n')
				continue;

			/* ie, we hit EOL */

			if (temp[0] == '#')
				/* lines starting with # are comments */
				e = 0;
			else
				/* pass the line without its \n */
				e = cb(cache, opaque, r | LCN_EOL, temp,
				       (size_t)m - 1);
			r = LCN_SOL;
			ignore = 0;

			/*
			 * Move back remainder and prefill the gap that opened
			 * up: we want to pass enough in the start chunk so the
			 * cb can classify it even if it can't get all the
			 * value part in one go
			 */
			memmove(temp, temp + m, (size_t)(n - m));
			n -= m;
			m = 0;

			if (e) {
				ret = e;
				goto bail;
			}
		}

		if (m) {
			/* we ran out of buffer */
			if (ignore || (r == LCN_SOL && n && temp[0] == '#')) {
				/* mid-comment: skip until the next EOL */
				e = 0;
				ignore = 1;
			} else {
				/* partial line chunk; EOL only if file ends here */
				e = cb(cache, opaque,
				       r | (n == m && eof ? LCN_EOL : 0),
				       temp, (size_t)m);
				m = 0;
				n = 0;
			}

			if (e) {
				/*
				 * We have to call off the whole thing if any
				 * step, eg, OOMs
				 */
				ret = e;
				goto bail;
			}
			/* subsequent chunks of this line are not line starts */
			r = 0;
		}

	} while (!eof || n != m);

	ret = 0;
bail:

	return ret;
}
/*
* lookup() just handles wildcard resolution, it doesn't deal with moving the
* hits to L1. That has to be done individually by non-wildcard names.
*/
enum {
NSC_COL_HOST = 0, /* wc idx 0 */
NSC_COL_PATH = 2, /* wc idx 1 */
NSC_COL_EXPIRY = 4,
NSC_COL_NAME = 5, /* wc idx 2 */
NSC_COL_COUNT = 6
};
/*
* This performs the specialized wildcard that knows about cookie path match
* rules.
*
* To defeat the lookup path matching, lie to it about idx being NSC_COL_PATH
*/
/*
 * Match one tag column against one wildcard column.
 *
 * \p wc / \p wc_len: the wildcard fragment (NOT NUL-terminated at wc_len,
 *	it usually points into a larger |-separated string)
 * \p col / \p col_len: the candidate tag fragment (same caveat)
 * \p idx: which NSC_COL_* this fragment is
 *
 * For every column except the path, this is a plain bounded wildcard
 * comparison.  Cookie paths get the special scope rules: a lookup on
 * /my/path must match cookies scoped to /, /my and /my/path, but not /m
 * or /my/pa; a lookup on / must not match /my/path.
 *
 * Returns 0 on match, nonzero on no match.
 */
static int
nsc_match(const char *wc, size_t wc_len, const char *col, size_t col_len,
	  int idx)
{
	size_t n = 0;

	if (idx != NSC_COL_PATH)
		return lws_strcmp_wildcard(wc, wc_len, col, col_len);

	/*
	 * Cookie path match is special, if we lookup on a path like /my/path,
	 * we must match on cookie paths for every dir level including /, so
	 * match on /, /my, and /my/path. But we must not match on /m or
	 * /my/pa etc. If we lookup on /, we must not match /my/path
	 *
	 * Let's go through wc checking at / and for every complete subpath if
	 * it is an explicit match
	 */

	/*
	 * Exact hit: both fragments are length-bounded views into larger
	 * strings, so we must compare by the given lengths... strcmp() would
	 * run on past the fragment ends to the NUL of the whole string.
	 */
	if (col_len == wc_len && !strncmp(col, wc, col_len))
		return 0; /* exact hit */

	while (n <= wc_len) {
		if (n == wc_len || wc[n] == '/') {
			if (n && col_len <= n && !strncmp(wc, col, n))
				return 0; /* hit */

			if (n != wc_len && col_len <= n + 1 &&
			    !strncmp(wc, col, n + 1)) /* check for trailing / */
				return 0; /* hit */
		}
		n++;
	}

	return 1; /* fail */
}
static const uint8_t nsc_cols[] = { NSC_COL_HOST, NSC_COL_PATH, NSC_COL_NAME };
static int
lws_cache_nscookiejar_tag_match(struct lws_cache_ttl_lru *cache,
const char *wc, const char *tag, char lookup)
{
const char *wc_end = wc + strlen(wc), *tag_end = tag + strlen(tag),
*start_wc, *start_tag;
int n = 0;
lwsl_cache("%s: '%s' vs '%s'\n", __func__, wc, tag);
/*
* Given a well-formed host|path|name tag and a wildcard term,
* make the determination if the tag matches the wildcard or not,
* using lookup rules that apply at this cache level.
*/
while (n < 3) {
start_wc = wc;
while (wc < wc_end && *wc != LWSCTAG_SEP)
wc++;
start_tag = tag;
while (tag < tag_end && *tag != LWSCTAG_SEP)
tag++;
lwsl_cache("%s: '%.*s' vs '%.*s'\n", __func__,
lws_ptr_diff(wc, start_wc), start_wc,
lws_ptr_diff(tag, start_tag), start_tag);
if (nsc_match(start_wc, lws_ptr_diff_size_t(wc, start_wc),
start_tag, lws_ptr_diff_size_t(tag, start_tag),
lookup ? nsc_cols[n] : NSC_COL_HOST)) {
lwsl_cache("%s: fail\n", __func__);
return 1;
}
if (wc < wc_end)
wc++;
if (tag < tag_end)
tag++;
n++;
}
lwsl_cache("%s: hit\n", __func__);
return 0; /* match */
}
/*
* Converts the start of a cookie file line into a tag
*/
/*
 * Converts the start of a cookie file line into a host|path|name tag.
 *
 * \p buf / \p size describe the leading chunk of the line (enough to
 * cover the leading columns, not necessarily the whole line); \p tag /
 * \p max_tag receive the composed tag; \p pexpiry, if non-NULL, receives
 * the expiry column converted to us.
 *
 * Returns 0 on success, 1 if the line cannot be parsed into a tag.
 */
static int
nsc_line_to_tag(const char *buf, size_t size, char *tag, size_t max_tag,
		lws_usec_t *pexpiry)
{
	int n, idx = 0, tl = 0;
	lws_usec_t expiry = 0;
	size_t bn = 0;
	char col[64];

	if (size < 3)
		return 1;

	while (bn < size && idx <= NSC_COL_NAME) {

		/* copy the current tab-delimited column, bounded */
		n = 0;
		while (bn < size && n < (int)sizeof(col) - 1 &&
		       buf[bn] != '\t')
			col[n++] = buf[bn++];
		col[n] = '\0';

		/*
		 * Consume the delimiter... the bn < size check is needed
		 * since bn may already be at size here and buf is a
		 * length-delimited chunk, not NUL-terminated: buf[size]
		 * would be an out-of-bounds read
		 */
		if (bn < size && buf[bn] == '\t')
			bn++;

		switch (idx) {
		case NSC_COL_EXPIRY:
			/* expiry column is integer seconds since epoch */
			expiry = (lws_usec_t)((unsigned long long)atoll(col) *
						(lws_usec_t)LWS_US_PER_SEC);
			break;

		case NSC_COL_HOST:
		case NSC_COL_PATH:
		case NSC_COL_NAME:
			/*
			 * As we match the pieces of the wildcard,
			 * compose the matches into a specific tag
			 */
			if (tl + n + 2 > (int)max_tag)
				return 1;
			if (tl)
				tag[tl++] = LWSCTAG_SEP;
			memcpy(tag + tl, col, (size_t)n);
			tl += n;
			tag[tl] = '\0';
			break;

		default:
			break;
		}

		idx++;
	}

	if (pexpiry)
		*pexpiry = expiry;

	lwsl_info("%s: %.*s: tag '%s'\n", __func__, (int)size, buf, tag);

	return 0;
}
/* per-lookup state passed to nsc_lookup_cb() via the iterate opaque arg */
struct nsc_lookup_ctx {
	const char *wildcard_key;	/* host|path|name wildcard term */
	lws_dll2_owner_t *results_owner; /* collects lws_cache_match_t hits */
	lws_cache_match_t *match; /* current match if any */
	size_t wklen;			/* strlen(wildcard_key) */
};
/*
 * Per-chunk iterate callback for lookup: synthesize a tag from each line
 * start, and collect an lws_cache_match_t for every line whose tag
 * matches the wildcard.  Continuation chunks of a matched line just grow
 * the current match's payload_size accounting.
 */
static int
nsc_lookup_cb(lws_cache_nscookiejar_t *cache, void *opaque, int flags,
	      const char *buf, size_t size)
{
	struct nsc_lookup_ctx *ctx = (struct nsc_lookup_ctx *)opaque;
	lws_usec_t expiry;
	char tag[200];
	int tl;

	if (!(flags & LCN_SOL)) {
		/* mid-line continuation: extend current match, if any */
		if (ctx->match)
			ctx->match->payload_size += size;

		return NIR_CONTINUE;
	}

	/*
	 * There should be enough in buf to match or reject it... let's
	 * synthesize a tag from the text "line" and then check the tags for
	 * a match
	 */

	ctx->match = NULL; /* new SOL means stop tracking payload len */

	if (nsc_line_to_tag(buf, size, tag, sizeof(tag), &expiry))
		/* unparseable lines (eg, the magic header) are skipped */
		return NIR_CONTINUE;

	if (lws_cache_nscookiejar_tag_match(&cache->cache,
					    ctx->wildcard_key, tag, 1))
		return NIR_CONTINUE;

	tl = (int)strlen(tag);

	/*
	 * ... it looks like a match then... create new match
	 * object with the specific tag, and add it to the owner list
	 */

	ctx->match = lws_fi(&cache->cache.info.cx->fic, "cache_lookup_oom") ? NULL :
			lws_malloc(sizeof(*ctx->match) + (unsigned int)tl + 1u,
				   __func__);
	if (!ctx->match)
		/* caller of lookup will clean results list on fail */
		return NIR_FINISH_ERROR;

	ctx->match->payload_size = size;
	ctx->match->tag_size = (size_t)tl;
	ctx->match->expiry = expiry;

	memset(&ctx->match->list, 0, sizeof(ctx->match->list));
	/* the tag string lives directly after the match object */
	memcpy(&ctx->match[1], tag, (size_t)tl + 1u);
	lws_dll2_add_tail(&ctx->match->list, ctx->results_owner);

	return NIR_CONTINUE;
}
/*
 * Backing-store lookup: iterate the locked cookie file, collecting an
 * lws_cache_match_t for every line whose tag matches wildcard_key.
 *
 * Returns 1 on fail (no backing file, or cb error such as OOM).
 */
static int
lws_cache_nscookiejar_lookup(struct lws_cache_ttl_lru *_c,
			     const char *wildcard_key,
			     lws_dll2_owner_t *results_owner)
{
	lws_cache_nscookiejar_t *cache = (lws_cache_nscookiejar_t *)_c;
	struct nsc_lookup_ctx ctx;
	int ret, fd;

	fd = nsc_backing_open_lock(cache, LWS_O_RDONLY, __func__);
	if (fd == LWS_INVALID_FILE)
		return 1;

	ctx.wildcard_key = wildcard_key;
	ctx.results_owner = results_owner;
	ctx.wklen = strlen(wildcard_key);
	/*
	 * Start with no match tracked: nsc_lookup_cb() otherwise reads an
	 * indeterminate pointer if the first chunk is not flagged LCN_SOL
	 */
	ctx.match = NULL;

	ret = nscookiejar_iterate(cache, fd, nsc_lookup_cb, &ctx);

	/*
	 * The cb can fail, eg, with OOM, making the whole lookup
	 * invalid and returning fail.  Caller will clean
	 * results_owner on fail.
	 */

	nsc_backing_close_unlock(cache, fd);

	return ret == NIR_FINISH_ERROR;
}
/*
* It's pretty horrible having to implement add or remove individual items by
* file regeneration, but if we don't want to keep it all in heap, and we want
* this cookie jar format, that is what we are into.
*
* Allow to optionally add a "line", optionally wildcard delete tags, and always
* delete expired entries.
*
* Although we can rely on the lws thread to be doing this, multiple processes
* may be using the cookie jar and can tread on each other. So we use flock()
* (linux only) to get exclusive access while we are processing this.
*
* We leave the existing file alone and generate a new one alongside it, with a
* fixed name.tmp format so it can't leak, if that went OK then we unlink the
* old and rename the new.
*/
/* per-regen state passed to nsc_regen_cb() via the iterate opaque arg */
struct nsc_regen_ctx {
	const char *wildcard_key_delete; /* drop lines matching this, may be NULL */
	const void *add_data;		/* line being added, may be NULL */
	lws_usec_t curr;		/* "now", lines expiring before are dropped */
	size_t add_size;		/* length of add_data */
	int fdt;			/* fd of the .tmp file being written */
	char drop;			/* nonzero while skipping current line */
};
/* only used by nsc_regen() */
/*
 * Per-chunk iterate callback for regen: copy each surviving line of the
 * old cookie file into the temp file, dropping unparseable lines,
 * already-expired lines, and lines matching the delete wildcard.  Also
 * tracks the earliest expiry among survivors.
 */
static int
nsc_regen_cb(lws_cache_nscookiejar_t *cache, void *opaque, int flags,
	     const char *buf, size_t size)
{
	struct nsc_regen_ctx *ctx = (struct nsc_regen_ctx *)opaque;
	char tag[256];
	lws_usec_t expiry;

	if (flags & LCN_SOL) {
		/* new line: decide afresh whether to keep or drop it */
		ctx->drop = 0;

		if (nsc_line_to_tag(buf, size, tag, sizeof(tag), &expiry))
			/* filter it out if it is unparseable */
			goto drop;

		/* routinely track the earliest expiry */

		if (!cache->earliest_expiry ||
		    (expiry && cache->earliest_expiry > expiry))
			cache->earliest_expiry = expiry;

		if (expiry < ctx->curr)
			/* routinely strip anything beyond its expiry */
			goto drop;

		if (ctx->wildcard_key_delete)
			lwsl_cache("%s: %s vs %s\n", __func__,
				   tag, ctx->wildcard_key_delete);

		/* strip lines matching the delete wildcard, if any given */
		if (ctx->wildcard_key_delete &&
		    !lws_cache_nscookiejar_tag_match(&cache->cache,
						     ctx->wildcard_key_delete,
						     tag, 0)) {
			lwsl_cache("%s: %s matches wc delete %s\n", __func__,
				   tag, ctx->wildcard_key_delete);
			goto drop;
		}
	}

	/* continuation chunks of a dropped line are dropped too */
	if (ctx->drop)
		return 0;

	/*
	 * NOTE(review): footprint is counted before the write below is
	 * known to have succeeded... on write fail the whole regen fails
	 * and the file is left as it was, so the count is only transiently
	 * wrong -- confirm this is acceptable
	 */
	cache->cache.current_footprint += (uint64_t)size;

	if ((size_t)write(ctx->fdt, buf, size) != size)
		return NIR_FINISH_ERROR;

	if (flags & LCN_EOL)
		if ((size_t)write(ctx->fdt, "\n", 1) != 1)
			return NIR_FINISH_ERROR;

	return 0;

drop:
	ctx->drop = 1;

	return NIR_CONTINUE;
}
/*
 * Regenerate the cookie jar: copy the existing file into a sibling .tmp
 * file via nsc_regen_cb() (optionally prepending a new line \p pay and
 * dropping lines matching \p wc_delete plus anything expired), then
 * atomically replace the original via rename().
 *
 * Returns 0 on success, 1 on fail (original file left untouched).
 */
static int
nsc_regen(lws_cache_nscookiejar_t *cache, const char *wc_delete,
	  const void *pay, size_t pay_size)
{
	struct nsc_regen_ctx ctx;
	char filepath[128];
	int fd, ret = 1;

	fd = nsc_backing_open_lock(cache, LWS_O_RDONLY, __func__);
	if (fd == LWS_INVALID_FILE)
		return 1;

	lws_snprintf(filepath, sizeof(filepath), "%s.tmp",
		     cache->cache.info.u.nscookiejar.filepath);
	unlink(filepath);

	if (lws_fi(&cache->cache.info.cx->fic, "cache_regen_temp_open"))
		goto bail;

	ctx.fdt = open(filepath, LWS_O_CREAT | LWS_O_WRONLY, 0600);
	if (ctx.fdt == LWS_INVALID_FILE)
		goto bail;

	/* magic header */

	if (lws_fi(&cache->cache.info.cx->fic, "cache_regen_temp_write") ||
	    /* other consumers insist to see this at start of cookie jar */
	    write(ctx.fdt, "# Netscape HTTP Cookie File\n", 28) != 28)
		goto bail1;

	/* if we are adding something, put it first */

	if (pay && (size_t)write(ctx.fdt, pay, pay_size) != pay_size)
		goto bail1;
	if (pay && (size_t)write(ctx.fdt, "\n", 1) != 1)
		goto bail1;

	cache->cache.current_footprint = 0;

	ctx.wildcard_key_delete = wc_delete;
	ctx.add_data = pay;
	ctx.add_size = pay_size;
	ctx.curr = lws_now_usecs();
	ctx.drop = 0;

	cache->earliest_expiry = 0;

	if (lws_fi(&cache->cache.info.cx->fic, "cache_regen_iter_fail") ||
	    nscookiejar_iterate(cache, fd, nsc_regen_cb, &ctx))
		goto bail1;

	close(ctx.fdt);

	unlink(cache->cache.info.u.nscookiejar.filepath);
	rename(filepath, cache->cache.info.u.nscookiejar.filepath);

	if (cache->earliest_expiry)
		lws_cache_schedule(&cache->cache, expiry_cb,
				   cache->earliest_expiry);

	ret = 0;
	/*
	 * Fix: we already closed ctx.fdt and renamed the temp file away...
	 * skip bail1 so we do not close() the fd a second time (a double
	 * close can clobber an fd another thread has since been handed).
	 * The unlink() at bail harmlessly no-ops on the renamed path.
	 */
	goto bail;

bail1:
	close(ctx.fdt);
bail:
	unlink(filepath);
	nsc_backing_close_unlock(cache, fd);

	return ret;
}
/*
 * Scheduled when the soonest-expiring cookie becomes due: rewrite the
 * jar with no additions or deletions, which drops expired lines and
 * recomputes the earliest remaining expiry, then re-arm for that.
 */
static void
expiry_cb(lws_sorted_usec_list_t *sul)
{
	lws_cache_nscookiejar_t *cache = lws_container_of(sul,
					lws_cache_nscookiejar_t, cache.sul);

	if (nsc_regen(cache, NULL, NULL, 0))
		return;

	if (!cache->earliest_expiry)
		/* nothing left that can expire */
		return;

	lws_cache_schedule(&cache->cache, expiry_cb, cache->earliest_expiry);
}
/* specific_key and expiry are ignored, since it must be encoded in payload */
/* specific_key and expiry are ignored, since it must be encoded in payload */
static int
lws_cache_nscookiejar_write(struct lws_cache_ttl_lru *_c,
			    const char *specific_key, const uint8_t *source,
			    size_t size, lws_usec_t expiry, void **ppvoid)
{
	lws_cache_nscookiejar_t *cache = (lws_cache_nscookiejar_t *)_c;
	char tag[128];

	lwsl_cache("%s: %s: len %d\n", __func__, _c->info.name, (int)size);

	assert(source);

	/* the new line's own tag doubles as the delete wildcard, so any
	 * older line for the same cookie is replaced in the same pass */
	if (nsc_line_to_tag((const char *)source, size, tag, sizeof(tag), NULL))
		return 1;

	if (ppvoid)
		*ppvoid = NULL;

	if (!nsc_regen(cache, tag, source, size))
		return 0;

	lwsl_err("%s: regen failed\n", __func__);

	return 1;
}
/* per-get state passed to nsc_get_cb() via the iterate opaque arg */
struct nsc_get_ctx {
	struct lws_buflist *buflist;	/* collects multi-chunk payloads */
	const char *specific_key;	/* exact tag we are looking for */
	const void **pdata;		/* result: payload location in L1 */
	size_t *psize;			/* result: payload size */
	lws_cache_ttl_lru_t *l1;	/* innermost cache level to fill */
	lws_usec_t expiry;		/* expiry parsed from the found line */
};
/*
* We're looking for a specific key, if found, we want to make an entry for it
* in L1 and return information about that
*/
/*
 * We're looking for a specific key, if found, we want to make an entry for it
 * in L1 and return information about that
 */
static int
nsc_get_cb(lws_cache_nscookiejar_t *cache, void *opaque, int flags,
	   const char *buf, size_t size)
{
	struct nsc_get_ctx *ctx = (struct nsc_get_ctx *)opaque;
	char tag[200];
	uint8_t *q;

	if (ctx->buflist)
		/* already matched, still collecting continuation chunks */
		goto collect;

	if (!(flags & LCN_SOL))
		return NIR_CONTINUE;

	if (nsc_line_to_tag(buf, size, tag, sizeof(tag), &ctx->expiry)) {
		lwsl_err("%s: can't get tag\n", __func__);
		return NIR_CONTINUE;
	}

	lwsl_cache("%s: %s %s\n", __func__, ctx->specific_key, tag);

	if (strcmp(ctx->specific_key, tag)) {
		lwsl_cache("%s: no match\n", __func__);
		return NIR_CONTINUE;
	}

	/* it's a match */

	lwsl_cache("%s: IS match\n", __func__);

	if (!(flags & LCN_EOL))
		goto collect;

	/* it all fit in the buffer, let's create it in L1 now */

	*ctx->psize = size;
	if (ctx->l1->info.ops->write(ctx->l1,
				     ctx->specific_key, (const uint8_t *)buf,
				     size, ctx->expiry, (void **)ctx->pdata))
		return NIR_FINISH_ERROR;

	return NIR_FINISH_OK;

collect:
	/*
	 * it's bigger than one buffer-load, we have to stash what we're getting
	 * on a buflist and create it when we have it all
	 */

	if (lws_buflist_append_segment(&ctx->buflist, (const uint8_t *)buf,
				       size))
		goto cleanup;

	if (!(flags & LCN_EOL))
		return NIR_CONTINUE;

	/* we have all the payload, create the L1 entry without payload yet */

	/*
	 * Fix: report the total collected length... previously this stored
	 * only the size of the final chunk, so callers saw a short *psize
	 * for any entry that spanned more than one buffer-load
	 */
	*ctx->psize = lws_buflist_total_len(&ctx->buflist);

	if (ctx->l1->info.ops->write(ctx->l1, ctx->specific_key, NULL,
				     *ctx->psize,
				     ctx->expiry, (void **)&q))
		goto cleanup;

	*ctx->pdata = q;

	/* dump the buflist into the L1 cache entry */

	do {
		uint8_t *p;
		size_t len = lws_buflist_next_segment_len(&ctx->buflist, &p);

		memcpy(q, p, len);
		q += len;
		lws_buflist_use_segment(&ctx->buflist, len);
	} while (ctx->buflist);

	return NIR_FINISH_OK;

cleanup:
	lws_buflist_destroy_all_segments(&ctx->buflist);

	return NIR_FINISH_ERROR;
}
/*
 * Fetch a specific tag from the backing file; on hit the payload is
 * instantiated in the innermost (L1) cache level and *pdata / *psize
 * describe it there.  Returns nonzero if not found.
 */
static int
lws_cache_nscookiejar_get(struct lws_cache_ttl_lru *_c,
			  const char *specific_key, const void **pdata,
			  size_t *psize)
{
	lws_cache_nscookiejar_t *cache = (lws_cache_nscookiejar_t *)_c;
	struct nsc_get_ctx ctx;
	int fd, r;

	fd = nsc_backing_open_lock(cache, LWS_O_RDONLY, __func__);
	if (fd == LWS_INVALID_FILE)
		return 1;

	/* walk down to the deepest child level, ie, L1 */
	for (ctx.l1 = &cache->cache; ctx.l1->child; ctx.l1 = ctx.l1->child)
		;

	ctx.pdata = pdata;
	ctx.psize = psize;
	ctx.specific_key = specific_key;
	ctx.buflist = NULL;
	ctx.expiry = 0;

	r = nscookiejar_iterate(cache, fd, nsc_get_cb, &ctx);

	nsc_backing_close_unlock(cache, fd);

	return r != NIR_FINISH_OK;
}
/* remove entries matching wc_key: a regen pass with nothing to add */
static int
lws_cache_nscookiejar_invalidate(struct lws_cache_ttl_lru *_c,
				 const char *wc_key)
{
	return nsc_regen((lws_cache_nscookiejar_t *)_c, wc_key, NULL, 0);
}
/* instantiate a cookie-jar-file backing store cache level */
static struct lws_cache_ttl_lru *
lws_cache_nscookiejar_create(const struct lws_cache_creation_info *info)
{
	lws_cache_nscookiejar_t *cache;

	if (lws_fi(&info->cx->fic, "cache_createfail"))
		return NULL;

	cache = lws_zalloc(sizeof(*cache), __func__);
	if (!cache)
		return NULL;

	cache->cache.info = *info;

	/*
	 * We need to scan the file, if it exists, and find the earliest
	 * expiry while cleaning out any expired entries
	 */
	expiry_cb(&cache->cache.sul);

	lwsl_notice("%s: create %s\n", __func__, info->name ? info->name : "?");

	return (struct lws_cache_ttl_lru *)cache;
}
/* dropping the whole cache level just means deleting the backing file */
static int
lws_cache_nscookiejar_expunge(struct lws_cache_ttl_lru *_c)
{
	lws_cache_nscookiejar_t *cache = (lws_cache_nscookiejar_t *)_c;
	const char *path;
	int r;

	if (!cache)
		return 0;

	path = cache->cache.info.u.nscookiejar.filepath;
	r = unlink(path);
	if (r)
		lwsl_warn("%s: failed to unlink %s\n", __func__, path);

	return r;
}
/* free the cache level object; the backing file is left in place */
static void
lws_cache_nscookiejar_destroy(struct lws_cache_ttl_lru **_pc)
{
	lws_cache_nscookiejar_t *cache = (lws_cache_nscookiejar_t *)*_pc;

	if (!cache)
		return;

	/* make sure no expiry regen fires after we are gone */
	lws_sul_cancel(&cache->cache.sul);

	lws_free_set_NULL(*_pc);
}
#if defined(_DEBUG)
/* debug iterate callback: hexdump each chunk as it streams past */
static int
nsc_dump_cb(lws_cache_nscookiejar_t *cache, void *opaque, int flags,
	    const char *buf, size_t size)
{
	(void)cache;
	(void)opaque;
	(void)flags;

	lwsl_hexdump_cache(buf, size);

	return 0;
}
/* stream the whole backing file through the hexdump callback */
static void
lws_cache_nscookiejar_debug_dump(struct lws_cache_ttl_lru *_c)
{
	lws_cache_nscookiejar_t *cache = (lws_cache_nscookiejar_t *)_c;
	int fd;

	fd = nsc_backing_open_lock(cache, LWS_O_RDONLY, __func__);
	if (fd == LWS_INVALID_FILE)
		return;

	lwsl_cache("%s: %s\n", __func__, _c->info.name);

	nscookiejar_iterate(cache, fd, nsc_dump_cb, NULL);

	nsc_backing_close_unlock(cache, fd);
}
#endif
/* ops vtable for the netscape-cookie-jar-file backing store cache level */
const struct lws_cache_ops lws_cache_ops_nscookiejar = {
	.create			= lws_cache_nscookiejar_create,
	.destroy		= lws_cache_nscookiejar_destroy,
	.expunge		= lws_cache_nscookiejar_expunge,

	.write			= lws_cache_nscookiejar_write,
	.tag_match		= lws_cache_nscookiejar_tag_match,
	.lookup			= lws_cache_nscookiejar_lookup,
	.invalidate		= lws_cache_nscookiejar_invalidate,
	.get			= lws_cache_nscookiejar_get,
#if defined(_DEBUG)
	.debug_dump		= lws_cache_nscookiejar_debug_dump,
#endif
};

612
lib/misc/cache-ttl/heap.c Normal file
View file

@ -0,0 +1,612 @@
/*
* libwebsockets - small server side websockets and web server implementation
*
* Copyright (C) 2010 - 2021 Andy Green <andy@warmcat.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <private-lib-core.h>
#include "private-lib-misc-cache-ttl.h"
#if defined(write)
#undef write
#endif
static void
update_sul(lws_cache_ttl_lru_t_heap_t *cache);
static int
lws_cache_heap_invalidate(struct lws_cache_ttl_lru *_c, const char *key);
/* lws_dll2_add_sorted comparator: order items ascending by expiry */
static int
sort_expiry(const lws_dll2_t *a, const lws_dll2_t *b)
{
	const lws_cache_ttl_item_heap_t
		*p = lws_container_of(a, lws_cache_ttl_item_heap_t, list_expiry),
		*q = lws_container_of(b, lws_cache_ttl_item_heap_t, list_expiry);

	if (p->expiry == q->expiry)
		return 0;

	return p->expiry > q->expiry ? 1 : -1;
}
/*
 * Unconditionally detach and free a heap item: remove it from both the
 * expiry and lru lists, adjust the level's footprint accounting, re-arm
 * the expiry sul, and notify the user's eviction cb (if any) with the
 * payload before freeing.
 */
static void
_lws_cache_heap_item_destroy(lws_cache_ttl_lru_t_heap_t *cache,
			     lws_cache_ttl_item_heap_t *item)
{
	/* the item's tag string lives directly after its payload */
	lwsl_cache("%s: %s (%s)\n", __func__, cache->cache.info.name,
		   (const char *)&item[1] + item->size);

	lws_dll2_remove(&item->list_expiry);
	lws_dll2_remove(&item->list_lru);

	cache->cache.current_footprint -= item->size;

	update_sul(cache);

	if (cache->cache.info.cb)
		cache->cache.info.cb((void *)((uint8_t *)&item[1]), item->size);

	lws_free(item);
}
/*
 * Destroy an item, and if it is a normal (non-meta) item, also destroy
 * any cached meta-results items that listed its tag, since those results
 * must be recalculated before being used again.
 */
static void
lws_cache_heap_item_destroy(lws_cache_ttl_lru_t_heap_t *cache,
			    lws_cache_ttl_item_heap_t *item, int parent_too)
{
	struct lws_cache_ttl_lru *backing = &cache->cache;
	const char *tag = ((const char *)&item[1]) + item->size;

	/*
	 * We're destroying a normal item?
	 */

	if (*tag == META_ITEM_LEADING)
		/* no, nothing to check here then */
		goto post;

	if (backing->info.parent)
		backing = backing->info.parent;

	/*
	 * We need to check any cached meta-results from lookups that
	 * include this normal item, and if any, invalidate the meta-results
	 * since they have to be recalculated before being used again.
	 */

	lws_start_foreach_dll_safe(struct lws_dll2 *, d, d1,
				   cache->items_lru.head) {
		lws_cache_ttl_item_heap_t *i = lws_container_of(d,
						lws_cache_ttl_item_heap_t,
						list_lru);
		/*
		 * Fix: examine the ITERATED item i here, not the item being
		 * destroyed... with "item" the meta test below could never
		 * fire (item is known non-meta at this point), so stale
		 * meta-results were never invalidated
		 */
		const char *iname = ((const char *)&i[1]) + i->size;
		uint8_t *pay = (uint8_t *)&i[1], *end = pay + i->size;

		if (*iname == META_ITEM_LEADING) {
			/* Fix: look for the DESTROYED item's tag in i's
			 * serialized results, not for i's own name */
			size_t taglen = strlen(tag);

			/*
			 * If the item about to be destroyed makes an
			 * appearance on the meta results list, we must kill
			 * the meta result item to force recalc next time
			 */

			while (pay < end) {
				/* record layout: u32be payload len, u32be
				 * tag len, tag string, NUL */
				uint32_t tlen = lws_ser_ru32be(pay + 4);

				if (tlen == taglen &&
				    !strcmp((const char *)pay + 8, tag)) {
#if defined(_DEBUG)
					/*
					 * Sanity check that the item tag is
					 * really a match for that meta results
					 * item
					 */
					assert (!backing->info.ops->tag_match(
						backing, iname + 1, tag, 1));
#endif
					_lws_cache_heap_item_destroy(cache, i);
					break;
				}
				pay += 8 + tlen + 1;
			}

#if defined(_DEBUG)
			/*
			 * Sanity check that the item tag really isn't a match
			 * for that meta results item
			 */
			assert (backing->info.ops->tag_match(backing, iname + 1,
							     tag, 1));
#endif
		}

	} lws_end_foreach_dll_safe(d, d1);

post:
	_lws_cache_heap_item_destroy(cache, item);
}
/* evict the least-recently-used item, which sits at the lru list head */
static void
lws_cache_item_evict_lru(lws_cache_ttl_lru_t_heap_t *cache)
{
	lws_dll2_t *oldest = cache->items_lru.head;

	if (!oldest)
		return;

	lws_cache_heap_item_destroy(cache,
				    lws_container_of(oldest,
						     lws_cache_ttl_item_heap_t,
						     list_lru), 0);
}
/*
* We need to weed out expired entries in the backing file
*/
/*
 * We need to weed out expired entries in the backing file
 */

static void
expiry_cb(lws_sorted_usec_list_t *sul)
{
	lws_cache_ttl_lru_t_heap_t *cache = lws_container_of(sul,
					lws_cache_ttl_lru_t_heap_t, cache.sul);
	lws_usec_t now = lws_now_usecs();

	lwsl_cache("%s: %s\n", __func__, cache->cache.info.name);

	/* pop due items off the head of the expiry-sorted list */
	for (;;) {
		lws_cache_ttl_item_heap_t *item;

		if (!cache->items_expiry.head)
			break;

		item = lws_container_of(cache->items_expiry.head,
					lws_cache_ttl_item_heap_t, list_expiry);

		if (item->expiry > now)
			break;

		lws_cache_heap_item_destroy(cache, item, 1);
	}
}
/*
* Let's figure out what the earliest next expiry is
*/
/*
 * Let's figure out what the earliest next expiry is
 */

static int
earliest_expiry(lws_cache_ttl_lru_t_heap_t *cache, lws_usec_t *pearliest)
{
	lws_dll2_t *head = cache->items_expiry.head;
	lws_cache_ttl_item_heap_t *item;

	if (!head)
		return 1; /* nothing pending expiry at all */

	/* the expiry list is kept sorted, soonest first */
	item = lws_container_of(head, lws_cache_ttl_item_heap_t, list_expiry);
	*pearliest = item->expiry;

	return 0;
}
/* reap anything newly-expired and re-arm the sul for the next expiry */
static void
update_sul(lws_cache_ttl_lru_t_heap_t *cache)
{
	lws_usec_t next;

	/* weed out any newly-expired */
	expiry_cb(&cache->cache.sul);

	/* figure out the next soonest expiring item */
	if (earliest_expiry(cache, &next)) {
		/* nothing left that can expire */
		lws_sul_cancel(&cache->cache.sul);
		return;
	}

	lwsl_debug("%s: setting exp %llu\n", __func__,
		   (unsigned long long)next);

	if (next)
		lws_cache_schedule(&cache->cache, expiry_cb, next);
}
/* linear scan of the lru list for an exact tag; tag sits after payload */
static lws_cache_ttl_item_heap_t *
lws_cache_heap_specific(lws_cache_ttl_lru_t_heap_t *cache,
			const char *specific_key)
{
	lws_cache_ttl_item_heap_t *found = NULL;

	lws_start_foreach_dll(struct lws_dll2 *, d, cache->items_lru.head) {
		lws_cache_ttl_item_heap_t *item = lws_container_of(d,
						lws_cache_ttl_item_heap_t,
						list_lru);

		if (!strcmp(specific_key,
			    ((const char *)&item[1]) + item->size)) {
			found = item;
			break;
		}
	} lws_end_foreach_dll(d);

	return found;
}
/* heap level: plain wildcard compare, no per-column cookie rules here */
static int
lws_cache_heap_tag_match(struct lws_cache_ttl_lru *cache, const char *wc,
			 const char *tag, char lookup_rules)
{
	(void)cache;
	(void)lookup_rules;

	return lws_strcmp_wildcard(wc, strlen(wc), tag, strlen(tag));
}
/*
 * Collect an lws_cache_match_t for every heap item whose tag matches the
 * wildcard, skipping tags already reported by an earlier cache level.
 * Returns 1 (and clears results_owner) on OOM.
 */
static int
lws_cache_heap_lookup(struct lws_cache_ttl_lru *_c, const char *wildcard_key,
		      lws_dll2_owner_t *results_owner)
{
	lws_cache_ttl_lru_t_heap_t *cache = (lws_cache_ttl_lru_t_heap_t *)_c;
	size_t sklen = strlen(wildcard_key);

	lws_start_foreach_dll(struct lws_dll2 *, d, cache->items_lru.head) {
		lws_cache_ttl_item_heap_t *item = lws_container_of(d,
						lws_cache_ttl_item_heap_t,
						list_lru);
		const char *iname = ((const char *)&item[1]) + item->size;

		if (!lws_strcmp_wildcard(wildcard_key, sklen, iname,
					 strlen(iname))) {
			size_t ilen = strlen(iname);
			lws_cache_match_t *m;
			char hit = 0;

			/*
			 * It musn't already be on the list from an earlier
			 * cache level
			 */

			lws_start_foreach_dll(struct lws_dll2 *, e,
					      results_owner->head) {
				lws_cache_match_t *i = lws_container_of(e,
							lws_cache_match_t, list);

				if (i->tag_size == ilen &&
				    !strcmp(iname, ((const char *)&i[1]))) {
					hit = 1;
					break;
				}
			} lws_end_foreach_dll(e);

			if (!hit) {

				/*
				 * it's unique, instantiate a record for it
				 */

				m = lws_fi(&_c->info.cx->fic,
					   "cache_lookup_oom") ? NULL :
					lws_malloc(sizeof(*m) + ilen + 1,
						   __func__);
				if (!m) {
					lws_cache_clear_matches(results_owner);

					return 1;
				}

				memset(&m->list, 0, sizeof(m->list));
				/*
				 * Fix: m came from lws_malloc(), so
				 * payload_size and expiry must be set
				 * explicitly... lws_cache_lookup() reads
				 * both from every result and would
				 * otherwise see indeterminate values
				 */
				m->payload_size = item->size;
				m->expiry = item->expiry;
				m->tag_size = ilen;
				memcpy(&m[1], iname, ilen + 1);

				lws_dll2_add_tail(&m->list, results_owner);
			}
		}

	} lws_end_foreach_dll(d);

	return 0;
}
/*
 * Create (or replace) a heap item for specific_key: first invalidate any
 * cached meta-results the new tag would have matched, evict lru items to
 * stay under the configured limits, then allocate [item | payload | tag]
 * contiguously and hook it on the lru (and optionally expiry) lists.
 *
 * If ppvoid is given, *ppvoid is set to the payload location so a caller
 * may fill it after the fact (source may be NULL in that case).
 */
static int
lws_cache_heap_write(struct lws_cache_ttl_lru *_c, const char *specific_key,
		     const uint8_t *source, size_t size, lws_usec_t expiry,
		     void **ppvoid)
{
	lws_cache_ttl_lru_t_heap_t *cache = (lws_cache_ttl_lru_t_heap_t *)_c;
	struct lws_cache_ttl_lru *backing = _c;
	lws_cache_ttl_item_heap_t *item, *ei;
	size_t kl = strlen(specific_key);
	char *p;

	lwsl_cache("%s: %s: len %d\n", __func__, _c->info.name, (int)size);

	/*
	 * Is this new tag going to invalidate any existing cached meta-results?
	 *
	 * If so, let's destroy any of those first to recover the heap
	 */

	if (backing->info.parent)
		backing = backing->info.parent;

	lws_start_foreach_dll_safe(struct lws_dll2 *, d, d1,
				   cache->items_lru.head) {
		lws_cache_ttl_item_heap_t *i = lws_container_of(d,
						lws_cache_ttl_item_heap_t,
						list_lru);
		const char *iname = ((const char *)&i[1]) + i->size;

		if (*iname == META_ITEM_LEADING) {
			/*
			 * If the item about to be added would match any cached
			 * results from before it was added, we have to
			 * invalidate them.  To check this, we have to use the
			 * matching rules at the backing store level
			 */
			if (!backing->info.ops->tag_match(backing, iname + 1,
							  specific_key, 1))
				_lws_cache_heap_item_destroy(cache, i);
		}

	} lws_end_foreach_dll_safe(d, d1);

	/*
	 * Keep us under the limit if possible... note this will always allow
	 * caching a single large item even if it is above the limits
	 */

	while ((cache->cache.info.max_footprint &&
		cache->cache.current_footprint + size >
					cache->cache.info.max_footprint) ||
	       (cache->cache.info.max_items &&
		cache->items_lru.count + 1 > cache->cache.info.max_items))
		lws_cache_item_evict_lru(cache);

	/* remove any existing entry of the same key */

	lws_cache_heap_invalidate(&cache->cache, specific_key);

	item = lws_fi(&_c->info.cx->fic, "cache_write_oom") ? NULL :
		lws_malloc(sizeof(*item) + kl + 1u + size, __func__);
	if (!item)
		return 1;

	/*
	 * Fix: account the new payload size directly... item->size was
	 * previously read here before it had been initialized (and it was
	 * about to be zeroed by the memset below anyway)
	 */
	cache->cache.current_footprint += size;

	/* only need to zero down our item object */
	memset(item, 0, sizeof(*item));

	p = (char *)&item[1];
	if (ppvoid)
		*ppvoid = p;

	/* copy the payload into place */
	if (source)
		memcpy(p, source, size);

	/* copy the key string into place, with terminating NUL */
	memcpy(p + size, specific_key, kl + 1);

	item->expiry = expiry;
	item->key_len = kl;
	item->size = size;

	if (expiry) {
		/* adding to expiry is optional, on nonzero expiry */
		lws_dll2_add_sorted(&item->list_expiry, &cache->items_expiry,
				    sort_expiry);
		ei = lws_container_of(cache->items_expiry.head,
				      lws_cache_ttl_item_heap_t, list_expiry);

		lwsl_debug("%s: setting exp %llu\n", __func__,
			   (unsigned long long)ei->expiry);

		lws_cache_schedule(&cache->cache, expiry_cb, ei->expiry);
	}

	/* always add ourselves to head of lru list */
	lws_dll2_add_head(&item->list_lru, &cache->items_lru);

	return 0;
}
/*
 * Look up an exact tag at this heap level; on hit, promote it to the
 * head of the lru list and optionally report its payload location.
 */
static int
lws_cache_heap_get(struct lws_cache_ttl_lru *_c, const char *specific_key,
		   const void **pdata, size_t *psize)
{
	lws_cache_ttl_lru_t_heap_t *cache = (lws_cache_ttl_lru_t_heap_t *)_c;
	lws_cache_ttl_item_heap_t *item =
			lws_cache_heap_specific(cache, specific_key);

	if (!item)
		return 1; /* not held at this level */

	/* we are using it, move it to lru head */
	lws_dll2_remove(&item->list_lru);
	lws_dll2_add_head(&item->list_lru, &cache->items_lru);

	if (!pdata)
		return 0;

	/* payload sits directly after the item header */
	*pdata = (const void *)&item[1];
	*psize = item->size;

	return 0;
}
/*
 * Remove a specific key from this heap level, if present, together with
 * any cached meta-results entries that included it.  Returns 0 whether
 * or not the key was found.
 */
static int
lws_cache_heap_invalidate(struct lws_cache_ttl_lru *_c, const char *specific_key)
{
	lws_cache_ttl_lru_t_heap_t *cache = (lws_cache_ttl_lru_t_heap_t *)_c;
	struct lws_cache_ttl_lru *backing = _c;
	lws_cache_ttl_item_heap_t *item;
	const void *user;
	size_t size;

	/* nothing to do if we don't hold this key at this level */
	if (lws_cache_heap_get(_c, specific_key, &user, &size))
		return 0;

	/* tag-matching rules live at the outermost (backing) level */
	if (backing->info.parent)
		backing = backing->info.parent;

	/* recover the item header sitting just before its payload */
	item = (lws_cache_ttl_item_heap_t *)(((uint8_t *)user) - sizeof(*item));

	/*
	 * We must invalidate any cached results that would have included this
	 */

	lws_start_foreach_dll_safe(struct lws_dll2 *, d, d1,
				   cache->items_lru.head) {
		lws_cache_ttl_item_heap_t *i = lws_container_of(d,
						lws_cache_ttl_item_heap_t,
						list_lru);
		const char *iname = ((const char *)&i[1]) + i->size;

		if (*iname == META_ITEM_LEADING) {
			/*
			 * If the item being removed matches a cached
			 * meta-results entry, that entry must be destroyed
			 * so it is recalculated next time.  To check this,
			 * we have to use the matching rules at the backing
			 * store level
			 */
			if (!backing->info.ops->tag_match(backing, iname + 1,
							  specific_key, 1))
				_lws_cache_heap_item_destroy(cache, i);
		}

	} lws_end_foreach_dll_safe(d, d1);

	lws_cache_heap_item_destroy(cache, item, 0);

	return 0;
}
/* instantiate a heap cache level and register it with its parent */
static struct lws_cache_ttl_lru *
lws_cache_heap_create(const struct lws_cache_creation_info *info)
{
	lws_cache_ttl_lru_t_heap_t *cache;

	assert(info->cx);
	assert(info->name);

	if (lws_fi(&info->cx->fic, "cache_createfail"))
		return NULL;

	cache = lws_zalloc(sizeof(*cache), __func__);
	if (!cache)
		return NULL;

	cache->cache.info = *info;

	/* let the parent level know we are its child */
	if (info->parent)
		info->parent->child = &cache->cache;

	lwsl_cache("%s: create %s\n", __func__, info->name);

	return (struct lws_cache_ttl_lru *)cache;
}
/* lws_dll2_foreach_safe callback: destroy each item handed to us */
static int
destroy_dll(struct lws_dll2 *d, void *user)
{
	lws_cache_ttl_lru_t *_c = (struct lws_cache_ttl_lru *)user;

	lws_cache_heap_item_destroy((lws_cache_ttl_lru_t_heap_t *)_c,
				    lws_container_of(d,
						     lws_cache_ttl_item_heap_t,
						     list_lru), 0);

	return 0;
}
/* empty this heap level, destroying every held item */
static int
lws_cache_heap_expunge(struct lws_cache_ttl_lru *_c)
{
	lws_cache_ttl_lru_t_heap_t *cache = (lws_cache_ttl_lru_t_heap_t *)_c;

	lws_dll2_foreach_safe(&cache->items_lru, cache, destroy_dll);

	return 0;
}
/* destroy the heap level itself: all items, the expiry sul, and us */
static void
lws_cache_heap_destroy(struct lws_cache_ttl_lru **_cache)
{
	lws_cache_ttl_lru_t *c = *_cache;
	lws_cache_ttl_lru_t_heap_t *cache = (lws_cache_ttl_lru_t_heap_t *)c;

	if (!cache)
		return;

	/* no expiry event may fire after this */
	lws_sul_cancel(&c->sul);

	lws_dll2_foreach_safe(&cache->items_lru, cache, destroy_dll);

	lwsl_cache("%s: destroy %s\n", __func__, c->info.name ?
					c->info.name : "?");

	lws_free_set_NULL(*_cache);
}
#if defined(_DEBUG)
/* debug foreach callback: log one item's tag, size, expiry and payload */
static int
dump_dll(struct lws_dll2 *d, void *user)
{
	lws_cache_ttl_item_heap_t *item =
		lws_container_of(d, lws_cache_ttl_item_heap_t, list_lru);
	const char *tag = (const char *)&item[1] + item->size;

	(void)user;

	lwsl_cache("  %s: size %d, exp %llu\n", tag,
		   (int)item->size, (unsigned long long)item->expiry);

	lwsl_hexdump_cache((const char *)&item[1], item->size);

	return 0;
}
/* log a summary of this heap level and then every item it holds */
static void
lws_cache_heap_debug_dump(struct lws_cache_ttl_lru *_c)
{
	lws_cache_ttl_lru_t_heap_t *cache = (lws_cache_ttl_lru_t_heap_t *)_c;
#if !defined(LWS_WITH_NO_LOGS)
	lws_cache_ttl_item_heap_t *soonest = NULL;
	lws_dll2_t *head = cache->items_expiry.head;

	if (head)
		soonest = lws_container_of(head, lws_cache_ttl_item_heap_t,
					   list_expiry);

	lwsl_cache("%s: %s: items %d, earliest %llu\n", __func__,
		   cache->cache.info.name, (int)cache->items_lru.count,
		   soonest ? (unsigned long long)soonest->expiry : 0ull);
#endif

	lws_dll2_foreach_safe(&cache->items_lru, cache, dump_dll);
}
#endif
/* ops vtable for the in-heap (L1 / intermediate) cache level */
const struct lws_cache_ops lws_cache_ops_heap = {
	.create			= lws_cache_heap_create,
	.destroy		= lws_cache_heap_destroy,
	.expunge		= lws_cache_heap_expunge,

	.write			= lws_cache_heap_write,
	.tag_match		= lws_cache_heap_tag_match,
	.lookup			= lws_cache_heap_lookup,
	.invalidate		= lws_cache_heap_invalidate,
	.get			= lws_cache_heap_get,
#if defined(_DEBUG)
	.debug_dump		= lws_cache_heap_debug_dump,
#endif
};

View file

@ -0,0 +1,300 @@
/*
* libwebsockets - small server side websockets and web server implementation
*
* Copyright (C) 2010 - 2021 Andy Green <andy@warmcat.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <private-lib-core.h>
#include "private-lib-misc-cache-ttl.h"
#include <assert.h>
#if defined(write)
#undef write
#endif
/* detach and free every lws_cache_match_t on a results list */
void
lws_cache_clear_matches(lws_dll2_owner_t *results_owner)
{
	lws_start_foreach_dll_safe(struct lws_dll2 *, d, d1,
				   results_owner->head) {
		lws_cache_match_t *m = lws_container_of(d, lws_cache_match_t,
							list);

		lws_dll2_remove(&m->list);
		lws_free(m);
	} lws_end_foreach_dll_safe(d, d1);
}
/* arm the cache's sul to fire cb at absolute time e (us) */
void
lws_cache_schedule(struct lws_cache_ttl_lru *cache, sul_cb_t cb, lws_usec_t e)
{
	lwsl_cache("%s: %s schedule %llu\n", __func__, cache->info.name,
		   (unsigned long long)e);

	/* sul wants a relative offset, e is absolute */
	lws_sul_schedule(cache->info.cx, cache->info.tsi, &cache->sul, cb,
			 e - lws_now_usecs());
}
/*
 * Write a new item into every level of the cache hierarchy, outermost
 * (backing store) first so the levels agree.  If \p ppay is given, on
 * success it points to the payload location at the innermost (L1) level.
 *
 * NOTE(review): only the result of the last (L1) write is returned...
 * a failure at an outer level is neither reported nor aborts the
 * remaining writes.  Confirm this best-effort semantic is intended.
 */
int
lws_cache_write_through(struct lws_cache_ttl_lru *cache,
			const char *specific_key, const uint8_t *source,
			size_t size, lws_usec_t expiry, void **ppay)
{
	struct lws_cache_ttl_lru *levels[LWS_CACHE_MAX_LEVELS], *c = cache;
	int n = 0, r = 0;

	/* drop any existing entries for this key at every level first */
	lws_cache_item_remove(cache, specific_key);

	/* starting from L1 */

	do {
		levels[n++] = c;
		c = c->info.parent;
	} while (c && n < (int)LWS_ARRAY_SIZE(levels));

	/* starting from outermost cache level */

	while (n) {
		n--;
		r = levels[n]->info.ops->write(levels[n], specific_key,
					       source, size, expiry, ppay);
	}

	return r;
}
/*
* We want to make a list of unique keys that exist at any cache level
* matching a wildcard search key.
*
* If L1 has a cached version though, we will just use that.
*/
/*
 * We want to make a list of unique keys that exist at any cache level
 * matching a wildcard search key.
 *
 * If L1 has a cached version though, we will just use that.
 */

int
lws_cache_lookup(struct lws_cache_ttl_lru *cache, const char *wildcard_key,
		 const void **pdata, size_t *psize)
{
	struct lws_cache_ttl_lru *l1 = cache;
	lws_dll2_owner_t results_owner;
	lws_usec_t expiry = 0;
	char meta_key[128];
	uint8_t *p, *temp;
	size_t sum = 0;
	int n;

	memset(&results_owner, 0, sizeof(results_owner));
	/* cached result sets are keyed as META_ITEM_LEADING + wildcard */
	meta_key[0] = META_ITEM_LEADING;
	lws_strncpy(&meta_key[1], wildcard_key, sizeof(meta_key) - 2);

	/*
	 * If we have a cached result set in L1 already, return that
	 */

	if (!l1->info.ops->get(l1, meta_key, pdata, psize))
		return 0;

	/*
	 * No, we have to do the actual lookup work in the backing store layer
	 * to get results for this...
	 */

	while (cache->info.parent)
		cache = cache->info.parent;

	if (cache->info.ops->lookup(cache, wildcard_key, &results_owner)) {
		/* eg, OOM */

		lwsl_cache("%s: bs lookup fail\n", __func__);

		lws_cache_clear_matches(&results_owner);
		return 1;
	}

	/*
	 * Scan the results, we want to know how big a payload it needs in
	 * the cache, and we want to know the earliest expiry of any of the
	 * component parts, so the meta cache entry for these results can be
	 * expired when any of the results would expire.
	 */

	lws_start_foreach_dll(struct lws_dll2 *, d, results_owner.head) {
		lws_cache_match_t *m = lws_container_of(d, lws_cache_match_t,
							list);
		sum += 8; /* payload size, name length */
		sum += m->tag_size + 1;
		if (m->expiry && (!expiry || expiry < m->expiry))
			expiry = m->expiry;
	} lws_end_foreach_dll(d);

	lwsl_cache("%s: results %d, size %d\n", __func__,
		   (int)results_owner.count, (int)sum);

	/*
	 * NOTE(review): with zero results sum is 0 here and lws_malloc(0)
	 * may legally return NULL, making an empty result set look like
	 * OOM -- confirm whether the empty set should be cached instead
	 */
	temp = lws_malloc(sum, __func__);
	if (!temp) {
		lws_cache_clear_matches(&results_owner);
		return 1;
	}

	/*
	 * Fill temp with the serialized results
	 */

	p = temp;
	lws_start_foreach_dll(struct lws_dll2 *, d, results_owner.head) {
		lws_cache_match_t *m = lws_container_of(d, lws_cache_match_t,
							list);

		/* we don't copy the payload in, but take note of its size */
		lws_ser_wu32be(p, (uint32_t)m->payload_size);
		p += 4;
		/* length of the tag name (there is an uncounted NUL after) */
		lws_ser_wu32be(p, (uint32_t)m->tag_size);
		p += 4;
		/* then the tag name, plus the extra NUL */
		memcpy(p, &m[1], m->tag_size + 1);
		p += m->tag_size + 1;
	} lws_end_foreach_dll(d);

	lws_cache_clear_matches(&results_owner);

	/*
	 * Create the right amount of space for an L1 record of these results,
	 * with its expiry set to the earliest of the results, and copy it in
	 * from temp
	 */

	n = l1->info.ops->write(l1, meta_key, temp, sum, expiry, (void **)&p);
	/* done with temp */
	lws_free(temp);

	if (n)
		return 1;

	/* point to the results in L1 */

	*pdata = p;
	*psize = sum;

	return 0;
}
/* try each level in turn, innermost (L1) first, for an exact key */
int
lws_cache_item_get(struct lws_cache_ttl_lru *cache, const char *specific_key,
		   const void **pdata, size_t *psize)
{
	for (; cache; cache = cache->info.parent)
		if (!cache->info.ops->get(cache, specific_key, pdata, psize)) {
			lwsl_cache("%s: hit\n", __func__);

			return 0;
		}

	return 1;
}
int
lws_cache_expunge(struct lws_cache_ttl_lru *cache)
{
	struct lws_cache_ttl_lru *level = cache;
	int result = 0;

	/*
	 * Empty every level of the cache chain.  We visit all levels even
	 * if one fails, ORing the individual results together so any
	 * failure is reported to the caller.
	 */
	while (level) {
		result = result | level->info.ops->expunge(level);
		level = level->info.parent;
	}

	return result;
}
int
lws_cache_item_remove(struct lws_cache_ttl_lru *cache, const char *wildcard_key)
{
	struct lws_cache_ttl_lru *level;

	/*
	 * Invalidation has to be applied at every level of the cache chain,
	 * since each level may hold its own copy of matching items.  If any
	 * level fails to invalidate, the whole operation fails immediately.
	 */
	for (level = cache; level; level = level->info.parent)
		if (level->info.ops->invalidate(level, wildcard_key))
			return 1;

	return 0;
}
/*
 * lws_cache_footprint() - report the currently-stored byte count of a level
 *
 * Returns the current footprint of this cache level only; it does not
 * recurse into parent levels.
 */
uint64_t
lws_cache_footprint(struct lws_cache_ttl_lru *cache)
{
	return cache->current_footprint;
}
/*
 * lws_cache_debug_dump() - log the contents of one cache level
 *
 * Only active on _DEBUG builds, and only if the backend implements the
 * optional debug_dump op.  Does not recurse into parent levels.  On
 * non-debug builds this is a NOP.
 */
void
lws_cache_debug_dump(struct lws_cache_ttl_lru *cache)
{
#if defined(_DEBUG)
	if (cache->info.ops->debug_dump)
		cache->info.ops->debug_dump(cache);
#endif
}
/*
 * lws_cache_results_walk() - step the walk context to the next result record
 *
 * Each serialized record is: 4-byte BE payload length, 4-byte BE tag length,
 * then the tag name followed by an uncounted NUL (the payload itself is not
 * stored in the results blob).
 *
 * Returns 0 and fills walk_ctx->payload_len / tag_len / tag if a record was
 * consumed, or 1 when the results are exhausted, or if the blob is truncated
 * or corrupt (previously a short blob made the size_t subtraction underflow,
 * causing the walk to run far past the end of the results).
 */
int
lws_cache_results_walk(lws_cache_results_t *walk_ctx)
{
	size_t rec;

	if (!walk_ctx->size)
		return 1;

	/* must at least have room for the two length words */
	if (walk_ctx->size < 8)
		return 1;

	walk_ctx->payload_len = lws_ser_ru32be(walk_ctx->ptr);
	walk_ctx->tag_len = lws_ser_ru32be(walk_ctx->ptr + 4);
	walk_ctx->tag = walk_ctx->ptr + 8;

	/* the tag and its trailing NUL must fit inside the remaining blob */
	rec = (size_t)walk_ctx->tag_len + 1 + 8;
	if (walk_ctx->size < rec)
		return 1;

	walk_ctx->ptr += rec;
	walk_ctx->size -= rec;

	return 0;
}
/*
 * lws_cache_create() - instantiate a cache level described by info
 *
 * The caller must fill info->ops (eg, &lws_cache_ops_heap) and info->name
 * before calling; creation is delegated to the backend's create op.
 * Returns the new cache level, or NULL on failure.
 */
struct lws_cache_ttl_lru *
lws_cache_create(const struct lws_cache_creation_info *info)
{
	/* these are hard API requirements, not runtime conditions */
	assert(info);
	assert(info->ops);
	assert(info->name);
	assert(info->ops->create);

	return info->ops->create(info);
}
/*
 * lws_cache_destroy() - destroy a single cache level
 *
 * NOP if *_cache is already NULL, so it is safe to call on an
 * already-destroyed handle.  Cancels any pending expiry scheduling before
 * handing off to the backend's destroy op.
 *
 * NOTE(review): the backend destroy op receives the caller's pointer
 * address, so it is presumably responsible for freeing the object and
 * NULLing *_cache -- confirm against the backend implementations.
 */
void
lws_cache_destroy(struct lws_cache_ttl_lru **_cache)
{
	lws_cache_ttl_lru_t *cache = *_cache;

	if (!cache)
		return;

	assert(cache->info.ops->destroy);

	/* make sure our expiry callback cannot fire after destruction */
	lws_sul_cancel(&cache->sul);

	cache->info.ops->destroy(_cache);
}

View file

@ -0,0 +1,98 @@
/*
* libwebsockets - small server side websockets and web server implementation
*
* Copyright (C) 2010 - 2021 Andy Green <andy@warmcat.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
/* cache logging is routed to debug level; retarget these for verbose traces */
#define lwsl_cache lwsl_debug
#define lwsl_hexdump_cache lwsl_hexdump_debug

/* maximum depth of chained cache levels (eg, L1 heap above a file backend) */
#define LWS_CACHE_MAX_LEVELS 3

/*
 * If we need structure inside the cache tag names, use this character as a
 * separator
 */
#define LWSCTAG_SEP '|'

/*
 * Our synthetic cache result items all have tags starting with this char
 */
#define META_ITEM_LEADING '!'

/* one item held in the heap-backed cache level */
typedef struct lws_cache_ttl_item_heap {
	lws_dll2_t		list_expiry;	/* sorted-by-expiry list */
	lws_dll2_t		list_lru;	/* least-recently-used list */

	lws_usec_t		expiry;		/* absolute us time item dies */
	size_t			key_len;	/* length of the key name */
	size_t			size;		/* payload size in bytes */

	/*
	 * len + key_len + 1 bytes of data overcommitted, user object first
	 * so it is well-aligned, then the NUL-terminated key name
	 */
} lws_cache_ttl_item_heap_t;

/* this is a "base class", all cache implementations have one at the start */
typedef struct lws_cache_ttl_lru {
	struct lws_cache_creation_info	info;	/* copy of creation params */
	lws_sorted_usec_list_t		sul;	/* expiry scheduling */
	struct lws_cache_ttl_lru	*child;	/* level below us, if any */
	uint64_t			current_footprint; /* bytes held now */
} lws_cache_ttl_lru_t;

/*
 * The heap-backed cache uses lws_dll2 linked-lists to track items that are
 * in it.
 */
typedef struct lws_cache_ttl_lru_heap {
	lws_cache_ttl_lru_t	cache;		/* base class, must be first */

	lws_dll2_owner_t	items_expiry;	/* owner of list_expiry items */
	lws_dll2_owner_t	items_lru;	/* owner of list_lru items */
} lws_cache_ttl_lru_t_heap_t;

/*
 * We want to be able to work with a large file-backed implementation even on
 * devices that don't have heap to track what is in it.  It means if lookups
 * reach this cache layer, we will be scanning a potentially large file.
 *
 * L1 caching of lookups (including null result list) reduces the expense of
 * this on average.  We keep a copy of the last computed earliest expiry.
 *
 * We can't keep an open file handle here.  Because other processes may change
 * the cookie file by deleting and replacing it, we have to open it fresh each
 * time.
 */
typedef struct lws_cache_nscookiejar {
	lws_cache_ttl_lru_t	cache;		/* base class, must be first */

	lws_usec_t		earliest_expiry; /* cached earliest expiry */
} lws_cache_nscookiejar_t;

/* free every match item held on the results owner list */
void
lws_cache_clear_matches(lws_dll2_owner_t *results_owner);

/* (re)arm the cache level's sul to call cb at absolute us time e */
void
lws_cache_schedule(struct lws_cache_ttl_lru *cache, sul_cb_t cb, lws_usec_t e);

View file

@ -0,0 +1,19 @@
# Standalone build for the lws_cache api test; also built in-tree when
# LWS_WITH_MINIMAL_EXAMPLES is enabled.
project(lws-api-test-lws_cache C)
cmake_minimum_required(VERSION 2.8.12)
find_package(libwebsockets CONFIG REQUIRED)
list(APPEND CMAKE_MODULE_PATH ${LWS_CMAKE_DIR})
include(CheckCSourceCompiles)
include(LwsCheckRequirements)

set(SAMP lws-api-test-lws_cache)
set(SRCS main.c)

add_executable(${SAMP} ${SRCS})
# registered with ctest; the binary's exit code decides pass / fail
add_test(NAME api-test-lws_cache COMMAND lws-api-test-lws_cache)

# prefer linking the shared lws library when it was built, else static
if (websockets_shared)
	target_link_libraries(${SAMP} websockets_shared ${LIBWEBSOCKETS_DEP_LIBS})
	add_dependencies(${SAMP} websockets_shared)
else()
	target_link_libraries(${SAMP} websockets ${LIBWEBSOCKETS_DEP_LIBS})
endif()

View file

@ -0,0 +1,22 @@
# lws api test lws_cache
Demonstrates how to use and performs selftests for lws_cache
## build
```
$ cmake . && make
```
## usage
Commandline option|Meaning
---|---
-d <loglevel>|Debug verbosity in decimal, eg, -d15
```
$ ./lws-api-test-lws_cache
[2018/10/09 09:14:17:4834] USER: LWS API selftest: lws_cache
[2018/10/09 09:14:17:4835] USER: Completed: PASS
```

View file

@ -0,0 +1,512 @@
/*
* lws-api-test-lws_cache
*
* Written in 2010-2021 by Andy Green <andy@warmcat.com>
*
* This file is made available under the Creative Commons CC0 1.0
* Universal Public Domain Dedication.
*/
#include <libwebsockets.h>
static struct lws_context *cx;
static int tests, fail;
/*
 * Create a single heap ("L1") cache level, add two items with 1s and 2s
 * TTLs, confirm both are retrievable, then wait ~1.2s and confirm the
 * 1s item has been expired out while servicing the event loop by hand.
 * Returns 0 on pass, 1 on fail.
 */
static int
test_just_l1(void)
{
	struct lws_cache_creation_info ci;
	struct lws_cache_ttl_lru *l1;
	int ret = 1;	/* assume failure until we reach the end */
	size_t size;
	char *po;

	lwsl_user("%s\n", __func__);

	tests++;

	/* just create a heap cache "L1" */

	memset(&ci, 0, sizeof(ci));
	ci.cx = cx;
	ci.ops = &lws_cache_ops_heap;
	ci.name = "L1";

	l1 = lws_cache_create(&ci);
	if (!l1)
		goto cdone;

	/* add two items, a has 1s expiry and b has 2s */

	if (lws_cache_write_through(l1, "a", (const uint8_t *)"is_a", 5,
				    lws_now_usecs() + LWS_US_PER_SEC, NULL))
		goto cdone;

	if (lws_cache_write_through(l1, "b", (const uint8_t *)"is_b", 5,
				    lws_now_usecs() + LWS_US_PER_SEC * 2, NULL))
		goto cdone;

	/* check they exist as intended */

	if (lws_cache_item_get(l1, "a", (const void **)&po, &size) ||
	    size != 5 || strcmp(po, "is_a"))
		goto cdone;

	if (lws_cache_item_get(l1, "b", (const void **)&po, &size) ||
	    size != 5 || strcmp(po, "is_b"))
		goto cdone;

	/* wait for 1.2s to pass, working the event loop by hand */

	lws_cancel_service(cx);
	if (lws_service(cx, 0) < 0)
		goto cdone;
#if defined(WIN32)
	Sleep(1200);
#else
	/* netbsd cares about < 1M */
	usleep(999999);
	usleep(200001);
#endif
	/* service twice so the expiry sul definitely gets a chance to run */
	lws_cancel_service(cx);
	if (lws_service(cx, 0) < 0)
		goto cdone;

	lws_cancel_service(cx);
	if (lws_service(cx, 0) < 0)
		goto cdone;

	/* a only had 1s lifetime, he should be gone */

	if (!lws_cache_item_get(l1, "a", (const void **)&po, &size)) {
		lwsl_err("%s: cache: a still exists after expiry\n", __func__);
		fail++;
		goto cdone;
	}

	/* that's ok then */

	ret = 0;

cdone:
	lws_cache_destroy(&l1);

	if (ret)
		lwsl_warn("%s: fail\n", __func__);

	return ret;
}
/*
 * Create a heap cache capped at max_items = 1 and add two items; confirm
 * the first was evicted by the arrival of the second, and only the second
 * remains retrievable.  Returns 0 on pass, 1 on fail.
 */
static int
test_just_l1_limits(void)
{
	struct lws_cache_creation_info ci;
	struct lws_cache_ttl_lru *l1;
	int ret = 1;	/* assume failure until we reach the end */
	size_t size;
	char *po;

	lwsl_user("%s\n", __func__);

	tests++;

	/* just create a heap cache "L1" */

	memset(&ci, 0, sizeof(ci));
	ci.cx = cx;
	ci.ops = &lws_cache_ops_heap;
	ci.name = "L1_lim";
	ci.max_items = 1; /* ie, adding a second item destroys the first */

	l1 = lws_cache_create(&ci);
	if (!l1)
		goto cdone;

	/* add two items, a has 1s expiry and b has 2s */

	if (lws_cache_write_through(l1, "a", (const uint8_t *)"is_a", 5,
				    lws_now_usecs() + LWS_US_PER_SEC, NULL))
		goto cdone;

	if (lws_cache_write_through(l1, "b", (const uint8_t *)"is_b", 5,
				    lws_now_usecs() + LWS_US_PER_SEC * 2, NULL))
		goto cdone;

	/* only b should exist, since we limit the cache to just one entry */

	if (!lws_cache_item_get(l1, "a", (const void **)&po, &size))
		goto cdone;

	if (lws_cache_item_get(l1, "b", (const void **)&po, &size) ||
	    size != 5 || strcmp(po, "is_b"))
		goto cdone;

	/* that's ok then */

	ret = 0;

cdone:
	lws_cache_destroy(&l1);

	if (ret)
		lwsl_warn("%s: fail\n", __func__);

	return ret;
}
#if defined(LWS_WITH_CACHE_NSCOOKIEJAR)
/*
 * Tab-separated netscape cookie jar test lines (stored verbatim as the
 * cache payloads), each paired with its cache tag of the form
 * "host|path|name"
 */
static const char
	*cookie1 = "host.com\tFALSE\t/\tTRUE\t4000000000\tmycookie\tmycookievalue",
	*tag_cookie1 = "host.com|/|mycookie",

	*cookie2 = "host.com\tFALSE\t/xxx\tTRUE\t4000000000\tmycookie\tmyxxxcookievalue",
	*tag_cookie2 = "host.com|/xxx|mycookie",

	*cookie3 = "host.com\tFALSE\t/\tTRUE\t4000000000\textra\tcookie3value",
	*tag_cookie3 = "host.com|/|extra",

	*cookie4 = "host.com\tFALSE\t/yyy\tTRUE\t4000000000\tnewcookie\tnewcookievalue",
	*tag_cookie4 = "host.com|/yyy|newcookie"
;
static int
test_nsc1(void)
{
struct lws_cache_creation_info ci;
struct lws_cache_ttl_lru *l1, *nsc;
lws_cache_results_t cr;
int n, ret = 1;
size_t size;
char *po;
lwsl_user("%s\n", __func__);
tests++;
/* First create a netscape cookie cache object */
memset(&ci, 0, sizeof(ci));
ci.cx = cx;
ci.ops = &lws_cache_ops_nscookiejar;
ci.name = "NSC";
ci.u.nscookiejar.filepath = "./cookies.txt";
nsc = lws_cache_create(&ci);
if (!nsc)
goto cdone;
/* Then a heap cache "L1" as a child of nsc */
ci.ops = &lws_cache_ops_heap;
ci.name = "L1";
ci.parent = nsc;
l1 = lws_cache_create(&ci);
if (!l1)
goto cdone;
lws_cache_debug_dump(nsc);
lws_cache_debug_dump(l1);
lwsl_user("%s: add cookies to L1\n", __func__);
/* add three cookies */
if (lws_cache_write_through(l1, tag_cookie1,
(const uint8_t *)cookie1, strlen(cookie1),
lws_now_usecs() + LWS_US_PER_SEC, NULL)) {
lwsl_err("%s: write1 failed\n", __func__);
goto cdone;
}
lws_cache_debug_dump(nsc);
lws_cache_debug_dump(l1);
if (lws_cache_write_through(l1, tag_cookie2,
(const uint8_t *)cookie2, strlen(cookie2),
lws_now_usecs() + LWS_US_PER_SEC * 2, NULL)) {
lwsl_err("%s: write2 failed\n", __func__);
goto cdone;
}
lws_cache_debug_dump(nsc);
lws_cache_debug_dump(l1);
if (lws_cache_write_through(l1, tag_cookie3,
(const uint8_t *)cookie3, strlen(cookie3),
lws_now_usecs() + LWS_US_PER_SEC * 2, NULL)) {
lwsl_err("%s: write3 failed\n", __func__);
goto cdone;
}
lws_cache_debug_dump(nsc);
lws_cache_debug_dump(l1);
lwsl_user("%s: check cookies in L1\n", __func__);
/* confirm that the cookies are individually in L1 */
if (lws_cache_item_get(l1, tag_cookie1, (const void **)&po, &size) ||
size != strlen(cookie1) || memcmp(po, cookie1, size)) {
lwsl_err("%s: L1 '%s' missing, size %llu, po %s\n", __func__,
tag_cookie1, (unsigned long long)size, po);
goto cdone;
}
if (lws_cache_item_get(l1, tag_cookie2, (const void **)&po, &size) ||
size != strlen(cookie2) || memcmp(po, cookie2, size)) {
lwsl_err("%s: L1 '%s' missing\n", __func__, tag_cookie2);
goto cdone;
}
if (lws_cache_item_get(l1, tag_cookie3, (const void **)&po, &size) ||
size != strlen(cookie3) || memcmp(po, cookie3, size)) {
lwsl_err("%s: L1 '%s' missing\n", __func__, tag_cookie3);
goto cdone;
}
/* confirm that the cookies are individually in L2 / NSC... normally
* we don't do this but check via L1 so we can get it from there if
* present. But as a unit test, we want to make sure it's in L2 / NSC
*/
lwsl_user("%s: check cookies written thru to NSC\n", __func__);
if (lws_cache_item_get(nsc, tag_cookie1, (const void **)&po, &size) ||
size != strlen(cookie1) || memcmp(po, cookie1, size)) {
lwsl_err("%s: NSC '%s' missing, size %llu, po %s\n", __func__,
tag_cookie1, (unsigned long long)size, po);
goto cdone;
}
if (lws_cache_item_get(nsc, tag_cookie2, (const void **)&po, &size) ||
size != strlen(cookie2) || memcmp(po, cookie2, size)) {
lwsl_err("%s: NSC '%s' missing\n", __func__, tag_cookie2);
goto cdone;
}
if (lws_cache_item_get(nsc, tag_cookie3, (const void **)&po, &size) ||
size != strlen(cookie3) || memcmp(po, cookie3, size)) {
lwsl_err("%s: NSC '%s' missing\n", __func__, tag_cookie3);
goto cdone;
}
/* let's do a lookup with no results */
lwsl_user("%s: nonexistant get must not pass\n", __func__);
if (!lws_cache_item_get(l1, "x.com|y|z", (const void **)&po, &size)) {
lwsl_err("%s: nonexistant found size %llu, po %s\n", __func__,
(unsigned long long)size, po);
goto cdone;
}
/*
* let's try some url paths and check we get the right results set...
* for / and any cookie, we expect only c1 and c3 to be listed
*/
lwsl_user("%s: wildcard lookup 1\n", __func__);
n = lws_cache_lookup(l1, "host.com|/|*",
(const void **)&cr.ptr, &cr.size);
if (n) {
lwsl_err("%s: lookup failed %d\n", __func__, n);
goto cdone;
}
lwsl_hexdump_notice(cr.ptr, size);
if (cr.size != 53)
goto cdone;
while (!lws_cache_results_walk(&cr))
lwsl_notice(" %s (%d)\n", (const char *)cr.tag,
(int)cr.payload_len);
/*
* for /xxx and any cookie, we expect all 3 listed
*/
lwsl_user("%s: wildcard lookup 2\n", __func__);
n = lws_cache_lookup(l1, "host.com|/xxx|*",
(const void **)&cr.ptr, &cr.size);
if (n) {
lwsl_err("%s: lookup failed %d\n", __func__, n);
goto cdone;
}
if (cr.size != 84)
goto cdone;
while (!lws_cache_results_walk(&cr))
lwsl_notice(" %s (%d)\n", (const char *)cr.tag,
(int)cr.payload_len);
/*
* for /yyyy and any cookie, we expect only c1 and c3
*/
lwsl_user("%s: wildcard lookup 3\n", __func__);
n = lws_cache_lookup(l1, "host.com|/yyyy|*",
(const void **)&cr.ptr, &cr.size);
if (n) {
lwsl_err("%s: lookup failed %d\n", __func__, n);
goto cdone;
}
if (cr.size != 53)
goto cdone;
while (!lws_cache_results_walk(&cr))
lwsl_notice(" %s (%d)\n", (const char *)cr.tag,
(int)cr.payload_len);
/*
* repeat the above test, results should come from cache
*/
lwsl_user("%s: wildcard lookup 4\n", __func__);
n = lws_cache_lookup(l1, "host.com|/yyyy|*",
(const void **)&cr.ptr, &cr.size);
if (n) {
lwsl_err("%s: lookup failed %d\n", __func__, n);
goto cdone;
}
if (cr.size != 53)
goto cdone;
while (!lws_cache_results_walk(&cr))
lwsl_notice(" %s (%d)\n", (const char *)cr.tag,
(int)cr.payload_len);
/* now let's try deleting cookie 1 */
if (lws_cache_item_remove(l1, tag_cookie1))
goto cdone;
lws_cache_debug_dump(nsc);
lws_cache_debug_dump(l1);
/* with c1 gone, we should only get c3 */
lwsl_user("%s: wildcard lookup 5\n", __func__);
n = lws_cache_lookup(l1, "host.com|/|*",
(const void **)&cr.ptr, &cr.size);
if (n) {
lwsl_err("%s: lookup failed %d\n", __func__, n);
goto cdone;
}
if (cr.size != 25)
goto cdone;
while (!lws_cache_results_walk(&cr))
lwsl_notice(" %s (%d)\n", (const char *)cr.tag,
(int)cr.payload_len);
/*
* let's add a fourth cookie (third in cache now we deleted one)
*/
if (lws_cache_write_through(l1, tag_cookie4,
(const uint8_t *)cookie4, strlen(cookie4),
lws_now_usecs() + LWS_US_PER_SEC * 2, NULL)) {
lwsl_err("%s: write4 failed\n", __func__);
goto cdone;
}
/*
* for /yy and any cookie, we expect only c3
*/
lwsl_user("%s: wildcard lookup 6\n", __func__);
n = lws_cache_lookup(l1, "host.com|/yy|*",
(const void **)&cr.ptr, &cr.size);
if (n) {
lwsl_err("%s: lookup failed %d\n", __func__, n);
goto cdone;
}
if (cr.size != 25)
goto cdone;
while (!lws_cache_results_walk(&cr))
lwsl_notice(" %s (%d)\n", (const char *)cr.tag,
(int)cr.payload_len);
/*
* for /yyy and any cookie, we expect c3 and c4
*/
lwsl_user("%s: wildcard lookup 7\n", __func__);
n = lws_cache_lookup(l1, "host.com|/yyy|*",
(const void **)&cr.ptr, &cr.size);
if (n) {
lwsl_err("%s: lookup failed %d\n", __func__, n);
goto cdone;
}
if (cr.size != 57)
goto cdone;
while (!lws_cache_results_walk(&cr))
lwsl_notice(" %s (%d)\n", (const char *)cr.tag,
(int)cr.payload_len);
/* that's ok then */
lwsl_user("%s: done\n", __func__);
ret = 0;
cdone:
lws_cache_destroy(&nsc);
lws_cache_destroy(&l1);
if (ret)
lwsl_warn("%s: fail\n", __func__);
return ret;
}
#endif
/*
 * Entry point: create a minimal non-listening lws context, run the cache
 * selftests, and report the result both in the log and through the process
 * exit code.  The original always returned 0, so the ctest registration
 * (add_test in CMakeLists.txt) could never actually fail; now we exit
 * nonzero when any test failed (or none ran).
 */
int main(int argc, const char **argv)
{
	struct lws_context_creation_info info;

	memset(&info, 0, sizeof info);
	lws_cmdline_option_handle_builtin(argc, argv, &info);

	/* no listener and no client connections needed for these tests */
	info.fd_limit_per_thread = 1 + 6 + 1;
	info.port = CONTEXT_PORT_NO_LISTEN;

	lwsl_user("LWS API selftest: lws_cache\n");

	cx = lws_create_context(&info);
	if (!cx) {
		lwsl_err("lws init failed\n");
		return 1;
	}

	if (test_just_l1())
		fail++;
	if (test_just_l1_limits())
		fail++;

#if defined(LWS_WITH_CACHE_NSCOOKIEJAR)
	if (test_nsc1())
		fail++;
#endif

	lws_context_destroy(cx);

	if (tests && !fail) {
		lwsl_user("Completed: PASS\n");

		return 0;
	}

	lwsl_err("Completed: FAIL %d / %d\n", fail, tests);

	return 1;
}

View file

@ -0,0 +1,3 @@
# Netscape HTTP Cookie File
host.com FALSE / FALSE 1234 mycookie value
host.com FALSE /xxx FALSE 1234 mycookie valuexxx