/*
 * lws-minimal-ws-client-spam
 *
 * Written in 2010-2019 by Andy Green <andy@warmcat.com>
 *
 * This file is made available under the Creative Commons CC0 1.0
 * Universal Public Domain Dedication.
 *
 * This demonstrates a ws client that makes continuous mass ws connections
 * asynchronously
 */
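
/*
 * Command-line options handled in main() below:
 *
 *   -d <level>       set the lws logging bitfield
 *   --server <name>  connect to this server instead of libwebsockets.org
 *                    (also allows self-signed certs)
 *   --port <port>    connect to this port (default 443)
 *   -l <limit>       total number of connection attempts (default 15)
 *   -c <count>       connections held open concurrently (default 3)
 *   -n               disable tls for the connections
 */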

#include <libwebsockets.h>
#include <string.h>
#include <signal.h>
#include <pthread.h>

enum {
	CLIENT_IDLE,
	CLIENT_CONNECTING,
	CLIENT_AWAITING_SEND,
};

struct client {
	struct lws *wsi;
	int index;
	int state;
};

static struct lws_context *context;
static struct client clients[200];
static int interrupted, port = 443, ssl_connection = LCCSCF_USE_SSL;
static const char *server_address = "libwebsockets.org",
		  *pro = "lws-mirror-protocol";
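
/*
 * Counters shared by all connections: "tries" counts connection attempts,
 * "est", "closed" and "errors" track their outcomes, "conn" hands each
 * established connection an ordinal, and "limit" caps the total number of
 * attempts before the test is allowed to finish.
 */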
static int concurrent = 3, conn, tries, est, errors, closed, sent, limit = 15;

struct pss {
	int conn;
};
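
/*
 * Start an asynchronous client connection for slot "idx", unless the overall
 * attempt limit has already been reached.  Returns nonzero only if the
 * connection could not even be started.
 */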
static int
connect_client(int idx)
{
	struct lws_client_connect_info i;

	if (tries == limit) {
		lwsl_user("Reached limit... finishing\n");
		return 0;
	}

	memset(&i, 0, sizeof(i));

	i.context = context;
	i.port = port;
	i.address = server_address;
	i.path = "/";
	i.host = i.address;
	i.origin = i.address;
	i.ssl_connection = ssl_connection;
	i.protocol = pro;
	i.local_protocol_name = pro;
	i.pwsi = &clients[idx].wsi;

	clients[idx].state = CLIENT_CONNECTING;
	tries++;

	if (!lws_client_connect_via_info(&i)) {
		clients[idx].wsi = NULL;
		clients[idx].state = CLIENT_IDLE;

		return 1;
	}

	return 0;
}
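
/*
 * The protocol callback drives the whole test: it opens the first batch of
 * connections at protocol init, sends a single message on each connection
 * once it becomes writeable, and replaces connections as they close or fail
 * until "limit" attempts have been made.
 */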
static int
callback_minimal_spam(struct lws *wsi, enum lws_callback_reasons reason,
		      void *user, void *in, size_t len)
{
	struct pss *pss = (struct pss *)user;
	uint8_t ping[LWS_PRE + 125];
	int n, m;

	switch (reason) {
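
	/*
	 * One-time init for this protocol: open the initial batch of
	 * "concurrent" client connections; the rest are opened as these
	 * close or fail.
	 */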
case LWS_CALLBACK_PROTOCOL_INIT:
|
|
|
|
for (n = 0; n < concurrent; n++) {
|
|
|
|
clients[n].index = n;
|
|
|
|
connect_client(n);
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
|
|
|
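
	/*
	 * A connection failed before it was established: log it, free the
	 * slot it was using and retry in that slot (connect_client() itself
	 * enforces the overall attempt limit).  Once every attempt has been
	 * accounted for, stop the event loop.
	 */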
	case LWS_CALLBACK_CLIENT_CONNECTION_ERROR:
		errors++;
		lwsl_err("CLIENT_CONNECTION_ERROR: %s (try %d, est %d, closed %d, err %d)\n",
			 in ? (char *)in : "(null)", tries, est, closed, errors);
		for (n = 0; n < concurrent; n++) {
			if (clients[n].wsi == wsi) {
				clients[n].wsi = NULL;
				clients[n].state = CLIENT_IDLE;
				connect_client(n);
				break;
			}
		}
		if (tries == closed + errors)
			interrupted = 1;
		break;

	/* --- client callbacks --- */
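
	/*
	 * The connection is established: give it a connection ordinal in its
	 * pss and ask for a writeable callback so it can send its message.
	 */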
	case LWS_CALLBACK_CLIENT_ESTABLISHED:
		lwsl_user("%s: established (try %d, est %d, closed %d, err %d)\n",
			  __func__, tries, est, closed, errors);
		est++;
		pss->conn = conn++;
		lws_callback_on_writable(wsi);
		break;
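
	/*
	 * A connection closed: if every attempt has now completed, stop the
	 * event loop; if attempts remain, reuse the freed slot for another
	 * connection.
	 */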
	case LWS_CALLBACK_CLIENT_CLOSED:
		closed++;
		if (tries == closed + errors)
			interrupted = 1;
		if (tries == limit) {
			lwsl_user("%s: leaving CLOSED (try %d, est %d, sent %d, closed %d, err %d)\n",
				  __func__, tries, est, sent, closed, errors);
			break;
		}

		for (n = 0; n < concurrent; n++) {
			if (clients[n].wsi == wsi) {
				connect_client(n);
				lwsl_user("%s: reopening (try %d, est %d, closed %d, err %d)\n",
					  __func__, tries, est, closed, errors);
				break;
			}
		}
		if (n == concurrent)
			lwsl_user("CLOSED: can't find client wsi\n");
		break;
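
	/*
	 * Send one short text message.  lws requires LWS_PRE bytes of
	 * headroom in front of the payload, hence the buffer offsets; after
	 * the write, the wsi is scheduled to close asynchronously via
	 * lws_set_timeout().
	 */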
	case LWS_CALLBACK_CLIENT_WRITEABLE:
		n = lws_snprintf((char *)ping + LWS_PRE, sizeof(ping) - LWS_PRE,
				 "hello %d", pss->conn);

		m = lws_write(wsi, ping + LWS_PRE, n, LWS_WRITE_TEXT);
		if (m < n) {
			lwsl_err("sending ping failed: %d\n", m);

			return -1;
		}
		lws_set_timeout(wsi, PENDING_TIMEOUT_USER_OK, LWS_TO_KILL_ASYNC);
		break;

	default:
		break;
	}

	return lws_callback_http_dummy(wsi, reason, user, in, len);
}
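
/* the single user protocol: name, callback and per-connection pss size */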
static const struct lws_protocols protocols[] = {
	{
		"lws-spam-test",
		callback_minimal_spam,
		sizeof(struct pss),
		0,
	},
	{ NULL, NULL, 0, 0 }
};

static void
sigint_handler(int sig)
{
	interrupted = 1;
}

int main(int argc, const char **argv)
{
	struct lws_context_creation_info info;
	const char *p;
	int n = 0, logs = LLL_USER | LLL_ERR | LLL_WARN | LLL_NOTICE
			/* for LLL_ verbosity above NOTICE to be built into lws,
			 * lws must have been configured and built with
			 * -DCMAKE_BUILD_TYPE=DEBUG instead of =RELEASE */
			/* | LLL_INFO */ /* | LLL_PARSER */ /* | LLL_HEADER */
			/* | LLL_EXT */ /* | LLL_CLIENT */ /* | LLL_LATENCY */
			/* | LLL_DEBUG */;

	signal(SIGINT, sigint_handler);

	if ((p = lws_cmdline_option(argc, argv, "-d")))
		logs = atoi(p);

	lws_set_log_level(logs, NULL);
	lwsl_user("LWS minimal ws client SPAM\n");

	memset(&info, 0, sizeof info); /* otherwise uninitialized garbage */
	info.options = LWS_SERVER_OPTION_DO_SSL_GLOBAL_INIT;
	info.port = CONTEXT_PORT_NO_LISTEN; /* we do not run any server */
	info.protocols = protocols;

#if defined(LWS_WITH_MBEDTLS)
	/*
	 * OpenSSL uses the system trust store.  mbedTLS has to be told which
	 * CA to trust explicitly.
	 */
	info.client_ssl_ca_filepath = "./libwebsockets.org.cer";
#endif
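
	/* allow the defaults above to be overridden from the command line */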
if ((p = lws_cmdline_option(argc, argv, "--server"))) {
|
|
|
|
server_address = p;
|
|
|
|
ssl_connection |= LCCSCF_ALLOW_SELFSIGNED;
|
|
|
|
}
|
|
|
|
|
|
|
|
if ((p = lws_cmdline_option(argc, argv, "--port")))
|
|
|
|
port = atoi(p);
|
|
|
|
|
|
|
|
if ((p = lws_cmdline_option(argc, argv, "-l")))
|
|
|
|
limit = atoi(p);
|
|
|
|
|
|
|
|
if ((p = lws_cmdline_option(argc, argv, "-c")))
|
|
|
|
concurrent = atoi(p);
|
|
|
|
|
|
|
|
if (lws_cmdline_option(argc, argv, "-n")) {
|
|
|
|
ssl_connection = 0;
|
|
|
|
info.options = 0;
|
|
|
|
}
|
|
|
|
|
2019-08-08 20:05:03 +01:00
|
|
|

	if (concurrent < 0 ||
	    concurrent > (int)LWS_ARRAY_SIZE(clients)) {
		lwsl_err("%s: -c %d larger than max concurrency %d\n", __func__,
			 concurrent, (int)LWS_ARRAY_SIZE(clients));

		return 1;
	}

	/*
	 * since we know this lws context is only ever going to be used with
	 * a few client wsis / fds / sockets at a time, let lws know it
	 * doesn't have to use the default allocations for fd tables up to
	 * ulimit -n.  It will just allocate for 1 internal and n (+ 1 http2
	 * nwsi) that we will use.
	 */
	info.fd_limit_per_thread = 1 + concurrent + 1;

	context = lws_create_context(&info);
	if (!context) {
		lwsl_err("lws init failed\n");
		return 1;
	}

	while (n >= 0 && !interrupted)
		n = lws_service(context, 0);

	lws_context_destroy(context);
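
	/*
	 * The run only counts as a success if all "limit" attempts were made
	 * and every one of them ended in a normal close (no connection
	 * errors).
	 */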
	if (tries == limit && closed == tries) {
		lwsl_user("Completed\n");
		return 0;
	}

	lwsl_err("Failed\n");

	return 1;
}