tokenize: flag for # as rest of line comment
Add a flag for tokenizing config where '#' marks the rest of the line as a comment (e.g., /etc/resolv.conf).
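For illustration only (not part of the commit), a minimal sketch of how the new flag might be used on resolv.conf-style input; it assumes the usual caller-filled lws_tokenize fields (start, len, flags), per-token results in token / token_len, and that LWS_TOKZE_ENDED is 0 with errors reported as negative values:

#include <libwebsockets.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        /* hypothetical config text: '#' starts a comment running to end of line */
        const char *conf = "# global options\n"
                           "timeout 5 # seconds\n"
                           "retries 3\n";
        struct lws_tokenize ts;
        lws_tokenize_elem e;

        memset(&ts, 0, sizeof(ts));
        ts.start = conf;
        ts.len   = strlen(conf);
        ts.flags = LWS_TOKENIZE_F_HASH_COMMENT; /* the flag added by this commit */

        do {
                e = lws_tokenize(&ts);
                if (e == LWS_TOKZE_TOKEN || e == LWS_TOKZE_INTEGER)
                        /* prints timeout, 5, retries, 3; both comments are skipped */
                        printf("%.*s\n", (int)ts.token_len, ts.token);
        } while (e > LWS_TOKZE_ENDED); /* stop at ENDED (0) or a negative error */

        return 0;
}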
parent 35b23c3996
commit 8b37f98feb
3 changed files with 32 additions and 9 deletions
@@ -39,6 +39,8 @@
 #define LWS_TOKENIZE_F_NO_FLOATS		(1 << 5)
 /* Instead of LWS_TOKZE_INTEGER, report integers as any other string token */
 #define LWS_TOKENIZE_F_NO_INTEGERS		(1 << 6)
+/* # makes the rest of the line a comment */
+#define LWS_TOKENIZE_F_HASH_COMMENT		(1 << 7)
 
 typedef enum {
@@ -76,7 +78,7 @@ enum lws_tokenize_delimiter_tracking {
 	LWSTZ_DT_NEED_NEXT_CONTENT,
 };
 
-struct lws_tokenize {
+typedef struct lws_tokenize {
 	const char *start; /**< set to the start of the string to tokenize */
 	const char *token; /**< the start of an identified token or delimiter */
 	int len; /**< set to the length of the string to tokenize */
@@ -84,7 +86,9 @@ struct lws_tokenize {
 
 	int flags; /**< optional LWS_TOKENIZE_F_ flags, or 0 */
 	int delim;
-};
+
+	lws_tokenize_elem e; /**< convenient for storing lws_tokenize return */
+} lws_tokenize_t;
 
 /**
  * lws_tokenize() - breaks down a string into tokens and delimiters in-place
@@ -652,17 +652,13 @@ typedef enum {
 	LWS_TOKZS_TOKEN_POST_TERMINAL
 } lws_tokenize_state;
 
-#if defined(LWS_AMAZON_RTOS)
 lws_tokenize_elem
-#else
-int
-#endif
 lws_tokenize(struct lws_tokenize *ts)
 {
 	const char *rfc7230_delims = "(),/:;<=>?@[\\]{}";
 	lws_tokenize_state state = LWS_TOKZS_LEADING_WHITESPACE;
 	char c, flo = 0, d_minus = '-', d_dot = '.', s_minus = '\0',
-	     s_dot = '\0';
+	     s_dot = '\0', skipping = 0;
 	signed char num = ts->flags & LWS_TOKENIZE_F_NO_INTEGERS ? 0 : -1;
 	int utf8 = 0;
@@ -691,6 +687,22 @@ lws_tokenize(struct lws_tokenize *ts)
 		if (!c)
 			break;
 
+		if (skipping) {
+			if (c != '\r' && c != '\n')
+				continue;
+			else
+				skipping = 0;
+		}
+
+		/* comment */
+
+		if (ts->flags & LWS_TOKENIZE_F_HASH_COMMENT &&
+		    state != LWS_TOKZS_QUOTED_STRING &&
+		    c == '#') {
+			skipping = 1;
+			continue;
+		}
+
 		/* whitespace */
 
 		if (c == ' ' || c == '\t' || c == '\n' || c == '\r' ||
@@ -169,8 +169,11 @@ struct expected expected1[] = {
 	{ LWS_TOKZE_TOKEN_NAME_EQUALS, "a", 1 },
 	{ LWS_TOKZE_TOKEN, "5", 1 },
 	{ LWS_TOKZE_ENDED, "", 0 },
-}
+},
+expected17[] = {
+	{ LWS_TOKZE_TOKEN, "hello", 5 },
+	{ LWS_TOKZE_ENDED, "", 0 },
+}
 ;
 
 struct tests tests[] = {
@@ -247,6 +250,10 @@ struct tests tests[] = {
 		"a=5", expected16, LWS_ARRAY_SIZE(expected16),
 		LWS_TOKENIZE_F_NO_INTEGERS
 	},
+	{
+		"# comment1\r\nhello #comment2\r\n#comment3", expected17,
+		LWS_ARRAY_SIZE(expected17), LWS_TOKENIZE_F_HASH_COMMENT
+	}
 };
 
 /*
@@ -299,6 +306,7 @@ int main(int argc, const char **argv)
 		int m = 0, in_fail = fail;
 		struct expected *exp = tests[n].exp;
 
 		memset(&ts, 0, sizeof(ts));
 		ts.start = tests[n].string;
 		ts.len = strlen(ts.start);
+		ts.flags = tests[n].flags;
@@ -401,7 +409,6 @@ int main(int argc, const char **argv)
 			printf("\t}\n");
 		}
 
-
 	lwsl_user("Completed: PASS: %d, FAIL: %d\n", ok, fail);
 
 	return !(ok && !fail);