Author:    Daniel Gustafsson <dgustafsson@postgresql.org>  2025-04-23 11:02:05 +0200
Committer: Daniel Gustafsson <dgustafsson@postgresql.org>  2025-04-23 11:02:05 +0200
Commit:    994a100b37ad8c2fb8282a9fce91a16b4c832277
Tree:      b08a5e1e17d950c431d6b8c1c4ba3b84a1e81d83
Parent:    0ff95e0a5be1372bfba9db284ea17c8e0e5da3a0
Allocate JsonLexContexts on the heap to avoid warnings
The stack allocated JsonLexContexts, in combination with codepaths using goto, were causing warnings when compiling with LTO enabled, as the optimizer is unable to figure out that this is safe. Rather than contorting the code with workarounds for this, simply heap allocate the structs instead, as these are not in any performance critical paths.

Author: Daniel Gustafsson <daniel@yesql.se>
Reported-by: Tom Lane <tgl@sss.pgh.pa.us>
Reviewed-by: Jacob Champion <jacob.champion@enterprisedb.com>
Reviewed-by: Tom Lane <tgl@sss.pgh.pa.us>
Discussion: https://postgr.es/m/2074634.1744839761@sss.pgh.pa.us
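The shape of the change is summarized in the minimal sketch below, using only the function names visible in the diff; the option handling and chunked parsing loop are elided, so this is an illustration of the pattern rather than the full test program. The lex context is heap allocated up front, every error path jumps to a single cleanup label, and the cleanup frees both what the context owns and the context itself.

	JsonLexContext *lex;
	int			ret = 0;

	/* heap allocation replaces the former stack variable */
	lex = calloc(1, sizeof(JsonLexContext));
	if (!lex)
		pg_fatal("out of memory");

	makeJsonLexContextIncremental(lex, PG_UTF8, need_strings);
	setJsonLexContextOwnsTokens(lex, lex_owns_tokens);

	/* ... chunked parsing; any failure does "goto cleanup" ... */

cleanup:
	freeJsonLexContext(lex);	/* releases resources owned by the context */
	free(lex);					/* releases the context allocated above */
	return ret;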
Diffstat (limited to 'src/test/modules/test_json_parser/test_json_parser_incremental.c')
-rw-r--r--  src/test/modules/test_json_parser/test_json_parser_incremental.c  23
1 file changed, 14 insertions(+), 9 deletions(-)
diff --git a/src/test/modules/test_json_parser/test_json_parser_incremental.c b/src/test/modules/test_json_parser/test_json_parser_incremental.c
index a529ee47e9b..d1e3e4ab4ea 100644
--- a/src/test/modules/test_json_parser/test_json_parser_incremental.c
+++ b/src/test/modules/test_json_parser/test_json_parser_incremental.c
@@ -84,7 +84,7 @@ main(int argc, char **argv)
char buff[BUFSIZE];
FILE *json_file;
JsonParseErrorType result;
- JsonLexContext lex;
+ JsonLexContext *lex;
StringInfoData json;
int n_read;
size_t chunk_size = DEFAULT_CHUNK_SIZE;
@@ -98,6 +98,10 @@ main(int argc, char **argv)
pg_logging_init(argv[0]);
+ lex = calloc(1, sizeof(JsonLexContext));
+ if (!lex)
+ pg_fatal("out of memory");
+
while ((c = getopt(argc, argv, "c:os")) != -1)
{
switch (c)
@@ -113,7 +117,7 @@ main(int argc, char **argv)
case 's': /* do semantic processing */
testsem = &sem;
sem.semstate = palloc(sizeof(struct DoState));
- ((struct DoState *) sem.semstate)->lex = &lex;
+ ((struct DoState *) sem.semstate)->lex = lex;
((struct DoState *) sem.semstate)->buf = makeStringInfo();
need_strings = true;
break;
@@ -131,8 +135,8 @@ main(int argc, char **argv)
exit(1);
}
- makeJsonLexContextIncremental(&lex, PG_UTF8, need_strings);
- setJsonLexContextOwnsTokens(&lex, lex_owns_tokens);
+ makeJsonLexContextIncremental(lex, PG_UTF8, need_strings);
+ setJsonLexContextOwnsTokens(lex, lex_owns_tokens);
initStringInfo(&json);
if ((json_file = fopen(testfile, PG_BINARY_R)) == NULL)
@@ -165,12 +169,12 @@ main(int argc, char **argv)
bytes_left -= n_read;
if (bytes_left > 0)
{
- result = pg_parse_json_incremental(&lex, testsem,
+ result = pg_parse_json_incremental(lex, testsem,
json.data, n_read,
false);
if (result != JSON_INCOMPLETE)
{
- fprintf(stderr, "%s\n", json_errdetail(result, &lex));
+ fprintf(stderr, "%s\n", json_errdetail(result, lex));
ret = 1;
goto cleanup;
}
@@ -178,12 +182,12 @@ main(int argc, char **argv)
}
else
{
- result = pg_parse_json_incremental(&lex, testsem,
+ result = pg_parse_json_incremental(lex, testsem,
json.data, n_read,
true);
if (result != JSON_SUCCESS)
{
- fprintf(stderr, "%s\n", json_errdetail(result, &lex));
+ fprintf(stderr, "%s\n", json_errdetail(result, lex));
ret = 1;
goto cleanup;
}
@@ -195,8 +199,9 @@ main(int argc, char **argv)
cleanup:
fclose(json_file);
- freeJsonLexContext(&lex);
+ freeJsonLexContext(lex);
free(json.data);
+ free(lex);
return ret;
}