path: root/src/backend/utils/adt
Diffstat (limited to 'src/backend/utils/adt')
-rw-r--r--   src/backend/utils/adt/bytea.c                61
-rw-r--r--   src/backend/utils/adt/jsonb_util.c           43
-rw-r--r--   src/backend/utils/adt/pg_upgrade_support.c   19
-rw-r--r--   src/backend/utils/adt/selfuncs.c             18
-rw-r--r--   src/backend/utils/adt/tid.c                   2
-rw-r--r--   src/backend/utils/adt/xml.c                  74
6 files changed, 101 insertions, 116 deletions
diff --git a/src/backend/utils/adt/bytea.c b/src/backend/utils/adt/bytea.c
index 2e539c2504e..6e7b914c563 100644
--- a/src/backend/utils/adt/bytea.c
+++ b/src/backend/utils/adt/bytea.c
@@ -182,27 +182,21 @@ bytea_overlay(bytea *t1, bytea *t2, int sp, int sl)
*
* Non-printable characters must be passed as '\nnn' (octal) and are
* converted to internal form. '\' must be passed as '\\'.
- * ereport(ERROR, ...) if bad form.
- *
- * BUGS:
- * The input is scanned twice.
- * The error checking of input is minimal.
*/
Datum
byteain(PG_FUNCTION_ARGS)
{
char *inputText = PG_GETARG_CSTRING(0);
Node *escontext = fcinfo->context;
+ size_t len = strlen(inputText);
+ size_t bc;
char *tp;
char *rp;
- int bc;
bytea *result;
/* Recognize hex input */
if (inputText[0] == '\\' && inputText[1] == 'x')
{
- size_t len = strlen(inputText);
-
bc = (len - 2) / 2 + VARHDRSZ; /* maximum possible length */
result = palloc(bc);
bc = hex_decode_safe(inputText + 2, len - 2, VARDATA(result),
@@ -213,33 +207,7 @@ byteain(PG_FUNCTION_ARGS)
}
/* Else, it's the traditional escaped style */
- for (bc = 0, tp = inputText; *tp != '\0'; bc++)
- {
- if (tp[0] != '\\')
- tp++;
- else if ((tp[0] == '\\') &&
- (tp[1] >= '0' && tp[1] <= '3') &&
- (tp[2] >= '0' && tp[2] <= '7') &&
- (tp[3] >= '0' && tp[3] <= '7'))
- tp += 4;
- else if ((tp[0] == '\\') &&
- (tp[1] == '\\'))
- tp += 2;
- else
- {
- /*
- * one backslash, not followed by another or ### valid octal
- */
- ereturn(escontext, (Datum) 0,
- (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type %s", "bytea")));
- }
- }
-
- bc += VARHDRSZ;
-
- result = (bytea *) palloc(bc);
- SET_VARSIZE(result, bc);
+ result = (bytea *) palloc(len + VARHDRSZ); /* maximum possible length */
tp = inputText;
rp = VARDATA(result);
@@ -247,21 +215,21 @@ byteain(PG_FUNCTION_ARGS)
{
if (tp[0] != '\\')
*rp++ = *tp++;
- else if ((tp[0] == '\\') &&
- (tp[1] >= '0' && tp[1] <= '3') &&
+ else if ((tp[1] >= '0' && tp[1] <= '3') &&
(tp[2] >= '0' && tp[2] <= '7') &&
(tp[3] >= '0' && tp[3] <= '7'))
{
- bc = VAL(tp[1]);
- bc <<= 3;
- bc += VAL(tp[2]);
- bc <<= 3;
- *rp++ = bc + VAL(tp[3]);
+ int v;
+
+ v = VAL(tp[1]);
+ v <<= 3;
+ v += VAL(tp[2]);
+ v <<= 3;
+ *rp++ = v + VAL(tp[3]);
tp += 4;
}
- else if ((tp[0] == '\\') &&
- (tp[1] == '\\'))
+ else if (tp[1] == '\\')
{
*rp++ = '\\';
tp += 2;
@@ -269,7 +237,7 @@ byteain(PG_FUNCTION_ARGS)
else
{
/*
- * We should never get here. The first pass should not allow it.
+ * one backslash, not followed by another or ### valid octal
*/
ereturn(escontext, (Datum) 0,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
@@ -277,6 +245,9 @@ byteain(PG_FUNCTION_ARGS)
}
}
+ bc = rp - VARDATA(result); /* actual length */
+ SET_VARSIZE(result, bc + VARHDRSZ);
+
PG_RETURN_BYTEA_P(result);
}
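
(Illustration, not part of the patch.)  The reworked byteain makes a single pass
over escaped input: it over-allocates to the input length, decodes in place, and
sets the varlena size from the bytes actually written.  Below is a minimal
standalone sketch of that decoding loop; decode_bytea_escaped and the main()
driver are hypothetical names, and the real function additionally handles the
\x hex form and reports errors through ereturn().

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Hypothetical standalone sketch: one pass over escaped-bytea input, writing
 * into a buffer sized to the worst case (the input length), since decoding
 * never expands the data.  Returns the decoded length, or -1 on bad syntax.
 */
static int
decode_bytea_escaped(const char *in, unsigned char *out)
{
	const char *tp = in;
	unsigned char *rp = out;

	while (*tp != '\0')
	{
		if (tp[0] != '\\')
			*rp++ = (unsigned char) *tp++;
		else if (tp[1] >= '0' && tp[1] <= '3' &&
				 tp[2] >= '0' && tp[2] <= '7' &&
				 tp[3] >= '0' && tp[3] <= '7')
		{
			/* "\nnn" (octal), e.g. "\001" decodes to byte 0x01 */
			int		v = tp[1] - '0';

			v = (v << 3) + (tp[2] - '0');
			v = (v << 3) + (tp[3] - '0');
			*rp++ = (unsigned char) v;
			tp += 4;
		}
		else if (tp[1] == '\\')
		{
			*rp++ = '\\';
			tp += 2;
		}
		else
			return -1;		/* lone backslash: invalid input */
	}
	return (int) (rp - out);
}

int
main(void)
{
	const char *input = "ab\\\\cd\\001";
	unsigned char *buf = malloc(strlen(input));	/* worst-case size */
	int		n = decode_bytea_escaped(input, buf);

	for (int i = 0; i < n; i++)
		printf("%02x ", buf[i]);	/* prints: 61 62 5c 63 64 01 */
	putchar('\n');
	free(buf);
	return 0;
}
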
diff --git a/src/backend/utils/adt/jsonb_util.c b/src/backend/utils/adt/jsonb_util.c
index c8b6c15e059..82b807d067a 100644
--- a/src/backend/utils/adt/jsonb_util.c
+++ b/src/backend/utils/adt/jsonb_util.c
@@ -277,22 +277,16 @@ compareJsonbContainers(JsonbContainer *a, JsonbContainer *b)
else
{
/*
- * It's safe to assume that the types differed, and that the va
- * and vb values passed were set.
- *
- * If the two values were of the same container type, then there'd
- * have been a chance to observe the variation in the number of
- * elements/pairs (when processing WJB_BEGIN_OBJECT, say). They're
- * either two heterogeneously-typed containers, or a container and
- * some scalar type.
- *
- * We don't have to consider the WJB_END_ARRAY and WJB_END_OBJECT
- * cases here, because we would have seen the corresponding
- * WJB_BEGIN_ARRAY and WJB_BEGIN_OBJECT tokens first, and
- * concluded that they don't match.
+ * It's not possible for one iterator to report end of array or
+ * object while the other one reports something else, because we
+ * would have detected a length mismatch when we processed the
+ * container-start tokens above. Likewise we can't see WJB_DONE
+ * from one but not the other. So we have two different-type
+ * containers, or a container and some scalar type, or two
+ * different scalar types. Sort on the basis of the type code.
*/
- Assert(ra != WJB_END_ARRAY && ra != WJB_END_OBJECT);
- Assert(rb != WJB_END_ARRAY && rb != WJB_END_OBJECT);
+ Assert(ra != WJB_DONE && ra != WJB_END_ARRAY && ra != WJB_END_OBJECT);
+ Assert(rb != WJB_DONE && rb != WJB_END_ARRAY && rb != WJB_END_OBJECT);
Assert(va.type != vb.type);
Assert(va.type != jbvBinary);
@@ -852,15 +846,20 @@ JsonbIteratorInit(JsonbContainer *container)
* It is our job to expand the jbvBinary representation without bothering them
* with it. However, clients should not take it upon themselves to touch array
* or Object element/pair buffers, since their element/pair pointers are
- * garbage. Also, *val will not be set when returning WJB_END_ARRAY or
- * WJB_END_OBJECT, on the assumption that it's only useful to access values
- * when recursing in.
+ * garbage.
+ *
+ * *val is not meaningful when the result is WJB_DONE, WJB_END_ARRAY or
+ * WJB_END_OBJECT. However, we set val->type = jbvNull in those cases,
+ * so that callers may assume that val->type is always well-defined.
*/
JsonbIteratorToken
JsonbIteratorNext(JsonbIterator **it, JsonbValue *val, bool skipNested)
{
if (*it == NULL)
+ {
+ val->type = jbvNull;
return WJB_DONE;
+ }
/*
* When stepping into a nested container, we jump back here to start
@@ -898,6 +897,7 @@ recurse:
* nesting).
*/
*it = freeAndGetParent(*it);
+ val->type = jbvNull;
return WJB_END_ARRAY;
}
@@ -951,6 +951,7 @@ recurse:
* of nesting).
*/
*it = freeAndGetParent(*it);
+ val->type = jbvNull;
return WJB_END_OBJECT;
}
else
@@ -995,8 +996,10 @@ recurse:
return WJB_VALUE;
}
- elog(ERROR, "invalid iterator state");
- return -1;
+ elog(ERROR, "invalid jsonb iterator state");
+ /* satisfy compilers that don't know that elog(ERROR) doesn't return */
+ val->type = jbvNull;
+ return WJB_DONE;
}
/*
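
(Illustration, not part of the patch.)  The change above guarantees that
JsonbIteratorNext() leaves val->type well-defined (jbvNull) whenever it returns
WJB_DONE, WJB_END_ARRAY or WJB_END_OBJECT.  A hypothetical backend-side caller
could then look like the sketch below; count_string_members is a made-up name,
but JsonbIteratorInit/JsonbIteratorNext and the token and jbv* identifiers are
the real backend APIs, so this only compiles inside the PostgreSQL source tree.

#include "postgres.h"
#include "utils/jsonb.h"

/* Count string values/elements anywhere in a jsonb datum (sketch). */
static int
count_string_members(Jsonb *jb)
{
	JsonbIterator *it = JsonbIteratorInit(&jb->root);
	JsonbValue	v;
	JsonbIteratorToken tok;
	int			nstrings = 0;

	while ((tok = JsonbIteratorNext(&it, &v, false)) != WJB_DONE)
	{
		/*
		 * With this patch, v.type is jbvNull rather than uninitialized when
		 * tok is WJB_END_ARRAY or WJB_END_OBJECT, so it is safe to inspect
		 * v.type on every iteration.
		 */
		if ((tok == WJB_VALUE || tok == WJB_ELEM) && v.type == jbvString)
			nstrings++;
	}
	return nstrings;
}
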
diff --git a/src/backend/utils/adt/pg_upgrade_support.c b/src/backend/utils/adt/pg_upgrade_support.c
index d44f8c262ba..a4f8b4faa90 100644
--- a/src/backend/utils/adt/pg_upgrade_support.c
+++ b/src/backend/utils/adt/pg_upgrade_support.c
@@ -21,6 +21,7 @@
#include "commands/extension.h"
#include "miscadmin.h"
#include "replication/logical.h"
+#include "replication/logicallauncher.h"
#include "replication/origin.h"
#include "replication/worker_internal.h"
#include "storage/lmgr.h"
@@ -410,3 +411,21 @@ binary_upgrade_replorigin_advance(PG_FUNCTION_ARGS)
PG_RETURN_VOID();
}
+
+/*
+ * binary_upgrade_create_conflict_detection_slot
+ *
+ * Create a replication slot to retain information necessary for conflict
+ * detection such as dead tuples, commit timestamps, and origins.
+ */
+Datum
+binary_upgrade_create_conflict_detection_slot(PG_FUNCTION_ARGS)
+{
+ CHECK_IS_BINARY_UPGRADE;
+
+ CreateConflictDetectionSlot();
+
+ ReplicationSlotRelease();
+
+ PG_RETURN_VOID();
+}
diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c
index ce6a626eba2..17fbfa9b410 100644
--- a/src/backend/utils/adt/selfuncs.c
+++ b/src/backend/utils/adt/selfuncs.c
@@ -3798,18 +3798,25 @@ estimate_multivariate_bucketsize(PlannerInfo *root, RelOptInfo *inner,
List *hashclauses,
Selectivity *innerbucketsize)
{
- List *clauses = list_copy(hashclauses);
- List *otherclauses = NIL;
- double ndistinct = 1.0;
+ List *clauses;
+ List *otherclauses;
+ double ndistinct;
if (list_length(hashclauses) <= 1)
-
+ {
/*
* Nothing to do for a single clause. Could we employ univariate
* extended stat here?
*/
return hashclauses;
+ }
+ /* "clauses" is the list of hashclauses we've not dealt with yet */
+ clauses = list_copy(hashclauses);
+ /* "otherclauses" holds clauses we are going to return to caller */
+ otherclauses = NIL;
+ /* current estimate of ndistinct */
+ ndistinct = 1.0;
while (clauses != NIL)
{
ListCell *lc;
@@ -3874,12 +3881,13 @@ estimate_multivariate_bucketsize(PlannerInfo *root, RelOptInfo *inner,
group_rel = root->simple_rel_array[relid];
}
else if (group_relid != relid)
-
+ {
/*
* Being in the group forming state we don't need other
* clauses.
*/
continue;
+ }
/*
* We're going to add the new clause to the varinfos list. We
diff --git a/src/backend/utils/adt/tid.c b/src/backend/utils/adt/tid.c
index 1b0df111717..39dab3e42df 100644
--- a/src/backend/utils/adt/tid.c
+++ b/src/backend/utils/adt/tid.c
@@ -84,7 +84,7 @@ tidin(PG_FUNCTION_ARGS)
/*
* Cope with possibility that unsigned long is wider than BlockNumber, in
* which case strtoul will not raise an error for some values that are out
- * of the range of BlockNumber. (See similar code in oidin().)
+ * of the range of BlockNumber. (See similar code in uint32in_subr().)
*/
#if SIZEOF_LONG > 4
if (cvt != (unsigned long) blockNumber &&
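
(Illustration, not part of the patch.)  The comment corrected above refers to the
usual pattern for parsing a 32-bit unsigned value with strtoul() on platforms
where unsigned long is 64 bits wide: strtoul() does not set ERANGE for values
between 2^32 and ULONG_MAX, so the result must be compared against its 32-bit
cast.  The standalone sketch below (parse_uint32 is a hypothetical name, not the
backend's uint32in_subr) shows that check in isolation.

#include <errno.h>
#include <limits.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Parse str as uint32; return 0 on success, -1 on syntax error or overflow. */
static int
parse_uint32(const char *str, uint32_t *out)
{
	char	   *end;
	unsigned long cvt;

	errno = 0;
	cvt = strtoul(str, &end, 10);
	if (errno == ERANGE || end == str || *end != '\0')
		return -1;		/* not a number, or beyond unsigned long */

	*out = (uint32_t) cvt;
#if ULONG_MAX > UINT32_MAX
	/* wider unsigned long: catch values strtoul accepted but uint32 cannot hold */
	if (cvt != (unsigned long) *out)
		return -1;
#endif
	return 0;
}

int
main(void)
{
	uint32_t	val;

	printf("%d\n", parse_uint32("4294967295", &val));	/* 0: fits in 32 bits */
	printf("%d\n", parse_uint32("4294967296", &val));	/* -1 on LP64 platforms */
	return 0;
}
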
diff --git a/src/backend/utils/adt/xml.c b/src/backend/utils/adt/xml.c
index f7b731825fc..182e8f75db7 100644
--- a/src/backend/utils/adt/xml.c
+++ b/src/backend/utils/adt/xml.c
@@ -1769,7 +1769,7 @@ xml_doctype_in_content(const xmlChar *str)
* xmloption_arg, but a DOCTYPE node in the input can force DOCUMENT mode).
*
* If parsed_nodes isn't NULL and we parse in CONTENT mode, the list
- * of parsed nodes from the xmlParseInNodeContext call will be returned
+ * of parsed nodes from the xmlParseBalancedChunkMemory call will be returned
* to *parsed_nodes. (It is caller's responsibility to free that.)
*
* Errors normally result in ereport(ERROR), but if escontext is an
@@ -1795,6 +1795,7 @@ xml_parse(text *data, XmlOptionType xmloption_arg,
PgXmlErrorContext *xmlerrcxt;
volatile xmlParserCtxtPtr ctxt = NULL;
volatile xmlDocPtr doc = NULL;
+ volatile int save_keep_blanks = -1;
/*
* This step looks annoyingly redundant, but we must do it to have a
@@ -1822,7 +1823,6 @@ xml_parse(text *data, XmlOptionType xmloption_arg,
PG_TRY();
{
bool parse_as_document = false;
- int options;
int res_code;
size_t count = 0;
xmlChar *version = NULL;
@@ -1853,18 +1853,6 @@ xml_parse(text *data, XmlOptionType xmloption_arg,
parse_as_document = true;
}
- /*
- * Select parse options.
- *
- * Note that here we try to apply DTD defaults (XML_PARSE_DTDATTR)
- * according to SQL/XML:2008 GR 10.16.7.d: 'Default values defined by
- * internal DTD are applied'. As for external DTDs, we try to support
- * them too (see SQL/XML:2008 GR 10.16.7.e), but that doesn't really
- * happen because xmlPgEntityLoader prevents it.
- */
- options = XML_PARSE_NOENT | XML_PARSE_DTDATTR
- | (preserve_whitespace ? 0 : XML_PARSE_NOBLANKS);
-
/* initialize output parameters */
if (parsed_xmloptiontype != NULL)
*parsed_xmloptiontype = parse_as_document ? XMLOPTION_DOCUMENT :
@@ -1874,11 +1862,26 @@ xml_parse(text *data, XmlOptionType xmloption_arg,
if (parse_as_document)
{
+ int options;
+
+ /* set up parser context used by xmlCtxtReadDoc */
ctxt = xmlNewParserCtxt();
if (ctxt == NULL || xmlerrcxt->err_occurred)
xml_ereport(xmlerrcxt, ERROR, ERRCODE_OUT_OF_MEMORY,
"could not allocate parser context");
+ /*
+ * Select parse options.
+ *
+ * Note that here we try to apply DTD defaults (XML_PARSE_DTDATTR)
+ * according to SQL/XML:2008 GR 10.16.7.d: 'Default values defined
+ * by internal DTD are applied'. As for external DTDs, we try to
+ * support them too (see SQL/XML:2008 GR 10.16.7.e), but that
+ * doesn't really happen because xmlPgEntityLoader prevents it.
+ */
+ options = XML_PARSE_NOENT | XML_PARSE_DTDATTR
+ | (preserve_whitespace ? 0 : XML_PARSE_NOBLANKS);
+
doc = xmlCtxtReadDoc(ctxt, utf8string,
NULL, /* no URL */
"UTF-8",
@@ -1900,10 +1903,7 @@ xml_parse(text *data, XmlOptionType xmloption_arg,
}
else
{
- xmlNodePtr root;
- xmlNodePtr oldroot PG_USED_FOR_ASSERTS_ONLY;
-
- /* set up document with empty root node to be the context node */
+ /* set up document that xmlParseBalancedChunkMemory will add to */
doc = xmlNewDoc(version);
if (doc == NULL || xmlerrcxt->err_occurred)
xml_ereport(xmlerrcxt, ERROR, ERRCODE_OUT_OF_MEMORY,
@@ -1916,43 +1916,22 @@ xml_parse(text *data, XmlOptionType xmloption_arg,
"could not allocate XML document");
doc->standalone = standalone;
- root = xmlNewNode(NULL, (const xmlChar *) "content-root");
- if (root == NULL || xmlerrcxt->err_occurred)
- xml_ereport(xmlerrcxt, ERROR, ERRCODE_OUT_OF_MEMORY,
- "could not allocate xml node");
-
- /*
- * This attaches root to doc, so we need not free it separately;
- * and there can't yet be any old root to free.
- */
- oldroot = xmlDocSetRootElement(doc, root);
- Assert(oldroot == NULL);
+ /* set parse options --- have to do this the ugly way */
+ save_keep_blanks = xmlKeepBlanksDefault(preserve_whitespace ? 1 : 0);
/* allow empty content */
if (*(utf8string + count))
{
- xmlNodePtr node_list = NULL;
- xmlParserErrors res;
-
- res = xmlParseInNodeContext(root,
- (char *) utf8string + count,
- strlen((char *) utf8string + count),
- options,
- &node_list);
-
- if (res != XML_ERR_OK || xmlerrcxt->err_occurred)
+ res_code = xmlParseBalancedChunkMemory(doc, NULL, NULL, 0,
+ utf8string + count,
+ parsed_nodes);
+ if (res_code != 0 || xmlerrcxt->err_occurred)
{
- xmlFreeNodeList(node_list);
xml_errsave(escontext, xmlerrcxt,
ERRCODE_INVALID_XML_CONTENT,
"invalid XML content");
goto fail;
}
-
- if (parsed_nodes != NULL)
- *parsed_nodes = node_list;
- else
- xmlFreeNodeList(node_list);
}
}
@@ -1961,6 +1940,8 @@ fail:
}
PG_CATCH();
{
+ if (save_keep_blanks != -1)
+ xmlKeepBlanksDefault(save_keep_blanks);
if (doc != NULL)
xmlFreeDoc(doc);
if (ctxt != NULL)
@@ -1972,6 +1953,9 @@ fail:
}
PG_END_TRY();
+ if (save_keep_blanks != -1)
+ xmlKeepBlanksDefault(save_keep_blanks);
+
if (ctxt != NULL)
xmlFreeParserCtxt(ctxt);
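
(Illustration, not part of the patch.)  The CONTENT-mode path now relies on the
libxml2 entry point xmlParseBalancedChunkMemory() instead of building a dummy
root node for xmlParseInNodeContext(), and it controls whitespace through the
library-wide xmlKeepBlanksDefault() setting, which is why the patch saves and
restores that value.  The standalone sketch below shows those two calls outside
the backend; compile it against libxml2 (pkg-config --cflags --libs libxml-2.0).

#include <stdio.h>
#include <libxml/parser.h>
#include <libxml/tree.h>

int
main(void)
{
	const xmlChar *fragment = (const xmlChar *) "<a>one</a> and <b>two</b>";
	xmlDocPtr	doc;
	xmlNodePtr	nodes = NULL;
	int			old_keep_blanks;
	int			res;

	/* whitespace handling is a global parser default: save it, then restore */
	old_keep_blanks = xmlKeepBlanksDefault(1);

	doc = xmlNewDoc((const xmlChar *) "1.0");
	res = xmlParseBalancedChunkMemory(doc, NULL, NULL, 0, fragment, &nodes);

	if (res == 0)				/* 0 means the chunk was well-balanced */
	{
		for (xmlNodePtr n = nodes; n != NULL; n = n->next)
			printf("node type %d, name %s\n", (int) n->type, (const char *) n->name);
	}

	xmlFreeNodeList(nodes);		/* caller owns the returned node list */
	xmlFreeDoc(doc);
	xmlKeepBlanksDefault(old_keep_blanks);
	return res;
}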