author     Tom Lane <tgl@sss.pgh.pa.us>  2007-04-26 23:24:57 +0000
committer  Tom Lane <tgl@sss.pgh.pa.us>  2007-04-26 23:24:57 +0000
commit     b26329654e15e93278c809c906221d6f364f9b63 (patch)
tree       52bbc8ebd5f23fc0dada05b544e4dc7f8b8af7bf /src/backend/utils/mmgr/portalmem.c
parent     3f92fd65244db0cd8fef46e88ef79c6acc3c84dd (diff)
download   postgresql-b26329654e15e93278c809c906221d6f364f9b63.tar.gz
           postgresql-b26329654e15e93278c809c906221d6f364f9b63.zip
Fix dynahash.c to suppress hash bucket splits while a hash_seq_search() scan is in progress on the same hashtable. This seems the least invasive way to fix the recently-recognized problem that a split could cause the scan to visit entries twice or (with much lower probability) miss them entirely. The only field-reported problem caused by this is the "failed to re-find shared lock object" PANIC in COMMIT PREPARED reported by Michel Dorochevsky, which was caused by multiply visited entries. However, it seems certain that mdsync() is vulnerable to missing required fsync's due to missed entries, and I am fearful that RelationCacheInitializePhase2() might be at risk as well. Because of that and the generalized hazard presented by this bug, back-patch to all supported branches.

Along the way, fix pg_prepared_statement() and pg_cursor() to not assume that the hashtables they are examining will stay static between calls. This is risky regardless of the newly noted dynahash problem, because hash_seq_search() has never promised to cope with deletion of table entries other than the just-returned one. There may be no bug here, because the only supported way to call these functions is via ExecMakeTableFunctionResult(), which will cycle them to completion before doing anything very interesting, but it seems best to get rid of the assumption. This affects 8.2 and HEAD only, since those functions weren't there earlier.
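As background for the AtCommit_Portals hunk in the diff below, here is a minimal sketch (not part of this commit) of the scan-restart idiom it adopts. hash_seq_search() only promises to tolerate deletion of the just-returned entry; if processing an entry can drop arbitrary other entries, the safe course is to abandon the scan with hash_seq_term() and start a fresh one. The DemoEnt type, the demo_table argument, and the "condemned" flag are hypothetical placeholders, not PostgreSQL code.

    #include "postgres.h"
    #include "utils/hsearch.h"

    /* Hypothetical entry type: the hash key must come first, as dynahash requires. */
    typedef struct DemoEnt
    {
        char    key[64];        /* string hash key */
        bool    condemned;      /* should this entry be removed? */
    } DemoEnt;

    /*
     * Remove all condemned entries from a hypothetical dynahash table,
     * restarting the scan whenever a removal might have cascaded to
     * other entries, as the AtCommit_Portals change in this commit does.
     */
    static void
    drop_condemned_entries(HTAB *demo_table)
    {
        HASH_SEQ_STATUS status;
        DemoEnt    *ent;

        hash_seq_init(&status, demo_table);
        while ((ent = (DemoEnt *) hash_seq_search(&status)) != NULL)
        {
            if (!ent->condemned)
                continue;

            /* Deleting the just-returned entry has always been allowed. */
            (void) hash_search(demo_table, ent->key, HASH_REMOVE, NULL);

            /*
             * If the removal could have triggered further deletions (as
             * PortalDrop() can), abandon the now-stale scan and begin a new
             * one.  hash_seq_term() also releases the bucket-split
             * suppression that the new dynahash.c code holds while a scan
             * is open, so abandoned scans must not simply walk away.
             */
            hash_seq_term(&status);
            hash_seq_init(&status, demo_table);
        }
    }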
Diffstat (limited to 'src/backend/utils/mmgr/portalmem.c')
-rw-r--r--  src/backend/utils/mmgr/portalmem.c  141
1 file changed, 71 insertions, 70 deletions
diff --git a/src/backend/utils/mmgr/portalmem.c b/src/backend/utils/mmgr/portalmem.c
index 4f8cba9ba5a..3e3505caf05 100644
--- a/src/backend/utils/mmgr/portalmem.c
+++ b/src/backend/utils/mmgr/portalmem.c
@@ -12,7 +12,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mmgr/portalmem.c,v 1.97 2006/11/23 01:14:59 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mmgr/portalmem.c,v 1.97.2.1 2007/04/26 23:24:57 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -22,7 +22,6 @@
#include "access/xact.h"
#include "catalog/pg_type.h"
#include "commands/portalcmds.h"
-#include "funcapi.h"
#include "miscadmin.h"
#include "utils/builtins.h"
#include "utils/memutils.h"
@@ -577,7 +576,9 @@ AtCommit_Portals(void)
/* Zap all non-holdable portals */
PortalDrop(portal, true);
- /* Restart the iteration */
+ /* Restart the iteration in case that led to other drops */
+ /* XXX is this really necessary? */
+ hash_seq_term(&status);
hash_seq_init(&status, PortalHashTable);
}
}
@@ -806,79 +807,68 @@ AtSubCleanup_Portals(SubTransactionId mySubid)
Datum
pg_cursor(PG_FUNCTION_ARGS)
{
- FuncCallContext *funcctx;
- HASH_SEQ_STATUS *hash_seq;
+ ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
+ TupleDesc tupdesc;
+ Tuplestorestate *tupstore;
+ MemoryContext per_query_ctx;
+ MemoryContext oldcontext;
+ HASH_SEQ_STATUS hash_seq;
PortalHashEnt *hentry;
- /* stuff done only on the first call of the function */
- if (SRF_IS_FIRSTCALL())
- {
- MemoryContext oldcontext;
- TupleDesc tupdesc;
-
- /* create a function context for cross-call persistence */
- funcctx = SRF_FIRSTCALL_INIT();
-
- /*
- * switch to memory context appropriate for multiple function calls
- */
- oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
-
- if (PortalHashTable)
- {
- hash_seq = (HASH_SEQ_STATUS *) palloc(sizeof(HASH_SEQ_STATUS));
- hash_seq_init(hash_seq, PortalHashTable);
- funcctx->user_fctx = (void *) hash_seq;
- }
- else
- funcctx->user_fctx = NULL;
-
- /*
- * build tupdesc for result tuples. This must match the definition of
- * the pg_cursors view in system_views.sql
- */
- tupdesc = CreateTemplateTupleDesc(6, false);
- TupleDescInitEntry(tupdesc, (AttrNumber) 1, "name",
- TEXTOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 2, "statement",
- TEXTOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 3, "is_holdable",
- BOOLOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 4, "is_binary",
- BOOLOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 5, "is_scrollable",
- BOOLOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 6, "creation_time",
- TIMESTAMPTZOID, -1, 0);
-
- funcctx->tuple_desc = BlessTupleDesc(tupdesc);
- MemoryContextSwitchTo(oldcontext);
- }
-
- /* stuff done on every call of the function */
- funcctx = SRF_PERCALL_SETUP();
- hash_seq = (HASH_SEQ_STATUS *) funcctx->user_fctx;
+ /* check to see if caller supports us returning a tuplestore */
+ if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("set-valued function called in context that cannot accept a set")));
+ if (!(rsinfo->allowedModes & SFRM_Materialize))
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("materialize mode required, but it is not " \
+ "allowed in this context")));
+
+ /* need to build tuplestore in query context */
+ per_query_ctx = rsinfo->econtext->ecxt_per_query_memory;
+ oldcontext = MemoryContextSwitchTo(per_query_ctx);
- /* if the hash table is uninitialized, we're done */
- if (hash_seq == NULL)
- SRF_RETURN_DONE(funcctx);
+ /*
+ * build tupdesc for result tuples. This must match the definition of
+ * the pg_cursors view in system_views.sql
+ */
+ tupdesc = CreateTemplateTupleDesc(6, false);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 1, "name",
+ TEXTOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 2, "statement",
+ TEXTOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 3, "is_holdable",
+ BOOLOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 4, "is_binary",
+ BOOLOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 5, "is_scrollable",
+ BOOLOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 6, "creation_time",
+ TIMESTAMPTZOID, -1, 0);
- /* loop until we find a visible portal or hit the end of the list */
- while ((hentry = hash_seq_search(hash_seq)) != NULL)
- {
- if (hentry->portal->visible)
- break;
- }
+ /*
+ * We put all the tuples into a tuplestore in one scan of the hashtable.
+ * This avoids any issue of the hashtable possibly changing between calls.
+ */
+ tupstore = tuplestore_begin_heap(true, false, work_mem);
- if (hentry)
+ hash_seq_init(&hash_seq, PortalHashTable);
+ while ((hentry = hash_seq_search(&hash_seq)) != NULL)
{
- Portal portal;
- Datum result;
+ Portal portal = hentry->portal;
HeapTuple tuple;
Datum values[6];
bool nulls[6];
- portal = hentry->portal;
+ /* report only "visible" entries */
+ if (!portal->visible)
+ continue;
+
+ /* generate junk in short-term context */
+ MemoryContextSwitchTo(oldcontext);
+
MemSet(nulls, 0, sizeof(nulls));
values[0] = DirectFunctionCall1(textin, CStringGetDatum(portal->name));
@@ -892,10 +882,21 @@ pg_cursor(PG_FUNCTION_ARGS)
values[4] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_SCROLL);
values[5] = TimestampTzGetDatum(portal->creation_time);
- tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls);
- result = HeapTupleGetDatum(tuple);
- SRF_RETURN_NEXT(funcctx, result);
+ tuple = heap_form_tuple(tupdesc, values, nulls);
+
+ /* switch to appropriate context while storing the tuple */
+ MemoryContextSwitchTo(per_query_ctx);
+ tuplestore_puttuple(tupstore, tuple);
}
- SRF_RETURN_DONE(funcctx);
+ /* clean up and return the tuplestore */
+ tuplestore_donestoring(tupstore);
+
+ MemoryContextSwitchTo(oldcontext);
+
+ rsinfo->returnMode = SFRM_Materialize;
+ rsinfo->setResult = tupstore;
+ rsinfo->setDesc = tupdesc;
+
+ return (Datum) 0;
}
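
For anyone adapting another set-returning function along the same lines, the sketch below (not from this commit) condenses the materialize-mode protocol that the rewritten pg_cursor() follows: check that the caller accepts SFRM_Materialize, build the tuple descriptor and a tuplestore in the per-query memory context, fill the tuplestore in a single pass so nothing depends on the scanned structure staying static between calls, and hand the result back through ReturnSetInfo. The function name demo_materialized_set and its single text column are hypothetical; the calls themselves are the same 8.2-era backend APIs used in the diff above.

    #include "postgres.h"

    #include "access/heapam.h"
    #include "catalog/pg_type.h"
    #include "funcapi.h"
    #include "miscadmin.h"
    #include "nodes/execnodes.h"
    #include "utils/builtins.h"
    #include "utils/tuplestore.h"

    PG_MODULE_MAGIC;                /* needed if built as a loadable module */

    PG_FUNCTION_INFO_V1(demo_materialized_set);

    Datum
    demo_materialized_set(PG_FUNCTION_ARGS)
    {
        ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
        TupleDesc   tupdesc;
        Tuplestorestate *tupstore;
        MemoryContext per_query_ctx;
        MemoryContext oldcontext;
        Datum       values[1];
        bool        nulls[1];

        /* The caller must be able to accept a materialized result set. */
        if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo) ||
            !(rsinfo->allowedModes & SFRM_Materialize))
            ereport(ERROR,
                    (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                     errmsg("materialize mode required, but it is not "
                            "allowed in this context")));

        /* Tupdesc and tuplestore must outlive this call: per-query context. */
        per_query_ctx = rsinfo->econtext->ecxt_per_query_memory;
        oldcontext = MemoryContextSwitchTo(per_query_ctx);

        tupdesc = CreateTemplateTupleDesc(1, false);
        TupleDescInitEntry(tupdesc, (AttrNumber) 1, "item", TEXTOID, -1, 0);

        tupstore = tuplestore_begin_heap(true, false, work_mem);

        /*
         * Scan whatever backend structure is being exposed and stash every
         * row now, so nothing depends on that structure staying put between
         * calls.  A single constant row stands in for the real scan here.
         */
        MemSet(nulls, 0, sizeof(nulls));
        values[0] = DirectFunctionCall1(textin, CStringGetDatum("example row"));
        tuplestore_puttuple(tupstore, heap_form_tuple(tupdesc, values, nulls));

        tuplestore_donestoring(tupstore);
        MemoryContextSwitchTo(oldcontext);

        /* Hand the materialized result back to the executor. */
        rsinfo->returnMode = SFRM_Materialize;
        rsinfo->setResult = tupstore;
        rsinfo->setDesc = tupdesc;

        return (Datum) 0;
    }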