author    Tom Lane <tgl@sss.pgh.pa.us>  2015-05-12 14:10:10 -0400
committer Tom Lane <tgl@sss.pgh.pa.us>  2015-05-12 14:10:17 -0400
commit    afb9249d06f47d7a6d4a89fea0c3625fe43c5a5d (patch)
tree      e7f62e6cb2b6baefa9489536966229e4af912695 /src/backend/executor/execMain.c
parent    aa4a0b9571232f44e4b8d9effca3c540e657cebb (diff)
Add support for doing late row locking in FDWs.
Previously, FDWs could only do "early row locking", that is lock a row as soon as it's fetched, even though local restriction/join conditions might discard the row later. This patch adds callbacks that allow FDWs to do late locking in the same way that it's done for regular tables.

To make use of this feature, an FDW must support the "ctid" column as a unique row identifier. Currently, since ctid has to be of type TID, the feature is of limited use, though in principle it could be used by postgres_fdw. We may eventually allow FDWs to specify another data type for ctid, which would make it possible for more FDWs to use this feature.

This commit does not modify postgres_fdw to use late locking. We've tested some prototype code for that, but it's not in committable shape, and besides it's quite unclear whether it actually makes sense to do late locking against a remote server. The extra round trips required are likely to outweigh any benefit from improved concurrency.

Etsuro Fujita, reviewed by Ashutosh Bapat, and hacked up a lot by me
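For context, the callback the executor invokes in the diff below has the shape HeapTuple RefetchForeignRow(EState *estate, ExecRowMark *erm, Datum rowid, bool *updated). The following is a minimal, hypothetical sketch of how an FDW might implement it; "myfdw" and the myfdw_fetch_locked_row() helper are made-up names used only for illustration and are not part of this commit or of postgres_fdw.

```c
/*
 * Hypothetical sketch of the new callback for an imaginary FDW ("myfdw").
 * The signature matches the call the executor makes in the diff below;
 * myfdw_fetch_locked_row() is an assumed helper standing in for whatever
 * remote access the FDW actually performs.
 */
#include "postgres.h"

#include "access/htup_details.h"
#include "nodes/execnodes.h"
#include "nodes/lockoptions.h"
#include "storage/itemptr.h"

/* assumed helper: re-fetches and locks one remote row, or returns NULL */
extern HeapTuple myfdw_fetch_locked_row(Relation rel, ItemPointer tid,
										LockClauseStrength strength,
										LockWaitPolicy waitPolicy,
										bool *updated);

static HeapTuple
myfdwRefetchForeignRow(EState *estate,
					   ExecRowMark *erm,
					   Datum rowid,
					   bool *updated)
{
	/* rowid is the "ctid" junk column value saved for this row mark */
	ItemPointer tid = (ItemPointer) DatumGetPointer(rowid);

	(void) estate;				/* not needed in this simple sketch */

	/*
	 * Re-fetch the row from the remote side, locking it with the strength
	 * and wait policy recorded in the ExecRowMark (erm->strength and
	 * erm->waitPolicy, both available as of this commit).  A NULL result
	 * tells the caller the row could not be re-fetched; in the
	 * EvalPlanQual path below that is reported as an error.
	 */
	return myfdw_fetch_locked_row(erm->relation, tid,
								  erm->strength, erm->waitPolicy,
								  updated);
}
```

An FDW would expose such a function by setting the RefetchForeignRow field of the FdwRoutine returned by its handler, together with the companion callback that tells the planner which row-mark type to use for the foreign table; the function name above is purely illustrative.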
Diffstat (limited to 'src/backend/executor/execMain.c')
-rw-r--r--  src/backend/executor/execMain.c  |  79
1 file changed, 61 insertions(+), 18 deletions(-)
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index 0dee9491788..43d3c44c827 100644
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -898,8 +898,11 @@ InitPlan(QueryDesc *queryDesc, int eflags)
erm->prti = rc->prti;
erm->rowmarkId = rc->rowmarkId;
erm->markType = rc->markType;
+ erm->strength = rc->strength;
erm->waitPolicy = rc->waitPolicy;
+ erm->ermActive = false;
ItemPointerSetInvalid(&(erm->curCtid));
+ erm->ermExtra = NULL;
estate->es_rowMarks = lappend(estate->es_rowMarks, erm);
}
@@ -1143,6 +1146,8 @@ CheckValidResultRel(Relation resultRel, CmdType operation)
static void
CheckValidRowMarkRel(Relation rel, RowMarkType markType)
{
+ FdwRoutine *fdwroutine;
+
switch (rel->rd_rel->relkind)
{
case RELKIND_RELATION:
@@ -1178,11 +1183,13 @@ CheckValidRowMarkRel(Relation rel, RowMarkType markType)
RelationGetRelationName(rel))));
break;
case RELKIND_FOREIGN_TABLE:
- /* Should not get here; planner should have used ROW_MARK_COPY */
- ereport(ERROR,
- (errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("cannot lock rows in foreign table \"%s\"",
- RelationGetRelationName(rel))));
+ /* Okay only if the FDW supports it */
+ fdwroutine = GetFdwRoutineForRelation(rel, false);
+ if (fdwroutine->RefetchForeignRow == NULL)
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("cannot lock rows in foreign table \"%s\"",
+ RelationGetRelationName(rel))));
break;
default:
ereport(ERROR,
@@ -2005,9 +2012,11 @@ ExecUpdateLockMode(EState *estate, ResultRelInfo *relinfo)
/*
* ExecFindRowMark -- find the ExecRowMark struct for given rangetable index
+ *
+ * If no such struct, either return NULL or throw error depending on missing_ok
*/
ExecRowMark *
-ExecFindRowMark(EState *estate, Index rti)
+ExecFindRowMark(EState *estate, Index rti, bool missing_ok)
{
ListCell *lc;
@@ -2018,8 +2027,9 @@ ExecFindRowMark(EState *estate, Index rti)
if (erm->rti == rti)
return erm;
}
- elog(ERROR, "failed to find ExecRowMark for rangetable index %u", rti);
- return NULL; /* keep compiler quiet */
+ if (!missing_ok)
+ elog(ERROR, "failed to find ExecRowMark for rangetable index %u", rti);
+ return NULL;
}
/*
@@ -2530,7 +2540,7 @@ EvalPlanQualFetchRowMarks(EPQState *epqstate)
if (erm->markType == ROW_MARK_REFERENCE)
{
- Buffer buffer;
+ HeapTuple copyTuple;
Assert(erm->relation != NULL);
@@ -2541,17 +2551,50 @@ EvalPlanQualFetchRowMarks(EPQState *epqstate)
/* non-locked rels could be on the inside of outer joins */
if (isNull)
continue;
- tuple.t_self = *((ItemPointer) DatumGetPointer(datum));
- /* okay, fetch the tuple */
- if (!heap_fetch(erm->relation, SnapshotAny, &tuple, &buffer,
- false, NULL))
- elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");
+ /* fetch requests on foreign tables must be passed to their FDW */
+ if (erm->relation->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
+ {
+ FdwRoutine *fdwroutine;
+ bool updated = false;
- /* successful, copy and store tuple */
- EvalPlanQualSetTuple(epqstate, erm->rti,
- heap_copytuple(&tuple));
- ReleaseBuffer(buffer);
+ fdwroutine = GetFdwRoutineForRelation(erm->relation, false);
+ /* this should have been checked already, but let's be safe */
+ if (fdwroutine->RefetchForeignRow == NULL)
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("cannot lock rows in foreign table \"%s\"",
+ RelationGetRelationName(erm->relation))));
+ copyTuple = fdwroutine->RefetchForeignRow(epqstate->estate,
+ erm,
+ datum,
+ &updated);
+ if (copyTuple == NULL)
+ elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");
+
+ /*
+ * Ideally we'd insist on updated == false here, but that
+ * assumes that FDWs can track that exactly, which they might
+ * not be able to. So just ignore the flag.
+ */
+ }
+ else
+ {
+ /* ordinary table, fetch the tuple */
+ Buffer buffer;
+
+ tuple.t_self = *((ItemPointer) DatumGetPointer(datum));
+ if (!heap_fetch(erm->relation, SnapshotAny, &tuple, &buffer,
+ false, NULL))
+ elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");
+
+ /* successful, copy tuple */
+ copyTuple = heap_copytuple(&tuple);
+ ReleaseBuffer(buffer);
+ }
+
+ /* store tuple */
+ EvalPlanQualSetTuple(epqstate, erm->rti, copyTuple);
}
else
{