author     Andres Freund <andres@anarazel.de>  2019-05-23 16:25:48 -0700
committer  Andres Freund <andres@anarazel.de>  2019-05-23 16:32:36 -0700
commit     73b8c3bd2889fed986044e15aefd0911f96ccdd3 (patch)
tree       8fcd867ac811feecc99ed5f645088b73c44d5051 /src/backend/executor
parent     54487d1560619a0027e0651d1b8d715ca8fc388c (diff)
tableam: Rename wrapper functions to match callback names.
Some of the wrapper functions didn't match the callback names, in many
cases because they stayed "consistent" with the historic naming of the
wrapped functionality. We decided that in most cases it's more important
for tableam to be consistent going forward than with the past.
The one exception is beginscan/endscan/... because it'd have looked
odd to have systable_beginscan/endscan/... with a different naming
scheme, and changing the systable_* APIs would have caused way too
much churn (including breaking a lot of external users).
Author: Ashwin Agrawal, with some small additions by Andres Freund
Reviewed-By: Andres Freund
Discussion: https://postgr.es/m/CALfoeiugyrXZfX7n0ORCa4L-m834dzmaE8eFdbNR6PMpetU4Ww@mail.gmail.com
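
As a quick orientation for the renames below, here is a minimal caller-side sketch. It is not part of the commit: the helper lock_latest_row_version() is hypothetical, and the wrapper names and argument lists are taken from the calls visible in the diff (table_tuple_fetch_row_version(), previously table_fetch_row_version(); table_tuple_lock(), previously table_lock_tuple()) as of this commit.

#include "postgres.h"

#include "access/tableam.h"
#include "executor/tuptable.h"
#include "nodes/execnodes.h"
#include "storage/itemptr.h"
#include "utils/rel.h"
#include "utils/snapmgr.h"

/*
 * Hypothetical helper (illustration only): fetch a row version and lock the
 * latest version of that row using the renamed tableam wrappers.
 */
static void
lock_latest_row_version(Relation rel, ItemPointer tid, EState *estate,
                        TupleTableSlot *slot)
{
    TM_FailureData tmfd;
    TM_Result   res;

    /* formerly table_fetch_row_version() */
    if (!table_tuple_fetch_row_version(rel, tid, SnapshotAny, slot))
        elog(ERROR, "failed to fetch row version");

    /* formerly table_lock_tuple(); flag per the updated execMain.c comment */
    res = table_tuple_lock(rel, tid, estate->es_snapshot, slot,
                           estate->es_output_cid, LockTupleExclusive,
                           LockWaitBlock,
                           TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
                           &tmfd);
    if (res != TM_Ok)
        elog(ERROR, "unexpected table_tuple_lock status: %u", res);
}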
Diffstat (limited to 'src/backend/executor')
-rw-r--r--  src/backend/executor/execMain.c         |  8
-rw-r--r--  src/backend/executor/execReplication.c  | 16
-rw-r--r--  src/backend/executor/nodeLockRows.c     |  8
-rw-r--r--  src/backend/executor/nodeModifyTable.c  | 80
-rw-r--r--  src/backend/executor/nodeTidscan.c      |  4
5 files changed, 59 insertions, 57 deletions
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index e80abe1b8b3..8c8528b1340 100644
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -2436,7 +2436,7 @@ ExecBuildAuxRowMark(ExecRowMark *erm, List *targetlist)
  * quals. For that result to be useful, typically the input tuple has to be
  * last row version (otherwise the result isn't particularly useful) and
  * locked (otherwise the result might be out of date). That's typically
- * achieved by using table_lock_tuple() with the
+ * achieved by using table_tuple_lock() with the
  * TUPLE_LOCK_FLAG_FIND_LAST_VERSION flag.
  *
  * Returns a slot containing the new candidate update/delete tuple, or
@@ -2654,9 +2654,9 @@ EvalPlanQualFetchRowMarks(EPQState *epqstate)
     else
     {
         /* ordinary table, fetch the tuple */
-        if (!table_fetch_row_version(erm->relation,
-                                     (ItemPointer) DatumGetPointer(datum),
-                                     SnapshotAny, slot))
+        if (!table_tuple_fetch_row_version(erm->relation,
+                                           (ItemPointer) DatumGetPointer(datum),
+                                           SnapshotAny, slot))
             elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");
     }
 }
diff --git a/src/backend/executor/execReplication.c b/src/backend/executor/execReplication.c
index 0326284c83f..95e027c970b 100644
--- a/src/backend/executor/execReplication.c
+++ b/src/backend/executor/execReplication.c
@@ -173,7 +173,7 @@ retry:
     PushActiveSnapshot(GetLatestSnapshot());
-    res = table_lock_tuple(rel, &(outslot->tts_tid), GetLatestSnapshot(),
+    res = table_tuple_lock(rel, &(outslot->tts_tid), GetLatestSnapshot(),
                            outslot,
                            GetCurrentCommandId(false),
                            lockmode,
@@ -208,7 +208,7 @@ retry:
             elog(ERROR, "attempted to lock invisible tuple");
             break;
         default:
-            elog(ERROR, "unexpected table_lock_tuple status: %u", res);
+            elog(ERROR, "unexpected table_tuple_lock status: %u", res);
             break;
     }
 }
@@ -337,7 +337,7 @@ retry:
     PushActiveSnapshot(GetLatestSnapshot());
-    res = table_lock_tuple(rel, &(outslot->tts_tid), GetLatestSnapshot(),
+    res = table_tuple_lock(rel, &(outslot->tts_tid), GetLatestSnapshot(),
                            outslot,
                            GetCurrentCommandId(false),
                            lockmode,
@@ -372,7 +372,7 @@ retry:
             elog(ERROR, "attempted to lock invisible tuple");
             break;
         default:
-            elog(ERROR, "unexpected table_lock_tuple status: %u", res);
+            elog(ERROR, "unexpected table_tuple_lock status: %u", res);
             break;
     }
 }
@@ -425,7 +425,7 @@ ExecSimpleRelationInsert(EState *estate, TupleTableSlot *slot)
         ExecPartitionCheck(resultRelInfo, slot, estate, true);
     /* OK, store the tuple and create index entries for it */
-    simple_table_insert(resultRelInfo->ri_RelationDesc, slot);
+    simple_table_tuple_insert(resultRelInfo->ri_RelationDesc, slot);
     if (resultRelInfo->ri_NumIndices > 0)
         recheckIndexes = ExecInsertIndexTuples(slot, estate, false, NULL,
@@ -490,8 +490,8 @@ ExecSimpleRelationUpdate(EState *estate, EPQState *epqstate,
     if (resultRelInfo->ri_PartitionCheck)
         ExecPartitionCheck(resultRelInfo, slot, estate, true);
-    simple_table_update(rel, tid, slot, estate->es_snapshot,
-                        &update_indexes);
+    simple_table_tuple_update(rel, tid, slot, estate->es_snapshot,
+                              &update_indexes);
     if (resultRelInfo->ri_NumIndices > 0 && update_indexes)
         recheckIndexes = ExecInsertIndexTuples(slot, estate, false, NULL,
@@ -535,7 +535,7 @@ ExecSimpleRelationDelete(EState *estate, EPQState *epqstate,
     if (!skip_tuple)
     {
         /* OK, delete the tuple */
-        simple_table_delete(rel, tid, estate->es_snapshot);
+        simple_table_tuple_delete(rel, tid, estate->es_snapshot);
         /* AFTER ROW DELETE Triggers */
         ExecARDeleteTriggers(estate, resultRelInfo,
diff --git a/src/backend/executor/nodeLockRows.c b/src/backend/executor/nodeLockRows.c
index 4067554ed94..41513ceec65 100644
--- a/src/backend/executor/nodeLockRows.c
+++ b/src/backend/executor/nodeLockRows.c
@@ -185,7 +185,7 @@ lnext:
     if (!IsolationUsesXactSnapshot())
         lockflags |= TUPLE_LOCK_FLAG_FIND_LAST_VERSION;
-    test = table_lock_tuple(erm->relation, &tid, estate->es_snapshot,
+    test = table_tuple_lock(erm->relation, &tid, estate->es_snapshot,
                             markSlot, estate->es_output_cid,
                             lockmode, erm->waitPolicy, lockflags,
@@ -208,7 +208,7 @@ lnext:
              * to fetch the updated tuple instead, but doing so would
              * require changing heap_update and heap_delete to not
              * complain about updating "invisible" tuples, which seems
-             * pretty scary (table_lock_tuple will not complain, but few
+             * pretty scary (table_tuple_lock will not complain, but few
              * callers expect TM_Invisible, and we're not one of them). So
              * for now, treat the tuple as deleted and do not process.
              */
@@ -229,7 +229,7 @@ lnext:
                 ereport(ERROR,
                         (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
                          errmsg("could not serialize access due to concurrent update")));
-            elog(ERROR, "unexpected table_lock_tuple status: %u",
+            elog(ERROR, "unexpected table_tuple_lock status: %u",
                  test);
            break;
@@ -246,7 +246,7 @@ lnext:
            break;
        default:
-            elog(ERROR, "unrecognized table_lock_tuple status: %u",
+            elog(ERROR, "unrecognized table_tuple_lock status: %u",
                  test);
     }
diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c
index 3125e5c993d..a3c0e915434 100644
--- a/src/backend/executor/nodeModifyTable.c
+++ b/src/backend/executor/nodeModifyTable.c
@@ -236,7 +236,7 @@ ExecCheckTIDVisible(EState *estate,
     if (!IsolationUsesXactSnapshot())
         return;
-    if (!table_fetch_row_version(rel, tid, SnapshotAny, tempSlot))
+    if (!table_tuple_fetch_row_version(rel, tid, SnapshotAny, tempSlot))
         elog(ERROR, "failed to fetch conflicting tuple for ON CONFLICT");
     ExecCheckTupleVisible(estate, rel, tempSlot);
     ExecClearTuple(tempSlot);
@@ -544,11 +544,11 @@ ExecInsert(ModifyTableState *mtstate,
         specToken = SpeculativeInsertionLockAcquire(GetCurrentTransactionId());
         /* insert the tuple, with the speculative token */
-        table_insert_speculative(resultRelationDesc, slot,
-                                 estate->es_output_cid,
-                                 0,
-                                 NULL,
-                                 specToken);
+        table_tuple_insert_speculative(resultRelationDesc, slot,
+                                       estate->es_output_cid,
+                                       0,
+                                       NULL,
+                                       specToken);
         /* insert index entries for tuple */
         recheckIndexes = ExecInsertIndexTuples(slot, estate, true,
@@ -556,8 +556,8 @@ ExecInsert(ModifyTableState *mtstate,
                                                arbiterIndexes);
         /* adjust the tuple's state accordingly */
-        table_complete_speculative(resultRelationDesc, slot,
-                                   specToken, !specConflict);
+        table_tuple_complete_speculative(resultRelationDesc, slot,
+                                         specToken, !specConflict);
         /*
          * Wake up anyone waiting for our decision. They will re-check
@@ -584,9 +584,9 @@ ExecInsert(ModifyTableState *mtstate,
     else
     {
         /* insert the tuple normally */
-        table_insert(resultRelationDesc, slot,
-                     estate->es_output_cid,
-                     0, NULL);
+        table_tuple_insert(resultRelationDesc, slot,
+                           estate->es_output_cid,
+                           0, NULL);
         /* insert index entries for tuple */
         if (resultRelInfo->ri_NumIndices > 0)
@@ -766,13 +766,13 @@ ExecDelete(ModifyTableState *mtstate,
      * mode transactions.
      */
 ldelete:;
-    result = table_delete(resultRelationDesc, tupleid,
-                          estate->es_output_cid,
-                          estate->es_snapshot,
-                          estate->es_crosscheck_snapshot,
-                          true /* wait for commit */ ,
-                          &tmfd,
-                          changingPart);
+    result = table_tuple_delete(resultRelationDesc, tupleid,
+                                estate->es_output_cid,
+                                estate->es_snapshot,
+                                estate->es_crosscheck_snapshot,
+                                true /* wait for commit */ ,
+                                &tmfd,
+                                changingPart);
     switch (result)
     {
@@ -832,7 +832,7 @@ ldelete:;
             inputslot = EvalPlanQualSlot(epqstate, resultRelationDesc,
                                          resultRelInfo->ri_RangeTableIndex);
-            result = table_lock_tuple(resultRelationDesc, tupleid,
+            result = table_tuple_lock(resultRelationDesc, tupleid,
                                       estate->es_snapshot,
                                       inputslot, estate->es_output_cid,
                                       LockTupleExclusive, LockWaitBlock,
@@ -875,7 +875,7 @@ ldelete:;
                  * out.
                  *
                  * See also TM_SelfModified response to
-                 * table_delete() above.
+                 * table_tuple_delete() above.
                  */
                 if (tmfd.cmax != estate->es_output_cid)
                     ereport(ERROR,
@@ -900,7 +900,7 @@ ldelete:;
                  * locking the latest version via
                  * TUPLE_LOCK_FLAG_FIND_LAST_VERSION.
                  */
-                elog(ERROR, "unexpected table_lock_tuple status: %u",
+                elog(ERROR, "unexpected table_tuple_lock status: %u",
                      result);
                 return NULL;
             }
@@ -918,7 +918,8 @@ ldelete:;
             return NULL;
         default:
-            elog(ERROR, "unrecognized table_delete status: %u", result);
+            elog(ERROR, "unrecognized table_tuple_delete status: %u",
+                 result);
             return NULL;
     }
@@ -990,8 +991,8 @@ ldelete:;
         }
         else
         {
-            if (!table_fetch_row_version(resultRelationDesc, tupleid,
-                                         SnapshotAny, slot))
+            if (!table_tuple_fetch_row_version(resultRelationDesc, tupleid,
+                                               SnapshotAny, slot))
                 elog(ERROR, "failed to fetch deleted tuple for DELETE RETURNING");
         }
     }
@@ -1134,7 +1135,7 @@ ExecUpdate(ModifyTableState *mtstate,
      * If we generate a new candidate tuple after EvalPlanQual testing, we
      * must loop back here and recheck any RLS policies and constraints.
      * (We don't need to redo triggers, however. If there are any BEFORE
-     * triggers then trigger.c will have done table_lock_tuple to lock the
+     * triggers then trigger.c will have done table_tuple_lock to lock the
      * correct tuple, so there's no need to do them again.)
      */
 lreplace:;
@@ -1309,12 +1310,12 @@ lreplace:;
      * needed for referential integrity updates in transaction-snapshot
      * mode transactions.
      */
-    result = table_update(resultRelationDesc, tupleid, slot,
-                          estate->es_output_cid,
-                          estate->es_snapshot,
-                          estate->es_crosscheck_snapshot,
-                          true /* wait for commit */ ,
-                          &tmfd, &lockmode, &update_indexes);
+    result = table_tuple_update(resultRelationDesc, tupleid, slot,
+                                estate->es_output_cid,
+                                estate->es_snapshot,
+                                estate->es_crosscheck_snapshot,
+                                true /* wait for commit */ ,
+                                &tmfd, &lockmode, &update_indexes);
     switch (result)
     {
@@ -1373,7 +1374,7 @@ lreplace:;
             inputslot = EvalPlanQualSlot(epqstate, resultRelationDesc,
                                          resultRelInfo->ri_RangeTableIndex);
-            result = table_lock_tuple(resultRelationDesc, tupleid,
+            result = table_tuple_lock(resultRelationDesc, tupleid,
                                       estate->es_snapshot,
                                       inputslot, estate->es_output_cid,
                                       lockmode, LockWaitBlock,
@@ -1412,7 +1413,7 @@ lreplace:;
                  * otherwise error out.
                  *
                  * See also TM_SelfModified response to
-                 * table_update() above.
+                 * table_tuple_update() above.
                  */
                 if (tmfd.cmax != estate->es_output_cid)
                     ereport(ERROR,
@@ -1422,8 +1423,8 @@ lreplace:;
                     return NULL;
                 default:
-                    /* see table_lock_tuple call in ExecDelete() */
-                    elog(ERROR, "unexpected table_lock_tuple status: %u",
+                    /* see table_tuple_lock call in ExecDelete() */
+                    elog(ERROR, "unexpected table_tuple_lock status: %u",
                          result);
                     return NULL;
             }
@@ -1440,7 +1441,8 @@ lreplace:;
             return NULL;
         default:
-            elog(ERROR, "unrecognized table_update status: %u", result);
+            elog(ERROR, "unrecognized table_tuple_update status: %u",
+                 result);
             return NULL;
     }
@@ -1521,7 +1523,7 @@ ExecOnConflictUpdate(ModifyTableState *mtstate,
      * previous conclusion that the tuple is conclusively committed is not
      * true anymore.
      */
-    test = table_lock_tuple(relation, conflictTid,
+    test = table_tuple_lock(relation, conflictTid,
                             estate->es_snapshot,
                             existing, estate->es_output_cid,
                             lockmode, LockWaitBlock, 0,
@@ -1612,7 +1614,7 @@ ExecOnConflictUpdate(ModifyTableState *mtstate,
             return false;
         default:
-            elog(ERROR, "unrecognized table_lock_tuple status: %u", test);
+            elog(ERROR, "unrecognized table_tuple_lock status: %u", test);
     }
     /* Success, the tuple is locked. */
@@ -1677,7 +1679,7 @@ ExecOnConflictUpdate(ModifyTableState *mtstate,
     /*
      * Note that it is possible that the target tuple has been modified in
-     * this session, after the above table_lock_tuple. We choose to not error
+     * this session, after the above table_tuple_lock. We choose to not error
      * out in that case, in line with ExecUpdate's treatment of similar cases.
      * This can happen if an UPDATE is triggered from within ExecQual(),
      * ExecWithCheckOptions() or ExecProject() above, e.g. by selecting from a
diff --git a/src/backend/executor/nodeTidscan.c b/src/backend/executor/nodeTidscan.c
index 93335864a1e..83ece6bf563 100644
--- a/src/backend/executor/nodeTidscan.c
+++ b/src/backend/executor/nodeTidscan.c
@@ -381,9 +381,9 @@ TidNext(TidScanState *node)
      * current according to our snapshot.
      */
     if (node->tss_isCurrentOf)
-        table_get_latest_tid(scan, &tid);
+        table_tuple_get_latest_tid(scan, &tid);
-    if (table_fetch_row_version(heapRelation, &tid, snapshot, slot))
+    if (table_tuple_fetch_row_version(heapRelation, &tid, snapshot, slot))
         return slot;
     /* Bad TID or failed snapshot qual; try next */
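
For orientation on the execReplication.c hunks above, the sketch below shows how the renamed "simple_" wrappers are typically called. It is not part of the commit: apply_one_change() is a hypothetical helper, and the argument lists follow the calls visible in the diff (simple_table_tuple_insert/update/delete, previously simple_table_insert/update/delete) as of this commit.

#include "postgres.h"

#include "access/tableam.h"
#include "executor/tuptable.h"
#include "nodes/execnodes.h"
#include "storage/itemptr.h"
#include "utils/rel.h"

/*
 * Hypothetical sketch (illustration only): apply one change with the
 * renamed "simple_" tableam wrappers, in the style of the replication
 * apply code touched above.
 */
static void
apply_one_change(Relation rel, EState *estate, TupleTableSlot *slot,
                 ItemPointer tid, char action)
{
    bool        update_indexes;

    switch (action)
    {
        case 'I':               /* formerly simple_table_insert() */
            simple_table_tuple_insert(rel, slot);
            break;
        case 'U':               /* formerly simple_table_update() */
            simple_table_tuple_update(rel, tid, slot, estate->es_snapshot,
                                      &update_indexes);
            break;
        case 'D':               /* formerly simple_table_delete() */
            simple_table_tuple_delete(rel, tid, estate->es_snapshot);
            break;
    }
}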