Diffstat (limited to 'src/backend')
-rw-r--r-- | src/backend/storage/ipc/standby.c | 24
-rw-r--r-- | src/backend/storage/lmgr/lock.c   |  8
-rw-r--r-- | src/backend/storage/lmgr/proc.c   | 19
-rw-r--r-- | src/backend/utils/adt/lockfuncs.c |  9
4 files changed, 2 insertions, 58 deletions
diff --git a/src/backend/storage/ipc/standby.c b/src/backend/storage/ipc/standby.c
index 5877a60715f..39a30c00f7a 100644
--- a/src/backend/storage/ipc/standby.c
+++ b/src/backend/storage/ipc/standby.c
@@ -540,34 +540,12 @@ void
 ResolveRecoveryConflictWithLock(LOCKTAG locktag, bool logging_conflict)
 {
 	TimestampTz ltime;
-	TimestampTz now;
 
 	Assert(InHotStandby);
 
 	ltime = GetStandbyLimitTime();
-	now = GetCurrentTimestamp();
 
-	/*
-	 * Update waitStart if first time through after the startup process
-	 * started waiting for the lock. It should not be updated every time
-	 * ResolveRecoveryConflictWithLock() is called during the wait.
-	 *
-	 * Use the current time obtained for comparison with ltime as waitStart
-	 * (i.e., the time when this process started waiting for the lock). Since
-	 * getting the current time newly can cause overhead, we reuse the
-	 * already-obtained time to avoid that overhead.
-	 *
-	 * Note that waitStart is updated without holding the lock table's
-	 * partition lock, to avoid the overhead by additional lock acquisition.
-	 * This can cause "waitstart" in pg_locks to become NULL for a very short
-	 * period of time after the wait started even though "granted" is false.
-	 * This is OK in practice because we can assume that users are likely to
-	 * look at "waitstart" when waiting for the lock for a long time.
-	 */
-	if (pg_atomic_read_u64(&MyProc->waitStart) == 0)
-		pg_atomic_write_u64(&MyProc->waitStart, now);
-
-	if (now >= ltime && ltime != 0)
+	if (GetCurrentTimestamp() >= ltime && ltime != 0)
 	{
 		/*
 		 * We're already behind, so clear a path as quickly as possible.
diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c
index 108b4d90238..79c1cf9b8b4 100644
--- a/src/backend/storage/lmgr/lock.c
+++ b/src/backend/storage/lmgr/lock.c
@@ -3619,12 +3619,6 @@ GetLockStatusData(void)
 			instance->leaderPid = proc->pid;
 			instance->fastpath = true;
 
-			/*
-			 * Successfully taking fast path lock means there were no
-			 * conflicting locks.
-			 */
-			instance->waitStart = 0;
-
 			el++;
 		}
 
@@ -3652,7 +3646,6 @@ GetLockStatusData(void)
 			instance->pid = proc->pid;
 			instance->leaderPid = proc->pid;
 			instance->fastpath = true;
-			instance->waitStart = 0;
 
 			el++;
 		}
@@ -3705,7 +3698,6 @@ GetLockStatusData(void)
 		instance->pid = proc->pid;
 		instance->leaderPid = proclock->groupLeader->pid;
 		instance->fastpath = false;
-		instance->waitStart = (TimestampTz) pg_atomic_read_u64(&proc->waitStart);
 
 		el++;
 	}
diff --git a/src/backend/storage/lmgr/proc.c b/src/backend/storage/lmgr/proc.c
index f059d60a0f6..c87ffc65491 100644
--- a/src/backend/storage/lmgr/proc.c
+++ b/src/backend/storage/lmgr/proc.c
@@ -402,7 +402,6 @@ InitProcess(void)
 	MyProc->lwWaitMode = 0;
 	MyProc->waitLock = NULL;
 	MyProc->waitProcLock = NULL;
-	pg_atomic_init_u64(&MyProc->waitStart, 0);
 #ifdef USE_ASSERT_CHECKING
 	{
 		int			i;
@@ -1263,23 +1262,6 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
 		}
 		else
 			enable_timeout_after(DEADLOCK_TIMEOUT, DeadlockTimeout);
-
-		/*
-		 * Use the current time obtained for the deadlock timeout timer as
-		 * waitStart (i.e., the time when this process started waiting for the
-		 * lock). Since getting the current time newly can cause overhead, we
-		 * reuse the already-obtained time to avoid that overhead.
-		 *
-		 * Note that waitStart is updated without holding the lock table's
-		 * partition lock, to avoid the overhead by additional lock
-		 * acquisition. This can cause "waitstart" in pg_locks to become NULL
-		 * for a very short period of time after the wait started even though
-		 * "granted" is false. This is OK in practice because we can assume
-		 * that users are likely to look at "waitstart" when waiting for the
-		 * lock for a long time.
-		 */
-		pg_atomic_write_u64(&MyProc->waitStart,
-							get_timeout_start_time(DEADLOCK_TIMEOUT));
 	}
 	else if (log_recovery_conflict_waits)
 	{
@@ -1696,7 +1678,6 @@ ProcWakeup(PGPROC *proc, ProcWaitStatus waitStatus)
 	proc->waitLock = NULL;
 	proc->waitProcLock = NULL;
 	proc->waitStatus = waitStatus;
-	pg_atomic_write_u64(&MyProc->waitStart, 0);
 
 	/* And awaken it */
 	SetLatch(&proc->procLatch);
diff --git a/src/backend/utils/adt/lockfuncs.c b/src/backend/utils/adt/lockfuncs.c
index 97f0265c12d..b1cf5b79a75 100644
--- a/src/backend/utils/adt/lockfuncs.c
+++ b/src/backend/utils/adt/lockfuncs.c
@@ -63,7 +63,7 @@ typedef struct
 } PG_Lock_Status;
 
 /* Number of columns in pg_locks output */
-#define NUM_LOCK_STATUS_COLUMNS		16
+#define NUM_LOCK_STATUS_COLUMNS		15
 
 /*
  * VXIDGetDatum - Construct a text representation of a VXID
@@ -142,8 +142,6 @@ pg_lock_status(PG_FUNCTION_ARGS)
 						   BOOLOID, -1, 0);
 		TupleDescInitEntry(tupdesc, (AttrNumber) 15, "fastpath",
 						   BOOLOID, -1, 0);
-		TupleDescInitEntry(tupdesc, (AttrNumber) 16, "waitstart",
-						   TIMESTAMPTZOID, -1, 0);
 
 		funcctx->tuple_desc = BlessTupleDesc(tupdesc);
 
@@ -338,10 +336,6 @@ pg_lock_status(PG_FUNCTION_ARGS)
 		values[12] = CStringGetTextDatum(GetLockmodeName(instance->locktag.locktag_lockmethodid, mode));
 		values[13] = BoolGetDatum(granted);
 		values[14] = BoolGetDatum(instance->fastpath);
-		if (!granted && instance->waitStart != 0)
-			values[15] = TimestampTzGetDatum(instance->waitStart);
-		else
-			nulls[15] = true;
 
 		tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls);
 		result = HeapTupleGetDatum(tuple);
@@ -412,7 +406,6 @@ pg_lock_status(PG_FUNCTION_ARGS)
 		values[12] = CStringGetTextDatum("SIReadLock");
 		values[13] = BoolGetDatum(true);
 		values[14] = BoolGetDatum(false);
-		nulls[15] = true;
 
 		tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls);
 		result = HeapTupleGetDatum(tuple);
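For reference, below is a minimal standalone sketch of the pattern the reverted code used: the waiting process publishes its wait-start time with a single 64-bit atomic store, taking no lock-table partition lock, and readers treat a zero value as "not recorded", which pg_lock_status() reported as a NULL waitstart. This is an illustration only, written with C11 atomics rather than PostgreSQL's pg_atomic_* API; the names FakeProc, record_wait_start() and report_wait_start() are hypothetical and not part of this patch.

/*
 * Sketch of the reverted waitStart pattern using C11 atomics.
 * Illustration only; not PostgreSQL code.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

typedef struct
{
	_Atomic uint64_t waitStart;	/* 0 means "no wait start recorded" */
} FakeProc;

/*
 * Writer side: publish the wait-start time once, without taking any
 * lock, reusing a timestamp the caller has already obtained.
 */
static void
record_wait_start(FakeProc *proc, uint64_t now)
{
	if (atomic_load(&proc->waitStart) == 0)
		atomic_store(&proc->waitStart, now);
}

/*
 * Reader side: a zero value is reported as unknown, the way the
 * reverted pg_lock_status() code emitted a NULL waitstart column.
 */
static void
report_wait_start(FakeProc *proc)
{
	uint64_t	ws = atomic_load(&proc->waitStart);

	if (ws == 0)
		printf("waitstart = NULL\n");
	else
		printf("waitstart = %llu\n", (unsigned long long) ws);
}

int
main(void)
{
	FakeProc	proc = {0};

	report_wait_start(&proc);	/* not waiting yet: NULL */
	record_wait_start(&proc, (uint64_t) time(NULL));
	report_wait_start(&proc);	/* now populated */
	return 0;
}

The trade-off described in the removed comments applies to this pattern: because no partition lock is held around the store, a reader can briefly see 0 (NULL) even though the process has already started waiting.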