diff options
Diffstat (limited to 'src/backend/storage')
-rw-r--r-- | src/backend/storage/buffer/bufmgr.c | 4 |
-rw-r--r-- | src/backend/storage/file/fd.c | 4 |
-rw-r--r-- | src/backend/storage/file/sharedfileset.c | 4 |
-rw-r--r-- | src/backend/storage/ipc/latch.c | 6 |
-rw-r--r-- | src/backend/storage/ipc/procarray.c | 14 |
-rw-r--r-- | src/backend/storage/ipc/procsignal.c | 38 |
-rw-r--r-- | src/backend/storage/ipc/signalfuncs.c | 23 |
-rw-r--r-- | src/backend/storage/ipc/standby.c | 2 |
-rw-r--r-- | src/backend/storage/lmgr/proc.c | 31 |
-rw-r--r-- | src/backend/storage/lmgr/spin.c | 2 |
-rw-r--r-- | src/backend/storage/page/bufpage.c | 2 |
-rw-r--r-- | src/backend/storage/sync/sync.c | 2 |
12 files changed, 67 insertions, 65 deletions
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c index 0c5b87864b9..4b296a22c45 100644 --- a/src/backend/storage/buffer/bufmgr.c +++ b/src/backend/storage/buffer/bufmgr.c @@ -3071,7 +3071,7 @@ DropRelFileNodeBuffers(SMgrRelation smgr_reln, ForkNumber *forkNum, int j; RelFileNodeBackend rnode; BlockNumber nForkBlock[MAX_FORKNUM]; - uint64 nBlocksToInvalidate = 0; + uint64 nBlocksToInvalidate = 0; rnode = smgr_reln->smgr_rnode; @@ -3195,7 +3195,7 @@ DropRelFileNodesAllBuffers(SMgrRelation *smgr_reln, int nnodes) int n = 0; SMgrRelation *rels; BlockNumber (*block)[MAX_FORKNUM + 1]; - uint64 nBlocksToInvalidate = 0; + uint64 nBlocksToInvalidate = 0; RelFileNode *nodes; bool cached = true; bool use_bsearch; diff --git a/src/backend/storage/file/fd.c b/src/backend/storage/file/fd.c index 06b57ae71f1..e8cd7ef0886 100644 --- a/src/backend/storage/file/fd.c +++ b/src/backend/storage/file/fd.c @@ -3288,7 +3288,7 @@ looks_like_temp_rel_name(const char *name) static void do_syncfs(const char *path) { - int fd; + int fd; fd = OpenTransientFile(path, O_RDONLY); if (fd < 0) @@ -3394,7 +3394,7 @@ SyncDataDirectory(void) do_syncfs("pg_wal"); return; } -#endif /* !HAVE_SYNCFS */ +#endif /* !HAVE_SYNCFS */ /* * If possible, hint to the kernel that we're soon going to fsync the data diff --git a/src/backend/storage/file/sharedfileset.c b/src/backend/storage/file/sharedfileset.c index de422b1ebdf..ed37c940adc 100644 --- a/src/backend/storage/file/sharedfileset.c +++ b/src/backend/storage/file/sharedfileset.c @@ -267,8 +267,8 @@ static void SharedFileSetDeleteOnProcExit(int status, Datum arg) { /* - * Remove all the pending shared fileset entries. We don't use foreach() here - * because SharedFileSetDeleteAll will remove the current element in + * Remove all the pending shared fileset entries. We don't use foreach() + * here because SharedFileSetDeleteAll will remove the current element in * filesetlist. 
Though we have used foreach_delete_current() to remove the * element from filesetlist it could only fix up the state of one of the * loops, see SharedFileSetUnregister. diff --git a/src/backend/storage/ipc/latch.c b/src/backend/storage/ipc/latch.c index ad781131e2a..1d893cf863d 100644 --- a/src/backend/storage/ipc/latch.c +++ b/src/backend/storage/ipc/latch.c @@ -1655,9 +1655,9 @@ WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout, (cur_kqueue_event->fflags & NOTE_EXIT) != 0) { /* - * The kernel will tell this kqueue object only once about the exit - * of the postmaster, so let's remember that for next time so that - * we provide level-triggered semantics. + * The kernel will tell this kqueue object only once about the + * exit of the postmaster, so let's remember that for next time so + * that we provide level-triggered semantics. */ set->report_postmaster_not_running = true; diff --git a/src/backend/storage/ipc/procarray.c b/src/backend/storage/ipc/procarray.c index 5ff8cab394e..42a89fc5dc9 100644 --- a/src/backend/storage/ipc/procarray.c +++ b/src/backend/storage/ipc/procarray.c @@ -2056,7 +2056,7 @@ GetSnapshotDataInitOldSnapshot(Snapshot snapshot) static bool GetSnapshotDataReuse(Snapshot snapshot) { - uint64 curXactCompletionCount; + uint64 curXactCompletionCount; Assert(LWLockHeldByMe(ProcArrayLock)); @@ -2080,8 +2080,8 @@ GetSnapshotDataReuse(Snapshot snapshot) * holding ProcArrayLock) exclusively). Thus the xactCompletionCount check * ensures we would detect if the snapshot would have changed. * - * As the snapshot contents are the same as it was before, it is safe - * to re-enter the snapshot's xmin into the PGPROC array. None of the rows + * As the snapshot contents are the same as it was before, it is safe to + * re-enter the snapshot's xmin into the PGPROC array. 
None of the rows * visible under the snapshot could already have been removed (that'd * require the set of running transactions to change) and it fulfills the * requirement that concurrent GetSnapshotData() calls yield the same @@ -2259,10 +2259,10 @@ GetSnapshotData(Snapshot snapshot) continue; /* - * The only way we are able to get here with a non-normal xid - * is during bootstrap - with this backend using - * BootstrapTransactionId. But the above test should filter - * that out. + * The only way we are able to get here with a non-normal xid is + * during bootstrap - with this backend using + * BootstrapTransactionId. But the above test should filter that + * out. */ Assert(TransactionIdIsNormal(xid)); diff --git a/src/backend/storage/ipc/procsignal.c b/src/backend/storage/ipc/procsignal.c index eac68951414..defb75aa26a 100644 --- a/src/backend/storage/ipc/procsignal.c +++ b/src/backend/storage/ipc/procsignal.c @@ -61,7 +61,7 @@ */ typedef struct { - volatile pid_t pss_pid; + volatile pid_t pss_pid; volatile sig_atomic_t pss_signalFlags[NUM_PROCSIGNALS]; pg_atomic_uint64 pss_barrierGeneration; pg_atomic_uint32 pss_barrierCheckMask; @@ -454,7 +454,7 @@ ProcessProcSignalBarrier(void) { uint64 local_gen; uint64 shared_gen; - volatile uint32 flags; + volatile uint32 flags; Assert(MyProcSignalSlot); @@ -484,15 +484,15 @@ ProcessProcSignalBarrier(void) * extract the flags, and that any subsequent state changes happen * afterward. * - * NB: In order to avoid race conditions, we must zero pss_barrierCheckMask - * first and only afterwards try to do barrier processing. If we did it - * in the other order, someone could send us another barrier of some - * type right after we called the barrier-processing function but before - * we cleared the bit. We would have no way of knowing that the bit needs - * to stay set in that case, so the need to call the barrier-processing - * function again would just get forgotten. 
So instead, we tentatively - * clear all the bits and then put back any for which we don't manage - * to successfully absorb the barrier. + * NB: In order to avoid race conditions, we must zero + * pss_barrierCheckMask first and only afterwards try to do barrier + * processing. If we did it in the other order, someone could send us + * another barrier of some type right after we called the + * barrier-processing function but before we cleared the bit. We would + * have no way of knowing that the bit needs to stay set in that case, so + * the need to call the barrier-processing function again would just get + * forgotten. So instead, we tentatively clear all the bits and then put + * back any for which we don't manage to successfully absorb the barrier. */ flags = pg_atomic_exchange_u32(&MyProcSignalSlot->pss_barrierCheckMask, 0); @@ -503,15 +503,15 @@ ProcessProcSignalBarrier(void) */ if (flags != 0) { - bool success = true; + bool success = true; PG_TRY(); { /* * Process each type of barrier. The barrier-processing functions - * should normally return true, but may return false if the barrier - * can't be absorbed at the current time. This should be rare, - * because it's pretty expensive. Every single + * should normally return true, but may return false if the + * barrier can't be absorbed at the current time. This should be + * rare, because it's pretty expensive. Every single * CHECK_FOR_INTERRUPTS() will return here until we manage to * absorb the barrier, and that cost will add up in a hurry. * @@ -521,8 +521,8 @@ ProcessProcSignalBarrier(void) */ while (flags != 0) { - ProcSignalBarrierType type; - bool processed = true; + ProcSignalBarrierType type; + bool processed = true; type = (ProcSignalBarrierType) pg_rightmost_one_pos32(flags); switch (type) @@ -533,8 +533,8 @@ ProcessProcSignalBarrier(void) } /* - * To avoid an infinite loop, we must always unset the bit - * in flags. + * To avoid an infinite loop, we must always unset the bit in + * flags. 
*/ BARRIER_CLEAR_BIT(flags, type); diff --git a/src/backend/storage/ipc/signalfuncs.c b/src/backend/storage/ipc/signalfuncs.c index 0337b00226a..837699481ca 100644 --- a/src/backend/storage/ipc/signalfuncs.c +++ b/src/backend/storage/ipc/signalfuncs.c @@ -137,11 +137,12 @@ pg_wait_until_termination(int pid, int64 timeout) * Wait in steps of waittime milliseconds until this function exits or * timeout. */ - int64 waittime = 100; + int64 waittime = 100; + /* * Initially remaining time is the entire timeout specified by the user. */ - int64 remainingtime = timeout; + int64 remainingtime = timeout; /* * Check existence of the backend. If the backend still exists, then wait @@ -162,7 +163,7 @@ pg_wait_until_termination(int pid, int64 timeout) ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), errmsg("could not check the existence of the backend with PID %d: %m", - pid))); + pid))); } /* Process interrupts, if any, before waiting */ @@ -198,9 +199,9 @@ pg_wait_until_termination(int pid, int64 timeout) Datum pg_terminate_backend(PG_FUNCTION_ARGS) { - int pid; - int r; - int timeout; + int pid; + int r; + int timeout; pid = PG_GETARG_INT32(0); timeout = PG_GETARG_INT64(1); @@ -208,7 +209,7 @@ pg_terminate_backend(PG_FUNCTION_ARGS) if (timeout < 0) ereport(ERROR, (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), - errmsg("\"timeout\" must not be negative"))); + errmsg("\"timeout\" must not be negative"))); r = pg_signal_backend(pid, SIGTERM); @@ -240,9 +241,9 @@ pg_terminate_backend(PG_FUNCTION_ARGS) Datum pg_wait_for_backend_termination(PG_FUNCTION_ARGS) { - int pid; - int64 timeout; - PGPROC *proc = NULL; + int pid; + int64 timeout; + PGPROC *proc = NULL; pid = PG_GETARG_INT32(0); timeout = PG_GETARG_INT64(1); @@ -250,7 +251,7 @@ pg_wait_for_backend_termination(PG_FUNCTION_ARGS) if (timeout <= 0) ereport(ERROR, (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), - errmsg("\"timeout\" must not be negative or zero"))); + errmsg("\"timeout\" must not be negative or zero"))); proc = 
BackendPidGetProc(pid); diff --git a/src/backend/storage/ipc/standby.c b/src/backend/storage/ipc/standby.c index 1465ee44a12..553b6e54603 100644 --- a/src/backend/storage/ipc/standby.c +++ b/src/backend/storage/ipc/standby.c @@ -482,7 +482,7 @@ ResolveRecoveryConflictWithSnapshotFullXid(FullTransactionId latestRemovedFullXi * snapshots that still see it. */ FullTransactionId nextXid = ReadNextFullTransactionId(); - uint64 diff; + uint64 diff; diff = U64FromFullTransactionId(nextXid) - U64FromFullTransactionId(latestRemovedFullXid); diff --git a/src/backend/storage/lmgr/proc.c b/src/backend/storage/lmgr/proc.c index 692f21ef6a8..2575ea1ca0d 100644 --- a/src/backend/storage/lmgr/proc.c +++ b/src/backend/storage/lmgr/proc.c @@ -103,7 +103,7 @@ ProcGlobalShmemSize(void) { Size size = 0; Size TotalProcs = - add_size(MaxBackends, add_size(NUM_AUXILIARY_PROCS, max_prepared_xacts)); + add_size(MaxBackends, add_size(NUM_AUXILIARY_PROCS, max_prepared_xacts)); /* ProcGlobal */ size = add_size(size, sizeof(PROC_HDR)); @@ -1245,8 +1245,8 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable) /* * Set timer so we can wake up after awhile and check for a deadlock. If a * deadlock is detected, the handler sets MyProc->waitStatus = - * PROC_WAIT_STATUS_ERROR, allowing us to know that we must report failure rather - * than success. + * PROC_WAIT_STATUS_ERROR, allowing us to know that we must report failure + * rather than success. * * By delaying the check until we've waited for a bit, we can avoid * running the rather expensive deadlock-check code in most cases. @@ -1371,9 +1371,9 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable) } /* - * waitStatus could change from PROC_WAIT_STATUS_WAITING to something else - * asynchronously. Read it just once per loop to prevent surprising - * behavior (such as missing log messages). + * waitStatus could change from PROC_WAIT_STATUS_WAITING to something + * else asynchronously. 
Read it just once per loop to prevent + * surprising behavior (such as missing log messages). */ myWaitStatus = *((volatile ProcWaitStatus *) &MyProc->waitStatus); @@ -1429,7 +1429,7 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable) ereport(DEBUG1, (errmsg_internal("sending cancel to blocking autovacuum PID %d", - pid), + pid), errdetail_log("%s", logbuf.data))); pfree(locktagbuf.data); @@ -1587,11 +1587,12 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable) /* * Currently, the deadlock checker always kicks its own - * process, which means that we'll only see PROC_WAIT_STATUS_ERROR when - * deadlock_state == DS_HARD_DEADLOCK, and there's no need to - * print redundant messages. But for completeness and - * future-proofing, print a message if it looks like someone - * else kicked us off the lock. + * process, which means that we'll only see + * PROC_WAIT_STATUS_ERROR when deadlock_state == + * DS_HARD_DEADLOCK, and there's no need to print redundant + * messages. But for completeness and future-proofing, print + * a message if it looks like someone else kicked us off the + * lock. */ if (deadlock_state != DS_HARD_DEADLOCK) ereport(LOG, @@ -1830,9 +1831,9 @@ CheckDeadLock(void) * preserve the flexibility to kill some other transaction than the * one detecting the deadlock.) * - * RemoveFromWaitQueue sets MyProc->waitStatus to PROC_WAIT_STATUS_ERROR, so - * ProcSleep will report an error after we return from the signal - * handler. + * RemoveFromWaitQueue sets MyProc->waitStatus to + * PROC_WAIT_STATUS_ERROR, so ProcSleep will report an error after we + * return from the signal handler. 
*/ Assert(MyProc->waitLock != NULL); RemoveFromWaitQueue(MyProc, LockTagHashCode(&(MyProc->waitLock->tag))); diff --git a/src/backend/storage/lmgr/spin.c b/src/backend/storage/lmgr/spin.c index 6fe0c6532c6..557672cadda 100644 --- a/src/backend/storage/lmgr/spin.c +++ b/src/backend/storage/lmgr/spin.c @@ -37,7 +37,7 @@ #define NUM_EMULATION_SEMAPHORES (NUM_SPINLOCK_SEMAPHORES + NUM_ATOMICS_SEMAPHORES) #else #define NUM_EMULATION_SEMAPHORES (NUM_SPINLOCK_SEMAPHORES) -#endif /* DISABLE_ATOMICS */ +#endif /* DISABLE_ATOMICS */ PGSemaphore *SpinlockSemaArray; diff --git a/src/backend/storage/page/bufpage.c b/src/backend/storage/page/bufpage.c index b231c438f95..82ca91f5977 100644 --- a/src/backend/storage/page/bufpage.c +++ b/src/backend/storage/page/bufpage.c @@ -259,7 +259,7 @@ PageAddItemExtended(Page page, * group at the end of the line pointer array. */ for (offsetNumber = FirstOffsetNumber; - offsetNumber < limit; /* limit is maxoff+1 */ + offsetNumber < limit; /* limit is maxoff+1 */ offsetNumber++) { itemId = PageGetItemId(phdr, offsetNumber); diff --git a/src/backend/storage/sync/sync.c b/src/backend/storage/sync/sync.c index 708215614db..bc3ceb27125 100644 --- a/src/backend/storage/sync/sync.c +++ b/src/backend/storage/sync/sync.c @@ -420,7 +420,7 @@ ProcessSyncRequests(void) ereport(DEBUG1, (errcode_for_file_access(), errmsg_internal("could not fsync file \"%s\" but retrying: %m", - path))); + path))); /* * Absorb incoming requests and check to see if a cancel |