Diffstat (limited to 'src/backend/access/transam')
-rw-r--r--  src/backend/access/transam/commit_ts.c  |  74
-rw-r--r--  src/backend/access/transam/multixact.c  |  59
-rw-r--r--  src/backend/access/transam/parallel.c   | 188
-rw-r--r--  src/backend/access/transam/twophase.c   |  41
-rw-r--r--  src/backend/access/transam/xact.c       | 141
-rw-r--r--  src/backend/access/transam/xlog.c       | 242
-rw-r--r--  src/backend/access/transam/xloginsert.c |  22
-rw-r--r--  src/backend/access/transam/xlogreader.c |  27
8 files changed, 405 insertions(+), 389 deletions(-)
diff --git a/src/backend/access/transam/commit_ts.c b/src/backend/access/transam/commit_ts.c index 63344327e3d..5ad35c0d7f8 100644 --- a/src/backend/access/transam/commit_ts.c +++ b/src/backend/access/transam/commit_ts.c @@ -55,8 +55,8 @@ */ typedef struct CommitTimestampEntry { - TimestampTz time; - RepOriginId nodeid; + TimestampTz time; + RepOriginId nodeid; } CommitTimestampEntry; #define SizeOfCommitTimestampEntry (offsetof(CommitTimestampEntry, nodeid) + \ @@ -65,7 +65,7 @@ typedef struct CommitTimestampEntry #define COMMIT_TS_XACTS_PER_PAGE \ (BLCKSZ / SizeOfCommitTimestampEntry) -#define TransactionIdToCTsPage(xid) \ +#define TransactionIdToCTsPage(xid) \ ((xid) / (TransactionId) COMMIT_TS_XACTS_PER_PAGE) #define TransactionIdToCTsEntry(xid) \ ((xid) % (TransactionId) COMMIT_TS_XACTS_PER_PAGE) @@ -83,21 +83,21 @@ static SlruCtlData CommitTsCtlData; */ typedef struct CommitTimestampShared { - TransactionId xidLastCommit; + TransactionId xidLastCommit; CommitTimestampEntry dataLastCommit; } CommitTimestampShared; -CommitTimestampShared *commitTsShared; +CommitTimestampShared *commitTsShared; /* GUC variable */ -bool track_commit_timestamp; +bool track_commit_timestamp; static void SetXidCommitTsInPage(TransactionId xid, int nsubxids, TransactionId *subxids, TimestampTz ts, RepOriginId nodeid, int pageno); static void TransactionIdSetCommitTs(TransactionId xid, TimestampTz ts, - RepOriginId nodeid, int slotno); + RepOriginId nodeid, int slotno); static int ZeroCommitTsPage(int pageno, bool writeXlog); static bool CommitTsPagePrecedes(int page1, int page2); static void WriteZeroPageXlogRec(int pageno); @@ -141,8 +141,8 @@ TransactionTreeSetCommitTsData(TransactionId xid, int nsubxids, return; /* - * Comply with the WAL-before-data rule: if caller specified it wants - * this value to be recorded in WAL, do so before touching the data. + * Comply with the WAL-before-data rule: if caller specified it wants this + * value to be recorded in WAL, do so before touching the data. */ if (do_xlog) WriteSetTimestampXlogRec(xid, nsubxids, subxids, timestamp, nodeid); @@ -159,9 +159,9 @@ TransactionTreeSetCommitTsData(TransactionId xid, int nsubxids, /* * We split the xids to set the timestamp to in groups belonging to the * same SLRU page; the first element in each such set is its head. The - * first group has the main XID as the head; subsequent sets use the - * first subxid not on the previous page as head. This way, we only have - * to lock/modify each SLRU page once. + * first group has the main XID as the head; subsequent sets use the first + * subxid not on the previous page as head. This way, we only have to + * lock/modify each SLRU page once. */ for (i = 0, headxid = xid;;) { @@ -183,8 +183,8 @@ TransactionTreeSetCommitTsData(TransactionId xid, int nsubxids, break; /* - * Set the new head and skip over it, as well as over the subxids - * we just wrote. + * Set the new head and skip over it, as well as over the subxids we + * just wrote. 
*/ headxid = subxids[j]; i += j - i + 1; @@ -271,14 +271,14 @@ TransactionIdGetCommitTsData(TransactionId xid, TimestampTz *ts, ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("could not get commit timestamp data"), - errhint("Make sure the configuration parameter \"%s\" is set.", - "track_commit_timestamp"))); + errhint("Make sure the configuration parameter \"%s\" is set.", + "track_commit_timestamp"))); /* error if the given Xid doesn't normally commit */ if (!TransactionIdIsNormal(xid)) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("cannot retrieve commit timestamp for transaction %u", xid))); + errmsg("cannot retrieve commit timestamp for transaction %u", xid))); /* * Return empty if the requested value is outside our valid range. @@ -350,15 +350,15 @@ TransactionIdGetCommitTsData(TransactionId xid, TimestampTz *ts, TransactionId GetLatestCommitTsData(TimestampTz *ts, RepOriginId *nodeid) { - TransactionId xid; + TransactionId xid; /* Error if module not enabled */ if (!track_commit_timestamp) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("could not get commit timestamp data"), - errhint("Make sure the configuration parameter \"%s\" is set.", - "track_commit_timestamp"))); + errhint("Make sure the configuration parameter \"%s\" is set.", + "track_commit_timestamp"))); LWLockAcquire(CommitTsLock, LW_SHARED); xid = commitTsShared->xidLastCommit; @@ -377,9 +377,9 @@ GetLatestCommitTsData(TimestampTz *ts, RepOriginId *nodeid) Datum pg_xact_commit_timestamp(PG_FUNCTION_ARGS) { - TransactionId xid = PG_GETARG_UINT32(0); - TimestampTz ts; - bool found; + TransactionId xid = PG_GETARG_UINT32(0); + TimestampTz ts; + bool found; found = TransactionIdGetCommitTsData(xid, &ts, NULL); @@ -393,11 +393,11 @@ pg_xact_commit_timestamp(PG_FUNCTION_ARGS) Datum pg_last_committed_xact(PG_FUNCTION_ARGS) { - TransactionId xid; - TimestampTz ts; - Datum values[2]; - bool nulls[2]; - TupleDesc tupdesc; + TransactionId xid; + TimestampTz ts; + Datum values[2]; + bool nulls[2]; + TupleDesc tupdesc; HeapTuple htup; /* and construct a tuple with our data */ @@ -462,7 +462,7 @@ CommitTsShmemSize(void) void CommitTsShmemInit(void) { - bool found; + bool found; CommitTsCtl->PagePrecedes = CommitTsPagePrecedes; SimpleLruInit(CommitTsCtl, "CommitTs Ctl", CommitTsShmemBuffers(), 0, @@ -495,8 +495,8 @@ BootStrapCommitTs(void) { /* * Nothing to do here at present, unlike most other SLRU modules; segments - * are created when the server is started with this module enabled. - * See StartupCommitTs. + * are created when the server is started with this module enabled. See + * StartupCommitTs. */ } @@ -561,9 +561,9 @@ CompleteCommitTsInitialization(void) /* * Activate this module whenever necessary. - * This must happen during postmaster or standalong-backend startup, - * or during WAL replay anytime the track_commit_timestamp setting is - * changed in the master. + * This must happen during postmaster or standalong-backend startup, + * or during WAL replay anytime the track_commit_timestamp setting is + * changed in the master. 
* * The reason why this SLRU needs separate activation/deactivation functions is * that it can be enabled/disabled during start and the activation/deactivation @@ -612,7 +612,7 @@ ActivateCommitTs(void) /* Finally, create the current segment file, if necessary */ if (!SimpleLruDoesPhysicalPageExist(CommitTsCtl, pageno)) { - int slotno; + int slotno; LWLockAcquire(CommitTsControlLock, LW_EXCLUSIVE); slotno = ZeroCommitTsPage(pageno, false); @@ -834,7 +834,7 @@ WriteSetTimestampXlogRec(TransactionId mainxid, int nsubxids, TransactionId *subxids, TimestampTz timestamp, RepOriginId nodeid) { - xl_commit_ts_set record; + xl_commit_ts_set record; record.timestamp = timestamp; record.nodeid = nodeid; @@ -907,7 +907,7 @@ commit_ts_redo(XLogReaderState *record) subxids = NULL; TransactionTreeSetCommitTsData(setts->mainxid, nsubxids, subxids, - setts->timestamp, setts->nodeid, false); + setts->timestamp, setts->nodeid, false); if (subxids) pfree(subxids); } diff --git a/src/backend/access/transam/multixact.c b/src/backend/access/transam/multixact.c index 0218378ccb5..9568ff1ddb7 100644 --- a/src/backend/access/transam/multixact.c +++ b/src/backend/access/transam/multixact.c @@ -965,7 +965,7 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset) */ if (!MultiXactIdPrecedes(result, MultiXactState->multiVacLimit) || (MultiXactState->nextOffset - MultiXactState->oldestOffset - > MULTIXACT_MEMBER_SAFE_THRESHOLD)) + > MULTIXACT_MEMBER_SAFE_THRESHOLD)) { /* * For safety's sake, we release MultiXactGenLock while sending @@ -1190,9 +1190,9 @@ GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members, MultiXactIdSetOldestVisible(); /* - * If we know the multi is used only for locking and not for updates, - * then we can skip checking if the value is older than our oldest - * visible multi. It cannot possibly still be running. + * If we know the multi is used only for locking and not for updates, then + * we can skip checking if the value is older than our oldest visible + * multi. It cannot possibly still be running. */ if (onlyLock && MultiXactIdPrecedes(multi, OldestVisibleMXactId[MyBackendId])) @@ -1207,14 +1207,14 @@ GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members, * * An ID older than MultiXactState->oldestMultiXactId cannot possibly be * useful; it has already been removed, or will be removed shortly, by - * truncation. Returning the wrong values could lead - * to an incorrect visibility result. However, to support pg_upgrade we - * need to allow an empty set to be returned regardless, if the caller is - * willing to accept it; the caller is expected to check that it's an - * allowed condition (such as ensuring that the infomask bits set on the - * tuple are consistent with the pg_upgrade scenario). If the caller is - * expecting this to be called only on recently created multis, then we - * raise an error. + * truncation. Returning the wrong values could lead to an incorrect + * visibility result. However, to support pg_upgrade we need to allow an + * empty set to be returned regardless, if the caller is willing to accept + * it; the caller is expected to check that it's an allowed condition + * (such as ensuring that the infomask bits set on the tuple are + * consistent with the pg_upgrade scenario). If the caller is expecting + * this to be called only on recently created multis, then we raise an + * error. * * Conversely, an ID >= nextMXact shouldn't ever be seen here; if it is * seen, it implies undetected ID wraparound has occurred. 
This raises a @@ -2123,11 +2123,11 @@ MultiXactSetNextMXact(MultiXactId nextMulti, * enough to contain the next value that would be created. * * We need to do this pretty early during the first startup in binary - * upgrade mode: before StartupMultiXact() in fact, because this routine is - * called even before that by StartupXLOG(). And we can't do it earlier - * than at this point, because during that first call of this routine we - * determine the MultiXactState->nextMXact value that MaybeExtendOffsetSlru - * needs. + * upgrade mode: before StartupMultiXact() in fact, because this routine + * is called even before that by StartupXLOG(). And we can't do it + * earlier than at this point, because during that first call of this + * routine we determine the MultiXactState->nextMXact value that + * MaybeExtendOffsetSlru needs. */ if (IsBinaryUpgrade) MaybeExtendOffsetSlru(); @@ -2202,11 +2202,11 @@ SetMultiXactIdLimit(MultiXactId oldest_datminmxid, Oid oldest_datoid) /* * Determine the offset of the oldest multixact that might still be - * referenced. Normally, we can read the offset from the multixact itself, - * but there's an important special case: if there are no multixacts in - * existence at all, oldest_datminmxid obviously can't point to one. It - * will instead point to the multixact ID that will be assigned the next - * time one is needed. + * referenced. Normally, we can read the offset from the multixact + * itself, but there's an important special case: if there are no + * multixacts in existence at all, oldest_datminmxid obviously can't point + * to one. It will instead point to the multixact ID that will be + * assigned the next time one is needed. * * NB: oldest_dataminmxid is the oldest multixact that might still be * referenced from a table, unlike in DetermineSafeOldestOffset, where we @@ -2520,10 +2520,9 @@ DetermineSafeOldestOffset(MultiXactId oldestMXact) * obviously can't point to one. It will instead point to the multixact * ID that will be assigned the next time one is needed. * - * NB: oldestMXact should be the oldest multixact that still exists in - * the SLRU, unlike in SetMultiXactIdLimit, where we do this same - * computation based on the oldest value that might be referenced in a - * table. + * NB: oldestMXact should be the oldest multixact that still exists in the + * SLRU, unlike in SetMultiXactIdLimit, where we do this same computation + * based on the oldest value that might be referenced in a table. */ LWLockAcquire(MultiXactGenLock, LW_SHARED); if (MultiXactState->nextMXact == oldestMXact) @@ -2679,9 +2678,9 @@ int MultiXactMemberFreezeThreshold(void) { MultiXactOffset members; - uint32 multixacts; - uint32 victim_multixacts; - double fraction; + uint32 multixacts; + uint32 victim_multixacts; + double fraction; ReadMultiXactCounts(&multixacts, &members); @@ -2800,7 +2799,7 @@ SlruScanDirCbFindEarliest(SlruCtl ctl, char *filename, int segpage, void *data) void TruncateMultiXact(void) { - MultiXactId oldestMXact; + MultiXactId oldestMXact; MultiXactOffset oldestOffset; MultiXactOffset nextOffset; mxtruncinfo trunc; diff --git a/src/backend/access/transam/parallel.c b/src/backend/access/transam/parallel.c index 8d6a3606794..f4ba8518b12 100644 --- a/src/backend/access/transam/parallel.c +++ b/src/backend/access/transam/parallel.c @@ -39,7 +39,7 @@ * without blocking. That way, a worker that errors out can write the whole * message into the queue and terminate without waiting for the user backend. 
*/ -#define PARALLEL_ERROR_QUEUE_SIZE 16384 +#define PARALLEL_ERROR_QUEUE_SIZE 16384 /* Magic number for parallel context TOC. */ #define PARALLEL_MAGIC 0x50477c7c @@ -71,7 +71,7 @@ typedef struct FixedParallelState BackendId parallel_master_backend_id; /* Entrypoint for parallel workers. */ - parallel_worker_main_type entrypoint; + parallel_worker_main_type entrypoint; /* Mutex protects remaining fields. */ slock_t mutex; @@ -90,10 +90,10 @@ typedef struct FixedParallelState * and < the number of workers before any user code is invoked; each parallel * worker will get a different parallel worker number. */ -int ParallelWorkerNumber = -1; +int ParallelWorkerNumber = -1; /* Is there a parallel message pending which we need to receive? */ -bool ParallelMessagePending = false; +bool ParallelMessagePending = false; /* Pointer to our fixed parallel state. */ static FixedParallelState *MyFixedParallelState; @@ -115,8 +115,8 @@ static void ParallelWorkerMain(Datum main_arg); ParallelContext * CreateParallelContext(parallel_worker_main_type entrypoint, int nworkers) { - MemoryContext oldcontext; - ParallelContext *pcxt; + MemoryContext oldcontext; + ParallelContext *pcxt; /* It is unsafe to create a parallel context if not in parallel mode. */ Assert(IsInParallelMode()); @@ -159,7 +159,7 @@ CreateParallelContextForExternalFunction(char *library_name, char *function_name, int nworkers) { - MemoryContext oldcontext; + MemoryContext oldcontext; ParallelContext *pcxt; /* We might be running in a very short-lived memory context. */ @@ -184,15 +184,15 @@ CreateParallelContextForExternalFunction(char *library_name, void InitializeParallelDSM(ParallelContext *pcxt) { - MemoryContext oldcontext; - Size library_len = 0; - Size guc_len = 0; - Size combocidlen = 0; - Size tsnaplen = 0; - Size asnaplen = 0; - Size tstatelen = 0; - Size segsize = 0; - int i; + MemoryContext oldcontext; + Size library_len = 0; + Size guc_len = 0; + Size combocidlen = 0; + Size tsnaplen = 0; + Size asnaplen = 0; + Size tstatelen = 0; + Size segsize = 0; + int i; FixedParallelState *fps; Snapshot transaction_snapshot = GetTransactionSnapshot(); Snapshot active_snapshot = GetActiveSnapshot(); @@ -205,8 +205,8 @@ InitializeParallelDSM(ParallelContext *pcxt) shm_toc_estimate_keys(&pcxt->estimator, 1); /* - * Normally, the user will have requested at least one worker process, - * but if by chance they have not, we can skip a bunch of things here. + * Normally, the user will have requested at least one worker process, but + * if by chance they have not, we can skip a bunch of things here. */ if (pcxt->nworkers > 0) { @@ -228,8 +228,8 @@ InitializeParallelDSM(ParallelContext *pcxt) /* Estimate space need for error queues. */ StaticAssertStmt(BUFFERALIGN(PARALLEL_ERROR_QUEUE_SIZE) == - PARALLEL_ERROR_QUEUE_SIZE, - "parallel error queue size not buffer-aligned"); + PARALLEL_ERROR_QUEUE_SIZE, + "parallel error queue size not buffer-aligned"); shm_toc_estimate_chunk(&pcxt->estimator, PARALLEL_ERROR_QUEUE_SIZE * pcxt->nworkers); shm_toc_estimate_keys(&pcxt->estimator, 1); @@ -251,9 +251,9 @@ InitializeParallelDSM(ParallelContext *pcxt) * memory segment; instead, just use backend-private memory. * * Also, if we can't create a dynamic shared memory segment because the - * maximum number of segments have already been created, then fall back - * to backend-private memory, and plan not to use any workers. 
We hope - * this won't happen very often, but it's better to abandon the use of + * maximum number of segments have already been created, then fall back to + * backend-private memory, and plan not to use any workers. We hope this + * won't happen very often, but it's better to abandon the use of * parallelism than to fail outright. */ segsize = shm_toc_estimate(&pcxt->estimator); @@ -290,13 +290,13 @@ InitializeParallelDSM(ParallelContext *pcxt) /* We can skip the rest of this if we're not budgeting for any workers. */ if (pcxt->nworkers > 0) { - char *libraryspace; - char *gucspace; - char *combocidspace; - char *tsnapspace; - char *asnapspace; - char *tstatespace; - char *error_queue_space; + char *libraryspace; + char *gucspace; + char *combocidspace; + char *tsnapspace; + char *asnapspace; + char *tstatespace; + char *error_queue_space; /* Serialize shared libraries we have loaded. */ libraryspace = shm_toc_allocate(pcxt->toc, library_len); @@ -338,12 +338,12 @@ InitializeParallelDSM(ParallelContext *pcxt) * should be transmitted via separate (possibly larger?) queues. */ error_queue_space = - shm_toc_allocate(pcxt->toc, - PARALLEL_ERROR_QUEUE_SIZE * pcxt->nworkers); + shm_toc_allocate(pcxt->toc, + PARALLEL_ERROR_QUEUE_SIZE * pcxt->nworkers); for (i = 0; i < pcxt->nworkers; ++i) { - char *start; - shm_mq *mq; + char *start; + shm_mq *mq; start = error_queue_space + i * PARALLEL_ERROR_QUEUE_SIZE; mq = shm_mq_create(start, PARALLEL_ERROR_QUEUE_SIZE); @@ -355,8 +355,8 @@ InitializeParallelDSM(ParallelContext *pcxt) /* Serialize extension entrypoint information. */ if (pcxt->library_name != NULL) { - Size lnamelen = strlen(pcxt->library_name); - char *extensionstate; + Size lnamelen = strlen(pcxt->library_name); + char *extensionstate; extensionstate = shm_toc_allocate(pcxt->toc, lnamelen + strlen(pcxt->function_name) + 2); @@ -377,10 +377,10 @@ InitializeParallelDSM(ParallelContext *pcxt) void LaunchParallelWorkers(ParallelContext *pcxt) { - MemoryContext oldcontext; - BackgroundWorker worker; - int i; - bool any_registrations_failed = false; + MemoryContext oldcontext; + BackgroundWorker worker; + int i; + bool any_registrations_failed = false; /* Skip this if we have no workers. */ if (pcxt->nworkers == 0) @@ -408,8 +408,8 @@ LaunchParallelWorkers(ParallelContext *pcxt) * * The caller must be able to tolerate ending up with fewer workers than * expected, so there is no need to throw an error here if registration - * fails. It wouldn't help much anyway, because registering the worker - * in no way guarantees that it will start up and initialize successfully. + * fails. It wouldn't help much anyway, because registering the worker in + * no way guarantees that it will start up and initialize successfully. */ for (i = 0; i < pcxt->nworkers; ++i) { @@ -421,8 +421,8 @@ LaunchParallelWorkers(ParallelContext *pcxt) else { /* - * If we weren't able to register the worker, then we've bumped - * up against the max_worker_processes limit, and future + * If we weren't able to register the worker, then we've bumped up + * against the max_worker_processes limit, and future * registrations will probably fail too, so arrange to skip them. 
* But we still have to execute this code for the remaining slots * to make sure that we forget about the error queues we budgeted @@ -455,13 +455,13 @@ WaitForParallelWorkersToFinish(ParallelContext *pcxt) { for (;;) { - bool anyone_alive = false; - int i; + bool anyone_alive = false; + int i; /* - * This will process any parallel messages that are pending, which - * may change the outcome of the loop that follows. It may also - * throw an error propagated from a worker. + * This will process any parallel messages that are pending, which may + * change the outcome of the loop that follows. It may also throw an + * error propagated from a worker. */ CHECK_FOR_INTERRUPTS(); @@ -502,7 +502,7 @@ WaitForParallelWorkersToFinish(ParallelContext *pcxt) void DestroyParallelContext(ParallelContext *pcxt) { - int i; + int i; /* * Be careful about order of operations here! We remove the parallel @@ -548,7 +548,7 @@ DestroyParallelContext(ParallelContext *pcxt) /* Wait until the workers actually die. */ for (i = 0; i < pcxt->nworkers; ++i) { - BgwHandleStatus status; + BgwHandleStatus status; if (pcxt->worker[i].bgwhandle == NULL) continue; @@ -626,9 +626,9 @@ HandleParallelMessages(void) dlist_foreach(iter, &pcxt_list) { ParallelContext *pcxt; - int i; - Size nbytes; - void *data; + int i; + Size nbytes; + void *data; pcxt = dlist_container(ParallelContext, node, iter.cur); if (pcxt->worker == NULL) @@ -637,14 +637,14 @@ HandleParallelMessages(void) for (i = 0; i < pcxt->nworkers; ++i) { /* - * Read as many messages as we can from each worker, but stop - * when either (1) the error queue goes away, which can happen if - * we receive a Terminate message from the worker; or (2) no more + * Read as many messages as we can from each worker, but stop when + * either (1) the error queue goes away, which can happen if we + * receive a Terminate message from the worker; or (2) no more * messages can be read from the worker without blocking. */ while (pcxt->worker[i].error_mqh != NULL) { - shm_mq_result res; + shm_mq_result res; res = shm_mq_receive(pcxt->worker[i].error_mqh, &nbytes, &data, true); @@ -652,7 +652,7 @@ HandleParallelMessages(void) break; else if (res == SHM_MQ_SUCCESS) { - StringInfoData msg; + StringInfoData msg; initStringInfo(&msg); appendBinaryStringInfo(&msg, data, nbytes); @@ -661,7 +661,7 @@ HandleParallelMessages(void) } else ereport(ERROR, - (errcode(ERRCODE_INTERNAL_ERROR), /* XXX: wrong errcode? */ + (errcode(ERRCODE_INTERNAL_ERROR), /* XXX: wrong errcode? */ errmsg("lost connection to parallel worker"))); /* This might make the error queue go away. */ @@ -677,23 +677,24 @@ HandleParallelMessages(void) static void HandleParallelMessage(ParallelContext *pcxt, int i, StringInfo msg) { - char msgtype; + char msgtype; msgtype = pq_getmsgbyte(msg); switch (msgtype) { - case 'K': /* BackendKeyData */ + case 'K': /* BackendKeyData */ { - int32 pid = pq_getmsgint(msg, 4); + int32 pid = pq_getmsgint(msg, 4); + (void) pq_getmsgint(msg, 4); /* discard cancel key */ (void) pq_getmsgend(msg); pcxt->worker[i].pid = pid; break; } - case 'E': /* ErrorResponse */ - case 'N': /* NoticeResponse */ + case 'E': /* ErrorResponse */ + case 'N': /* NoticeResponse */ { ErrorData edata; ErrorContextCallback errctx; @@ -725,14 +726,14 @@ HandleParallelMessage(ParallelContext *pcxt, int i, StringInfo msg) break; } - case 'A': /* NotifyResponse */ + case 'A': /* NotifyResponse */ { /* Propagate NotifyResponse. 
*/ pq_putmessage(msg->data[0], &msg->data[1], msg->len - 1); break; } - case 'X': /* Terminate, indicating clean exit */ + case 'X': /* Terminate, indicating clean exit */ { pfree(pcxt->worker[i].bgwhandle); pfree(pcxt->worker[i].error_mqh); @@ -797,18 +798,18 @@ static void ParallelWorkerMain(Datum main_arg) { dsm_segment *seg; - shm_toc *toc; + shm_toc *toc; FixedParallelState *fps; - char *error_queue_space; - shm_mq *mq; + char *error_queue_space; + shm_mq *mq; shm_mq_handle *mqh; - char *libraryspace; - char *gucspace; - char *combocidspace; - char *tsnapspace; - char *asnapspace; - char *tstatespace; - StringInfoData msgbuf; + char *libraryspace; + char *gucspace; + char *combocidspace; + char *tsnapspace; + char *asnapspace; + char *tstatespace; + StringInfoData msgbuf; /* Establish signal handlers. */ pqsignal(SIGTERM, die); @@ -824,8 +825,8 @@ ParallelWorkerMain(Datum main_arg) ALLOCSET_DEFAULT_MAXSIZE); /* - * Now that we have a resource owner, we can attach to the dynamic - * shared memory segment and read the table of contents. + * Now that we have a resource owner, we can attach to the dynamic shared + * memory segment and read the table of contents. */ seg = dsm_attach(DatumGetUInt32(main_arg)); if (seg == NULL) @@ -836,7 +837,7 @@ ParallelWorkerMain(Datum main_arg) if (toc == NULL) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("bad magic number in dynamic shared memory segment"))); + errmsg("bad magic number in dynamic shared memory segment"))); /* Determine and set our worker number. */ fps = shm_toc_lookup(toc, PARALLEL_KEY_FIXED); @@ -860,7 +861,7 @@ ParallelWorkerMain(Datum main_arg) */ error_queue_space = shm_toc_lookup(toc, PARALLEL_KEY_ERROR_QUEUE); mq = (shm_mq *) (error_queue_space + - ParallelWorkerNumber * PARALLEL_ERROR_QUEUE_SIZE); + ParallelWorkerNumber * PARALLEL_ERROR_QUEUE_SIZE); shm_mq_set_sender(mq, MyProc); mqh = shm_mq_attach(mq, seg, NULL); pq_redirect_to_shm_mq(mq, mqh); @@ -870,9 +871,9 @@ ParallelWorkerMain(Datum main_arg) /* * Send a BackendKeyData message to the process that initiated parallelism * so that it has access to our PID before it receives any other messages - * from us. Our cancel key is sent, too, since that's the way the protocol - * message is defined, but it won't actually be used for anything in this - * case. + * from us. Our cancel key is sent, too, since that's the way the + * protocol message is defined, but it won't actually be used for anything + * in this case. */ pq_beginmessage(&msgbuf, 'K'); pq_sendint(&msgbuf, (int32) MyProcPid, sizeof(int32)); @@ -880,13 +881,13 @@ ParallelWorkerMain(Datum main_arg) pq_endmessage(&msgbuf); /* - * Hooray! Primary initialization is complete. Now, we need to set up - * our backend-local state to match the original backend. + * Hooray! Primary initialization is complete. Now, we need to set up our + * backend-local state to match the original backend. */ /* - * Load libraries that were loaded by original backend. We want to do this - * before restoring GUCs, because the libraries might define custom + * Load libraries that were loaded by original backend. We want to do + * this before restoring GUCs, because the libraries might define custom * variables. */ libraryspace = shm_toc_lookup(toc, PARALLEL_KEY_LIBRARY); @@ -928,7 +929,8 @@ ParallelWorkerMain(Datum main_arg) SetUserIdAndSecContext(fps->current_user_id, fps->sec_context); /* - * We've initialized all of our state now; nothing should change hereafter. 
+ * We've initialized all of our state now; nothing should change + * hereafter. */ EnterParallelMode(); @@ -965,9 +967,9 @@ ParallelWorkerMain(Datum main_arg) static void ParallelExtensionTrampoline(dsm_segment *seg, shm_toc *toc) { - char *extensionstate; - char *library_name; - char *function_name; + char *extensionstate; + char *library_name; + char *function_name; parallel_worker_main_type entrypt; extensionstate = shm_toc_lookup(toc, PARALLEL_KEY_EXTENSION_TRAMPOLINE); @@ -988,7 +990,7 @@ ParallelExtensionTrampoline(dsm_segment *seg, shm_toc *toc) static void ParallelErrorContext(void *arg) { - errcontext("parallel worker, pid %d", * (int32 *) arg); + errcontext("parallel worker, pid %d", *(int32 *) arg); } /* diff --git a/src/backend/access/transam/twophase.c b/src/backend/access/transam/twophase.c index 4743cacefe6..177d1e1432e 100644 --- a/src/backend/access/transam/twophase.c +++ b/src/backend/access/transam/twophase.c @@ -117,7 +117,7 @@ typedef struct GlobalTransactionData TimestampTz prepared_at; /* time of preparation */ XLogRecPtr prepare_lsn; /* XLOG offset of prepare record */ Oid owner; /* ID of user that executed the xact */ - BackendId locking_backend; /* backend currently working on the xact */ + BackendId locking_backend; /* backend currently working on the xact */ bool valid; /* TRUE if PGPROC entry is in proc array */ char gid[GIDSIZE]; /* The GID assigned to the prepared xact */ } GlobalTransactionData; @@ -256,24 +256,24 @@ AtAbort_Twophase(void) return; /* - * What to do with the locked global transaction entry? If we were in - * the process of preparing the transaction, but haven't written the WAL + * What to do with the locked global transaction entry? If we were in the + * process of preparing the transaction, but haven't written the WAL * record and state file yet, the transaction must not be considered as * prepared. Likewise, if we are in the process of finishing an - * already-prepared transaction, and fail after having already written - * the 2nd phase commit or rollback record to the WAL, the transaction - * should not be considered as prepared anymore. In those cases, just - * remove the entry from shared memory. + * already-prepared transaction, and fail after having already written the + * 2nd phase commit or rollback record to the WAL, the transaction should + * not be considered as prepared anymore. In those cases, just remove the + * entry from shared memory. * - * Otherwise, the entry must be left in place so that the transaction - * can be finished later, so just unlock it. + * Otherwise, the entry must be left in place so that the transaction can + * be finished later, so just unlock it. * * If we abort during prepare, after having written the WAL record, we * might not have transferred all locks and other state to the prepared * transaction yet. Likewise, if we abort during commit or rollback, - * after having written the WAL record, we might not have released - * all the resources held by the transaction yet. In those cases, the - * in-memory state can be wrong, but it's too late to back out. + * after having written the WAL record, we might not have released all the + * resources held by the transaction yet. In those cases, the in-memory + * state can be wrong, but it's too late to back out. */ if (!MyLockedGxact->valid) { @@ -408,8 +408,8 @@ MarkAsPreparing(TransactionId xid, const char *gid, TwoPhaseState->prepXacts[TwoPhaseState->numPrepXacts++] = gxact; /* - * Remember that we have this GlobalTransaction entry locked for us. 
- * If we abort after this, we must release it. + * Remember that we have this GlobalTransaction entry locked for us. If we + * abort after this, we must release it. */ MyLockedGxact = gxact; @@ -499,8 +499,8 @@ LockGXact(const char *gid, Oid user) if (gxact->locking_backend != InvalidBackendId) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("prepared transaction with identifier \"%s\" is busy", - gid))); + errmsg("prepared transaction with identifier \"%s\" is busy", + gid))); if (user != gxact->owner && !superuser_arg(user)) ereport(ERROR, @@ -1423,8 +1423,8 @@ FinishPreparedTransaction(const char *gid, bool isCommit) /* * In case we fail while running the callbacks, mark the gxact invalid so - * no one else will try to commit/rollback, and so it will be recycled - * if we fail after this point. It is still locked by our backend so it + * no one else will try to commit/rollback, and so it will be recycled if + * we fail after this point. It is still locked by our backend so it * won't go away yet. * * (We assume it's safe to do this without taking TwoPhaseStateLock.) @@ -2055,8 +2055,9 @@ RecoverPreparedTransactions(void) StandbyReleaseLockTree(xid, hdr->nsubxacts, subxids); /* - * We're done with recovering this transaction. Clear MyLockedGxact, - * like we do in PrepareTransaction() during normal operation. + * We're done with recovering this transaction. Clear + * MyLockedGxact, like we do in PrepareTransaction() during normal + * operation. */ PostPrepare_Twophase(); diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c index 23401057e2c..b53d95faf86 100644 --- a/src/backend/access/transam/xact.c +++ b/src/backend/access/transam/xact.c @@ -102,9 +102,9 @@ int synchronous_commit = SYNCHRONOUS_COMMIT_ON; * The XIDs are stored sorted in numerical order (not logical order) to make * lookups as fast as possible. */ -TransactionId XactTopTransactionId = InvalidTransactionId; -int nParallelCurrentXids = 0; -TransactionId *ParallelCurrentXids; +TransactionId XactTopTransactionId = InvalidTransactionId; +int nParallelCurrentXids = 0; +TransactionId *ParallelCurrentXids; /* * MyXactAccessedTempRel is set when a temporary relation is accessed. @@ -142,7 +142,7 @@ typedef enum TBlockState /* transaction block states */ TBLOCK_BEGIN, /* starting transaction block */ TBLOCK_INPROGRESS, /* live transaction */ - TBLOCK_PARALLEL_INPROGRESS, /* live transaction inside parallel worker */ + TBLOCK_PARALLEL_INPROGRESS, /* live transaction inside parallel worker */ TBLOCK_END, /* COMMIT received */ TBLOCK_ABORT, /* failed xact, awaiting ROLLBACK */ TBLOCK_ABORT_END, /* failed xact, ROLLBACK received */ @@ -184,7 +184,7 @@ typedef struct TransactionStateData bool prevXactReadOnly; /* entry-time xact r/o state */ bool startedInRecovery; /* did we start in recovery? */ bool didLogXid; /* has xid been included in WAL record? */ - int parallelModeLevel; /* Enter/ExitParallelMode counter */ + int parallelModeLevel; /* Enter/ExitParallelMode counter */ struct TransactionStateData *parent; /* back link to parent */ } TransactionStateData; @@ -494,8 +494,8 @@ AssignTransactionId(TransactionState s) Assert(s->state == TRANS_INPROGRESS); /* - * Workers synchronize transaction state at the beginning of each - * parallel operation, so we can't account for new XIDs at this point. + * Workers synchronize transaction state at the beginning of each parallel + * operation, so we can't account for new XIDs at this point. 
*/ if (IsInParallelMode()) elog(ERROR, "cannot assign XIDs during a parallel operation"); @@ -788,10 +788,10 @@ TransactionIdIsCurrentTransactionId(TransactionId xid) return false; /* - * In parallel workers, the XIDs we must consider as current are stored - * in ParallelCurrentXids rather than the transaction-state stack. Note - * that the XIDs in this array are sorted numerically rather than - * according to transactionIdPrecedes order. + * In parallel workers, the XIDs we must consider as current are stored in + * ParallelCurrentXids rather than the transaction-state stack. Note that + * the XIDs in this array are sorted numerically rather than according to + * transactionIdPrecedes order. */ if (nParallelCurrentXids > 0) { @@ -1204,7 +1204,7 @@ RecordTransactionCommit(void) nchildren, children, nrels, rels, nmsgs, invalMessages, RelcacheInitFileInval, forceSyncCommit, - InvalidTransactionId /* plain commit */); + InvalidTransactionId /* plain commit */ ); /* * Record plain commit ts if not replaying remote actions, or if no @@ -1505,7 +1505,7 @@ RecordTransactionAbort(bool isSubXact) RelFileNode *rels; int nchildren; TransactionId *children; - TimestampTz xact_time; + TimestampTz xact_time; /* * If we haven't been assigned an XID, nobody will care whether we aborted @@ -2316,8 +2316,8 @@ PrepareTransaction(void) /* * In normal commit-processing, this is all non-critical post-transaction - * cleanup. When the transaction is prepared, however, it's important that - * the locks and other per-backend resources are transferred to the + * cleanup. When the transaction is prepared, however, it's important + * that the locks and other per-backend resources are transferred to the * prepared transaction's PGPROC entry. Note that if an error is raised * here, it's too late to abort the transaction. XXX: This probably should * be in a critical section, to force a PANIC if any of this fails, but @@ -2358,9 +2358,8 @@ PrepareTransaction(void) /* * Allow another backend to finish the transaction. After - * PostPrepare_Twophase(), the transaction is completely detached from - * our backend. The rest is just non-critical cleanup of backend-local - * state. + * PostPrepare_Twophase(), the transaction is completely detached from our + * backend. The rest is just non-critical cleanup of backend-local state. */ PostPrepare_Twophase(); @@ -2417,7 +2416,7 @@ AbortTransaction(void) { TransactionState s = CurrentTransactionState; TransactionId latestXid; - bool is_parallel_worker; + bool is_parallel_worker; /* Prevent cancel/die interrupt while cleaning up */ HOLD_INTERRUPTS(); @@ -2520,9 +2519,9 @@ AbortTransaction(void) latestXid = InvalidTransactionId; /* - * Since the parallel master won't get our value of XactLastRecEnd in this - * case, we nudge WAL-writer ourselves in this case. See related comments in - * RecordTransactionAbort for why this matters. + * Since the parallel master won't get our value of XactLastRecEnd in + * this case, we nudge WAL-writer ourselves in this case. See related + * comments in RecordTransactionAbort for why this matters. 
*/ XLogSetAsyncXactLSN(XactLastRecEnd); } @@ -3720,7 +3719,7 @@ DefineSavepoint(char *name) if (IsInParallelMode()) ereport(ERROR, (errcode(ERRCODE_INVALID_TRANSACTION_STATE), - errmsg("cannot define savepoints during a parallel operation"))); + errmsg("cannot define savepoints during a parallel operation"))); switch (s->blockState) { @@ -3787,7 +3786,7 @@ ReleaseSavepoint(List *options) if (IsInParallelMode()) ereport(ERROR, (errcode(ERRCODE_INVALID_TRANSACTION_STATE), - errmsg("cannot release savepoints during a parallel operation"))); + errmsg("cannot release savepoints during a parallel operation"))); switch (s->blockState) { @@ -3900,7 +3899,7 @@ RollbackToSavepoint(List *options) if (IsInParallelMode()) ereport(ERROR, (errcode(ERRCODE_INVALID_TRANSACTION_STATE), - errmsg("cannot rollback to savepoints during a parallel operation"))); + errmsg("cannot rollback to savepoints during a parallel operation"))); switch (s->blockState) { @@ -4017,17 +4016,18 @@ BeginInternalSubTransaction(char *name) /* * Workers synchronize transaction state at the beginning of each parallel - * operation, so we can't account for new subtransactions after that point. - * We might be able to make an exception for the type of subtransaction - * established by this function, which is typically used in contexts where - * we're going to release or roll back the subtransaction before proceeding - * further, so that no enduring change to the transaction state occurs. - * For now, however, we prohibit this case along with all the others. + * operation, so we can't account for new subtransactions after that + * point. We might be able to make an exception for the type of + * subtransaction established by this function, which is typically used in + * contexts where we're going to release or roll back the subtransaction + * before proceeding further, so that no enduring change to the + * transaction state occurs. For now, however, we prohibit this case along + * with all the others. */ if (IsInParallelMode()) ereport(ERROR, (errcode(ERRCODE_INVALID_TRANSACTION_STATE), - errmsg("cannot start subtransactions during a parallel operation"))); + errmsg("cannot start subtransactions during a parallel operation"))); switch (s->blockState) { @@ -4094,7 +4094,7 @@ ReleaseCurrentSubTransaction(void) if (IsInParallelMode()) ereport(ERROR, (errcode(ERRCODE_INVALID_TRANSACTION_STATE), - errmsg("cannot commit subtransactions during a parallel operation"))); + errmsg("cannot commit subtransactions during a parallel operation"))); if (s->blockState != TBLOCK_SUBINPROGRESS) elog(ERROR, "ReleaseCurrentSubTransaction: unexpected state %s", @@ -4773,7 +4773,8 @@ Size EstimateTransactionStateSpace(void) { TransactionState s; - Size nxids = 5; /* iso level, deferrable, top & current XID, XID count */ + Size nxids = 5; /* iso level, deferrable, top & current XID, + * XID count */ for (s = CurrentTransactionState; s != NULL; s = s->parent) { @@ -4804,8 +4805,8 @@ void SerializeTransactionState(Size maxsize, char *start_address) { TransactionState s; - Size nxids = 0; - Size i = 0; + Size nxids = 0; + Size i = 0; TransactionId *workspace; TransactionId *result = (TransactionId *) start_address; @@ -4830,8 +4831,8 @@ SerializeTransactionState(Size maxsize, char *start_address) } /* - * OK, we need to generate a sorted list of XIDs that our workers - * should view as current. First, figure out how many there are. + * OK, we need to generate a sorted list of XIDs that our workers should + * view as current. First, figure out how many there are. 
*/ for (s = CurrentTransactionState; s != NULL; s = s->parent) { @@ -5060,22 +5061,22 @@ xactGetCommittedChildren(TransactionId **ptr) */ XLogRecPtr XactLogCommitRecord(TimestampTz commit_time, - int nsubxacts, TransactionId *subxacts, - int nrels, RelFileNode *rels, - int nmsgs, SharedInvalidationMessage *msgs, - bool relcacheInval, bool forceSync, - TransactionId twophase_xid) + int nsubxacts, TransactionId *subxacts, + int nrels, RelFileNode *rels, + int nmsgs, SharedInvalidationMessage *msgs, + bool relcacheInval, bool forceSync, + TransactionId twophase_xid) { - xl_xact_commit xlrec; - xl_xact_xinfo xl_xinfo; - xl_xact_dbinfo xl_dbinfo; - xl_xact_subxacts xl_subxacts; + xl_xact_commit xlrec; + xl_xact_xinfo xl_xinfo; + xl_xact_dbinfo xl_dbinfo; + xl_xact_subxacts xl_subxacts; xl_xact_relfilenodes xl_relfilenodes; - xl_xact_invals xl_invals; - xl_xact_twophase xl_twophase; - xl_xact_origin xl_origin; + xl_xact_invals xl_invals; + xl_xact_twophase xl_twophase; + xl_xact_origin xl_origin; - uint8 info; + uint8 info; Assert(CritSectionCount > 0); @@ -5198,17 +5199,17 @@ XactLogCommitRecord(TimestampTz commit_time, */ XLogRecPtr XactLogAbortRecord(TimestampTz abort_time, - int nsubxacts, TransactionId *subxacts, - int nrels, RelFileNode *rels, - TransactionId twophase_xid) + int nsubxacts, TransactionId *subxacts, + int nrels, RelFileNode *rels, + TransactionId twophase_xid) { - xl_xact_abort xlrec; - xl_xact_xinfo xl_xinfo; - xl_xact_subxacts xl_subxacts; + xl_xact_abort xlrec; + xl_xact_xinfo xl_xinfo; + xl_xact_subxacts xl_subxacts; xl_xact_relfilenodes xl_relfilenodes; - xl_xact_twophase xl_twophase; + xl_xact_twophase xl_twophase; - uint8 info; + uint8 info; Assert(CritSectionCount > 0); @@ -5289,7 +5290,7 @@ xact_redo_commit(xl_xact_parsed_commit *parsed, { TransactionId max_xid; int i; - TimestampTz commit_time; + TimestampTz commit_time; max_xid = TransactionIdLatest(xid, parsed->nsubxacts, parsed->subxacts); @@ -5351,13 +5352,13 @@ xact_redo_commit(xl_xact_parsed_commit *parsed, * recovered. It's unlikely but it's good to be safe. */ TransactionIdAsyncCommitTree( - xid, parsed->nsubxacts, parsed->subxacts, lsn); + xid, parsed->nsubxacts, parsed->subxacts, lsn); /* * We must mark clog before we update the ProcArray. */ ExpireTreeKnownAssignedTransactionIds( - xid, parsed->nsubxacts, parsed->subxacts, max_xid); + xid, parsed->nsubxacts, parsed->subxacts, max_xid); /* * Send any cache invalidations attached to the commit. We must @@ -5365,9 +5366,9 @@ xact_redo_commit(xl_xact_parsed_commit *parsed, * occurs in CommitTransaction(). */ ProcessCommittedInvalidationMessages( - parsed->msgs, parsed->nmsgs, - XactCompletionRelcacheInitFileInval(parsed->xinfo), - parsed->dbId, parsed->tsId); + parsed->msgs, parsed->nmsgs, + XactCompletionRelcacheInitFileInval(parsed->xinfo), + parsed->dbId, parsed->tsId); /* * Release locks, if any. We do this for both two phase and normal one @@ -5383,7 +5384,7 @@ xact_redo_commit(xl_xact_parsed_commit *parsed, { /* recover apply progress */ replorigin_advance(origin_id, parsed->origin_lsn, lsn, - false /* backward */, false /* WAL */); + false /* backward */ , false /* WAL */ ); } /* Make sure files supposed to be dropped are dropped */ @@ -5447,8 +5448,8 @@ xact_redo_commit(xl_xact_parsed_commit *parsed, static void xact_redo_abort(xl_xact_parsed_abort *parsed, TransactionId xid) { - int i; - TransactionId max_xid; + int i; + TransactionId max_xid; /* * Make sure nextXid is beyond any XID mentioned in the record. 
@@ -5495,7 +5496,7 @@ xact_redo_abort(xl_xact_parsed_abort *parsed, TransactionId xid) * We must update the ProcArray after we have marked clog. */ ExpireTreeKnownAssignedTransactionIds( - xid, parsed->nsubxacts, parsed->subxacts, max_xid); + xid, parsed->nsubxacts, parsed->subxacts, max_xid); /* * There are no flat files that need updating, nor invalidation @@ -5557,7 +5558,7 @@ xact_redo(XLogReaderState *record) xl_xact_parsed_abort parsed; ParseAbortRecord(XLogRecGetInfo(record), xlrec, - &parsed); + &parsed); if (info == XLOG_XACT_ABORT) { diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c index b913bf3ebcb..087b6be084d 100644 --- a/src/backend/access/transam/xlog.c +++ b/src/backend/access/transam/xlog.c @@ -81,8 +81,8 @@ extern uint32 bootstrap_data_checksum_version; /* User-settable parameters */ -int max_wal_size = 64; /* 1 GB */ -int min_wal_size = 5; /* 80 MB */ +int max_wal_size = 64; /* 1 GB */ +int min_wal_size = 5; /* 80 MB */ int wal_keep_segments = 0; int XLOGbuffers = -1; int XLogArchiveTimeout = 0; @@ -951,14 +951,14 @@ XLogInsertRecord(XLogRecData *rdata, XLogRecPtr fpw_lsn) /* * Check to see if my copy of RedoRecPtr or doPageWrites is out of date. * If so, may have to go back and have the caller recompute everything. - * This can only happen just after a checkpoint, so it's better to be - * slow in this case and fast otherwise. + * This can only happen just after a checkpoint, so it's better to be slow + * in this case and fast otherwise. * * If we aren't doing full-page writes then RedoRecPtr doesn't actually * affect the contents of the XLOG record, so we'll update our local copy * but not force a recomputation. (If doPageWrites was just turned off, - * we could recompute the record without full pages, but we choose not - * to bother.) + * we could recompute the record without full pages, but we choose not to + * bother.) */ if (RedoRecPtr != Insert->RedoRecPtr) { @@ -970,8 +970,8 @@ XLogInsertRecord(XLogRecData *rdata, XLogRecPtr fpw_lsn) if (fpw_lsn != InvalidXLogRecPtr && fpw_lsn <= RedoRecPtr && doPageWrites) { /* - * Oops, some buffer now needs to be backed up that the caller - * didn't back up. Start over. + * Oops, some buffer now needs to be backed up that the caller didn't + * back up. Start over. */ WALInsertLockRelease(); END_CRIT_SECTION(); @@ -1100,8 +1100,8 @@ XLogInsertRecord(XLogRecData *rdata, XLogRecPtr fpw_lsn) { appendStringInfo(&buf, "error decoding record: out of memory"); } - else if (!DecodeXLogRecord(debug_reader, (XLogRecord *) recordBuf.data, - &errormsg)) + else if (!DecodeXLogRecord(debug_reader, (XLogRecord *) recordBuf.data, + &errormsg)) { appendStringInfo(&buf, "error decoding record: %s", errormsg ? errormsg : "no error message"); @@ -1932,11 +1932,11 @@ AdvanceXLInsertBuffer(XLogRecPtr upto, bool opportunistic) /* * Fill the new page's header */ - NewPage ->xlp_magic = XLOG_PAGE_MAGIC; + NewPage->xlp_magic = XLOG_PAGE_MAGIC; /* NewPage->xlp_info = 0; */ /* done by memset */ - NewPage ->xlp_tli = ThisTimeLineID; - NewPage ->xlp_pageaddr = NewPageBeginPtr; + NewPage->xlp_tli = ThisTimeLineID; + NewPage->xlp_pageaddr = NewPageBeginPtr; /* NewPage->xlp_rem_len = 0; */ /* done by memset */ @@ -1954,7 +1954,7 @@ AdvanceXLInsertBuffer(XLogRecPtr upto, bool opportunistic) * compress a few records. */ if (!Insert->forcePageWrites) - NewPage ->xlp_info |= XLP_BKP_REMOVABLE; + NewPage->xlp_info |= XLP_BKP_REMOVABLE; /* * If first page of an XLOG segment file, make it a long header. 
@@ -1966,7 +1966,7 @@ AdvanceXLInsertBuffer(XLogRecPtr upto, bool opportunistic) NewLongPage->xlp_sysid = ControlFile->system_identifier; NewLongPage->xlp_seg_size = XLogSegSize; NewLongPage->xlp_xlog_blcksz = XLOG_BLCKSZ; - NewPage ->xlp_info |= XLP_LONG_HEADER; + NewPage->xlp_info |= XLP_LONG_HEADER; } /* @@ -2008,10 +2008,10 @@ CalculateCheckpointSegments(void) * * a) we keep WAL for two checkpoint cycles, back to the "prev" checkpoint. * b) during checkpoint, we consume checkpoint_completion_target * - * number of segments consumed between checkpoints. + * number of segments consumed between checkpoints. *------- */ - target = (double ) max_wal_size / (2.0 + CheckPointCompletionTarget); + target = (double) max_wal_size / (2.0 + CheckPointCompletionTarget); /* round down */ CheckPointSegments = (int) target; @@ -2052,15 +2052,15 @@ XLOGfileslop(XLogRecPtr PriorRedoPtr) * remove enough segments to stay below the maximum. */ minSegNo = PriorRedoPtr / XLOG_SEG_SIZE + min_wal_size - 1; - maxSegNo = PriorRedoPtr / XLOG_SEG_SIZE + max_wal_size - 1; + maxSegNo = PriorRedoPtr / XLOG_SEG_SIZE + max_wal_size - 1; /* * Between those limits, recycle enough segments to get us through to the * estimated end of next checkpoint. * * To estimate where the next checkpoint will finish, assume that the - * system runs steadily consuming CheckPointDistanceEstimate - * bytes between every checkpoint. + * system runs steadily consuming CheckPointDistanceEstimate bytes between + * every checkpoint. * * The reason this calculation is done from the prior checkpoint, not the * one that just finished, is that this behaves better if some checkpoint @@ -3005,11 +3005,11 @@ XLogFileInit(XLogSegNo logsegno, bool *use_existent, bool use_lock) /* * XXX: What should we use as max_segno? We used to use XLOGfileslop when * that was a constant, but that was always a bit dubious: normally, at a - * checkpoint, XLOGfileslop was the offset from the checkpoint record, - * but here, it was the offset from the insert location. We can't do the + * checkpoint, XLOGfileslop was the offset from the checkpoint record, but + * here, it was the offset from the insert location. We can't do the * normal XLOGfileslop calculation here because we don't have access to - * the prior checkpoint's redo location. So somewhat arbitrarily, just - * use CheckPointSegments. + * the prior checkpoint's redo location. So somewhat arbitrarily, just use + * CheckPointSegments. */ max_segno = logsegno + CheckPointSegments; if (!InstallXLogFileSegment(&installed_segno, tmppath, @@ -3098,7 +3098,8 @@ XLogFileCopy(char *dstfname, char *srcfname, int upto) nread = upto - nbytes; /* - * The part that is not read from the source file is filled with zeros. + * The part that is not read from the source file is filled with + * zeros. */ if (nread < sizeof(buffer)) memset(buffer, 0, sizeof(buffer)); @@ -3153,8 +3154,8 @@ XLogFileCopy(char *dstfname, char *srcfname, int upto) /* * Now move the segment into place with its final name. (Or just return - * the path to the file we created, if the caller wants to handle the - * rest on its own.) + * the path to the file we created, if the caller wants to handle the rest + * on its own.) */ if (dstfname) { @@ -3690,8 +3691,8 @@ RemoveNonParentXlogFiles(XLogRecPtr switchpoint, TimeLineID newTLI) /* * Remove files that are on a timeline older than the new one we're - * switching to, but with a segment number >= the first segment on - * the new timeline. 
+ * switching to, but with a segment number >= the first segment on the + * new timeline. */ if (strncmp(xlde->d_name, switchseg, 8) < 0 && strcmp(xlde->d_name + 8, switchseg + 8) > 0) @@ -3768,12 +3769,13 @@ RemoveXlogFile(const char *segname, XLogRecPtr PriorRedoPtr, XLogRecPtr endptr) segname))); #ifdef WIN32 + /* * On Windows, if another process (e.g another backend) holds the file * open in FILE_SHARE_DELETE mode, unlink will succeed, but the file * will still show up in directory listing until the last handle is - * closed. To avoid confusing the lingering deleted file for a live WAL - * file that needs to be archived, rename it before deleting it. + * closed. To avoid confusing the lingering deleted file for a live + * WAL file that needs to be archived, rename it before deleting it. * * If another process holds the file open without FILE_SHARE_DELETE * flag, rename will fail. We'll try again at the next checkpoint. @@ -3783,8 +3785,8 @@ RemoveXlogFile(const char *segname, XLogRecPtr PriorRedoPtr, XLogRecPtr endptr) { ereport(LOG, (errcode_for_file_access(), - errmsg("could not rename old transaction log file \"%s\": %m", - path))); + errmsg("could not rename old transaction log file \"%s\": %m", + path))); return; } rc = unlink(newpath); @@ -3795,8 +3797,8 @@ RemoveXlogFile(const char *segname, XLogRecPtr PriorRedoPtr, XLogRecPtr endptr) { ereport(LOG, (errcode_for_file_access(), - errmsg("could not remove old transaction log file \"%s\": %m", - path))); + errmsg("could not remove old transaction log file \"%s\": %m", + path))); return; } CheckpointStats.ckpt_segs_removed++; @@ -4609,11 +4611,11 @@ XLOGShmemInit(void) int i; #ifdef WAL_DEBUG + /* - * Create a memory context for WAL debugging that's exempt from the - * normal "no pallocs in critical section" rule. Yes, that can lead to a - * PANIC if an allocation fails, but wal_debug is not for production use - * anyway. + * Create a memory context for WAL debugging that's exempt from the normal + * "no pallocs in critical section" rule. Yes, that can lead to a PANIC if + * an allocation fails, but wal_debug is not for production use anyway. */ if (walDebugCxt == NULL) { @@ -5044,7 +5046,7 @@ readRecoveryCommandFile(void) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("invalid value for recovery parameter \"recovery_target\""), - errhint("The only allowed value is \"immediate\"."))); + errhint("The only allowed value is \"immediate\"."))); ereport(DEBUG2, (errmsg_internal("recovery_target = '%s'", item->value))); @@ -5135,9 +5137,9 @@ readRecoveryCommandFile(void) } /* - * Override any inconsistent requests. Not that this is a change - * of behaviour in 9.5; prior to this we simply ignored a request - * to pause if hot_standby = off, which was surprising behaviour. + * Override any inconsistent requests. Not that this is a change of + * behaviour in 9.5; prior to this we simply ignored a request to pause if + * hot_standby = off, which was surprising behaviour. 
*/ if (recoveryTargetAction == RECOVERY_TARGET_ACTION_PAUSE && recoveryTargetActionSet && @@ -6043,7 +6045,7 @@ StartupXLOG(void) if (read_backup_label(&checkPointLoc, &backupEndRequired, &backupFromStandby)) { - List *tablespaces = NIL; + List *tablespaces = NIL; /* * Archive recovery was requested, and thanks to the backup label @@ -6099,7 +6101,7 @@ StartupXLOG(void) foreach(lc, tablespaces) { tablespaceinfo *ti = lfirst(lc); - char *linkloc; + char *linkloc; linkloc = psprintf("pg_tblspc/%s", ti->oid); @@ -6112,26 +6114,26 @@ StartupXLOG(void) */ if (lstat(linkloc, &st) == 0 && S_ISDIR(st.st_mode)) { - if (!rmtree(linkloc,true)) + if (!rmtree(linkloc, true)) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not remove directory \"%s\": %m", - linkloc))); + errmsg("could not remove directory \"%s\": %m", + linkloc))); } else { if (unlink(linkloc) < 0 && errno != ENOENT) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not remove symbolic link \"%s\": %m", - linkloc))); + errmsg("could not remove symbolic link \"%s\": %m", + linkloc))); } if (symlink(ti->path, linkloc) < 0) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not create symbolic link \"%s\": %m", - linkloc))); + errmsg("could not create symbolic link \"%s\": %m", + linkloc))); pfree(ti->oid); pfree(ti->path); @@ -6222,9 +6224,9 @@ StartupXLOG(void) * in place if the database had been cleanly shut down, but it seems * safest to just remove them always and let them be rebuilt during the * first backend startup. These files needs to be removed from all - * directories including pg_tblspc, however the symlinks are created - * only after reading tablesapce_map file in case of archive recovery - * from backup, so needs to clear old relcache files here after creating + * directories including pg_tblspc, however the symlinks are created only + * after reading tablesapce_map file in case of archive recovery from + * backup, so needs to clear old relcache files here after creating * symlinks. */ RelationCacheInitFileRemove(); @@ -6442,9 +6444,9 @@ StartupXLOG(void) * Also set backupEndPoint and use minRecoveryPoint as the backup end * location if we're starting recovery from a base backup which was * taken from a standby. In this case, the database system status in - * pg_control must indicate that the database was already in - * recovery. Usually that will be DB_IN_ARCHIVE_RECOVERY but also can - * be DB_SHUTDOWNED_IN_RECOVERY if recovery previously was interrupted + * pg_control must indicate that the database was already in recovery. + * Usually that will be DB_IN_ARCHIVE_RECOVERY but also can be + * DB_SHUTDOWNED_IN_RECOVERY if recovery previously was interrupted * before reaching this point; e.g. because restore_command or * primary_conninfo were faulty. * @@ -6500,10 +6502,10 @@ StartupXLOG(void) /* * If there was a tablespace_map file, it's done its job and the - * symlinks have been created. We must get rid of the map file - * so that if we crash during recovery, we don't create symlinks - * again. It seems prudent though to just rename the file out of - * the way rather than delete it completely. + * symlinks have been created. We must get rid of the map file so + * that if we crash during recovery, we don't create symlinks again. + * It seems prudent though to just rename the file out of the way + * rather than delete it completely. 
*/ if (haveTblspcMap) { @@ -6859,7 +6861,8 @@ StartupXLOG(void) { /* * Before we continue on the new timeline, clean up any - * (possibly bogus) future WAL segments on the old timeline. + * (possibly bogus) future WAL segments on the old + * timeline. */ RemoveNonParentXlogFiles(EndRecPtr, ThisTimeLineID); @@ -6890,32 +6893,33 @@ StartupXLOG(void) { if (!reachedConsistency) ereport(FATAL, - (errmsg("requested recovery stop point is before consistent recovery point"))); + (errmsg("requested recovery stop point is before consistent recovery point"))); /* * This is the last point where we can restart recovery with a * new recovery target, if we shutdown and begin again. After - * this, Resource Managers may choose to do permanent corrective - * actions at end of recovery. + * this, Resource Managers may choose to do permanent + * corrective actions at end of recovery. */ switch (recoveryTargetAction) { case RECOVERY_TARGET_ACTION_SHUTDOWN: - /* - * exit with special return code to request shutdown - * of postmaster. Log messages issued from - * postmaster. - */ - proc_exit(3); + + /* + * exit with special return code to request shutdown + * of postmaster. Log messages issued from + * postmaster. + */ + proc_exit(3); case RECOVERY_TARGET_ACTION_PAUSE: - SetRecoveryPause(true); - recoveryPausesHere(); + SetRecoveryPause(true); + recoveryPausesHere(); - /* drop into promote */ + /* drop into promote */ case RECOVERY_TARGET_ACTION_PROMOTE: - break; + break; } } @@ -7259,8 +7263,8 @@ StartupXLOG(void) * too. * * If a .done or .ready file already exists for the old timeline, - * however, we had already determined that the segment is complete, - * so we can let it be archived normally. (In particular, if it was + * however, we had already determined that the segment is complete, so + * we can let it be archived normally. (In particular, if it was * restored from the archive to begin with, it's expected to have a * .done file). */ @@ -7291,8 +7295,8 @@ StartupXLOG(void) if (rename(origpath, partialpath) != 0) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not rename file \"%s\" to \"%s\": %m", - origpath, partialpath))); + errmsg("could not rename file \"%s\" to \"%s\": %m", + origpath, partialpath))); XLogArchiveNotify(partialfname); } } @@ -7366,8 +7370,8 @@ StartupXLOG(void) XLogReportParameters(); /* - * Local WAL inserts enabled, so it's time to finish initialization - * of commit timestamp. + * Local WAL inserts enabled, so it's time to finish initialization of + * commit timestamp. */ CompleteCommitTsInitialization(); @@ -7961,7 +7965,7 @@ LogCheckpointStart(int flags, bool restartpoint) (flags & CHECKPOINT_WAIT) ? " wait" : "", (flags & CHECKPOINT_CAUSE_XLOG) ? " xlog" : "", (flags & CHECKPOINT_CAUSE_TIME) ? " time" : "", - (flags & CHECKPOINT_FLUSH_ALL) ? " flush-all" :""); + (flags & CHECKPOINT_FLUSH_ALL) ? " flush-all" : ""); } /* @@ -8056,8 +8060,8 @@ static void UpdateCheckPointDistanceEstimate(uint64 nbytes) { /* - * To estimate the number of segments consumed between checkpoints, keep - * a moving average of the amount of WAL generated in previous checkpoint + * To estimate the number of segments consumed between checkpoints, keep a + * moving average of the amount of WAL generated in previous checkpoint * cycles. However, if the load is bursty, with quiet periods and busy * periods, we want to cater for the peak load. 
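The peak-biased estimate described in the UpdateCheckPointDistanceEstimate() comment can be maintained with only a couple of lines, roughly as in this sketch; the 0.90/0.10 smoothing weights are an illustrative choice, not a value asserted by the patch.

/*
 * Sketch of a peak-biased moving average: jump straight up to a new peak,
 * but let the estimate decay only gradually when later cycles are quieter,
 * so that bursty workloads are sized for their busy periods.
 */
static double checkpoint_distance_estimate = 0.0;

static void
update_checkpoint_distance(double nbytes)
{
    if (nbytes > checkpoint_distance_estimate)
        checkpoint_distance_estimate = nbytes;
    else
        checkpoint_distance_estimate =
            0.90 * checkpoint_distance_estimate + 0.10 * nbytes;
}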
So instead of a plain * moving average, let the average decline slowly if the previous cycle @@ -9473,8 +9477,8 @@ xlog_redo(XLogReaderState *record) } /* - * Update the commit timestamp tracking. If there was a change - * it needs to be activated or deactivated accordingly. + * Update the commit timestamp tracking. If there was a change it + * needs to be activated or deactivated accordingly. */ if (track_commit_timestamp != xlrec.track_commit_timestamp) { @@ -9483,6 +9487,7 @@ xlog_redo(XLogReaderState *record) if (track_commit_timestamp) ActivateCommitTs(); else + /* * We can't create a new WAL record here, but that's OK as * master did the WAL logging already and we will replay the @@ -9996,7 +10001,7 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p, char *relpath = NULL; int rllen; StringInfoData buflinkpath; - char *s = linkpath; + char *s = linkpath; /* Skip special stuff */ if (strcmp(de->d_name, ".") == 0 || strcmp(de->d_name, "..") == 0) @@ -10023,10 +10028,10 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p, linkpath[rllen] = '\0'; /* - * Add the escape character '\\' before newline in a string - * to ensure that we can distinguish between the newline in - * the tablespace path and end of line while reading - * tablespace_map file during archive recovery. + * Add the escape character '\\' before newline in a string to + * ensure that we can distinguish between the newline in the + * tablespace path and end of line while reading tablespace_map + * file during archive recovery. */ initStringInfo(&buflinkpath); @@ -10054,8 +10059,8 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p, ti->rpath = relpath ? pstrdup(relpath) : NULL; ti->size = infotbssize ? sendTablespace(fullpath, true) : -1; - if(tablespaces) - *tablespaces = lappend(*tablespaces, ti); + if (tablespaces) + *tablespaces = lappend(*tablespaces, ti); appendStringInfo(&tblspc_mapfbuf, "%s %s\n", ti->oid, ti->path); @@ -10150,10 +10155,10 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p, } else ereport(ERROR, - (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("a backup is already in progress"), - errhint("If you're sure there is no backup in progress, remove file \"%s\" and try again.", - TABLESPACE_MAP))); + (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("a backup is already in progress"), + errhint("If you're sure there is no backup in progress, remove file \"%s\" and try again.", + TABLESPACE_MAP))); fp = AllocateFile(TABLESPACE_MAP, "w"); @@ -10353,8 +10358,8 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive, TimeLineID *stoptli_p) BACKUP_LABEL_FILE))); /* - * Remove tablespace_map file if present, it is created - * only if there are tablespaces. + * Remove tablespace_map file if present, it is created only if there + * are tablespaces. */ unlink(TABLESPACE_MAP); } @@ -10773,10 +10778,12 @@ read_tablespace_map(List **tablespaces) tablespaceinfo *ti; FILE *lfp; char tbsoid[MAXPGPATH]; - char *tbslinkpath; + char *tbslinkpath; char str[MAXPGPATH]; - int ch, prev_ch = -1, - i = 0, n; + int ch, + prev_ch = -1, + i = 0, + n; /* * See if tablespace_map file is present @@ -10794,9 +10801,9 @@ read_tablespace_map(List **tablespaces) /* * Read and parse the link name and path lines from tablespace_map file - * (this code is pretty crude, but we are not expecting any variability - * in the file format). 
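To make the escaping convention concrete: a tablespace path containing newlines has to be written to tablespace_map so that the reader can tell an embedded newline from the end-of-line terminator. A minimal sketch of the writing side follows; the function name and the use of a plain FILE * are assumptions for illustration.

#include <stdio.h>

/*
 * Sketch: emit "<oid> <path>\n", prefixing any newline or carriage return
 * embedded in the path with a backslash so it cannot be confused with the
 * line terminator when the map file is parsed during archive recovery.
 */
static void
write_tablespace_map_line(FILE *fp, const char *oid, const char *path)
{
    const char *p;

    fprintf(fp, "%s ", oid);
    for (p = path; *p; p++)
    {
        if (*p == '\n' || *p == '\r')
            fputc('\\', fp);
        fputc(*p, fp);
    }
    fputc('\n', fp);
}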
While taking backup we embed escape character - * '\\' before newline in tablespace path, so that during reading of + * (this code is pretty crude, but we are not expecting any variability in + * the file format). While taking backup we embed escape character '\\' + * before newline in tablespace path, so that during reading of * tablespace_map file, we could distinguish newline in tablespace path * and end of line. Now while reading tablespace_map file, remove the * escape character that has been added in tablespace path during backup. @@ -10808,8 +10815,8 @@ read_tablespace_map(List **tablespaces) str[i] = '\0'; if (sscanf(str, "%s %n", tbsoid, &n) != 1) ereport(FATAL, - (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("invalid data in file \"%s\"", TABLESPACE_MAP))); + (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("invalid data in file \"%s\"", TABLESPACE_MAP))); tbslinkpath = str + n; i = 0; @@ -10821,7 +10828,7 @@ read_tablespace_map(List **tablespaces) continue; } else if ((ch == '\n' || ch == '\r') && prev_ch == '\\') - str[i-1] = ch; + str[i - 1] = ch; else str[i++] = ch; prev_ch = ch; @@ -10868,7 +10875,7 @@ BackupInProgress(void) /* * CancelBackup: rename the "backup_label" and "tablespace_map" - * files to cancel backup mode + * files to cancel backup mode * * If the "backup_label" file exists, it will be renamed to "backup_label.old". * Similarly, if the "tablespace_map" file exists, it will be renamed to @@ -11115,8 +11122,8 @@ static bool WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess, bool fetching_ckpt, XLogRecPtr tliRecPtr) { - static TimestampTz last_fail_time = 0; - TimestampTz now; + static TimestampTz last_fail_time = 0; + TimestampTz now; /*------- * Standby mode is implemented by a state machine: @@ -11270,9 +11277,10 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess, */ now = GetCurrentTimestamp(); if (!TimestampDifferenceExceeds(last_fail_time, now, - wal_retrieve_retry_interval)) + wal_retrieve_retry_interval)) { - long secs, wait_time; + long secs, + wait_time; int usecs; TimestampDifference(last_fail_time, now, &secs, &usecs); @@ -11280,7 +11288,7 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess, (secs * 1000 + usecs / 1000); WaitLatch(&XLogCtl->recoveryWakeupLatch, - WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, + WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, wait_time); ResetLatch(&XLogCtl->recoveryWakeupLatch); now = GetCurrentTimestamp(); @@ -11605,8 +11613,8 @@ fsync_pgdata(char *datadir) return; /* - * If possible, hint to the kernel that we're soon going to fsync - * the data directory and its contents. + * If possible, hint to the kernel that we're soon going to fsync the data + * directory and its contents. 
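The per-file kernel hint that the fsync_pgdata() comment alludes to might look roughly like the sketch below; which call is available is platform-dependent, the guards mirror the ones used in the hunk that follows, and on Linux sync_file_range() additionally requires _GNU_SOURCE. This is a sketch of the general technique, not the patch's own code.

#include <fcntl.h>

/*
 * Sketch: ask the kernel to begin writeback for the whole file now, so the
 * later fsync() has less outstanding dirty data to wait for.
 */
static void
pre_sync_hint(int fd)
{
#if defined(HAVE_SYNC_FILE_RANGE)
    (void) sync_file_range(fd, 0, 0, SYNC_FILE_RANGE_WRITE);
#elif defined(USE_POSIX_FADVISE) && defined(POSIX_FADV_DONTNEED)
    (void) posix_fadvise(fd, 0, 0, POSIX_FADV_DONTNEED);
#else
    (void) fd;                  /* no asynchronous writeback hint available */
#endif
}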
*/ #if defined(HAVE_SYNC_FILE_RANGE) || \ (defined(USE_POSIX_FADVISE) && defined(POSIX_FADV_DONTNEED)) diff --git a/src/backend/access/transam/xloginsert.c b/src/backend/access/transam/xloginsert.c index 419736da310..b96c39ac657 100644 --- a/src/backend/access/transam/xloginsert.c +++ b/src/backend/access/transam/xloginsert.c @@ -33,7 +33,7 @@ #include "pg_trace.h" /* Buffer size required to store a compressed version of backup block image */ -#define PGLZ_MAX_BLCKSZ PGLZ_MAX_OUTPUT(BLCKSZ) +#define PGLZ_MAX_BLCKSZ PGLZ_MAX_OUTPUT(BLCKSZ) /* * For each block reference registered with XLogRegisterBuffer, we fill in @@ -58,7 +58,7 @@ typedef struct /* buffer to store a compressed version of backup block image */ char compressed_page[PGLZ_MAX_BLCKSZ]; -} registered_buffer; +} registered_buffer; static registered_buffer *registered_buffers; static int max_registered_buffers; /* allocated size */ @@ -110,7 +110,7 @@ static XLogRecData *XLogRecordAssemble(RmgrId rmid, uint8 info, XLogRecPtr RedoRecPtr, bool doPageWrites, XLogRecPtr *fpw_lsn); static bool XLogCompressBackupBlock(char *page, uint16 hole_offset, - uint16 hole_length, char *dest, uint16 *dlen); + uint16 hole_length, char *dest, uint16 *dlen); /* * Begin constructing a WAL record. This must be called before the @@ -602,7 +602,10 @@ XLogRecordAssemble(RmgrId rmid, uint8 info, &compressed_len); } - /* Fill in the remaining fields in the XLogRecordBlockHeader struct */ + /* + * Fill in the remaining fields in the XLogRecordBlockHeader + * struct + */ bkpb.fork_flags |= BKPBLOCK_HAS_IMAGE; /* @@ -762,7 +765,7 @@ XLogRecordAssemble(RmgrId rmid, uint8 info, * the length of compressed block image. */ static bool -XLogCompressBackupBlock(char * page, uint16 hole_offset, uint16 hole_length, +XLogCompressBackupBlock(char *page, uint16 hole_offset, uint16 hole_length, char *dest, uint16 *dlen) { int32 orig_len = BLCKSZ - hole_length; @@ -790,16 +793,15 @@ XLogCompressBackupBlock(char * page, uint16 hole_offset, uint16 hole_length, source = page; /* - * We recheck the actual size even if pglz_compress() reports success - * and see if the number of bytes saved by compression is larger than - * the length of extra data needed for the compressed version of block - * image. + * We recheck the actual size even if pglz_compress() reports success and + * see if the number of bytes saved by compression is larger than the + * length of extra data needed for the compressed version of block image. 
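For context on XLogCompressBackupBlock(): squeezing the unused "hole" out of a block image before handing it to pglz_compress() can be sketched as follows. BLCKSZ is assumed to be the usual 8192-byte page size, and the caller-provided scratch buffer and helper name are illustrative.

#include <stdint.h>
#include <string.h>

#define BLCKSZ 8192             /* assumed page size for this sketch */

/*
 * Sketch: copy the page into 'scratch' with the hole (the unused gap in the
 * middle of the page) removed, returning the number of bytes that are
 * actually worth compressing.
 */
static int
remove_page_hole(const char *page, uint16_t hole_offset,
                 uint16_t hole_length, char *scratch)
{
    memcpy(scratch, page, hole_offset);
    memcpy(scratch + hole_offset,
           page + hole_offset + hole_length,
           BLCKSZ - (hole_offset + hole_length));
    return BLCKSZ - hole_length;
}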
*/ len = pglz_compress(source, orig_len, dest, PGLZ_strategy_default); if (len >= 0 && len + extra_bytes < orig_len) { - *dlen = (uint16) len; /* successful compression */ + *dlen = (uint16) len; /* successful compression */ return true; } return false; diff --git a/src/backend/access/transam/xlogreader.c b/src/backend/access/transam/xlogreader.c index 3661e7229aa..a9e926c5a28 100644 --- a/src/backend/access/transam/xlogreader.c +++ b/src/backend/access/transam/xlogreader.c @@ -1086,50 +1086,53 @@ DecodeXLogRecord(XLogReaderState *state, XLogRecord *record, char **errormsg) blk->bimg_len == BLCKSZ)) { report_invalid_record(state, - "BKPIMAGE_HAS_HOLE set, but hole offset %u length %u block image length %u at %X/%X", + "BKPIMAGE_HAS_HOLE set, but hole offset %u length %u block image length %u at %X/%X", (unsigned int) blk->hole_offset, (unsigned int) blk->hole_length, (unsigned int) blk->bimg_len, (uint32) (state->ReadRecPtr >> 32), (uint32) state->ReadRecPtr); goto err; } + /* - * cross-check that hole_offset == 0 and hole_length == 0 - * if the HAS_HOLE flag is not set. + * cross-check that hole_offset == 0 and hole_length == 0 if + * the HAS_HOLE flag is not set. */ if (!(blk->bimg_info & BKPIMAGE_HAS_HOLE) && (blk->hole_offset != 0 || blk->hole_length != 0)) { report_invalid_record(state, - "BKPIMAGE_HAS_HOLE not set, but hole offset %u length %u at %X/%X", + "BKPIMAGE_HAS_HOLE not set, but hole offset %u length %u at %X/%X", (unsigned int) blk->hole_offset, (unsigned int) blk->hole_length, (uint32) (state->ReadRecPtr >> 32), (uint32) state->ReadRecPtr); goto err; } + /* - * cross-check that bimg_len < BLCKSZ - * if the IS_COMPRESSED flag is set. + * cross-check that bimg_len < BLCKSZ if the IS_COMPRESSED + * flag is set. */ if ((blk->bimg_info & BKPIMAGE_IS_COMPRESSED) && blk->bimg_len == BLCKSZ) { report_invalid_record(state, - "BKPIMAGE_IS_COMPRESSED set, but block image length %u at %X/%X", + "BKPIMAGE_IS_COMPRESSED set, but block image length %u at %X/%X", (unsigned int) blk->bimg_len, (uint32) (state->ReadRecPtr >> 32), (uint32) state->ReadRecPtr); goto err; } + /* - * cross-check that bimg_len = BLCKSZ if neither - * HAS_HOLE nor IS_COMPRESSED flag is set. + * cross-check that bimg_len = BLCKSZ if neither HAS_HOLE nor + * IS_COMPRESSED flag is set. */ if (!(blk->bimg_info & BKPIMAGE_HAS_HOLE) && !(blk->bimg_info & BKPIMAGE_IS_COMPRESSED) && blk->bimg_len != BLCKSZ) { report_invalid_record(state, - "neither BKPIMAGE_HAS_HOLE nor BKPIMAGE_IS_COMPRESSED set, but block image length is %u at %X/%X", + "neither BKPIMAGE_HAS_HOLE nor BKPIMAGE_IS_COMPRESSED set, but block image length is %u at %X/%X", (unsigned int) blk->data_len, (uint32) (state->ReadRecPtr >> 32), (uint32) state->ReadRecPtr); goto err; @@ -1294,8 +1297,8 @@ bool RestoreBlockImage(XLogReaderState *record, uint8 block_id, char *page) { DecodedBkpBlock *bkpb; - char *ptr; - char tmp[BLCKSZ]; + char *ptr; + char tmp[BLCKSZ]; if (!record->blocks[block_id].in_use) return false; |
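Finally, the inverse operation performed by RestoreBlockImage(), re-expanding a block image whose hole was removed at backup time into a full page, can be sketched like this. The sketch omits the compressed case (where the image would first be decompressed into a temporary buffer), and BLCKSZ and the helper name are illustrative assumptions.

#include <stdint.h>
#include <string.h>

#define BLCKSZ 8192             /* assumed page size for this sketch */

/*
 * Sketch: rebuild a full page from a hole-compressed block image.  'image'
 * holds hole_offset bytes of the page head followed by the page tail; the
 * hole itself is filled with zeros, which is all it ever contained.
 */
static void
expand_block_image(const char *image, uint16_t hole_offset,
                   uint16_t hole_length, char *page)
{
    memcpy(page, image, hole_offset);
    memset(page + hole_offset, 0, hole_length);
    memcpy(page + hole_offset + hole_length,
           image + hole_offset,
           BLCKSZ - (hole_offset + hole_length));
}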