Diffstat (limited to 'src/backend/postmaster')
-rw-r--r--   src/backend/postmaster/autovacuum.c    |  36
-rw-r--r--   src/backend/postmaster/bgworker.c      |   1
-rw-r--r--   src/backend/postmaster/checkpointer.c  | 232
-rw-r--r--   src/backend/postmaster/pgarch.c        |  12
-rw-r--r--   src/backend/postmaster/pmchild.c       |  18
-rw-r--r--   src/backend/postmaster/postmaster.c    |  35
-rw-r--r--   src/backend/postmaster/walsummarizer.c |  28
7 files changed, 268 insertions, 94 deletions
diff --git a/src/backend/postmaster/autovacuum.c b/src/backend/postmaster/autovacuum.c
index 451fb90a610..ff96b36d710 100644
--- a/src/backend/postmaster/autovacuum.c
+++ b/src/backend/postmaster/autovacuum.c
@@ -310,6 +310,16 @@ static AutoVacuumShmemStruct *AutoVacuumShmem;
 static dlist_head DatabaseList = DLIST_STATIC_INIT(DatabaseList);
 static MemoryContext DatabaseListCxt = NULL;
 
+/*
+ * Dummy pointer to persuade Valgrind that we've not leaked the array of
+ * avl_dbase structs.  Make it global to ensure the compiler doesn't
+ * optimize it away.
+ */
+#ifdef USE_VALGRIND
+extern avl_dbase *avl_dbase_array;
+avl_dbase *avl_dbase_array;
+#endif
+
 /* Pointer to my own WorkerInfo, valid on each worker */
 static WorkerInfo MyWorkerInfo = NULL;
 
@@ -562,10 +572,10 @@ AutoVacLauncherMain(const void *startup_data, size_t startup_data_len)
 
     /*
      * Create the initial database list.  The invariant we want this list to
-     * keep is that it's ordered by decreasing next_time.  As soon as an entry
-     * is updated to a higher time, it will be moved to the front (which is
-     * correct because the only operation is to add autovacuum_naptime to the
-     * entry, and time always increases).
+     * keep is that it's ordered by decreasing next_worker.  As soon as an
+     * entry is updated to a higher time, it will be moved to the front (which
+     * is correct because the only operation is to add autovacuum_naptime to
+     * the entry, and time always increases).
      */
     rebuild_database_list(InvalidOid);
 
@@ -1020,6 +1030,10 @@ rebuild_database_list(Oid newdb)
 
         /* put all the hash elements into an array */
         dbary = palloc(nelems * sizeof(avl_dbase));
+        /* keep Valgrind quiet */
+#ifdef USE_VALGRIND
+        avl_dbase_array = dbary;
+#endif
 
         i = 0;
         hash_seq_init(&seq, dbhash);
@@ -2565,8 +2579,18 @@ deleted:
 
     /*
      * We leak table_toast_map here (among other things), but since we're
-     * going away soon, it's not a problem.
+     * going away soon, it's not a problem normally.  But when using Valgrind,
+     * release some stuff to reduce complaints about leaked storage.
      */
+#ifdef USE_VALGRIND
+    hash_destroy(table_toast_map);
+    FreeTupleDesc(pg_class_desc);
+    if (bstrategy)
+        pfree(bstrategy);
+#endif
+
+    /* Run the rest in xact context, mainly to avoid Valgrind leak warnings */
+    MemoryContextSwitchTo(TopTransactionContext);
 
     /*
      * Update pg_database.datfrozenxid, and truncate pg_xact if possible.  We
@@ -3190,7 +3214,7 @@ autovacuum_do_vac_analyze(autovac_table *tab, BufferAccessStrategy bstrategy)
     rel_list = list_make1(rel);
     MemoryContextSwitchTo(old_context);
 
-    vacuum(rel_list, &tab->at_params, bstrategy, vac_context, true);
+    vacuum(rel_list, tab->at_params, bstrategy, vac_context, true);
 
     MemoryContextDelete(vac_context);
 }
diff --git a/src/backend/postmaster/bgworker.c b/src/backend/postmaster/bgworker.c
index 116ddf7b835..1ad65c237c3 100644
--- a/src/backend/postmaster/bgworker.c
+++ b/src/backend/postmaster/bgworker.c
@@ -613,6 +613,7 @@ ResetBackgroundWorkerCrashTimes(void)
              * resetting.
              */
             rw->rw_crashed_at = 0;
+            rw->rw_pid = 0;
 
             /*
              * If there was anyone waiting for it, they're history.
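The autovacuum.c hunks above (and the matching pmchild.c hunks further down) lean on the same Valgrind idiom: an allocation that stays referenced from a global is reported as "still reachable" rather than "definitely lost", and an extern declaration keeps the compiler from discarding the otherwise-unused global. A minimal standalone sketch of that idiom; the leak_anchor name and build flags are illustrative, not part of the patch:

    #include <stdlib.h>

    /*
     * Hypothetical global anchor.  While this pointer stays live, Valgrind
     * classifies the allocation as "still reachable" instead of
     * "definitely lost".
     */
    #ifdef USE_VALGRIND
    extern int *leak_anchor;
    int *leak_anchor;
    #endif

    int
    main(void)
    {
        int *arr = malloc(100 * sizeof(int));

        if (arr == NULL)
            return 1;
    #ifdef USE_VALGRIND
        leak_anchor = arr;          /* reachable until process exit */
    #endif
        return 0;                   /* intentionally never freed */
    }

Built with cc -DUSE_VALGRIND demo.c and run under valgrind --leak-check=full, the array drops out of the "definitely lost" report; without the define it shows up as a leak.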
diff --git a/src/backend/postmaster/checkpointer.c b/src/backend/postmaster/checkpointer.c
index fda91ffd1ce..8490148a47d 100644
--- a/src/backend/postmaster/checkpointer.c
+++ b/src/backend/postmaster/checkpointer.c
@@ -42,6 +42,8 @@
 #include "access/xlog.h"
 #include "access/xlog_internal.h"
 #include "access/xlogrecovery.h"
+#include "catalog/pg_authid.h"
+#include "commands/defrem.h"
 #include "libpq/pqsignal.h"
 #include "miscadmin.h"
 #include "pgstat.h"
@@ -61,6 +63,7 @@
 #include "storage/shmem.h"
 #include "storage/smgr.h"
 #include "storage/spin.h"
+#include "utils/acl.h"
 #include "utils/guc.h"
 #include "utils/memutils.h"
 #include "utils/resowner.h"
@@ -127,6 +130,13 @@ typedef struct
 
     int         num_requests;   /* current # of requests */
     int         max_requests;   /* allocated array size */
+
+    int         head;           /* Index of the first request in the ring
+                                 * buffer */
+    int         tail;           /* Index of the last request in the ring
+                                 * buffer */
+
+    /* The ring buffer of pending checkpointer requests */
     CheckpointerRequest requests[FLEXIBLE_ARRAY_MEMBER];
 } CheckpointerShmemStruct;
 
@@ -135,6 +145,12 @@ static CheckpointerShmemStruct *CheckpointerShmem;
 /* interval for calling AbsorbSyncRequests in CheckpointWriteDelay */
 #define WRITES_PER_ABSORB 1000
 
+/* Maximum number of checkpointer requests to process in one batch */
+#define CKPT_REQ_BATCH_SIZE 10000
+
+/* Max number of requests the checkpointer request queue can hold */
+#define MAX_CHECKPOINT_REQUESTS 10000000
+
 /*
  * GUC parameters
  */
@@ -161,7 +177,7 @@ static pg_time_t last_xlog_switch_time;
 static void ProcessCheckpointerInterrupts(void);
 static void CheckArchiveTimeout(void);
 static bool IsCheckpointOnSchedule(double progress);
-static bool ImmediateCheckpointRequested(void);
+static bool FastCheckpointRequested(void);
 static bool CompactCheckpointerRequestQueue(void);
 static void UpdateSharedMemoryConfig(void);
 
@@ -734,12 +750,12 @@ CheckArchiveTimeout(void)
 }
 
 /*
- * Returns true if an immediate checkpoint request is pending.  (Note that
- * this does not check the *current* checkpoint's IMMEDIATE flag, but whether
- * there is one pending behind it.)
+ * Returns true if a fast checkpoint request is pending.  (Note that this does
+ * not check the *current* checkpoint's FAST flag, but whether there is one
+ * pending behind it.)
  */
 static bool
-ImmediateCheckpointRequested(void)
+FastCheckpointRequested(void)
 {
     volatile CheckpointerShmemStruct *cps = CheckpointerShmem;
 
@@ -747,7 +763,7 @@ ImmediateCheckpointRequested(void)
      * We don't need to acquire the ckpt_lck in this case because we're only
      * looking at a single flag bit.
      */
-    if (cps->ckpt_flags & CHECKPOINT_IMMEDIATE)
+    if (cps->ckpt_flags & CHECKPOINT_FAST)
         return true;
     return false;
 }
@@ -760,7 +776,7 @@ ImmediateCheckpointRequested(void)
  * checkpoint_completion_target.
  *
  * The checkpoint request flags should be passed in; currently the only one
- * examined is CHECKPOINT_IMMEDIATE, which disables delays between writes.
+ * examined is CHECKPOINT_FAST, which disables delays between writes.
  *
  * 'progress' is an estimate of how much of the work has been done, as a
  * fraction between 0.0 meaning none, and 1.0 meaning all done.
@@ -778,10 +794,10 @@ CheckpointWriteDelay(int flags, double progress)
      * Perform the usual duties and take a nap, unless we're behind schedule,
      * in which case we just try to catch up as quickly as possible.
      */
-    if (!(flags & CHECKPOINT_IMMEDIATE) &&
+    if (!(flags & CHECKPOINT_FAST) &&
         !ShutdownXLOGPending &&
         !ShutdownRequestPending &&
-        !ImmediateCheckpointRequested() &&
+        !FastCheckpointRequested() &&
         IsCheckpointOnSchedule(progress))
     {
         if (ConfigReloadPending)
@@ -970,24 +986,75 @@ CheckpointerShmemInit(void)
          */
         MemSet(CheckpointerShmem, 0, size);
         SpinLockInit(&CheckpointerShmem->ckpt_lck);
-        CheckpointerShmem->max_requests = NBuffers;
+        CheckpointerShmem->max_requests = Min(NBuffers, MAX_CHECKPOINT_REQUESTS);
+        CheckpointerShmem->head = CheckpointerShmem->tail = 0;
         ConditionVariableInit(&CheckpointerShmem->start_cv);
         ConditionVariableInit(&CheckpointerShmem->done_cv);
     }
 }
 
 /*
+ * ExecCheckpoint
+ *      Primary entry point for manual CHECKPOINT commands
+ *
+ * This is mainly a wrapper for RequestCheckpoint().
+ */
+void
+ExecCheckpoint(ParseState *pstate, CheckPointStmt *stmt)
+{
+    bool        fast = true;
+    bool        unlogged = false;
+
+    foreach_ptr(DefElem, opt, stmt->options)
+    {
+        if (strcmp(opt->defname, "mode") == 0)
+        {
+            char       *mode = defGetString(opt);
+
+            if (strcmp(mode, "spread") == 0)
+                fast = false;
+            else if (strcmp(mode, "fast") != 0)
+                ereport(ERROR,
+                        (errcode(ERRCODE_SYNTAX_ERROR),
+                         errmsg("unrecognized MODE option \"%s\"", mode),
+                         parser_errposition(pstate, opt->location)));
+        }
+        else if (strcmp(opt->defname, "flush_unlogged") == 0)
+            unlogged = defGetBoolean(opt);
+        else
+            ereport(ERROR,
+                    (errcode(ERRCODE_SYNTAX_ERROR),
+                     errmsg("unrecognized CHECKPOINT option \"%s\"", opt->defname),
+                     parser_errposition(pstate, opt->location)));
+    }
+
+    if (!has_privs_of_role(GetUserId(), ROLE_PG_CHECKPOINT))
+        ereport(ERROR,
+                (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+        /* translator: %s is name of an SQL command (e.g., CHECKPOINT) */
+                 errmsg("permission denied to execute %s command",
+                        "CHECKPOINT"),
+                 errdetail("Only roles with privileges of the \"%s\" role may execute this command.",
+                           "pg_checkpoint")));
+
+    RequestCheckpoint(CHECKPOINT_WAIT |
+                      (fast ? CHECKPOINT_FAST : 0) |
+                      (unlogged ? CHECKPOINT_FLUSH_UNLOGGED : 0) |
+                      (RecoveryInProgress() ? 0 : CHECKPOINT_FORCE));
+}
+
+/*
  * RequestCheckpoint
  *      Called in backend processes to request a checkpoint
  *
  * flags is a bitwise OR of the following:
  *  CHECKPOINT_IS_SHUTDOWN: checkpoint is for database shutdown.
  *  CHECKPOINT_END_OF_RECOVERY: checkpoint is for end of WAL recovery.
- *  CHECKPOINT_IMMEDIATE: finish the checkpoint ASAP,
+ *  CHECKPOINT_FAST: finish the checkpoint ASAP,
  *      ignoring checkpoint_completion_target parameter.
  *  CHECKPOINT_FORCE: force a checkpoint even if no XLOG activity has occurred
  *      since the last one (implied by CHECKPOINT_IS_SHUTDOWN or
- *      CHECKPOINT_END_OF_RECOVERY).
+ *      CHECKPOINT_END_OF_RECOVERY, and the CHECKPOINT command).
  *  CHECKPOINT_WAIT: wait for completion before returning (otherwise,
  *      just signal checkpointer to do it, and return).
  *  CHECKPOINT_CAUSE_XLOG: checkpoint is requested due to xlog filling.
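The ExecCheckpoint() added above boils a CHECKPOINT (MODE ..., FLUSH_UNLOGGED ...) statement down to a flags word for RequestCheckpoint(). A compilable sketch of just that flag arithmetic; the numeric flag values below are stand-ins for illustration (the real bit definitions live in PostgreSQL headers and are not part of this diff):

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in flag values; not the real PostgreSQL definitions. */
    #define CHECKPOINT_FAST             0x0004
    #define CHECKPOINT_FORCE            0x0008
    #define CHECKPOINT_WAIT             0x0020
    #define CHECKPOINT_FLUSH_UNLOGGED   0x0080

    /* Mirrors the flag composition at the end of ExecCheckpoint(). */
    static int
    checkpoint_command_flags(bool fast, bool flush_unlogged, bool in_recovery)
    {
        return CHECKPOINT_WAIT |
            (fast ? CHECKPOINT_FAST : 0) |
            (flush_unlogged ? CHECKPOINT_FLUSH_UNLOGGED : 0) |
            (in_recovery ? 0 : CHECKPOINT_FORCE);
    }

    int
    main(void)
    {
        /* CHECKPOINT (MODE SPREAD) on a primary: WAIT | FORCE, no FAST bit */
        printf("0x%04x\n", checkpoint_command_flags(false, false, false));
        return 0;
    }

So CHECKPOINT (MODE SPREAD) on a primary still forces a checkpoint even if no WAL was written, but leaves CHECKPOINT_FAST unset, letting checkpoint_completion_target pace the writes.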
      * There's no point in doing slow checkpoints in a standalone backend,
      * because there's no other backends the checkpoint could disrupt.
      */
-    CreateCheckPoint(flags | CHECKPOINT_IMMEDIATE);
+    CreateCheckPoint(flags | CHECKPOINT_FAST);
 
     /* Free all smgr objects, as CheckpointerMain() normally would. */
     smgrdestroyall();
@@ -1148,6 +1215,7 @@ ForwardSyncRequest(const FileTag *ftag, SyncRequestType type)
 {
     CheckpointerRequest *request;
     bool        too_full;
+    int         insert_pos;
 
     if (!IsUnderPostmaster)
         return false;           /* probably shouldn't even get here */
@@ -1171,10 +1239,14 @@ ForwardSyncRequest(const FileTag *ftag, SyncRequestType type)
     }
 
     /* OK, insert request */
-    request = &CheckpointerShmem->requests[CheckpointerShmem->num_requests++];
+    insert_pos = CheckpointerShmem->tail;
+    request = &CheckpointerShmem->requests[insert_pos];
     request->ftag = *ftag;
     request->type = type;
 
+    CheckpointerShmem->tail = (CheckpointerShmem->tail + 1) % CheckpointerShmem->max_requests;
+    CheckpointerShmem->num_requests++;
+
     /* If queue is more than half full, nudge the checkpointer to empty it */
     too_full = (CheckpointerShmem->num_requests >=
                 CheckpointerShmem->max_requests / 2);
@@ -1216,12 +1288,16 @@ CompactCheckpointerRequestQueue(void)
     struct CheckpointerSlotMapping
     {
         CheckpointerRequest request;
-        int         slot;
+        int         ring_idx;
     };
 
-    int         n,
-                preserve_count;
+    int         n;
     int         num_skipped = 0;
+    int         head;
+    int         max_requests;
+    int         num_requests;
+    int         read_idx,
+                write_idx;
     HASHCTL     ctl;
     HTAB       *htab;
     bool       *skip_slot;
@@ -1233,8 +1309,13 @@ CompactCheckpointerRequestQueue(void)
     if (CritSectionCount > 0)
         return false;
 
+    max_requests = CheckpointerShmem->max_requests;
+    num_requests = CheckpointerShmem->num_requests;
+
     /* Initialize skip_slot array */
-    skip_slot = palloc0(sizeof(bool) * CheckpointerShmem->num_requests);
+    skip_slot = palloc0(sizeof(bool) * max_requests);
+
+    head = CheckpointerShmem->head;
 
     /* Initialize temporary hash table */
     ctl.keysize = sizeof(CheckpointerRequest);
@@ -1258,7 +1339,8 @@ CompactCheckpointerRequestQueue(void)
      * away preceding entries that would end up being canceled anyhow), but
      * it's not clear that the extra complexity would buy us anything.
      */
-    for (n = 0; n < CheckpointerShmem->num_requests; n++)
+    read_idx = head;
+    for (n = 0; n < num_requests; n++)
     {
         CheckpointerRequest *request;
         struct CheckpointerSlotMapping *slotmap;
@@ -1271,16 +1353,19 @@ CompactCheckpointerRequestQueue(void)
          * CheckpointerShmemInit.  Note also that RelFileLocator had better
          * contain no pad bytes.
          */
-        request = &CheckpointerShmem->requests[n];
+        request = &CheckpointerShmem->requests[read_idx];
         slotmap = hash_search(htab, request, HASH_ENTER, &found);
         if (found)
         {
             /* Duplicate, so mark the previous occurrence as skippable */
-            skip_slot[slotmap->slot] = true;
+            skip_slot[slotmap->ring_idx] = true;
             num_skipped++;
         }
         /* Remember slot containing latest occurrence of this request value */
-        slotmap->slot = n;
+        slotmap->ring_idx = read_idx;
+
+        /* Move to the next request in the ring buffer */
+        read_idx = (read_idx + 1) % max_requests;
     }
 
     /* Done with the hash table. */
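With the hunks above, the request array has become a fixed-capacity FIFO ring: ForwardSyncRequest() inserts at tail, readers consume from head, and both wrap modulo max_requests. A self-contained sketch of that head/tail/count discipline, using plain ints instead of CheckpointerRequest and no shared memory or locking:

    #include <stdbool.h>
    #include <stdio.h>

    #define QUEUE_CAPACITY 8

    typedef struct
    {
        int head;                   /* index of the oldest entry */
        int tail;                   /* index where the next entry goes */
        int num;                    /* current number of entries */
        int items[QUEUE_CAPACITY];
    } RingQueue;

    /* Append at the tail, the way ForwardSyncRequest now inserts requests. */
    static bool
    ring_put(RingQueue *q, int item)
    {
        if (q->num >= QUEUE_CAPACITY)
            return false;           /* full: caller must compact or retry */
        q->items[q->tail] = item;
        q->tail = (q->tail + 1) % QUEUE_CAPACITY;
        q->num++;
        return true;
    }

    /* Remove from the head, the way AbsorbSyncRequests now drains them. */
    static bool
    ring_get(RingQueue *q, int *item)
    {
        if (q->num == 0)
            return false;
        *item = q->items[q->head];
        q->head = (q->head + 1) % QUEUE_CAPACITY;
        q->num--;
        return true;
    }

    int
    main(void)
    {
        RingQueue   q = {0};
        int         v;

        for (int i = 0; i < 10; i++)    /* last two puts fail: ring holds 8 */
            ring_put(&q, i);
        while (ring_get(&q, &v))
            printf("%d\n", v);          /* prints 0..7 in FIFO order */
        return 0;
    }

The wrap-around is also why the compaction pass above walks read_idx starting from head rather than scanning slots 0..num_requests-1 as the old code did.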
@@ -1294,17 +1379,34 @@ CompactCheckpointerRequestQueue(void)
     }
 
     /* We found some duplicates; remove them. */
-    preserve_count = 0;
-    for (n = 0; n < CheckpointerShmem->num_requests; n++)
+    read_idx = write_idx = head;
+    for (n = 0; n < num_requests; n++)
     {
-        if (skip_slot[n])
-            continue;
-        CheckpointerShmem->requests[preserve_count++] = CheckpointerShmem->requests[n];
+        /* If this slot is NOT skipped, keep it */
+        if (!skip_slot[read_idx])
+        {
+            /* If the read and write positions are different, copy the request */
+            if (write_idx != read_idx)
+                CheckpointerShmem->requests[write_idx] =
+                    CheckpointerShmem->requests[read_idx];
+
+            /* Advance the write position */
+            write_idx = (write_idx + 1) % max_requests;
+        }
+
+        read_idx = (read_idx + 1) % max_requests;
     }
+
+    /*
+     * Update ring buffer state: head remains the same, tail moves, count
+     * decreases
+     */
+    CheckpointerShmem->tail = write_idx;
+    CheckpointerShmem->num_requests -= num_skipped;
+
     ereport(DEBUG1,
             (errmsg_internal("compacted fsync request queue from %d entries to %d entries",
-                             CheckpointerShmem->num_requests, preserve_count)));
-    CheckpointerShmem->num_requests = preserve_count;
+                             num_requests, CheckpointerShmem->num_requests)));
 
     /* Cleanup. */
     pfree(skip_slot);
@@ -1325,40 +1427,64 @@ AbsorbSyncRequests(void)
 {
     CheckpointerRequest *requests = NULL;
     CheckpointerRequest *request;
-    int         n;
+    int         n,
+                i;
+    bool        loop;
 
     if (!AmCheckpointerProcess())
         return;
 
-    LWLockAcquire(CheckpointerCommLock, LW_EXCLUSIVE);
-
-    /*
-     * We try to avoid holding the lock for a long time by copying the request
-     * array, and processing the requests after releasing the lock.
-     *
-     * Once we have cleared the requests from shared memory, we have to PANIC
-     * if we then fail to absorb them (eg, because our hashtable runs out of
-     * memory).  This is because the system cannot run safely if we are unable
-     * to fsync what we have been told to fsync.  Fortunately, the hashtable
-     * is so small that the problem is quite unlikely to arise in practice.
-     */
-    n = CheckpointerShmem->num_requests;
-    if (n > 0)
+    do
     {
-        requests = (CheckpointerRequest *) palloc(n * sizeof(CheckpointerRequest));
-        memcpy(requests, CheckpointerShmem->requests, n * sizeof(CheckpointerRequest));
-    }
+        LWLockAcquire(CheckpointerCommLock, LW_EXCLUSIVE);
+
+        /*---
+         * We try to avoid holding the lock for a long time by:
+         * 1. Copying the request array and processing the requests after
+         *    releasing the lock;
+         * 2. Processing not the whole queue, but only batches of
+         *    CKPT_REQ_BATCH_SIZE at once.
+         *
+         * Once we have cleared the requests from shared memory, we must
+         * PANIC if we then fail to absorb them (e.g., because our hashtable
+         * runs out of memory).  This is because the system cannot run safely
+         * if we are unable to fsync what we have been told to fsync.
+         * Fortunately, the hashtable is so small that the problem is quite
+         * unlikely to arise in practice.
+         *
+         * Note: The maximum possible size of a ring buffer is
+         * MAX_CHECKPOINT_REQUESTS entries, which fit into a maximum palloc
+         * allocation size of 1Gb.  Our maximum batch size,
+         * CKPT_REQ_BATCH_SIZE, is even smaller.
+         */
+        n = Min(CheckpointerShmem->num_requests, CKPT_REQ_BATCH_SIZE);
+        if (n > 0)
+        {
+            if (!requests)
+                requests = (CheckpointerRequest *) palloc(n * sizeof(CheckpointerRequest));
 
-    START_CRIT_SECTION();
+            for (i = 0; i < n; i++)
+            {
+                requests[i] = CheckpointerShmem->requests[CheckpointerShmem->head];
+                CheckpointerShmem->head = (CheckpointerShmem->head + 1) % CheckpointerShmem->max_requests;
+            }
 
-    CheckpointerShmem->num_requests = 0;
+            CheckpointerShmem->num_requests -= n;
 
-    LWLockRelease(CheckpointerCommLock);
+        }
+
+        START_CRIT_SECTION();
+
+        /* Are there any requests in the queue? If so, keep going. */
+        loop = CheckpointerShmem->num_requests != 0;
+
+        LWLockRelease(CheckpointerCommLock);
 
-    for (request = requests; n > 0; request++, n--)
-        RememberSyncRequest(&request->ftag, request->type);
+        for (request = requests; n > 0; request++, n--)
+            RememberSyncRequest(&request->ftag, request->type);
 
-    END_CRIT_SECTION();
+        END_CRIT_SECTION();
+    } while (loop);
 
     if (requests)
         pfree(requests);
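The AbsorbSyncRequests() rewrite above bounds how long CheckpointerCommLock is held: at most CKPT_REQ_BATCH_SIZE requests are copied out per lock acquisition, the expensive per-request work happens after the lock is released, and the do/while loops until the queue is empty. A compilable sketch of the same pattern, using a pthread mutex as a stand-in for the LWLock and ints as stand-in requests:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define CAPACITY   16
    #define BATCH_SIZE 4

    /* Shared FIFO guarded by a mutex (stand-in for CheckpointerCommLock). */
    static struct
    {
        pthread_mutex_t lock;
        int             head;       /* oldest entry */
        int             num;        /* entries currently queued */
        int             items[CAPACITY];
    } shared = {PTHREAD_MUTEX_INITIALIZER, 0, 0, {0}};

    /* Stand-in for RememberSyncRequest(): the expensive per-item work. */
    static void
    process_item(int item)
    {
        printf("absorbed %d\n", item);
    }

    static void
    absorb_in_batches(void)
    {
        int     batch[BATCH_SIZE];
        bool    more;

        do
        {
            int     n = 0;

            pthread_mutex_lock(&shared.lock);
            /* Dequeue at most BATCH_SIZE items while holding the lock... */
            while (n < BATCH_SIZE && shared.num > 0)
            {
                batch[n++] = shared.items[shared.head];
                shared.head = (shared.head + 1) % CAPACITY;
                shared.num--;
            }
            more = (shared.num != 0);
            pthread_mutex_unlock(&shared.lock);

            /* ...and do the slow processing only after releasing it. */
            for (int i = 0; i < n; i++)
                process_item(batch[i]);
        } while (more);
    }

    int
    main(void)
    {
        for (int i = 0; i < 10; i++)
            shared.items[shared.num++] = i;     /* pre-fill; head stays 0 */
        absorb_in_batches();                    /* three batches: 4 + 4 + 2 */
        return 0;
    }

The design tradeoff is the same as in the patch: producers blocked on the lock wait at most one batch's worth of copying, at the cost of re-taking the lock once per batch.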
diff --git a/src/backend/postmaster/pgarch.c b/src/backend/postmaster/pgarch.c
index 7e622ae4bd2..78e39e5f866 100644
--- a/src/backend/postmaster/pgarch.c
+++ b/src/backend/postmaster/pgarch.c
@@ -718,15 +718,15 @@ pgarch_readyXlog(char *xlog)
         /*
          * Store the file in our max-heap if it has a high enough priority.
          */
-        if (arch_files->arch_heap->bh_size < NUM_FILES_PER_DIRECTORY_SCAN)
+        if (binaryheap_size(arch_files->arch_heap) < NUM_FILES_PER_DIRECTORY_SCAN)
         {
             /* If the heap isn't full yet, quickly add it. */
-            arch_file = arch_files->arch_filenames[arch_files->arch_heap->bh_size];
+            arch_file = arch_files->arch_filenames[binaryheap_size(arch_files->arch_heap)];
             strcpy(arch_file, basename);
             binaryheap_add_unordered(arch_files->arch_heap,
                                      CStringGetDatum(arch_file));
 
             /* If we just filled the heap, make it a valid one. */
-            if (arch_files->arch_heap->bh_size == NUM_FILES_PER_DIRECTORY_SCAN)
+            if (binaryheap_size(arch_files->arch_heap) == NUM_FILES_PER_DIRECTORY_SCAN)
                 binaryheap_build(arch_files->arch_heap);
         }
         else if (ready_file_comparator(binaryheap_first(arch_files->arch_heap),
@@ -744,21 +744,21 @@ pgarch_readyXlog(char *xlog)
     FreeDir(rldir);
 
     /* If no files were found, simply return. */
-    if (arch_files->arch_heap->bh_size == 0)
+    if (binaryheap_empty(arch_files->arch_heap))
         return false;
 
     /*
      * If we didn't fill the heap, we didn't make it a valid one.  Do that
      * now.
      */
-    if (arch_files->arch_heap->bh_size < NUM_FILES_PER_DIRECTORY_SCAN)
+    if (binaryheap_size(arch_files->arch_heap) < NUM_FILES_PER_DIRECTORY_SCAN)
         binaryheap_build(arch_files->arch_heap);
 
     /*
      * Fill arch_files array with the files to archive in ascending order of
      * priority.
      */
-    arch_files->arch_files_size = arch_files->arch_heap->bh_size;
+    arch_files->arch_files_size = binaryheap_size(arch_files->arch_heap);
     for (int i = 0; i < arch_files->arch_files_size; i++)
         arch_files->arch_files[i] =
             DatumGetCString(binaryheap_remove_first(arch_files->arch_heap));
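The pgarch.c hunks replace direct reads of the heap's bh_size field with the binaryheap_size() and binaryheap_empty() accessors, keeping callers out of the struct's internals. Roughly what such accessors look like, sketched here from scratch for illustration (the actual definitions live in PostgreSQL's binaryheap header and are not reproduced in this diff):

    #include <stdbool.h>

    /* Sketch only: the real binaryheap struct carries more fields. */
    typedef struct
    {
        int bh_size;            /* number of nodes currently in the heap */
        /* comparator, flags, and node storage elided */
    } binaryheap;

    /* Accessors keep callers independent of the struct layout. */
    static inline int
    binaryheap_size(const binaryheap *heap)
    {
        return heap->bh_size;
    }

    static inline bool
    binaryheap_empty(const binaryheap *heap)
    {
        return heap->bh_size == 0;
    }

The behavior is unchanged; the point of the patch is encapsulation, so the field can later change representation without touching every caller.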
diff --git a/src/backend/postmaster/pmchild.c b/src/backend/postmaster/pmchild.c
index cde1d23a4ca..584bb58c8ab 100644
--- a/src/backend/postmaster/pmchild.c
+++ b/src/backend/postmaster/pmchild.c
@@ -60,6 +60,17 @@ NON_EXEC_STATIC int num_pmchild_slots = 0;
 dlist_head  ActiveChildList;
 
 /*
+ * Dummy pointer to persuade Valgrind that we've not leaked the array of
+ * PMChild structs.  Make it global to ensure the compiler doesn't
+ * optimize it away.
+ */
+#ifdef USE_VALGRIND
+extern PMChild *pmchild_array;
+PMChild    *pmchild_array;
+#endif
+
+
+/*
  * MaxLivePostmasterChildren
  *
  * This reports the number of postmaster child processes that can be active.
@@ -125,8 +136,13 @@ InitPostmasterChildSlots(void)
     for (int i = 0; i < BACKEND_NUM_TYPES; i++)
         num_pmchild_slots += pmchild_pools[i].size;
 
-    /* Initialize them */
+    /* Allocate enough slots, and make sure Valgrind doesn't complain */
     slots = palloc(num_pmchild_slots * sizeof(PMChild));
+#ifdef USE_VALGRIND
+    pmchild_array = slots;
+#endif
+
+    /* Initialize them */
     slotno = 0;
     for (int btype = 0; btype < BACKEND_NUM_TYPES; btype++)
     {
diff --git a/src/backend/postmaster/postmaster.c b/src/backend/postmaster/postmaster.c
index 490f7ce3664..e01d9f0cfe8 100644
--- a/src/backend/postmaster/postmaster.c
+++ b/src/backend/postmaster/postmaster.c
@@ -2630,6 +2630,13 @@ CleanupBackend(PMChild *bp,
     }
     bp = NULL;
 
+    /*
+     * In a crash case, exit immediately without resetting background worker
+     * state.  However, if restart_after_crash is enabled, the background
+     * worker state (e.g., rw_pid) still needs be reset so the worker can
+     * restart after crash recovery.  This reset is handled in
+     * ResetBackgroundWorkerCrashTimes(), not here.
+     */
     if (crashed)
     {
         HandleChildCrash(bp_pid, exitstatus, procname);
@@ -4337,15 +4344,15 @@ maybe_start_bgworkers(void)
 static bool
 maybe_reap_io_worker(int pid)
 {
-    for (int id = 0; id < MAX_IO_WORKERS; ++id)
+    for (int i = 0; i < MAX_IO_WORKERS; ++i)
     {
-        if (io_worker_children[id] &&
-            io_worker_children[id]->pid == pid)
+        if (io_worker_children[i] &&
+            io_worker_children[i]->pid == pid)
         {
-            ReleasePostmasterChildSlot(io_worker_children[id]);
+            ReleasePostmasterChildSlot(io_worker_children[i]);
 
             --io_worker_count;
-            io_worker_children[id] = NULL;
+            io_worker_children[i] = NULL;
             return true;
         }
     }
@@ -4389,22 +4396,22 @@ maybe_adjust_io_workers(void)
     while (io_worker_count < io_workers)
     {
         PMChild    *child;
-        int         id;
+        int         i;
 
         /* find unused entry in io_worker_children array */
-        for (id = 0; id < MAX_IO_WORKERS; ++id)
+        for (i = 0; i < MAX_IO_WORKERS; ++i)
         {
-            if (io_worker_children[id] == NULL)
+            if (io_worker_children[i] == NULL)
                 break;
         }
-        if (id == MAX_IO_WORKERS)
-            elog(ERROR, "could not find a free IO worker ID");
+        if (i == MAX_IO_WORKERS)
+            elog(ERROR, "could not find a free IO worker slot");
 
         /* Try to launch one. */
         child = StartChildProcess(B_IO_WORKER);
         if (child != NULL)
         {
-            io_worker_children[id] = child;
+            io_worker_children[i] = child;
             ++io_worker_count;
         }
         else
@@ -4415,11 +4422,11 @@ maybe_adjust_io_workers(void)
     if (io_worker_count > io_workers)
     {
         /* ask the IO worker in the highest slot to exit */
-        for (int id = MAX_IO_WORKERS - 1; id >= 0; --id)
+        for (int i = MAX_IO_WORKERS - 1; i >= 0; --i)
         {
-            if (io_worker_children[id] != NULL)
+            if (io_worker_children[i] != NULL)
             {
-                kill(io_worker_children[id]->pid, SIGUSR2);
+                kill(io_worker_children[i]->pid, SIGUSR2);
                 break;
             }
         }
diff --git a/src/backend/postmaster/walsummarizer.c b/src/backend/postmaster/walsummarizer.c
index 0fec4f1f871..777c9a8d555 100644
--- a/src/backend/postmaster/walsummarizer.c
+++ b/src/backend/postmaster/walsummarizer.c
@@ -385,7 +385,7 @@ WalSummarizerMain(const void *startup_data, size_t startup_data_len)
             switch_lsn = tliSwitchPoint(current_tli, tles, &switch_tli);
 
             ereport(DEBUG1,
-                    errmsg_internal("switch point from TLI %u to TLI %u is at %X/%X",
+                    errmsg_internal("switch point from TLI %u to TLI %u is at %X/%08X",
                                     current_tli, switch_tli,
                                     LSN_FORMAT_ARGS(switch_lsn)));
         }
@@ -741,7 +741,7 @@ WaitForWalSummarization(XLogRecPtr lsn)
                 ereport(ERROR,
                         (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
                          errmsg("WAL summarization is not progressing"),
-                         errdetail("Summarization is needed through %X/%X, but is stuck at %X/%X on disk and %X/%X in memory.",
+                         errdetail("Summarization is needed through %X/%08X, but is stuck at %X/%08X on disk and %X/%08X in memory.",
                                    LSN_FORMAT_ARGS(lsn),
                                    LSN_FORMAT_ARGS(summarized_lsn),
                                    LSN_FORMAT_ARGS(pending_lsn))));
@@ -755,12 +755,12 @@ WaitForWalSummarization(XLogRecPtr lsn)
                                 current_time) / 1000;
                 ereport(WARNING,
                         (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
-                         errmsg_plural("still waiting for WAL summarization through %X/%X after %ld second",
-                                       "still waiting for WAL summarization through %X/%X after %ld seconds",
+                         errmsg_plural("still waiting for WAL summarization through %X/%08X after %ld second",
+                                       "still waiting for WAL summarization through %X/%08X after %ld seconds",
                                        elapsed_seconds,
                                        LSN_FORMAT_ARGS(lsn),
                                        elapsed_seconds),
-                         errdetail("Summarization has reached %X/%X on disk and %X/%X in memory.",
+                         errdetail("Summarization has reached %X/%08X on disk and %X/%08X in memory.",
                                    LSN_FORMAT_ARGS(summarized_lsn),
                                    LSN_FORMAT_ARGS(pending_lsn))));
             }
@@ -981,7 +981,7 @@ SummarizeWAL(TimeLineID tli, XLogRecPtr start_lsn, bool exact,
         if (private_data->end_of_wal)
         {
             ereport(DEBUG1,
-                    errmsg_internal("could not read WAL from timeline %u at %X/%X: end of WAL at %X/%X",
+                    errmsg_internal("could not read WAL from timeline %u at %X/%08X: end of WAL at %X/%08X",
                                     tli,
                                     LSN_FORMAT_ARGS(start_lsn),
                                     LSN_FORMAT_ARGS(private_data->read_upto)));
@@ -1000,8 +1000,8 @@ SummarizeWAL(TimeLineID tli, XLogRecPtr start_lsn, bool exact,
         }
         else
             ereport(ERROR,
-                    (errmsg("could not find a valid record after %X/%X",
-                            LSN_FORMAT_ARGS(start_lsn))));
+                    errmsg("could not find a valid record after %X/%08X",
+                           LSN_FORMAT_ARGS(start_lsn)));
     }
 
     /* We shouldn't go backward. */
@@ -1034,7 +1034,7 @@ SummarizeWAL(TimeLineID tli, XLogRecPtr start_lsn, bool exact,
      * able to read a complete record.
      */
     ereport(DEBUG1,
-            errmsg_internal("could not read WAL from timeline %u at %X/%X: end of WAL at %X/%X",
+            errmsg_internal("could not read WAL from timeline %u at %X/%08X: end of WAL at %X/%08X",
                             tli,
                             LSN_FORMAT_ARGS(xlogreader->EndRecPtr),
                             LSN_FORMAT_ARGS(private_data->read_upto)));
@@ -1045,13 +1045,13 @@ SummarizeWAL(TimeLineID tli, XLogRecPtr start_lsn, bool exact,
     if (errormsg)
         ereport(ERROR,
                 (errcode_for_file_access(),
-                 errmsg("could not read WAL from timeline %u at %X/%X: %s",
+                 errmsg("could not read WAL from timeline %u at %X/%08X: %s",
                         tli, LSN_FORMAT_ARGS(xlogreader->EndRecPtr),
                         errormsg)));
     else
         ereport(ERROR,
                 (errcode_for_file_access(),
-                 errmsg("could not read WAL from timeline %u at %X/%X",
+                 errmsg("could not read WAL from timeline %u at %X/%08X",
                         tli, LSN_FORMAT_ARGS(xlogreader->EndRecPtr))));
 }
@@ -1222,7 +1222,7 @@ SummarizeWAL(TimeLineID tli, XLogRecPtr start_lsn, bool exact,
 
     /* Tell the user what we did. */
     ereport(DEBUG1,
-            errmsg_internal("summarized WAL on TLI %u from %X/%X to %X/%X",
+            errmsg_internal("summarized WAL on TLI %u from %X/%08X to %X/%08X",
                             tli,
                             LSN_FORMAT_ARGS(summary_start_lsn),
                             LSN_FORMAT_ARGS(summary_end_lsn)));
@@ -1234,7 +1234,7 @@ SummarizeWAL(TimeLineID tli, XLogRecPtr start_lsn, bool exact,
     /* If we skipped a non-zero amount of WAL, log a debug message. */
     if (summary_end_lsn > summary_start_lsn && fast_forward)
         ereport(DEBUG1,
-                errmsg_internal("skipped summarizing WAL on TLI %u from %X/%X to %X/%X",
+                errmsg_internal("skipped summarizing WAL on TLI %u from %X/%08X to %X/%08X",
                                 tli,
                                 LSN_FORMAT_ARGS(summary_start_lsn),
                                 LSN_FORMAT_ARGS(summary_end_lsn)));
@@ -1580,7 +1580,7 @@ summarizer_read_local_xlog_page(XLogReaderState *state,
 
     /* Debugging output. */
     ereport(DEBUG1,
-            errmsg_internal("timeline %u became historic, can read up to %X/%X",
+            errmsg_internal("timeline %u became historic, can read up to %X/%08X",
                             private_data->tli,
                             LSN_FORMAT_ARGS(private_data->read_upto)));
 }
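All of the walsummarizer.c hunks make one mechanical change: LSNs are printed as %X/%08X instead of %X/%X, zero-padding the low 32-bit half to eight hex digits so logged LSNs have a fixed width. A tiny standalone demo of the difference; the LSN_FORMAT_ARGS macro is re-created locally for illustration (PostgreSQL defines its own in a header, which this diff does not touch):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t XLogRecPtr;

    /* Same high/low split that PostgreSQL's LSN_FORMAT_ARGS performs. */
    #define LSN_FORMAT_ARGS(lsn) \
        ((unsigned) ((lsn) >> 32)), ((unsigned) (lsn))

    int
    main(void)
    {
        XLogRecPtr  lsn = ((XLogRecPtr) 0x1 << 32) | 0x2345;

        printf("old: %X/%X\n", LSN_FORMAT_ARGS(lsn));   /* old: 1/2345     */
        printf("new: %X/%08X\n", LSN_FORMAT_ARGS(lsn)); /* new: 1/00002345 */
        return 0;
    }

With the padding, two LSNs on the same timeline compare the same way textually as numerically, which makes log output easier to align and sort.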