Diffstat (limited to 'src/backend/postmaster/checkpointer.c')
-rw-r--r--  src/backend/postmaster/checkpointer.c | 32
1 file changed, 2 insertions(+), 30 deletions(-)
diff --git a/src/backend/postmaster/checkpointer.c b/src/backend/postmaster/checkpointer.c
index ace9893d957..4fe403c9a89 100644
--- a/src/backend/postmaster/checkpointer.c
+++ b/src/backend/postmaster/checkpointer.c
@@ -91,17 +91,11 @@
* requesting backends since the last checkpoint start. The flags are
* chosen so that OR'ing is the correct way to combine multiple requests.
*
- * num_backend_writes is used to count the number of buffer writes performed
- * by user backend processes. This counter should be wide enough that it
- * can't overflow during a single processing cycle. num_backend_fsync
- * counts the subset of those writes that also had to do their own fsync,
- * because the checkpointer failed to absorb their request.
- *
* The requests array holds fsync requests sent by backends and not yet
* absorbed by the checkpointer.
*
- * Unlike the checkpoint fields, num_backend_writes, num_backend_fsync, and
- * the requests fields are protected by CheckpointerCommLock.
+ * Unlike the checkpoint fields, requests related fields are protected by
+ * CheckpointerCommLock.
*----------
*/
typedef struct
@@ -125,9 +119,6 @@ typedef struct
ConditionVariable start_cv; /* signaled when ckpt_started advances */
ConditionVariable done_cv; /* signaled when ckpt_done advances */
- uint32 num_backend_writes; /* counts user backend buffer writes */
- uint32 num_backend_fsync; /* counts user backend fsync calls */
-
int num_requests; /* current # of requests */
int max_requests; /* allocated array size */
CheckpointerRequest requests[FLEXIBLE_ARRAY_MEMBER];
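
For orientation, the struct these two hunks edit is CheckpointerShmemStruct in this file; after the patch the stretch shown above reduces to the sketch below, assembled from the context lines only (the struct's other members are omitted here):

typedef struct
{
	ConditionVariable start_cv; /* signaled when ckpt_started advances */
	ConditionVariable done_cv;	/* signaled when ckpt_done advances */

	int			num_requests;	/* current # of requests */
	int			max_requests;	/* allocated array size */
	CheckpointerRequest requests[FLEXIBLE_ARRAY_MEMBER];
} CheckpointerShmemStruct;

num_requests, max_requests, and requests[] are the "requests related fields" that the reworded comment in the first hunk keeps under CheckpointerCommLock.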
@@ -1095,10 +1086,6 @@ ForwardSyncRequest(const FileTag *ftag, SyncRequestType type)
LWLockAcquire(CheckpointerCommLock, LW_EXCLUSIVE);
- /* Count all backend writes regardless of if they fit in the queue */
- if (!AmBackgroundWriterProcess())
- CheckpointerShmem->num_backend_writes++;
-
/*
* If the checkpointer isn't running or the request queue is full, the
* backend will have to perform its own fsync request. But before forcing
@@ -1108,12 +1095,6 @@ ForwardSyncRequest(const FileTag *ftag, SyncRequestType type)
(CheckpointerShmem->num_requests >= CheckpointerShmem->max_requests &&
!CompactCheckpointerRequestQueue()))
{
- /*
- * Count the subset of writes where backends have to do their own
- * fsync
- */
- if (!AmBackgroundWriterProcess())
- CheckpointerShmem->num_backend_fsync++;
LWLockRelease(CheckpointerCommLock);
return false;
}
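
With the counters gone, the only work left in this branch of ForwardSyncRequest() is to give up: release the lock and return false so the calling backend performs the fsync itself. A condensed sketch of the resulting flow, using just the identifiers visible in these hunks (not the complete function; the "checkpointer isn't running" test mentioned in the comment and the surrounding queueing code are omitted):

	LWLockAcquire(CheckpointerCommLock, LW_EXCLUSIVE);

	/* Queue full and not compactable: the backend must fsync on its own. */
	if (CheckpointerShmem->num_requests >= CheckpointerShmem->max_requests &&
		!CompactCheckpointerRequestQueue())
	{
		LWLockRelease(CheckpointerCommLock);
		return false;
	}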
@@ -1270,15 +1251,6 @@ AbsorbSyncRequests(void)
LWLockAcquire(CheckpointerCommLock, LW_EXCLUSIVE);
- /* Transfer stats counts into pending pgstats message */
- PendingCheckpointerStats.buf_written_backend
- += CheckpointerShmem->num_backend_writes;
- PendingCheckpointerStats.buf_fsync_backend
- += CheckpointerShmem->num_backend_fsync;
-
- CheckpointerShmem->num_backend_writes = 0;
- CheckpointerShmem->num_backend_fsync = 0;
-
/*
* We try to avoid holding the lock for a long time by copying the request
* array, and processing the requests after releasing the lock.
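
The code deleted here was the counters' only consumer: while holding CheckpointerCommLock, AbsorbSyncRequests() added them into PendingCheckpointerStats and reset them to zero. As a generic, self-contained illustration of that drain-and-reset pattern (a standalone sketch with a pthread mutex standing in for the LWLock and made-up stand-in structs, not PostgreSQL code):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for CheckpointerCommLock. */
static pthread_mutex_t comm_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for the two counters that lived in shared memory. */
static struct
{
	uint32_t	num_backend_writes;
	uint32_t	num_backend_fsync;
}			shmem;

/* Stand-in for the local pending-stats struct. */
static struct
{
	uint64_t	buf_written_backend;
	uint64_t	buf_fsync_backend;
}			pending_stats;

/* Backend side: bump the shared counters while holding the lock. */
static void
count_backend_write(int had_to_fsync)
{
	pthread_mutex_lock(&comm_lock);
	shmem.num_backend_writes++;
	if (had_to_fsync)
		shmem.num_backend_fsync++;
	pthread_mutex_unlock(&comm_lock);
}

/* Checkpointer side: drain the counters into pending stats and reset them. */
static void
absorb_counters(void)
{
	pthread_mutex_lock(&comm_lock);
	pending_stats.buf_written_backend += shmem.num_backend_writes;
	pending_stats.buf_fsync_backend += shmem.num_backend_fsync;
	shmem.num_backend_writes = 0;
	shmem.num_backend_fsync = 0;
	pthread_mutex_unlock(&comm_lock);
}

int
main(void)
{
	count_backend_write(0);
	count_backend_write(1);
	absorb_counters();
	printf("writes=%llu fsyncs=%llu\n",
		   (unsigned long long) pending_stats.buf_written_backend,
		   (unsigned long long) pending_stats.buf_fsync_backend);
	return 0;
}

Compile with "cc demo.c -o demo -lpthread". The point of the pattern is that the shared counters are read and zeroed under the same lock their writers take, so no increments are lost between drains; the patch removes this machinery from the checkpointer along with the counters themselves.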