Diffstat (limited to 'src/backend/replication/logical/reorderbuffer.c')
-rw-r--r--	src/backend/replication/logical/reorderbuffer.c	32
1 file changed, 28 insertions, 4 deletions
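The diff below batches the reorder buffer's memory accounting: when a transaction's changes are cleaned up or truncated, the sizes of the freed changes are summed and ReorderBufferChangeMemoryUpdate() is called once with the total, instead of adjusting the counter (and the max-heap behind it) for every individual change. The standalone sketch that follows only illustrates that pattern; it is not PostgreSQL code, and the types and helper names in it (mock_change, change_size, update_memory_counter) are invented stand-ins.

/*
 * Minimal sketch of the "sum first, update once" pattern used in this
 * commit.  The expensive bookkeeping (in PostgreSQL, the max-heap that
 * tracks the largest transactions) is hidden behind the single call to
 * update_memory_counter(), so calling it once per cleanup instead of
 * once per change avoids repeated heap maintenance.
 */
#include <stddef.h>
#include <stdio.h>

typedef struct mock_change
{
	size_t		size;
	struct mock_change *next;
} mock_change;

/* Stand-in for ReorderBufferChangeSize(): report one change's size. */
static size_t
change_size(const mock_change *change)
{
	return change->size;
}

/*
 * Stand-in for ReorderBufferChangeMemoryUpdate(): in the real code this
 * is where the comparatively expensive heap adjustment happens, which is
 * why it pays to call it once with the accumulated total.
 */
static void
update_memory_counter(size_t *txn_size, size_t freed)
{
	*txn_size -= freed;
}

int
main(void)
{
	mock_change c3 = {128, NULL};
	mock_change c2 = {512, &c3};
	mock_change c1 = {256, &c2};
	size_t		txn_size = 256 + 512 + 128;
	size_t		mem_freed = 0;

	/* Sum up the sizes while releasing the changes ... */
	for (mock_change *c = &c1; c != NULL; c = c->next)
		mem_freed += change_size(c);

	/* ... then update the counter (and heap) exactly once. */
	update_memory_counter(&txn_size, mem_freed);

	printf("remaining txn size: %zu\n", txn_size);	/* prints 0 */
	return 0;
}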
diff --git a/src/backend/replication/logical/reorderbuffer.c b/src/backend/replication/logical/reorderbuffer.c
index 00a8327e771..b3139c41e2b 100644
--- a/src/backend/replication/logical/reorderbuffer.c
+++ b/src/backend/replication/logical/reorderbuffer.c
@@ -467,6 +467,9 @@ ReorderBufferReturnTXN(ReorderBuffer *rb, ReorderBufferTXN *txn)
/* Reset the toast hash */
ReorderBufferToastReset(rb, txn);
+ /* All changes must be deallocated */
+ Assert(txn->size == 0);
+
pfree(txn);
}
@@ -1506,6 +1509,7 @@ ReorderBufferCleanupTXN(ReorderBuffer *rb, ReorderBufferTXN *txn)
{
bool found;
dlist_mutable_iter iter;
+ Size mem_freed = 0;
/* cleanup subtransactions & their changes */
dlist_foreach_modify(iter, &txn->subtxns)
@@ -1535,9 +1539,20 @@ ReorderBufferCleanupTXN(ReorderBuffer *rb, ReorderBufferTXN *txn)
/* Check we're not mixing changes from different transactions. */
Assert(change->txn == txn);
+ /*
+ * Instead of updating the memory counter for individual changes,
+ * we sum up the size of memory to free so we can update the memory
+ * counter all together below. This saves costs of maintaining
+ * the max-heap.
+ */
+ mem_freed += ReorderBufferChangeSize(change);
+
ReorderBufferReturnChange(rb, change, false);
}
+ /* Update the memory counter */
+ ReorderBufferChangeMemoryUpdate(rb, NULL, txn, false, mem_freed);
+
/*
* Cleanup the tuplecids we stored for decoding catalog snapshot access.
* They are always stored in the toplevel transaction.
@@ -1594,9 +1609,6 @@ ReorderBufferCleanupTXN(ReorderBuffer *rb, ReorderBufferTXN *txn)
if (rbtxn_is_serialized(txn))
ReorderBufferRestoreCleanup(rb, txn);
- /* Update the memory counter */
- ReorderBufferChangeMemoryUpdate(rb, NULL, txn, false, txn->size);
-
/* deallocate */
ReorderBufferReturnTXN(rb, txn);
}
@@ -1616,6 +1628,7 @@ static void
ReorderBufferTruncateTXN(ReorderBuffer *rb, ReorderBufferTXN *txn, bool txn_prepared)
{
dlist_mutable_iter iter;
+ Size mem_freed = 0;
/* cleanup subtransactions & their changes */
dlist_foreach_modify(iter, &txn->subtxns)
@@ -1648,11 +1661,19 @@ ReorderBufferTruncateTXN(ReorderBuffer *rb, ReorderBufferTXN *txn, bool txn_prep
/* remove the change from its containing list */
dlist_delete(&change->node);
+ /*
+ * Instead of updating the memory counter for individual changes,
+ * we sum up the size of memory to free so we can update the memory
+ * counter all together below. This saves costs of maintaining
+ * the max-heap.
+ */
+ mem_freed += ReorderBufferChangeSize(change);
+
ReorderBufferReturnChange(rb, change, false);
}
/* Update the memory counter */
- ReorderBufferChangeMemoryUpdate(rb, NULL, txn, false, txn->size);
+ ReorderBufferChangeMemoryUpdate(rb, NULL, txn, false, mem_freed);
/*
* Mark the transaction as streamed.
@@ -2062,6 +2083,9 @@ ReorderBufferResetTXN(ReorderBuffer *rb, ReorderBufferTXN *txn,
rb->stream_stop(rb, txn, last_lsn);
ReorderBufferSaveTXNSnapshot(rb, txn, snapshot_now, command_id);
}
+
+ /* All changes must be deallocated */
+ Assert(txn->size == 0);
}
/*