about summary refs log tree commit diff
path: root/src
diff options
context:
space:
mode:
author	Andres Freund <andres@anarazel.de>	2016-03-07 14:24:58 -0800
committer	Andres Freund <andres@anarazel.de>	2016-03-07 14:24:58 -0800
commit	e3e84fd35838a43feef94ea78534713d745e9a08 (patch)
tree	ea5e0de2b81e6018d20d12d3f7911fd04cbb8ddc /src
parent	89f8372cb3a223cace21d4037336bfb37e8dbf3c (diff)
download	postgresql-e3e84fd35838a43feef94ea78534713d745e9a08.tar.gz
	postgresql-e3e84fd35838a43feef94ea78534713d745e9a08.zip
Further improvements to c8f621c43.
Coverity and inspection for the issue addressed in fd45d16f found some questionable code. Specifically coverity noticed that the wrong length was added in ReorderBufferSerializeChange() - without immediate negative consequences as the variable isn't used afterwards. During code-review and testing I noticed that a bit of space was wasted when allocating tuple bufs in several places. Thirdly, the debug memset()s in ReorderBufferGetTupleBuf() reduce the error checking valgrind can do. Backpatch: 9.4, like c8f621c43.
Diffstat (limited to 'src')
-rw-r--r--	src/backend/replication/logical/decode.c	| 24
-rw-r--r--	src/backend/replication/logical/reorderbuffer.c	| 3
2 files changed, 18 insertions(+), 9 deletions(-)
diff --git a/src/backend/replication/logical/decode.c b/src/backend/replication/logical/decode.c
index 0f7ff1501be..fafbd5f0ec2 100644
--- a/src/backend/replication/logical/decode.c
+++ b/src/backend/replication/logical/decode.c
@@ -625,7 +625,8 @@ DecodeInsert(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
if (xlrec->flags & XLOG_HEAP_CONTAINS_NEW_TUPLE)
{
- Size tuplelen = r->xl_len - SizeOfHeapInsert;
+ Size datalen = r->xl_len - SizeOfHeapInsert;
+ Size tuplelen = datalen - SizeOfHeapHeader;
Assert(r->xl_len > (SizeOfHeapInsert + SizeOfHeapHeader));
@@ -633,7 +634,7 @@ DecodeInsert(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
ReorderBufferGetTupleBuf(ctx->reorder, tuplelen);
DecodeXLogTuple((char *) xlrec + SizeOfHeapInsert,
- tuplelen, change->data.tp.newtuple);
+ datalen, change->data.tp.newtuple);
}
change->data.tp.clear_toast_afterwards = true;
@@ -670,6 +671,7 @@ DecodeUpdate(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
if (xlrec->flags & XLOG_HEAP_CONTAINS_NEW_TUPLE)
{
+ Size datalen;
Size tuplelen;
xl_heap_header_len xlhdr;
@@ -678,12 +680,13 @@ DecodeUpdate(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
memcpy(&xlhdr, data, sizeof(xlhdr));
data += offsetof(xl_heap_header_len, header);
- tuplelen = xlhdr.t_len + SizeOfHeapHeader;
+ datalen = xlhdr.t_len + SizeOfHeapHeader;
+ tuplelen = xlhdr.t_len;
change->data.tp.newtuple =
ReorderBufferGetTupleBuf(ctx->reorder, tuplelen);
- DecodeXLogTuple(data, tuplelen, change->data.tp.newtuple);
+ DecodeXLogTuple(data, datalen, change->data.tp.newtuple);
/* skip over the rest of the tuple header */
data += SizeOfHeapHeader;
/* skip over the tuple data */
@@ -692,18 +695,20 @@ DecodeUpdate(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
if (xlrec->flags & XLOG_HEAP_CONTAINS_OLD)
{
+ Size datalen;
Size tuplelen;
xl_heap_header_len xlhdr;
memcpy(&xlhdr, data, sizeof(xlhdr));
data += offsetof(xl_heap_header_len, header);
- tuplelen = xlhdr.t_len + SizeOfHeapHeader;
+ datalen = xlhdr.t_len + SizeOfHeapHeader;
+ tuplelen = xlhdr.t_len;
change->data.tp.oldtuple =
ReorderBufferGetTupleBuf(ctx->reorder, tuplelen);
- DecodeXLogTuple(data, tuplelen, change->data.tp.oldtuple);
+ DecodeXLogTuple(data, datalen, change->data.tp.oldtuple);
#ifdef NOT_USED
data += SizeOfHeapHeader;
data += xlhdr.t_len;
@@ -741,15 +746,16 @@ DecodeDelete(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
/* old primary key stored */
if (xlrec->flags & XLOG_HEAP_CONTAINS_OLD)
{
- Size len = r->xl_len - SizeOfHeapDelete;
+ Size datalen = r->xl_len - SizeOfHeapDelete;
+ Size tuplelen = datalen - SizeOfHeapHeader;
Assert(r->xl_len > (SizeOfHeapDelete + SizeOfHeapHeader));
change->data.tp.oldtuple =
- ReorderBufferGetTupleBuf(ctx->reorder, len);
+ ReorderBufferGetTupleBuf(ctx->reorder, tuplelen);
DecodeXLogTuple((char *) xlrec + SizeOfHeapDelete,
- len, change->data.tp.oldtuple);
+ datalen, change->data.tp.oldtuple);
}
change->data.tp.clear_toast_afterwards = true;
diff --git a/src/backend/replication/logical/reorderbuffer.c b/src/backend/replication/logical/reorderbuffer.c
index 8eadb4bed18..1c8a5540516 100644
--- a/src/backend/replication/logical/reorderbuffer.c
+++ b/src/backend/replication/logical/reorderbuffer.c
@@ -469,12 +469,15 @@ ReorderBufferGetTupleBuf(ReorderBuffer *rb, Size tuple_len)
rb->nr_cached_tuplebufs--;
tuple = slist_container(ReorderBufferTupleBuf, node,
slist_pop_head_node(&rb->cached_tuplebufs));
+ Assert(tuple->alloc_tuple_size == MaxHeapTupleSize);
#ifdef USE_ASSERT_CHECKING
memset(&tuple->tuple, 0xa9, sizeof(HeapTupleData));
+ VALGRIND_MAKE_MEM_UNDEFINED(&tuple->tuple, sizeof(HeapTupleData));
#endif
tuple->tuple.t_data = ReorderBufferTupleBufData(tuple);
#ifdef USE_ASSERT_CHECKING
memset(tuple->tuple.t_data, 0xa8, tuple->alloc_tuple_size);
+ VALGRIND_MAKE_MEM_UNDEFINED(tuple->tuple.t_data, tuple->alloc_tuple_size);
#endif
}
else