author     Heikki Linnakangas <heikki.linnakangas@iki.fi>  2014-04-04 13:12:38 +0300
committer  Heikki Linnakangas <heikki.linnakangas@iki.fi>  2014-04-04 13:32:40 +0300
commit     895243d69ba1972157d8d2644efbf87d557abec3 (patch)
tree       b348f19bede0cbe2fd12c3f0a9ed08848f0e9586
parent     447e23737cc82489258f9b2564fac68cf834188f (diff)
Avoid allocations in critical sections.
If a palloc() inside a critical section fails, the resulting error is escalated to a PANIC.
-rw-r--r--  src/backend/access/nbtree/nbtinsert.c  55
-rw-r--r--  src/backend/access/transam/xlog.c       10
-rw-r--r--  src/backend/storage/page/bufpage.c      21
3 files changed, 43 insertions, 43 deletions
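
The common theme of the patch is that any ERROR raised while a critical section is open is escalated to PANIC, so an out-of-memory failure from palloc() inside one would take the whole server down. A minimal sketch of that escalation rule in plain C; crit_section_count, report_error() and checked_alloc() are hypothetical stand-ins for PostgreSQL's CritSectionCount, ereport() and palloc(), not the real APIs:

#include <stdio.h>
#include <stdlib.h>

static int crit_section_count = 0;      /* stand-in for CritSectionCount */

/* Stand-in for ereport(ERROR, ...): escalates to PANIC inside a critical section. */
static void
report_error(const char *msg)
{
    if (crit_section_count > 0)
    {
        fprintf(stderr, "PANIC: %s\n", msg);
        abort();                        /* unrecoverable: the server restarts */
    }
    fprintf(stderr, "ERROR: %s\n", msg);
    /* ... ordinary transaction abort and cleanup would run here ... */
}

/* Stand-in for palloc(): reports an error on allocation failure. */
static void *
checked_alloc(size_t size)
{
    void   *p = malloc(size);

    if (p == NULL)
        report_error("out of memory");  /* PANICs if a critical section is open */
    return p;
}

Moving the allocations to before START_CRIT_SECTION(), as the hunks below do, keeps an out-of-memory condition on the ordinary, recoverable ERROR path.
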
diff --git a/src/backend/access/nbtree/nbtinsert.c b/src/backend/access/nbtree/nbtinsert.c
index d758659c314..735291f342e 100644
--- a/src/backend/access/nbtree/nbtinsert.c
+++ b/src/backend/access/nbtree/nbtinsert.c
@@ -1840,8 +1840,10 @@ _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf)
BTPageOpaque rootopaque;
ItemId itemid;
IndexTuple item;
- Size itemsz;
- IndexTuple new_item;
+ IndexTuple left_item;
+ Size left_item_sz;
+ IndexTuple right_item;
+ Size right_item_sz;
Buffer metabuf;
Page metapg;
BTMetaPageData *metad;
@@ -1860,6 +1862,26 @@ _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf)
metapg = BufferGetPage(metabuf);
metad = BTPageGetMeta(metapg);
+ /*
+ * Create downlink item for left page (old root). Since this will be the
+ * first item in a non-leaf page, it implicitly has minus-infinity key
+ * value, so we need not store any actual key in it.
+ */
+ left_item_sz = sizeof(IndexTupleData);
+ left_item = (IndexTuple) palloc(left_item_sz);
+ left_item->t_info = left_item_sz;
+ ItemPointerSet(&(left_item->t_tid), lbkno, P_HIKEY);
+
+ /*
+ * Create downlink item for right page. The key for it is obtained from
+ * the "high key" position in the left page.
+ */
+ itemid = PageGetItemId(lpage, P_HIKEY);
+ right_item_sz = ItemIdGetLength(itemid);
+ item = (IndexTuple) PageGetItem(lpage, itemid);
+ right_item = CopyIndexTuple(item);
+ ItemPointerSet(&(right_item->t_tid), rbkno, P_HIKEY);
+
/* NO EREPORT(ERROR) from here till newroot op is logged */
START_CRIT_SECTION();
@@ -1878,16 +1900,6 @@ _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf)
metad->btm_fastlevel = rootopaque->btpo.level;
/*
- * Create downlink item for left page (old root). Since this will be the
- * first item in a non-leaf page, it implicitly has minus-infinity key
- * value, so we need not store any actual key in it.
- */
- itemsz = sizeof(IndexTupleData);
- new_item = (IndexTuple) palloc(itemsz);
- new_item->t_info = itemsz;
- ItemPointerSet(&(new_item->t_tid), lbkno, P_HIKEY);
-
- /*
* Insert the left page pointer into the new root page. The root page is
* the rightmost page on its level so there is no "high key" in it; the
* two items will go into positions P_HIKEY and P_FIRSTKEY.
@@ -1895,32 +1907,20 @@ _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf)
* Note: we *must* insert the two items in item-number order, for the
* benefit of _bt_restore_page().
*/
- if (PageAddItem(rootpage, (Item) new_item, itemsz, P_HIKEY,
+ if (PageAddItem(rootpage, (Item) left_item, left_item_sz, P_HIKEY,
false, false) == InvalidOffsetNumber)
elog(PANIC, "failed to add leftkey to new root page"
" while splitting block %u of index \"%s\"",
BufferGetBlockNumber(lbuf), RelationGetRelationName(rel));
- pfree(new_item);
-
- /*
- * Create downlink item for right page. The key for it is obtained from
- * the "high key" position in the left page.
- */
- itemid = PageGetItemId(lpage, P_HIKEY);
- itemsz = ItemIdGetLength(itemid);
- item = (IndexTuple) PageGetItem(lpage, itemid);
- new_item = CopyIndexTuple(item);
- ItemPointerSet(&(new_item->t_tid), rbkno, P_HIKEY);
/*
* insert the right page pointer into the new root page.
*/
- if (PageAddItem(rootpage, (Item) new_item, itemsz, P_FIRSTKEY,
+ if (PageAddItem(rootpage, (Item) right_item, right_item_sz, P_FIRSTKEY,
false, false) == InvalidOffsetNumber)
elog(PANIC, "failed to add rightkey to new root page"
" while splitting block %u of index \"%s\"",
BufferGetBlockNumber(lbuf), RelationGetRelationName(rel));
- pfree(new_item);
MarkBufferDirty(rootbuf);
MarkBufferDirty(metabuf);
@@ -1968,6 +1968,9 @@ _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf)
/* done with metapage */
_bt_relbuf(rel, metabuf);
+ pfree(left_item);
+ pfree(right_item);
+
return rootbuf;
}
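
In the hunks above, _bt_newroot() now builds both downlink tuples before entering the critical section and frees them only after it ends. A minimal sketch of that restructuring in plain C; build_downlink(), add_to_page() and the critical-section helpers are hypothetical stand-ins, not PostgreSQL functions:

#include <stdlib.h>
#include <string.h>

static void start_critical_section(void) { }   /* START_CRIT_SECTION() stand-in */
static void end_critical_section(void) { }     /* END_CRIT_SECTION() stand-in */
static void add_to_page(const char *item) { (void) item; }

/* Hypothetical helper: a failure here is still an ordinary, recoverable error. */
static char *
build_downlink(const char *key)
{
    size_t  len = strlen(key) + 1;
    char   *item = malloc(len);

    if (item != NULL)
        memcpy(item, key, len);
    return item;
}

void
make_new_root(void)
{
    /* Do all allocation and copying before the critical section starts. */
    char   *left_item = build_downlink("minus-infinity downlink");
    char   *right_item = build_downlink("high key of the left page");

    if (left_item == NULL || right_item == NULL)
    {
        free(left_item);
        free(right_item);
        return;                 /* normal error path, no PANIC risk */
    }

    start_critical_section();   /* nothing in here is allowed to fail */
    add_to_page(left_item);     /* P_HIKEY slot */
    add_to_page(right_item);    /* P_FIRSTKEY slot */
    end_critical_section();

    /* Release the scratch tuples only after the critical section ends. */
    free(left_item);
    free(right_item);
}
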
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index 42ec2fe71de..6b9209d882a 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -2348,6 +2348,7 @@ XLogFileInit(uint32 log, uint32 seg,
{
char path[MAXPGPATH];
char tmppath[MAXPGPATH];
+ char zbuffer_raw[BLCKSZ + MAXIMUM_ALIGNOF];
char *zbuffer;
uint32 installed_log;
uint32 installed_seg;
@@ -2405,11 +2406,11 @@ XLogFileInit(uint32 log, uint32 seg,
* fdatasync(2) or O_DSYNC will be sufficient to sync future writes to the
* log file.
*
- * Note: palloc zbuffer, instead of just using a local char array, to
- * ensure it is reasonably well-aligned; this may save a few cycles
- * transferring data to the kernel.
+ * Note: ensure the buffer is reasonably well-aligned; this may save a few
+ * cycles transferring data to the kernel.
*/
- zbuffer = (char *) palloc0(XLOG_BLCKSZ);
+ zbuffer = (char *) MAXALIGN(zbuffer_raw);
+ memset(zbuffer, 0, BLCKSZ);
for (nbytes = 0; nbytes < XLogSegSize; nbytes += XLOG_BLCKSZ)
{
errno = 0;
@@ -2429,7 +2430,6 @@ XLogFileInit(uint32 log, uint32 seg,
errmsg("could not write to file \"%s\": %m", tmppath)));
}
}
- pfree(zbuffer);
if (pg_fsync(fd) != 0)
ereport(ERROR,
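
The xlog.c hunk drops the palloc0() and instead over-allocates a local array by MAXIMUM_ALIGNOF, then aligns a pointer into it with MAXALIGN(). A minimal sketch of that alignment trick in plain C, assuming an 8-byte maximum alignment; BLOCK_SIZE and max_align_up() are hypothetical stand-ins for the block size macro and MAXALIGN():

#include <stdint.h>
#include <string.h>

#define BLOCK_SIZE   8192
#define MAX_ALIGNOF  8          /* assumed maximum alignment requirement */

/* Round a pointer up to the next MAX_ALIGNOF boundary, like MAXALIGN(). */
static char *
max_align_up(char *p)
{
    uintptr_t   u = (uintptr_t) p;

    return (char *) ((u + MAX_ALIGNOF - 1) & ~(uintptr_t) (MAX_ALIGNOF - 1));
}

void
zero_fill_block(int fd)
{
    /* Over-allocate on the stack so an aligned BLOCK_SIZE window always fits. */
    char    raw[BLOCK_SIZE + MAX_ALIGNOF];
    char   *buf = max_align_up(raw);

    memset(buf, 0, BLOCK_SIZE);
    /* buf can now be handed to write(fd, buf, BLOCK_SIZE) with no heap use. */
    (void) fd;
}

A plain char array carries no particular alignment guarantee, so the extra padding bytes ensure the aligned pointer still has a full block's worth of space behind it.
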
diff --git a/src/backend/storage/page/bufpage.c b/src/backend/storage/page/bufpage.c
index 6bd38127105..4f580fa9b5f 100644
--- a/src/backend/storage/page/bufpage.c
+++ b/src/backend/storage/page/bufpage.c
@@ -15,6 +15,7 @@
#include "postgres.h"
#include "access/htup.h"
+#include "access/itup.h"
#include "storage/bufpage.h"
@@ -363,8 +364,6 @@ PageRepairFragmentation(Page page)
Offset pd_lower = ((PageHeader) page)->pd_lower;
Offset pd_upper = ((PageHeader) page)->pd_upper;
Offset pd_special = ((PageHeader) page)->pd_special;
- itemIdSort itemidbase,
- itemidptr;
ItemId lp;
int nline,
nstorage,
@@ -414,10 +413,11 @@ PageRepairFragmentation(Page page)
((PageHeader) page)->pd_upper = pd_special;
}
else
- { /* nstorage != 0 */
+ {
/* Need to compact the page the hard way */
- itemidbase = (itemIdSort) palloc(sizeof(itemIdSortData) * nstorage);
- itemidptr = itemidbase;
+ itemIdSortData itemidbase[MaxHeapTuplesPerPage];
+ itemIdSort itemidptr = itemidbase;
+
totallen = 0;
for (i = 0; i < nline; i++)
{
@@ -462,8 +462,6 @@ PageRepairFragmentation(Page page)
}
((PageHeader) page)->pd_upper = upper;
-
- pfree(itemidbase);
}
/* Set hint bit for PageAddItem */
@@ -712,8 +710,8 @@ PageIndexMultiDelete(Page page, OffsetNumber *itemnos, int nitems)
Offset pd_lower = phdr->pd_lower;
Offset pd_upper = phdr->pd_upper;
Offset pd_special = phdr->pd_special;
- itemIdSort itemidbase,
- itemidptr;
+ itemIdSortData itemidbase[MaxIndexTuplesPerPage];
+ itemIdSort itemidptr;
ItemId lp;
int nline,
nused;
@@ -725,6 +723,8 @@ PageIndexMultiDelete(Page page, OffsetNumber *itemnos, int nitems)
int nextitm;
OffsetNumber offnum;
+ Assert(nitems < MaxIndexTuplesPerPage);
+
/*
* If there aren't very many items to delete, then retail
* PageIndexTupleDelete is the best way. Delete the items in reverse
@@ -759,7 +759,6 @@ PageIndexMultiDelete(Page page, OffsetNumber *itemnos, int nitems)
* still validity-checking.
*/
nline = PageGetMaxOffsetNumber(page);
- itemidbase = (itemIdSort) palloc(sizeof(itemIdSortData) * nline);
itemidptr = itemidbase;
totallen = 0;
nused = 0;
@@ -825,6 +824,4 @@ PageIndexMultiDelete(Page page, OffsetNumber *itemnos, int nitems)
phdr->pd_lower = SizeOfPageHeaderData + nused * sizeof(ItemIdData);
phdr->pd_upper = upper;
-
- pfree(itemidbase);
}
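
The bufpage.c hunks replace palloc()/pfree() pairs with local arrays sized for the worst case (MaxHeapTuplesPerPage and MaxIndexTuplesPerPage), guarded by an assertion. A minimal sketch of the same idea in plain C; MAX_ITEMS_PER_PAGE and item_ref are hypothetical stand-ins for those limits and for itemIdSortData:

#include <assert.h>

#define MAX_ITEMS_PER_PAGE 408  /* assumed compile-time worst case per page */

typedef struct
{
    int     offset;             /* where the item's data starts on the page */
    int     length;             /* how many bytes it occupies */
} item_ref;

void
compact_items(const item_ref *items, int nitems)
{
    /* Worst-case-sized scratch on the stack: nothing to allocate or free. */
    item_ref    scratch[MAX_ITEMS_PER_PAGE];
    int         i;

    assert(nitems <= MAX_ITEMS_PER_PAGE);

    for (i = 0; i < nitems; i++)
        scratch[i] = items[i];

    /* ... sort scratch[] by offset and rewrite the page from it here ... */
}

Because the per-page item count is bounded at compile time, the stack cost is fixed and known, which is what makes dropping the heap allocation safe.
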