Diffstat (limited to 'src/backend/access')
 -rw-r--r--  src/backend/access/brin/brin_revmap.c      7
 -rw-r--r--  src/backend/access/gin/ginbtree.c          2
 -rw-r--r--  src/backend/access/gin/ginget.c            4
 -rw-r--r--  src/backend/access/gist/gistget.c          1
 -rw-r--r--  src/backend/access/hash/hashsearch.c       6
 -rw-r--r--  src/backend/access/heap/heapam.c           9
 -rw-r--r--  src/backend/access/heap/pruneheap.c      120
 -rw-r--r--  src/backend/access/heap/vacuumlazy.c       5
 -rw-r--r--  src/backend/access/nbtree/nbtsearch.c      9
 -rw-r--r--  src/backend/access/spgist/spgscan.c        1
 10 files changed, 6 insertions(+), 158 deletions(-)
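
Most of the hunks below follow one mechanical pattern: a TestForOldSnapshot() call that sat immediately after a page was read and locked is deleted, with nothing replacing it. A minimal sketch of that pattern, using generic variable names (page, buf, rel, snapshot) rather than any single call site:

    /* before this patch: every freshly locked page was checked for snapshot-too-old */
    page = BufferGetPage(buf);
    TestForOldSnapshot(snapshot, rel, page);

    /* after this patch: the page is used directly; no old-snapshot check remains */
    page = BufferGetPage(buf);

The substantive code changes are concentrated in pruneheap.c and vacuumlazy.c, where the old_snapshot_threshold bookkeeping itself is removed.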
diff --git a/src/backend/access/brin/brin_revmap.c b/src/backend/access/brin/brin_revmap.c
index 84d627cb5c9..956dd927656 100644
--- a/src/backend/access/brin/brin_revmap.c
+++ b/src/backend/access/brin/brin_revmap.c
@@ -79,7 +79,6 @@ brinRevmapInitialize(Relation idxrel, BlockNumber *pagesPerRange,
meta = ReadBuffer(idxrel, BRIN_METAPAGE_BLKNO);
LockBuffer(meta, BUFFER_LOCK_SHARE);
page = BufferGetPage(meta);
- TestForOldSnapshot(snapshot, idxrel, page);
metadata = (BrinMetaPageData *) PageGetContents(page);
revmap = palloc(sizeof(BrinRevmap));
@@ -277,7 +276,6 @@ brinGetTupleForHeapBlock(BrinRevmap *revmap, BlockNumber heapBlk,
}
LockBuffer(*buf, mode);
page = BufferGetPage(*buf);
- TestForOldSnapshot(snapshot, idxRel, page);
/* If we land on a revmap page, start over */
if (BRIN_IS_REGULAR_PAGE(page))
@@ -372,11 +370,6 @@ brinRevmapDesummarizeRange(Relation idxrel, BlockNumber heapBlk)
LockBuffer(regBuf, BUFFER_LOCK_EXCLUSIVE);
regPg = BufferGetPage(regBuf);
- /*
- * We're only removing data, not reading it, so there's no need to
- * TestForOldSnapshot here.
- */
-
/* if this is no longer a regular page, tell caller to start over */
if (!BRIN_IS_REGULAR_PAGE(regPg))
{
diff --git a/src/backend/access/gin/ginbtree.c b/src/backend/access/gin/ginbtree.c
index 35490c72832..7d097c75e05 100644
--- a/src/backend/access/gin/ginbtree.c
+++ b/src/backend/access/gin/ginbtree.c
@@ -100,7 +100,6 @@ ginFindLeafPage(GinBtree btree, bool searchMode,
stack->off = InvalidOffsetNumber;
page = BufferGetPage(stack->buffer);
- TestForOldSnapshot(snapshot, btree->index, page);
access = ginTraverseLock(stack->buffer, searchMode);
@@ -127,7 +126,6 @@ ginFindLeafPage(GinBtree btree, bool searchMode,
stack->buffer = ginStepRight(stack->buffer, btree->index, access);
stack->blkno = rightlink;
page = BufferGetPage(stack->buffer);
- TestForOldSnapshot(snapshot, btree->index, page);
if (!searchMode && GinPageIsIncompleteSplit(page))
ginFinishSplit(btree, stack, false, NULL);
diff --git a/src/backend/access/gin/ginget.c b/src/backend/access/gin/ginget.c
index 1f0214498cd..10d2bb89008 100644
--- a/src/backend/access/gin/ginget.c
+++ b/src/backend/access/gin/ginget.c
@@ -158,7 +158,6 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
return true;
page = BufferGetPage(stack->buffer);
- TestForOldSnapshot(snapshot, btree->index, page);
itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, stack->off));
/*
@@ -1460,7 +1459,6 @@ scanGetCandidate(IndexScanDesc scan, pendingPosition *pos)
for (;;)
{
page = BufferGetPage(pos->pendingBuffer);
- TestForOldSnapshot(scan->xs_snapshot, scan->indexRelation, page);
maxoff = PageGetMaxOffsetNumber(page);
if (pos->firstOffset > maxoff)
@@ -1641,7 +1639,6 @@ collectMatchesForHeapRow(IndexScanDesc scan, pendingPosition *pos)
sizeof(bool) * (pos->lastOffset - pos->firstOffset));
page = BufferGetPage(pos->pendingBuffer);
- TestForOldSnapshot(scan->xs_snapshot, scan->indexRelation, page);
for (i = 0; i < so->nkeys; i++)
{
@@ -1844,7 +1841,6 @@ scanPendingInsert(IndexScanDesc scan, TIDBitmap *tbm, int64 *ntids)
LockBuffer(metabuffer, GIN_SHARE);
page = BufferGetPage(metabuffer);
- TestForOldSnapshot(scan->xs_snapshot, scan->indexRelation, page);
blkno = GinPageGetMeta(page)->head;
/*
diff --git a/src/backend/access/gist/gistget.c b/src/backend/access/gist/gistget.c
index e2c9b5f069c..31349174280 100644
--- a/src/backend/access/gist/gistget.c
+++ b/src/backend/access/gist/gistget.c
@@ -346,7 +346,6 @@ gistScanPage(IndexScanDesc scan, GISTSearchItem *pageItem,
PredicateLockPage(r, BufferGetBlockNumber(buffer), scan->xs_snapshot);
gistcheckpage(scan->indexRelation, buffer);
page = BufferGetPage(buffer);
- TestForOldSnapshot(scan->xs_snapshot, r, page);
opaque = GistPageGetOpaque(page);
/*
diff --git a/src/backend/access/hash/hashsearch.c b/src/backend/access/hash/hashsearch.c
index 9ea2a42a07f..0a031bfd18f 100644
--- a/src/backend/access/hash/hashsearch.c
+++ b/src/backend/access/hash/hashsearch.c
@@ -71,7 +71,6 @@ _hash_next(IndexScanDesc scan, ScanDirection dir)
if (BlockNumberIsValid(blkno))
{
buf = _hash_getbuf(rel, blkno, HASH_READ, LH_OVERFLOW_PAGE);
- TestForOldSnapshot(scan->xs_snapshot, rel, BufferGetPage(buf));
if (!_hash_readpage(scan, &buf, dir))
end_of_scan = true;
}
@@ -91,7 +90,6 @@ _hash_next(IndexScanDesc scan, ScanDirection dir)
{
buf = _hash_getbuf(rel, blkno, HASH_READ,
LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
- TestForOldSnapshot(scan->xs_snapshot, rel, BufferGetPage(buf));
/*
* We always maintain the pin on bucket page for whole scan
@@ -186,7 +184,6 @@ _hash_readnext(IndexScanDesc scan,
if (block_found)
{
*pagep = BufferGetPage(*bufp);
- TestForOldSnapshot(scan->xs_snapshot, rel, *pagep);
*opaquep = HashPageGetOpaque(*pagep);
}
}
@@ -232,7 +229,6 @@ _hash_readprev(IndexScanDesc scan,
*bufp = _hash_getbuf(rel, blkno, HASH_READ,
LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
*pagep = BufferGetPage(*bufp);
- TestForOldSnapshot(scan->xs_snapshot, rel, *pagep);
*opaquep = HashPageGetOpaque(*pagep);
/*
@@ -351,7 +347,6 @@ _hash_first(IndexScanDesc scan, ScanDirection dir)
buf = _hash_getbucketbuf_from_hashkey(rel, hashkey, HASH_READ, NULL);
PredicateLockPage(rel, BufferGetBlockNumber(buf), scan->xs_snapshot);
page = BufferGetPage(buf);
- TestForOldSnapshot(scan->xs_snapshot, rel, page);
opaque = HashPageGetOpaque(page);
bucket = opaque->hasho_bucket;
@@ -387,7 +382,6 @@ _hash_first(IndexScanDesc scan, ScanDirection dir)
LockBuffer(buf, BUFFER_LOCK_UNLOCK);
old_buf = _hash_getbuf(rel, old_blkno, HASH_READ, LH_BUCKET_PAGE);
- TestForOldSnapshot(scan->xs_snapshot, rel, BufferGetPage(old_buf));
/*
* remember the split bucket buffer so as to use it later for
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index dc3c4074ed6..88a123d38a6 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -425,7 +425,6 @@ heapgetpage(TableScanDesc sscan, BlockNumber block)
LockBuffer(buffer, BUFFER_LOCK_SHARE);
page = BufferGetPage(buffer);
- TestForOldSnapshot(snapshot, scan->rs_base.rs_rd, page);
lines = PageGetMaxOffsetNumber(page);
ntup = 0;
@@ -565,8 +564,6 @@ heapgettup_start_page(HeapScanDesc scan, ScanDirection dir, int *linesleft,
/* Caller is responsible for ensuring buffer is locked if needed */
page = BufferGetPage(scan->rs_cbuf);
- TestForOldSnapshot(scan->rs_base.rs_snapshot, scan->rs_base.rs_rd, page);
-
*linesleft = PageGetMaxOffsetNumber(page) - FirstOffsetNumber + 1;
if (ScanDirectionIsForward(dir))
@@ -598,8 +595,6 @@ heapgettup_continue_page(HeapScanDesc scan, ScanDirection dir, int *linesleft,
/* Caller is responsible for ensuring buffer is locked if needed */
page = BufferGetPage(scan->rs_cbuf);
- TestForOldSnapshot(scan->rs_base.rs_snapshot, scan->rs_base.rs_rd, page);
-
if (ScanDirectionIsForward(dir))
{
*lineoff = OffsetNumberNext(scan->rs_coffset);
@@ -864,7 +859,6 @@ heapgettup_pagemode(HeapScanDesc scan,
/* continue from previously returned page/tuple */
block = scan->rs_cblock; /* current page */
page = BufferGetPage(scan->rs_cbuf);
- TestForOldSnapshot(scan->rs_base.rs_snapshot, scan->rs_base.rs_rd, page);
lineindex = scan->rs_cindex + dir;
if (ScanDirectionIsForward(dir))
@@ -884,7 +878,6 @@ heapgettup_pagemode(HeapScanDesc scan,
{
heapgetpage((TableScanDesc) scan, block);
page = BufferGetPage(scan->rs_cbuf);
- TestForOldSnapshot(scan->rs_base.rs_snapshot, scan->rs_base.rs_rd, page);
linesleft = scan->rs_ntuples;
lineindex = ScanDirectionIsForward(dir) ? 0 : linesleft - 1;
@@ -1372,7 +1365,6 @@ heap_fetch(Relation relation,
*/
LockBuffer(buffer, BUFFER_LOCK_SHARE);
page = BufferGetPage(buffer);
- TestForOldSnapshot(snapshot, relation, page);
/*
* We'd better check for out-of-range offnum in case of VACUUM since the
@@ -1663,7 +1655,6 @@ heap_get_latest_tid(TableScanDesc sscan,
buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&ctid));
LockBuffer(buffer, BUFFER_LOCK_SHARE);
page = BufferGetPage(buffer);
- TestForOldSnapshot(snapshot, relation, page);
/*
* Check for bogus item number. This is not treated as an error
diff --git a/src/backend/access/heap/pruneheap.c b/src/backend/access/heap/pruneheap.c
index 47b9e209154..18193efa238 100644
--- a/src/backend/access/heap/pruneheap.c
+++ b/src/backend/access/heap/pruneheap.c
@@ -36,18 +36,6 @@ typedef struct
/* tuple visibility test, initialized for the relation */
GlobalVisState *vistest;
- /*
- * Thresholds set by TransactionIdLimitedForOldSnapshots() if they have
- * been computed (done on demand, and only if
- * OldSnapshotThresholdActive()). The first time a tuple is about to be
- * removed based on the limited horizon, old_snap_used is set to true, and
- * SetOldSnapshotThresholdTimestamp() is called. See
- * heap_prune_satisfies_vacuum().
- */
- TimestampTz old_snap_ts;
- TransactionId old_snap_xmin;
- bool old_snap_used;
-
TransactionId new_prune_xid; /* new prune hint value for page */
TransactionId snapshotConflictHorizon; /* latest xid removed */
int nredirected; /* numbers of entries in arrays below */
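
With the old_snap_ts / old_snap_xmin / old_snap_used fields gone, this part of the pruning state struct reduces to the members visible in the hunk. A partial sketch of the affected region as it reads after the change (members above and below the hunk are unchanged and elided; the closing typedef name PruneState is taken from the function signatures later in this file):

    typedef struct
    {
        /* ... members before this hunk unchanged ... */

        /* tuple visibility test, initialized for the relation */
        GlobalVisState *vistest;

        TransactionId new_prune_xid;            /* new prune hint value for page */
        TransactionId snapshotConflictHorizon;  /* latest xid removed */
        int           nredirected;              /* numbers of entries in arrays below */

        /* ... remaining members unchanged by this patch ... */
    } PruneState;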
@@ -110,8 +98,6 @@ heap_page_prune_opt(Relation relation, Buffer buffer)
Page page = BufferGetPage(buffer);
TransactionId prune_xid;
GlobalVisState *vistest;
- TransactionId limited_xmin = InvalidTransactionId;
- TimestampTz limited_ts = 0;
Size minfree;
/*
@@ -123,15 +109,6 @@ heap_page_prune_opt(Relation relation, Buffer buffer)
return;
/*
- * XXX: Magic to keep old_snapshot_threshold tests appear "working". They
- * currently are broken, and discussion of what to do about them is
- * ongoing. See
- * https://www.postgresql.org/message-id/20200403001235.e6jfdll3gh2ygbuc%40alap3.anarazel.de
- */
- if (old_snapshot_threshold == 0)
- SnapshotTooOldMagicForTest();
-
- /*
* First check whether there's any chance there's something to prune,
* determining the appropriate horizon is a waste if there's no prune_xid
* (i.e. no updates/deletes left potentially dead tuples around).
@@ -143,35 +120,11 @@ heap_page_prune_opt(Relation relation, Buffer buffer)
/*
* Check whether prune_xid indicates that there may be dead rows that can
* be cleaned up.
- *
- * It is OK to check the old snapshot limit before acquiring the cleanup
- * lock because the worst that can happen is that we are not quite as
- * aggressive about the cleanup (by however many transaction IDs are
- * consumed between this point and acquiring the lock). This allows us to
- * save significant overhead in the case where the page is found not to be
- * prunable.
- *
- * Even if old_snapshot_threshold is set, we first check whether the page
- * can be pruned without. Both because
- * TransactionIdLimitedForOldSnapshots() is not cheap, and because not
- * unnecessarily relying on old_snapshot_threshold avoids causing
- * conflicts.
*/
vistest = GlobalVisTestFor(relation);
if (!GlobalVisTestIsRemovableXid(vistest, prune_xid))
- {
- if (!OldSnapshotThresholdActive())
- return;
-
- if (!TransactionIdLimitedForOldSnapshots(GlobalVisTestNonRemovableHorizon(vistest),
- relation,
- &limited_xmin, &limited_ts))
- return;
-
- if (!TransactionIdPrecedes(prune_xid, limited_xmin))
- return;
- }
+ return;
/*
* We prune when a previous UPDATE failed to find enough space on the page
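
After this hunk the gate in heap_page_prune_opt() has no old_snapshot_threshold fallback left; what remains is just the plain horizon test shown in the surrounding context:

    /*
     * Check whether prune_xid indicates that there may be dead rows that can
     * be cleaned up.
     */
    vistest = GlobalVisTestFor(relation);

    if (!GlobalVisTestIsRemovableXid(vistest, prune_xid))
        return;

If prune_xid is not removable under vistest alone, the function now returns immediately instead of consulting TransactionIdLimitedForOldSnapshots().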
@@ -205,8 +158,8 @@ heap_page_prune_opt(Relation relation, Buffer buffer)
int ndeleted,
nnewlpdead;
- ndeleted = heap_page_prune(relation, buffer, vistest, limited_xmin,
- limited_ts, &nnewlpdead, NULL);
+ ndeleted = heap_page_prune(relation, buffer, vistest,
+ &nnewlpdead, NULL);
/*
* Report the number of tuples reclaimed to pgstats. This is
@@ -249,9 +202,7 @@ heap_page_prune_opt(Relation relation, Buffer buffer)
*
* vistest is used to distinguish whether tuples are DEAD or RECENTLY_DEAD
* (see heap_prune_satisfies_vacuum and
- * HeapTupleSatisfiesVacuum). old_snap_xmin / old_snap_ts need to
- * either have been set by TransactionIdLimitedForOldSnapshots, or
- * InvalidTransactionId/0 respectively.
+ * HeapTupleSatisfiesVacuum).
*
* Sets *nnewlpdead for caller, indicating the number of items that were
* newly set LP_DEAD during prune operation.
@@ -264,8 +215,6 @@ heap_page_prune_opt(Relation relation, Buffer buffer)
int
heap_page_prune(Relation relation, Buffer buffer,
GlobalVisState *vistest,
- TransactionId old_snap_xmin,
- TimestampTz old_snap_ts,
int *nnewlpdead,
OffsetNumber *off_loc)
{
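
For reference, the definition's signature as it reads after this hunk; any remaining caller must drop the two removed arguments (the vacuumlazy.c hunk further down does exactly that):

    int
    heap_page_prune(Relation relation, Buffer buffer,
                    GlobalVisState *vistest,
                    int *nnewlpdead,
                    OffsetNumber *off_loc)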
@@ -291,9 +240,6 @@ heap_page_prune(Relation relation, Buffer buffer,
prstate.new_prune_xid = InvalidTransactionId;
prstate.rel = relation;
prstate.vistest = vistest;
- prstate.old_snap_xmin = old_snap_xmin;
- prstate.old_snap_ts = old_snap_ts;
- prstate.old_snap_used = false;
prstate.snapshotConflictHorizon = InvalidTransactionId;
prstate.nredirected = prstate.ndead = prstate.nunused = 0;
memset(prstate.marked, 0, sizeof(prstate.marked));
@@ -481,19 +427,6 @@ heap_page_prune(Relation relation, Buffer buffer,
/*
* Perform visibility checks for heap pruning.
- *
- * This is more complicated than just using GlobalVisTestIsRemovableXid()
- * because of old_snapshot_threshold. We only want to increase the threshold
- * that triggers errors for old snapshots when we actually decide to remove a
- * row based on the limited horizon.
- *
- * Due to its cost we also only want to call
- * TransactionIdLimitedForOldSnapshots() if necessary, i.e. we might not have
- * done so in heap_page_prune_opt() if pd_prune_xid was old enough. But we
- * still want to be able to remove rows that are too new to be removed
- * according to prstate->vistest, but that can be removed based on
- * old_snapshot_threshold. So we call TransactionIdLimitedForOldSnapshots() on
- * demand in here, if appropriate.
*/
static HTSV_Result
heap_prune_satisfies_vacuum(PruneState *prstate, HeapTuple tup, Buffer buffer)
@@ -506,53 +439,8 @@ heap_prune_satisfies_vacuum(PruneState *prstate, HeapTuple tup, Buffer buffer)
if (res != HEAPTUPLE_RECENTLY_DEAD)
return res;
- /*
- * If we are already relying on the limited xmin, there is no need to
- * delay doing so anymore.
- */
- if (prstate->old_snap_used)
- {
- Assert(TransactionIdIsValid(prstate->old_snap_xmin));
-
- if (TransactionIdPrecedes(dead_after, prstate->old_snap_xmin))
- res = HEAPTUPLE_DEAD;
- return res;
- }
-
- /*
- * First check if GlobalVisTestIsRemovableXid() is sufficient to find the
- * row dead. If not, and old_snapshot_threshold is enabled, try to use the
- * lowered horizon.
- */
if (GlobalVisTestIsRemovableXid(prstate->vistest, dead_after))
res = HEAPTUPLE_DEAD;
- else if (OldSnapshotThresholdActive())
- {
- /* haven't determined limited horizon yet, requests */
- if (!TransactionIdIsValid(prstate->old_snap_xmin))
- {
- TransactionId horizon =
- GlobalVisTestNonRemovableHorizon(prstate->vistest);
-
- TransactionIdLimitedForOldSnapshots(horizon, prstate->rel,
- &prstate->old_snap_xmin,
- &prstate->old_snap_ts);
- }
-
- if (TransactionIdIsValid(prstate->old_snap_xmin) &&
- TransactionIdPrecedes(dead_after, prstate->old_snap_xmin))
- {
- /*
- * About to remove row based on snapshot_too_old. Need to raise
- * the threshold so problematic accesses would error.
- */
- Assert(!prstate->old_snap_used);
- SetOldSnapshotThresholdTimestamp(prstate->old_snap_ts,
- prstate->old_snap_xmin);
- prstate->old_snap_used = true;
- res = HEAPTUPLE_DEAD;
- }
- }
return res;
}
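
Taken together, the last two hunks leave heap_prune_satisfies_vacuum() as a thin wrapper around the global-visibility test. A sketch of the whole function after the patch; the lines between the two hunks are not part of the diff, so the local declarations and the HeapTupleSatisfiesVacuumHorizon() call below are reconstructed and should be read as assumptions:

    static HTSV_Result
    heap_prune_satisfies_vacuum(PruneState *prstate, HeapTuple tup, Buffer buffer)
    {
        HTSV_Result   res;
        TransactionId dead_after = InvalidTransactionId;

        /* assumed: the horizon-based visibility check that fills in dead_after */
        res = HeapTupleSatisfiesVacuumHorizon(tup, buffer, &dead_after);

        if (res != HEAPTUPLE_RECENTLY_DEAD)
            return res;

        /* a RECENTLY_DEAD tuple becomes DEAD once dead_after is removable */
        if (GlobalVisTestIsRemovableXid(prstate->vistest, dead_after))
            res = HEAPTUPLE_DEAD;

        return res;
    }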
diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index 6a41ee635d3..1a05adfa61c 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -1588,7 +1588,7 @@ retry:
* that were deleted from indexes.
*/
tuples_deleted = heap_page_prune(rel, buf, vacrel->vistest,
- InvalidTransactionId, 0, &nnewlpdead,
+ &nnewlpdead,
&vacrel->offnum);
/*
@@ -2875,8 +2875,7 @@ should_attempt_truncation(LVRelState *vacrel)
{
BlockNumber possibly_freeable;
- if (!vacrel->do_rel_truncate || VacuumFailsafeActive ||
- old_snapshot_threshold >= 0)
+ if (!vacrel->do_rel_truncate || VacuumFailsafeActive)
return false;
possibly_freeable = vacrel->rel_pages - vacrel->nonempty_pages;
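
As they read after these two hunks, vacuum's prune call and the truncation gate no longer reference the old-snapshot machinery. In lazy_scan_prune:

    tuples_deleted = heap_page_prune(rel, buf, vacrel->vistest,
                                     &nnewlpdead,
                                     &vacrel->offnum);

and in should_attempt_truncation():

    if (!vacrel->do_rel_truncate || VacuumFailsafeActive)
        return false;

The old_snapshot_threshold >= 0 restriction is gone, so this early return now depends only on do_rel_truncate and the failsafe.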
diff --git a/src/backend/access/nbtree/nbtsearch.c b/src/backend/access/nbtree/nbtsearch.c
index 3230b3b8940..1799089fb45 100644
--- a/src/backend/access/nbtree/nbtsearch.c
+++ b/src/backend/access/nbtree/nbtsearch.c
@@ -278,7 +278,6 @@ _bt_moveright(Relation rel,
for (;;)
{
page = BufferGetPage(buf);
- TestForOldSnapshot(snapshot, rel, page);
opaque = BTPageGetOpaque(page);
if (P_RIGHTMOST(opaque))
@@ -2029,7 +2028,6 @@ _bt_readnextpage(IndexScanDesc scan, BlockNumber blkno, ScanDirection dir)
/* step right one page */
so->currPos.buf = _bt_getbuf(rel, blkno, BT_READ);
page = BufferGetPage(so->currPos.buf);
- TestForOldSnapshot(scan->xs_snapshot, rel, page);
opaque = BTPageGetOpaque(page);
/* check for deleted page */
if (!P_IGNORE(opaque))
@@ -2132,7 +2130,6 @@ _bt_readnextpage(IndexScanDesc scan, BlockNumber blkno, ScanDirection dir)
* and do it all again.
*/
page = BufferGetPage(so->currPos.buf);
- TestForOldSnapshot(scan->xs_snapshot, rel, page);
opaque = BTPageGetOpaque(page);
if (!P_IGNORE(opaque))
{
@@ -2238,7 +2235,6 @@ _bt_walk_left(Relation rel, Buffer buf, Snapshot snapshot)
CHECK_FOR_INTERRUPTS();
buf = _bt_getbuf(rel, blkno, BT_READ);
page = BufferGetPage(buf);
- TestForOldSnapshot(snapshot, rel, page);
opaque = BTPageGetOpaque(page);
/*
@@ -2265,14 +2261,12 @@ _bt_walk_left(Relation rel, Buffer buf, Snapshot snapshot)
blkno = opaque->btpo_next;
buf = _bt_relandgetbuf(rel, buf, blkno, BT_READ);
page = BufferGetPage(buf);
- TestForOldSnapshot(snapshot, rel, page);
opaque = BTPageGetOpaque(page);
}
/* Return to the original page to see what's up */
buf = _bt_relandgetbuf(rel, buf, obknum, BT_READ);
page = BufferGetPage(buf);
- TestForOldSnapshot(snapshot, rel, page);
opaque = BTPageGetOpaque(page);
if (P_ISDELETED(opaque))
{
@@ -2290,7 +2284,6 @@ _bt_walk_left(Relation rel, Buffer buf, Snapshot snapshot)
blkno = opaque->btpo_next;
buf = _bt_relandgetbuf(rel, buf, blkno, BT_READ);
page = BufferGetPage(buf);
- TestForOldSnapshot(snapshot, rel, page);
opaque = BTPageGetOpaque(page);
if (!P_ISDELETED(opaque))
break;
@@ -2351,7 +2344,6 @@ _bt_get_endpoint(Relation rel, uint32 level, bool rightmost,
return InvalidBuffer;
page = BufferGetPage(buf);
- TestForOldSnapshot(snapshot, rel, page);
opaque = BTPageGetOpaque(page);
for (;;)
@@ -2371,7 +2363,6 @@ _bt_get_endpoint(Relation rel, uint32 level, bool rightmost,
RelationGetRelationName(rel));
buf = _bt_relandgetbuf(rel, buf, blkno, BT_READ);
page = BufferGetPage(buf);
- TestForOldSnapshot(snapshot, rel, page);
opaque = BTPageGetOpaque(page);
}
diff --git a/src/backend/access/spgist/spgscan.c b/src/backend/access/spgist/spgscan.c
index cbfaf0c00ac..17cab0087fe 100644
--- a/src/backend/access/spgist/spgscan.c
+++ b/src/backend/access/spgist/spgscan.c
@@ -862,7 +862,6 @@ redirect:
/* else new pointer points to the same page, no work needed */
page = BufferGetPage(buffer);
- TestForOldSnapshot(snapshot, index, page);
isnull = SpGistPageStoresNulls(page) ? true : false;