Diffstat (limited to 'src/backend/access/common/indextuple.c')
-rw-r--r--  src/backend/access/common/indextuple.c  69
1 file changed, 65 insertions, 4 deletions
diff --git a/src/backend/access/common/indextuple.c b/src/backend/access/common/indextuple.c
index 6a22b172036..32c0ebb93a4 100644
--- a/src/backend/access/common/indextuple.c
+++ b/src/backend/access/common/indextuple.c
@@ -418,19 +418,80 @@ nocache_index_getattr(IndexTuple tup,
*
* The caller must allocate sufficient storage for the output arrays.
* (INDEX_MAX_KEYS entries should be enough.)
+ *
+ * This is nearly the same as heap_deform_tuple(), but for IndexTuples.
+ * One difference is that the tuple should never have any missing columns.
*/
void
index_deform_tuple(IndexTuple tup, TupleDesc tupleDescriptor,
Datum *values, bool *isnull)
{
- int i;
+ int hasnulls = IndexTupleHasNulls(tup);
+ int natts = tupleDescriptor->natts; /* number of atts to extract */
+ int attnum;
+ char *tp; /* ptr to tuple data */
+ int off; /* offset in tuple data */
+ bits8 *bp; /* ptr to null bitmap in tuple */
+ bool slow = false; /* can we use/set attcacheoff? */
/* Assert to protect callers who allocate fixed-size arrays */
- Assert(tupleDescriptor->natts <= INDEX_MAX_KEYS);
+ Assert(natts <= INDEX_MAX_KEYS);
+
+ /* XXX "knows" t_bits are just after fixed tuple header! */
+ bp = (bits8 *) ((char *) tup + sizeof(IndexTupleData));
+
+ tp = (char *) tup + IndexInfoFindDataOffset(tup->t_info);
+ off = 0;

- for (i = 0; i < tupleDescriptor->natts; i++)
+ for (attnum = 0; attnum < natts; attnum++)
{
- values[i] = index_getattr(tup, i + 1, tupleDescriptor, &isnull[i]);
+ Form_pg_attribute thisatt = TupleDescAttr(tupleDescriptor, attnum);
+
+ if (hasnulls && att_isnull(attnum, bp))
+ {
+ values[attnum] = (Datum) 0;
+ isnull[attnum] = true;
+ slow = true; /* can't use attcacheoff anymore */
+ continue;
+ }
+
+ isnull[attnum] = false;
+
+ if (!slow && thisatt->attcacheoff >= 0)
+ off = thisatt->attcacheoff;
+ else if (thisatt->attlen == -1)
+ {
+ /*
+ * We can only cache the offset for a varlena attribute if the
+ * offset is already suitably aligned, so that there would be no
+ * pad bytes in any case: then the offset will be valid for either
+ * an aligned or unaligned value.
+ */
+ if (!slow &&
+ off == att_align_nominal(off, thisatt->attalign))
+ thisatt->attcacheoff = off;
+ else
+ {
+ off = att_align_pointer(off, thisatt->attalign, -1,
+ tp + off);
+ slow = true;
+ }
+ }
+ else
+ {
+ /* not varlena, so safe to use att_align_nominal */
+ off = att_align_nominal(off, thisatt->attalign);
+
+ if (!slow)
+ thisatt->attcacheoff = off;
+ }
+
+ values[attnum] = fetchatt(thisatt, tp + off);
+
+ off = att_addlength_pointer(off, thisatt->attlen, tp + off);
+
+ if (thisatt->attlen <= 0)
+ slow = true; /* can't use attcacheoff anymore */
}
}
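
For reference, a minimal caller of the rewritten function could look like the sketch below. This is not part of the commit: the function name examine_index_tuple is hypothetical, and the IndexTuple and its TupleDesc are assumed to come from elsewhere (for example an index scan). The fixed-size output arrays follow the INDEX_MAX_KEYS rule stated in the function's header comment.

    /*
     * Hypothetical caller sketch (not part of this commit).  Assumes "itup"
     * and "itupdesc" were obtained elsewhere, e.g. from an index scan.
     */
    #include "postgres.h"
    #include "access/itup.h"

    static void
    examine_index_tuple(IndexTuple itup, TupleDesc itupdesc)
    {
        Datum       values[INDEX_MAX_KEYS];
        bool        isnull[INDEX_MAX_KEYS];
        int         attnum;

        /* Extract every attribute in a single pass over the tuple. */
        index_deform_tuple(itup, itupdesc, values, isnull);

        for (attnum = 0; attnum < itupdesc->natts; attnum++)
        {
            if (isnull[attnum])
                elog(DEBUG1, "attribute %d is NULL", attnum + 1);
            /* otherwise values[attnum] holds the attribute's Datum */
        }
    }

The loop bound is itupdesc->natts, which the new code asserts never exceeds INDEX_MAX_KEYS, so the fixed-size arrays are always large enough. Compared with the old body, which called index_getattr() once per attribute, the new code walks the tuple once and fetches each attribute directly, caching offsets in attcacheoff where possible.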
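The comment in the new code about caching attcacheoff for varlena attributes can be illustrated with a small, self-contained sketch (this is not PostgreSQL code; ALIGN_UP is a stand-in for the nominal-alignment macro). A varlena value may be stored either padded to its nominal alignment or completely unpadded (short 1-byte header), so the offset can only be cached when padding would change nothing either way.

    /* Self-contained illustration of the varlena attcacheoff rule. */
    #include <stdio.h>

    #define ALIGN_UP(off, align)  (((off) + ((align) - 1)) & ~((align) - 1))

    int
    main(void)
    {
        int attalign = 4;           /* nominal alignment of a varlena column */
        int offsets[] = {8, 5};     /* two possible running offsets */

        for (int i = 0; i < 2; i++)
        {
            int off = offsets[i];

            if (off == ALIGN_UP(off, attalign))
                printf("off=%d: already aligned, safe to cache\n", off);
            else
                printf("off=%d: padding might be needed, cannot cache\n", off);
        }
        return 0;
    }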