Reduce memory usage when indexing Hypercore TAM
When building an index on a table that uses the Hypercore TAM, some of
the memory used for decompressing data was not released until the index
build completed. This change reduces memory usage during index builds
by releasing that memory after each compressed batch has been
processed.
erimatnor committed Mar 5, 2025
1 parent 910d8d4 commit c16bd7e
Showing 1 changed file with 19 additions and 3 deletions.
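The change applies the standard PostgreSQL pattern of scoping short-lived allocations to a dedicated memory context that is reset at a natural boundary, here once per compressed batch, so per-batch allocations can no longer accumulate for the whole duration of the index build. Below is a minimal sketch of that pattern using PostgreSQL's MemoryContext API; process_one_batch() is a hypothetical stand-in for the per-batch decompression and indexing work, not code from this commit.

#include "postgres.h"
#include "utils/memutils.h"

extern void process_one_batch(int batchno);	/* hypothetical per-batch work */

static void
process_batches(int nbatches)
{
	/* All per-batch allocations go in here */
	MemoryContext batch_mcxt = AllocSetContextCreate(CurrentMemoryContext,
													 "per-batch data",
													 ALLOCSET_DEFAULT_SIZES);

	for (int i = 0; i < nbatches; i++)
	{
		/* Release everything palloc'd for the previous batch */
		MemoryContextReset(batch_mcxt);

		MemoryContext old_mcxt = MemoryContextSwitchTo(batch_mcxt);
		process_one_batch(i);
		MemoryContextSwitchTo(old_mcxt);
	}

	MemoryContextDelete(batch_mcxt);
}

In the diff below, this shape maps onto hypercore_index_build_callback(): batch_mcxt is switched to and reset at the top of the callback, arrow_from_compressed() allocates its results there instead of in CurrentMemoryContext, and the code switches back to the caller's context before invoking the original index build callback, so that index-side allocations keep their original lifetime.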
22 changes: 19 additions & 3 deletions tsl/src/hypercore/hypercore_handler.c
@@ -2790,6 +2790,7 @@ typedef struct IndexBuildCallbackState
 	Bitmapset *orderby_cols;
 	bool is_segmentby_index;
 	MemoryContext decompression_mcxt;
+	MemoryContext batch_mcxt;
 	ArrowArray **arrow_columns;
 } IndexBuildCallbackState;
 
@@ -2829,6 +2830,9 @@ hypercore_index_build_callback(Relation index, ItemPointer tid, Datum *values, bool *isnull, bool tupleIsAlive, void *state)
 	 * the actual number of rows to indexing
 	 * only one row per segment. */
 
+	MemoryContext old_mcxt = MemoryContextSwitchTo(icstate->batch_mcxt);
+	MemoryContextReset(icstate->batch_mcxt);
+
 	/* Update ntuples for accurate statistics. When building the index, the
 	 * relation's reltuples is updated based on this count. */
 	if (tupleIsAlive)
@@ -2878,7 +2882,7 @@ hypercore_index_build_callback(Relation index, ItemPointer tid, Datum *values, bool *isnull, bool tupleIsAlive, void *state)
 				TupleDescAttr(tupdesc, AttrNumberGetAttrOffset(attno));
 			icstate->arrow_columns[i] = arrow_from_compressed(values[i],
 															  attr->atttypid,
-															  CurrentMemoryContext,
+															  icstate->batch_mcxt,
 															  icstate->decompression_mcxt);
 
 			/* The number of elements in the arrow array should be the
@@ -2911,7 +2915,7 @@ hypercore_index_build_callback(Relation index, ItemPointer tid, Datum *values, bool *isnull, bool tupleIsAlive, void *state)
 	/* The slot is a table slot, not index slot. But we only fill in the
 	 * columns needed for the index and predicate checks. Therefore, make sure
 	 * other columns are initialized to "null" */
-	memset(slot->tts_isnull, true, sizeof(bool) * slot->tts_tupleDescriptor->natts);
+	MemSet(slot->tts_isnull, true, sizeof(bool) * slot->tts_tupleDescriptor->natts);
 	ExecClearTuple(slot);
 
 	for (int colnum = 0; colnum < natts; colnum++)
@@ -2950,6 +2954,9 @@ hypercore_index_build_callback(Relation index, ItemPointer tid, Datum *values, bool *isnull, bool tupleIsAlive, void *state)
 		hypercore_tid_encode(&index_tid, tid, rownum + 1);
 		Assert(!icstate->is_segmentby_index || rownum == 0);
 
+		/* Reset memory for predicate checks */
+		MemoryContextReset(icstate->econtext->ecxt_per_tuple_memory);
+
 		/*
 		 * In a partial index, discard tuples that don't satisfy the
 		 * predicate.
@@ -2963,8 +2970,13 @@ hypercore_index_build_callback(Relation index, ItemPointer tid, Datum *values, bool *isnull, bool tupleIsAlive, void *state)
 			continue;
 		}
 
+		/* Call the original callback on the original memory context */
+		MemoryContextSwitchTo(old_mcxt);
 		icstate->callback(index, &index_tid, values, isnull, tupleIsAlive, icstate->orig_state);
+		MemoryContextSwitchTo(icstate->batch_mcxt);
 	}
+
+	MemoryContextSwitchTo(old_mcxt);
 }
 
 /*
@@ -3125,8 +3137,11 @@ hypercore_index_build_range_scan(Relation relation, Relation indexRelation, IndexInfo *indexInfo, ...)
 		.index_info = indexInfo,
 		.tuple_index = -1,
 		.ntuples = 0,
+		.batch_mcxt = AllocSetContextCreate(CurrentMemoryContext,
+											"Compressed batch for index build",
+											ALLOCSET_DEFAULT_SIZES),
 		.decompression_mcxt = AllocSetContextCreate(CurrentMemoryContext,
-													"bulk decompression",
+													"Bulk decompression for index build",
 													/* minContextSize = */ 0,
 													/* initBlockSize = */ 64 * 1024,
 													/* maxBlockSize = */ 64 * 1024),
@@ -3265,6 +3280,7 @@ hypercore_index_build_range_scan(Relation relation, Relation indexRelation, IndexInfo *indexInfo, ...)
 	FreeExecutorState(icstate.estate);
 	ExecDropSingleTupleTableSlot(icstate.slot);
 	MemoryContextDelete(icstate.decompression_mcxt);
+	MemoryContextDelete(icstate.batch_mcxt);
 	pfree((void *) icstate.arrow_columns);
 	bms_free(icstate.segmentby_cols);
 	bms_free(icstate.orderby_cols);
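A note on the two contexts created above: batch_mcxt uses ALLOCSET_DEFAULT_SIZES and is reset once per batch, while the pre-existing decompression context caps both its initial and maximum block size at 64 kB, keeping the reusable decompression scratch space bounded. To verify during debugging that per-batch memory is actually released over a long build, one could temporarily dump context statistics with PostgreSQL's MemoryContextStats(); the probe and its placement are hypothetical, not part of this commit.

/* Hypothetical debugging probe, e.g. at the top of the build
 * callback: print batch_mcxt usage figures to stderr. */
MemoryContextStats(icstate->batch_mcxt);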
