author     Thomas Munro <tmunro@postgresql.org>  2023-07-04 15:16:34 +1200
committer  Thomas Munro <tmunro@postgresql.org>  2023-07-04 15:21:36 +1200
commit     af8f9ec66bd19a920d02b4c0eb65b3dd6057d324
tree       b7a3dc5945cc2fa2bfeb7dc5b8e1e30d93c8550c
parent     12529028a4e55eb0500477d2589eec625466facf
Re-bin segment when memory pages are freed.
It's OK to be lazy about re-binning memory segments when allocating,
because that can only leave segments in a bin that's too high.  We'll
search higher bins if necessary while allocating next time, and also
eventually re-bin, so no memory can become unreachable that way.

However, when freeing memory, the largest contiguous range of free
pages might go up, so we should re-bin eagerly to make sure we don't
leave the segment in a bin that is too low for get_best_segment() to
find.

The re-binning code is moved into a function of its own, so it can be
called whenever free pages are returned to the segment's free page map.

Back-patch to all supported releases.

Author: Dongming Liu <ldming101@gmail.com>
Reviewed-by: Robert Haas <robertmhaas@gmail.com> (earlier version)
Reviewed-by: Thomas Munro <thomas.munro@gmail.com>
Discussion: https://postgr.es/m/CAL1p7e8LzB2LSeAXo2pXCW4%2BRya9s0sJ3G_ReKOU%3DAjSUWjHWQ%40mail.gmail.com
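For context: dsa.c files each segment in a bin indexed by the largest
contiguous run of free pages it contains, and get_best_segment() starts
its scan at the lowest bin that could hold a big-enough run and walks
upward.  The standalone sketch below is a hypothetical stand-in for
contiguous_pages_to_segment_bin() (the names and bin count are
assumptions, not the real dsa.c definitions), illustrating the
power-of-two binning the message relies on:

#include <stddef.h>
#include <stdio.h>

#define NUM_SEGMENT_BINS 16		/* assumption; stands in for DSA_NUM_SEGMENT_BINS */

/*
 * Hypothetical stand-in for contiguous_pages_to_segment_bin(): bin 0
 * holds segments with no free pages, and bin b holds segments whose
 * largest contiguous free run is in [2^(b-1), 2^b).
 */
static size_t
pages_to_bin(size_t contiguous_pages)
{
	size_t		bin = 0;

	while (contiguous_pages > 0)
	{
		contiguous_pages >>= 1;
		bin++;
	}
	return bin < NUM_SEGMENT_BINS ? bin : NUM_SEGMENT_BINS - 1;
}

int
main(void)
{
	/*
	 * A free that merges runs can grow a segment's largest run from 3
	 * pages (bin 2) to 8 pages (bin 4); until the segment is re-binned,
	 * a search for 8 contiguous pages starting at bin 4 never sees it.
	 */
	printf("3 pages -> bin %zu\n", pages_to_bin(3));	/* bin 2 */
	printf("8 pages -> bin %zu\n", pages_to_bin(8));	/* bin 4 */
	return 0;
}

Under this scheme a segment's bin can only understate its capacity if
the largest free run grew after it was binned, which is exactly what
happens when freed pages merge with an adjacent free run.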
Diffstat (limited to 'src')
-rw-r--r--  src/backend/utils/mmgr/dsa.c | 65
1 file changed, 43 insertions(+), 22 deletions(-)
diff --git a/src/backend/utils/mmgr/dsa.c b/src/backend/utils/mmgr/dsa.c
index 7a3781466ed..8d1aace40ac 100644
--- a/src/backend/utils/mmgr/dsa.c
+++ b/src/backend/utils/mmgr/dsa.c
@@ -418,6 +418,7 @@ static dsa_area *attach_internal(void *place, dsm_segment *segment,
 								 dsa_handle handle);
 static void check_for_freed_segments(dsa_area *area);
 static void check_for_freed_segments_locked(dsa_area *area);
+static void rebin_segment(dsa_area *area, dsa_segment_map *segment_map);
 
 /*
  * Create a new shared area in a new DSM segment. Further DSM segments will
@@ -869,7 +870,11 @@ dsa_free(dsa_area *area, dsa_pointer dp)
 		FreePageManagerPut(segment_map->fpm,
 						   DSA_EXTRACT_OFFSET(span->start) / FPM_PAGE_SIZE,
 						   span->npages);
+
+		/* Move segment to appropriate bin if necessary. */
+		rebin_segment(area, segment_map);
 		LWLockRelease(DSA_AREA_LOCK(area));
+
 		/* Unlink span. */
 		LWLockAcquire(DSA_SCLASS_LOCK(area, DSA_SCLASS_SPAN_LARGE),
 					  LW_EXCLUSIVE);
@@ -1858,6 +1863,11 @@ destroy_superblock(dsa_area *area, dsa_pointer span_pointer)
 			segment_map->mapped_address = NULL;
 		}
 	}
+
+	/* Move segment to appropriate bin if necessary. */
+	if (segment_map->header != NULL)
+		rebin_segment(area, segment_map);
+
 	LWLockRelease(DSA_AREA_LOCK(area));
 
 	/*
@@ -2021,28 +2031,7 @@ get_best_segment(dsa_area *area, size_t npages)
 		/* Re-bin it if it's no longer in the appropriate bin. */
 		if (contiguous_pages < threshold)
 		{
-			size_t		new_bin;
-
-			new_bin = contiguous_pages_to_segment_bin(contiguous_pages);
-
-			/* Remove it from its current bin. */
-			unlink_segment(area, segment_map);
-
-			/* Push it onto the front of its new bin. */
-			segment_map->header->prev = DSA_SEGMENT_INDEX_NONE;
-			segment_map->header->next =
-				area->control->segment_bins[new_bin];
-			segment_map->header->bin = new_bin;
-			area->control->segment_bins[new_bin] = segment_index;
-			if (segment_map->header->next != DSA_SEGMENT_INDEX_NONE)
-			{
-				dsa_segment_map *next;
-
-				next = get_segment_by_index(area,
-											segment_map->header->next);
-				Assert(next->header->bin == new_bin);
-				next->header->prev = segment_index;
-			}
+			rebin_segment(area, segment_map);
 
 			/*
 			 * But fall through to see if it's enough to satisfy this
@@ -2297,3 +2286,35 @@ check_for_freed_segments_locked(dsa_area *area)
 		area->freed_segment_counter = freed_segment_counter;
 	}
 }
+
+/*
+ * Re-bin segment if it's no longer in the appropriate bin.
+ */
+static void
+rebin_segment(dsa_area *area, dsa_segment_map *segment_map)
+{
+	size_t		new_bin;
+	dsa_segment_index segment_index;
+
+	new_bin = contiguous_pages_to_segment_bin(fpm_largest(segment_map->fpm));
+	if (segment_map->header->bin == new_bin)
+		return;
+
+	/* Remove it from its current bin. */
+	unlink_segment(area, segment_map);
+
+	/* Push it onto the front of its new bin. */
+	segment_index = get_segment_index(area, segment_map);
+	segment_map->header->prev = DSA_SEGMENT_INDEX_NONE;
+	segment_map->header->next = area->control->segment_bins[new_bin];
+	segment_map->header->bin = new_bin;
+	area->control->segment_bins[new_bin] = segment_index;
+	if (segment_map->header->next != DSA_SEGMENT_INDEX_NONE)
+	{
+		dsa_segment_map *next;
+
+		next = get_segment_by_index(area, segment_map->header->next);
+		Assert(next->header->bin == new_bin);
+		next->header->prev = segment_index;
+	}
+}
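
To see the fixed failure mode end to end, here is a hedged toy model
(all names are invented; it only mimics the upward bin scan of
get_best_segment(), not the real data structures): a free merges runs
so the segment's largest run grows, but until the segment is re-binned
a search that starts at the higher bin never visits it.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define NUM_BINS 16				/* assumption, for illustration only */

/* Toy segment: just its largest free run and the bin it is filed in. */
typedef struct ToySegment
{
	size_t		largest_run;
	size_t		bin;
} ToySegment;

static size_t
bin_for(size_t pages)
{
	size_t		bin = 0;

	while (pages > 0)
	{
		pages >>= 1;
		bin++;
	}
	return bin < NUM_BINS ? bin : NUM_BINS - 1;
}

/* Mimics get_best_segment(): scan from the first plausible bin upward. */
static bool
can_find(const ToySegment *seg, size_t npages)
{
	for (size_t bin = bin_for(npages); bin < NUM_BINS; bin++)
	{
		if (seg->bin == bin && seg->largest_run >= npages)
			return true;
	}
	return false;
}

int
main(void)
{
	ToySegment	seg = {3, bin_for(3)};

	/* A free merges adjacent runs: largest run grows from 3 to 8 pages. */
	seg.largest_run = 8;
	printf("stale bin:   %s\n", can_find(&seg, 8) ? "found" : "missed");

	/* Eager re-binning, as this commit adds, makes the segment findable. */
	seg.bin = bin_for(seg.largest_run);
	printf("after rebin: %s\n", can_find(&seg, 8) ? "found" : "missed");
	return 0;
}

The real fix calls rebin_segment() while DSA_AREA_LOCK is still held,
right where FreePageManagerPut() returns pages, so the bin is corrected
at the same moment the free page map changes.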