Skip to content

Commit 71afaf8

Browse files
committed
Implement new heuristics
1 parent 879f6d4 commit 71afaf8

File tree

6 files changed

+95
-89
lines changed

6 files changed

+95
-89
lines changed

NEWS.md

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -30,6 +30,7 @@ Compiler/Runtime improvements
3030

3131
* The `@pure` macro is now deprecated. Use `Base.@assume_effects :foldable` instead ([#48682]).
3232
* The mark phase of the Garbage Collector is now multi-threaded ([#48600]).
33+
* Updated GC heuristics to count allocated pages instead of individual objects ([#50144]).
3334
* [JITLink](https://llvm.org/docs/JITLink.html) is enabled by default on Linux aarch64 when Julia is linked to LLVM 15 or later versions ([#49745]).
3435
This should resolve many segmentation faults previously observed on this platform.
3536

doc/src/devdocs/gc.md

Lines changed: 7 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -67,6 +67,10 @@ This scheme eliminates the need of explicitly keeping a flag to indicate a full
6767
## Heuristics
6868

6969
GC heuristics tune the GC by changing the size of the allocation interval between garbage collections.
70-
If a GC was unproductive, then we increase the size of the allocation interval to allow objects more time to die.
71-
If a GC returns a lot of space we can shrink the interval. The goal is to find a steady state where we are
72-
allocating just about the same amount as we are collecting.
70+
71+
The GC heuristics measure how big the heap is after a collection and schedule the next collection for when the heap size has doubled, or when it reaches the maximum allowed heap size, whichever comes first.
72+
The heuristics measure the heap size by counting the number of pages that are in use and the objects that use malloc. Previously we measured the heap size by counting
73+
the live objects, but that did not account for fragmentation, which could lead to bad decisions. It also meant that we used thread-local information (allocations) to make
74+
a process-wide decision (when to GC); measuring pages makes the decision global.
75+
76+
The GC will do full collections when the heap size reaches 80% of the maximum allowed size.

src/gc-debug.c

Lines changed: 19 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,10 @@
11
// This file is a part of Julia. License is MIT: https://julialang.org/license
22

33
#include "gc.h"
4+
#include "julia.h"
45
#include <inttypes.h>
6+
#include <stddef.h>
7+
#include <stdint.h>
58
#include <stdio.h>
69

710
// re-include assert.h without NDEBUG,
@@ -1224,7 +1227,7 @@ JL_DLLEXPORT void jl_enable_gc_logging(int enable) {
12241227
gc_logging_enabled = enable;
12251228
}
12261229

1227-
void _report_gc_finished(uint64_t pause, uint64_t freed, int full, int recollect) JL_NOTSAFEPOINT {
1230+
void _report_gc_finished(uint64_t pause, uint64_t freed, int full, int recollect, int64_t live_bytes) JL_NOTSAFEPOINT {
12281231
if (!gc_logging_enabled) {
12291232
return;
12301233
}
@@ -1233,6 +1236,21 @@ void _report_gc_finished(uint64_t pause, uint64_t freed, int full, int recollect
12331236
full ? "full" : "incr",
12341237
recollect ? "recollect" : ""
12351238
);
1239+
1240+
jl_safe_printf("Heap stats: bytes_mapped %.1f, bytes_decomitted %.1f, bytes_allocd %.1f\nbytes_freed %.1f, bytes_mallocd %.1f, malloc_bytes_freed %.1f\npages_perm_allocd %zu, heap_size %.1f, heap_target %.1f, live_bytes %1.f\n",
1241+
jl_atomic_load_relaxed(&gc_heap_stats.bytes_mapped)/1e6,
1242+
jl_atomic_load_relaxed(&gc_heap_stats.bytes_decomitted)/1e6,
1243+
jl_atomic_load_relaxed(&gc_heap_stats.bytes_allocd)/1e6,
1244+
jl_atomic_load_relaxed(&gc_heap_stats.bytes_freed)/1e6,
1245+
jl_atomic_load_relaxed(&gc_heap_stats.bytes_mallocd)/1e6,
1246+
jl_atomic_load_relaxed(&gc_heap_stats.malloc_bytes_freed)/1e6,
1247+
jl_atomic_load_relaxed(&gc_heap_stats.pages_perm_allocd),
1248+
jl_atomic_load_relaxed(&gc_heap_stats.heap_size)/1e6,
1249+
jl_atomic_load_relaxed(&gc_heap_stats.heap_target)/1e6,
1250+
live_bytes/1e6
1251+
);
1252+
double bytes_mapped = (jl_atomic_load_relaxed(&gc_heap_stats.bytes_mapped) - jl_atomic_load_relaxed(&gc_heap_stats.bytes_decomitted) + jl_atomic_load_relaxed(&gc_heap_stats.bytes_mallocd) - jl_atomic_load_relaxed(&gc_heap_stats.malloc_bytes_freed))/1e6;
1253+
jl_safe_printf("Fragmentation %f, mapped_bytes %1.f\n", (double)live_bytes/(double)jl_atomic_load_relaxed(&gc_heap_stats.heap_size), bytes_mapped);
12361254
}
12371255

12381256
#ifdef __cplusplus

src/gc-pages.c

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -52,6 +52,7 @@ char *jl_gc_try_alloc_pages_(int pg_cnt) JL_NOTSAFEPOINT
5252
// round data pointer up to the nearest gc_page_data-aligned
5353
// boundary if mmap didn't already do so.
5454
mem = (char*)gc_page_data(mem + GC_PAGE_SZ - 1);
55+
jl_atomic_fetch_add_relaxed(&gc_heap_stats.bytes_mapped, pages_sz);
5556
return mem;
5657
}
5758

@@ -180,6 +181,7 @@ void jl_gc_free_page(jl_gc_pagemeta_t *pg) JL_NOTSAFEPOINT
180181
madvise(p, decommit_size, MADV_DONTNEED);
181182
#endif
182183
msan_unpoison(p, decommit_size);
184+
jl_atomic_fetch_add_relaxed(&gc_heap_stats.bytes_decomitted, GC_PAGE_SZ);
183185
}
184186

185187
#ifdef __cplusplus

0 commit comments

Comments
 (0)