
Commit 5ff01bd

mattbork authored and andrewrk committed
gpa: fix memory limit accounting for large allocations
1 parent 544d7d9 commit 5ff01bd

File tree

1 file changed: +59 −40 lines

lib/std/heap/general_purpose_allocator.zig

Lines changed: 59 additions & 40 deletions
@@ -210,6 +210,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
 
         const LargeAlloc = struct {
             bytes: []u8,
+            requested_size: if (config.enable_memory_limit) usize else void,
             stack_addresses: [trace_n][stack_n]usize,
             freed: if (config.retain_metadata) bool else void,
             ptr_align: if (config.never_unmap and config.retain_metadata) u29 else void,
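
The new field relies on Zig's comptime-conditional field pattern: with .enable_memory_limit off it has type void, so it occupies no memory and the bookkeeping costs nothing for configurations that don't ask for it. A minimal standalone sketch of that pattern (Tracked is a hypothetical reduction, not code from this commit):

const std = @import("std");

fn Tracked(comptime enable_memory_limit: bool) type {
    return struct {
        bytes: []u8,
        // void when disabled: the field exists in source but not in memory.
        requested_size: if (enable_memory_limit) usize else void,
    };
}

test "disabled limit adds no bytes to the struct" {
    try std.testing.expect(@sizeOf(Tracked(false)) < @sizeOf(Tracked(true)));
}
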
@@ -528,13 +529,9 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
             if (config.retain_metadata and entry.value_ptr.freed) {
                 if (config.safety) {
                     reportDoubleFree(ret_addr, entry.value_ptr.getStackTrace(.alloc), entry.value_ptr.getStackTrace(.free));
-                    if (new_size == 0) {
-                        // Recoverable. Restore self.total_requested_bytes if needed.
-                        if (config.enable_memory_limit) {
-                            self.total_requested_bytes += old_mem.len;
-                        }
+                    // Recoverable if this is a free.
+                    if (new_size == 0)
                         return @as(usize, 0);
-                    }
                     @panic("Unrecoverable double free");
                 } else {
                     unreachable;
@@ -556,7 +553,29 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
                 });
             }
 
-            const result_len = if (config.never_unmap and new_size == 0) 0 else try self.backing_allocator.resizeFn(self.backing_allocator, old_mem, old_align, new_size, len_align, ret_addr);
+            // Do memory limit accounting with requested sizes rather than what backing_allocator returns
+            // because if we want to return error.OutOfMemory, we have to leave allocation untouched, and
+            // that is impossible to guarantee after calling backing_allocator.resizeFn.
+            const prev_req_bytes = self.total_requested_bytes;
+            if (config.enable_memory_limit) {
+                const new_req_bytes = prev_req_bytes + new_size - entry.value_ptr.requested_size;
+                if (new_req_bytes > prev_req_bytes and new_req_bytes > self.requested_memory_limit) {
+                    return error.OutOfMemory;
+                }
+                self.total_requested_bytes = new_req_bytes;
+            }
+            errdefer if (config.enable_memory_limit) {
+                self.total_requested_bytes = prev_req_bytes;
+            };
+
+            const result_len = if (config.never_unmap and new_size == 0)
+                0
+            else
+                try self.backing_allocator.resizeFn(self.backing_allocator, old_mem, old_align, new_size, len_align, ret_addr);
+
+            if (config.enable_memory_limit) {
+                entry.value_ptr.requested_size = new_size;
+            }
 
             if (result_len == 0) {
                 if (config.verbose_log) {
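
This hunk is the heart of the fix: the limit check runs against requested sizes and completes before backing_allocator.resizeFn is called, with errdefer rolling the accounting back if that call fails, since nothing can be undone afterwards. The same block reappears below for the small-allocation path, after the double-free check, so the recoverable free return no longer has to restore the counter. A self-contained sketch of that ordering, with hypothetical stand-ins (total, limit, growBacking) for the allocator's state:

const std = @import("std");

const Error = error{OutOfMemory};

var total: usize = 0;
const limit: usize = 100;
var backing_should_fail = false;

fn growBacking(new_size: usize) Error!void {
    // Stand-in for backing_allocator.resizeFn: its effects, when it
    // succeeds, cannot be reverted by the caller.
    _ = new_size;
    if (backing_should_fail) return error.OutOfMemory;
}

fn resizeAccounted(old_size: usize, new_size: usize) Error!void {
    const prev = total;
    const new_total = prev + new_size - old_size;
    // Reject before touching the allocation, so failure leaves it intact.
    if (new_total > prev and new_total > limit) return error.OutOfMemory;
    total = new_total;
    // If the irreversible backing call fails below, restore the accounting.
    errdefer total = prev;
    try growBacking(new_size);
}

test "accounting is checked first and rolled back on backing failure" {
    total = 90;
    // Over the limit: rejected before the irreversible backing call runs.
    try std.testing.expectError(error.OutOfMemory, resizeAccounted(10, 50));
    try std.testing.expect(total == 90);
    // Within the limit but the backing call fails: errdefer restores total.
    backing_should_fail = true;
    try std.testing.expectError(error.OutOfMemory, resizeAccounted(10, 15));
    try std.testing.expect(total == 90);
}
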
@@ -599,18 +618,6 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
             const held = self.mutex.acquire();
             defer held.release();
 
-            const prev_req_bytes = self.total_requested_bytes;
-            if (config.enable_memory_limit) {
-                const new_req_bytes = prev_req_bytes + new_size - old_mem.len;
-                if (new_req_bytes > prev_req_bytes and new_req_bytes > self.requested_memory_limit) {
-                    return error.OutOfMemory;
-                }
-                self.total_requested_bytes = new_req_bytes;
-            }
-            errdefer if (config.enable_memory_limit) {
-                self.total_requested_bytes = prev_req_bytes;
-            };
-
             assert(old_mem.len != 0);
 
             const aligned_size = math.max(old_mem.len, old_align);
@@ -651,19 +658,28 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
             if (!is_used) {
                 if (config.safety) {
                     reportDoubleFree(ret_addr, bucketStackTrace(bucket, size_class, slot_index, .alloc), bucketStackTrace(bucket, size_class, slot_index, .free));
-                    if (new_size == 0) {
-                        // Recoverable. Restore self.total_requested_bytes if needed, as we
-                        // don't return an error value so the errdefer above does not run.
-                        if (config.enable_memory_limit) {
-                            self.total_requested_bytes = prev_req_bytes;
-                        }
+                    // Recoverable if this is a free.
+                    if (new_size == 0)
                         return @as(usize, 0);
-                    }
                     @panic("Unrecoverable double free");
                 } else {
                     unreachable;
                 }
             }
+
+            // Definitely an in-use small alloc now.
+            const prev_req_bytes = self.total_requested_bytes;
+            if (config.enable_memory_limit) {
+                const new_req_bytes = prev_req_bytes + new_size - old_mem.len;
+                if (new_req_bytes > prev_req_bytes and new_req_bytes > self.requested_memory_limit) {
+                    return error.OutOfMemory;
+                }
+                self.total_requested_bytes = new_req_bytes;
+            }
+            errdefer if (config.enable_memory_limit) {
+                self.total_requested_bytes = prev_req_bytes;
+            };
+
             if (new_size == 0) {
                 // Capture stack trace to be the "first free", in case a double free happens.
                 bucket.captureStackTrace(ret_addr, size_class, slot_index, .free);
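
A subtlety in the expression prev_req_bytes + new_size - old_mem.len: it is usize arithmetic, and the new_req_bytes > prev_req_bytes guard means the limit is only consulted when the request grows. A shrink can never newly exceed the limit, even when the total already sits above it (for instance if the limit was lowered after earlier allocations, assuming the field is adjusted directly). A standalone worked check of the guard:

const std = @import("std");

test "the grow-only guard: shrinking never trips the limit" {
    const limit: usize = 100;
    const prev: usize = 150; // already over the limit
    // Shrink a 60-byte allocation down to 10 bytes.
    const new_req = prev + 10 - 60;
    try std.testing.expect(new_req == 100);
    // new_req > prev is false, so no OutOfMemory despite new_req == limit.
    try std.testing.expect(!(new_req > prev and new_req > limit));
}
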
@@ -745,21 +761,15 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
             const held = self.mutex.acquire();
             defer held.release();
 
+            if (!self.isAllocationAllowed(len)) {
+                return error.OutOfMemory;
+            }
+
             const new_aligned_size = math.max(len, ptr_align);
             if (new_aligned_size > largest_bucket_object_size) {
                 try self.large_allocations.ensureUnusedCapacity(self.backing_allocator, 1);
-
                 const slice = try self.backing_allocator.allocFn(self.backing_allocator, len, ptr_align, len_align, ret_addr);
 
-                // The backing allocator may return a memory block bigger than
-                // `len`, use the effective size for bookkeeping purposes
-                if (!self.isAllocationAllowed(slice.len)) {
-                    // Free the block so no memory is leaked
-                    const new_len = try self.backing_allocator.resizeFn(self.backing_allocator, slice, ptr_align, 0, 0, ret_addr);
-                    assert(new_len == 0);
-                    return error.OutOfMemory;
-                }
-
                 const gop = self.large_allocations.getOrPutAssumeCapacity(@ptrToInt(slice.ptr));
                 if (config.retain_metadata and !config.never_unmap) {
                     // Backing allocator may be reusing memory that we're retaining metadata for
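
With the slice.len check gone from the large path, isAllocationAllowed(len) now gates both allocation paths once, up front, against the requested len rather than whatever effective size the backing allocator hands back. A self-contained reduction of what such a check has to do (Limiter is hypothetical; the real function lives on the allocator and is a no-op unless config.enable_memory_limit is set):

const std = @import("std");

const Limiter = struct {
    total_requested_bytes: usize = 0,
    requested_memory_limit: usize,

    // Check the *requested* size against the limit and record it in one
    // step; the effective size the backing allocator returns is ignored
    // for accounting, which is the point of this commit.
    fn isAllocationAllowed(self: *Limiter, size: usize) bool {
        const new_req_bytes = self.total_requested_bytes + size;
        if (new_req_bytes > self.requested_memory_limit) return false;
        self.total_requested_bytes = new_req_bytes;
        return true;
    }
};

test "requested size, not backing size, is what counts" {
    var l = Limiter{ .requested_memory_limit = 100 };
    try std.testing.expect(l.isAllocationAllowed(60));
    try std.testing.expect(!l.isAllocationAllowed(60));
    try std.testing.expect(l.total_requested_bytes == 60);
}
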
@@ -768,6 +778,8 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
                     assert(!gop.found_existing); // This would mean the kernel double-mapped pages.
                 }
                 gop.value_ptr.bytes = slice;
+                if (config.enable_memory_limit)
+                    gop.value_ptr.requested_size = len;
                 gop.value_ptr.captureStackTrace(ret_addr, .alloc);
                 if (config.retain_metadata) {
                     gop.value_ptr.freed = false;
@@ -782,10 +794,6 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
                 return slice;
             }
 
-            if (!self.isAllocationAllowed(len)) {
-                return error.OutOfMemory;
-            }
-
             const new_size_class = math.ceilPowerOfTwoAssert(usize, new_aligned_size);
             const ptr = try self.allocSlot(new_size_class, ret_addr);
             if (config.verbose_log) {
@@ -1183,3 +1191,14 @@ test "double frees" {
     try std.testing.expect(gpa.large_allocations.contains(@ptrToInt(normal_large.ptr)));
     try std.testing.expect(!gpa.large_allocations.contains(@ptrToInt(large.ptr)));
 }
+
+test "bug 9995 fix, large allocs count requested size not backing size" {
+    // with AtLeast, buffer likely to be larger than requested, especially when shrinking
+    var gpa = GeneralPurposeAllocator(.{ .enable_memory_limit = true }){};
+    var buf = try gpa.allocator.allocAdvanced(u8, 1, page_size + 1, .at_least);
+    try std.testing.expect(gpa.total_requested_bytes == page_size + 1);
+    buf = try gpa.allocator.reallocAtLeast(buf, 1);
+    try std.testing.expect(gpa.total_requested_bytes == 1);
+    buf = try gpa.allocator.reallocAtLeast(buf, 2);
+    try std.testing.expect(gpa.total_requested_bytes == 2);
+}
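
The regression test drives the fix through the resize path; the limit itself can be exercised the same way. A hedged sketch in the same in-file test style, assuming requested_memory_limit can be assigned directly when .enable_memory_limit is set and reusing page_size from this file's scope:

test "sketch: limit applies to the requested size of a large alloc" {
    var gpa = GeneralPurposeAllocator(.{ .enable_memory_limit = true }){};
    gpa.requested_memory_limit = page_size + 1;

    // The backing allocation is page-granular, but only page_size + 1
    // bytes count against the limit.
    const buf = try gpa.allocator.alloc(u8, page_size + 1);
    try std.testing.expect(gpa.total_requested_bytes == page_size + 1);

    // The limit is exhausted; even a 1-byte request must fail now.
    try std.testing.expectError(error.OutOfMemory, gpa.allocator.alloc(u8, 1));

    gpa.allocator.free(buf);
    try std.testing.expect(gpa.total_requested_bytes == 0);
}
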
