6 changes: 6 additions & 0 deletions src/native/libs/System.IO.Compression.Native/CMakeLists.txt
@@ -36,6 +36,12 @@ set(NATIVECOMPRESSION_SOURCES
pal_zlib.c
)

if (HOST_WIN32 OR CLR_CMAKE_TARGET_WIN32)
list(APPEND NATIVECOMPRESSION_SOURCES "zlib_allocator_win.c")
else()
list(APPEND NATIVECOMPRESSION_SOURCES "zlib_allocator_unix.c")
endif()

if (NOT CLR_CMAKE_TARGET_BROWSER AND NOT CLR_CMAKE_TARGET_WASI)
set (NATIVECOMPRESSION_SOURCES
${NATIVECOMPRESSION_SOURCES}
4 changes: 4 additions & 0 deletions src/native/libs/System.IO.Compression.Native/pal_zlib.c
@@ -11,6 +11,7 @@
#else
#include "pal_utilities.h"
#endif
#include <zlib_allocator.h>
#include <zlib.h>

c_static_assert(PAL_Z_NOFLUSH == Z_NO_FLUSH);
@@ -39,6 +39,9 @@ static int32_t Init(PAL_ZStream* stream)
{
z_stream* zStream = (z_stream*)calloc(1, sizeof(z_stream));

// Install the custom allocator hooks only if the allocation succeeded;
// the existing NULL check below still handles the out-of-memory case.
if (zStream != NULL)
{
    zStream->zalloc = z_custom_calloc;
    zStream->zfree = z_custom_cfree;
}

stream->internalState = zStream;

if (zStream != NULL)
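For readers unfamiliar with the hook mechanism: z_stream exposes zalloc, zfree, and opaque fields, and deflateInit/inflateInit only fall back to the built-in allocator when those fields are left NULL, so assigning them before Init calls into zlib routes every internal allocation through the custom allocator. A minimal sketch against the public zlib API (the function name and the elided compression loop are illustrative, not part of this change):

#include <zlib.h>
#include <zlib_allocator.h>

/* Sketch: route all of zlib's internal allocations through the custom allocator. */
static int deflate_with_custom_allocator(void)
{
    z_stream zs = {0};
    zs.zalloc = z_custom_calloc; /* zlib calls this for every internal allocation */
    zs.zfree  = z_custom_cfree;  /* and this for every internal free */
    zs.opaque = NULL;            /* passed through to both hooks; unused here */

    if (deflateInit(&zs, Z_DEFAULT_COMPRESSION) != Z_OK)
        return -1;

    /* ... feed zs.next_in / zs.avail_in and drain zs.next_out / zs.avail_out ... */

    deflateEnd(&zs); /* internal state is released through z_custom_cfree */
    return 0;
}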
8 changes: 8 additions & 0 deletions src/native/libs/System.IO.Compression.Native/zlib_allocator.h
@@ -0,0 +1,8 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

#include <zconf.h> // voidpf

voidpf z_custom_calloc(voidpf opaque, unsigned items, unsigned size);

void z_custom_cfree(voidpf opaque, voidpf ptr);
151 changes: 151 additions & 0 deletions src/native/libs/System.IO.Compression.Native/zlib_allocator_unix.c
@@ -0,0 +1,151 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <stdlib.h>
#include <zconf.h>
#include <zlib_allocator.h>

/* A custom allocator for zlib that provides some defense-in-depth over standard malloc / free.
* (non-Windows version)
*
* 1. When zlib allocates fixed-length data structures for containing stream metadata, we zero
* the memory before using it, preventing use of uninitialized memory within these structures.
* Ideally we would do this for dynamically-sized buffers as well, but there is a measurable
* perf impact to doing this. Zeroing fixed structures seems like a good trade-off here, since
* these data structures contain most of the metadata used for managing the variable-length
* dynamically allocated buffers.
*
* 2. We put a cookie both before and after any allocated memory, which allows us to detect local
* buffer overruns on the call to free(). The cookie values are tied to the addresses where
* the data is located in memory.
*
* 3. We trash the aforementioned cookie on free(), which allows us to detect double-free.
*
* If any of these checks fails, the application raises SIGABRT.
*/

#ifndef MEMORY_ALLOCATION_ALIGNMENT
// malloc() returns an address suitably aligned for any built-in data type.
// Historically, this has been twice the arch's natural word size.
#ifdef HOST_64BIT
#define MEMORY_ALLOCATION_ALIGNMENT 16
#else
#define MEMORY_ALLOCATION_ALIGNMENT 8
#endif
#endif

typedef struct _DOTNET_ALLOC_COOKIE
{
void* Address;
size_t Size;
} DOTNET_ALLOC_COOKIE;

static bool SafeAdd(size_t a, size_t b, size_t* sum)
{
if (SIZE_MAX - a >= b) { *sum = a + b; return true; }
else { *sum = 0; return false; }
}

static bool SafeMult(size_t a, size_t b, size_t* product)
{
if (a == 0 || SIZE_MAX / a >= b) { *product = a * b; return true; } // a == 0 guard avoids division by zero
else { *product = 0; return false; }
}

static DOTNET_ALLOC_COOKIE ReadAllocCookieUnaligned(const void* pSrc)
{
DOTNET_ALLOC_COOKIE vCookie;
memcpy(&vCookie, pSrc, sizeof(DOTNET_ALLOC_COOKIE));
return vCookie;
}

static void WriteAllocCookieUnaligned(void* pDest, DOTNET_ALLOC_COOKIE vCookie)
{
memcpy(pDest, &vCookie, sizeof(DOTNET_ALLOC_COOKIE));
}

// Historically, the memory allocator always returns addresses aligned to some
// particular boundary. We'll make that same guarantee here just in case somebody
// depends on it.
const size_t DOTNET_ALLOC_HEADER_COOKIE_SIZE_WITH_PADDING = (sizeof(DOTNET_ALLOC_COOKIE) + MEMORY_ALLOCATION_ALIGNMENT - 1) & ~((size_t)MEMORY_ALLOCATION_ALIGNMENT - 1);
const size_t DOTNET_ALLOC_TRAILER_COOKIE_SIZE = sizeof(DOTNET_ALLOC_COOKIE);
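// Worked example: with the defaults above, a 64-bit build has a 16-byte cookie
// (8-byte pointer + 8-byte size) and 16-byte alignment, so the padded header size
// is (16 + 15) & ~15 = 16; a 32-bit build has an 8-byte cookie and 8-byte alignment,
// giving (8 + 7) & ~7 = 8. The trailer cookie is never padded.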

voidpf z_custom_calloc(voidpf opaque, unsigned items, unsigned size)
{
(void)opaque; // unreferenced formal parameter

// If initializing a fixed-size structure, zero the memory.
bool fZeroMemory = (items == 1);

size_t cbRequested;
if (sizeof(items) + sizeof(size) <= sizeof(cbRequested))
{
// multiplication can't overflow; no need for safeint
cbRequested = (size_t)items * (size_t)size;
}
else
{
// multiplication can overflow; go through safeint
if (!SafeMult((size_t)items, (size_t)size, &cbRequested)) { return NULL; }
}

// Make sure the actual allocation has enough room for our frontside & backside cookies.
size_t cbActualAllocationSize;
if (!SafeAdd(cbRequested, DOTNET_ALLOC_HEADER_COOKIE_SIZE_WITH_PADDING + DOTNET_ALLOC_TRAILER_COOKIE_SIZE, &cbActualAllocationSize)) { return NULL; }

void* pAlloced = (fZeroMemory) ? calloc(1, cbActualAllocationSize) : malloc(cbActualAllocationSize);
if (pAlloced == NULL) { return NULL; } // OOM

DOTNET_ALLOC_COOKIE* pHeaderCookie = (DOTNET_ALLOC_COOKIE*)pAlloced;
uint8_t* pReturnToCaller = (uint8_t*)pAlloced + DOTNET_ALLOC_HEADER_COOKIE_SIZE_WITH_PADDING;
uint8_t* pTrailerCookie = pReturnToCaller + cbRequested;

// Write out the same cookie for the header & the trailer, then we're done.

DOTNET_ALLOC_COOKIE vCookie = { 0 };
vCookie.Address = pReturnToCaller;
vCookie.Size = cbRequested;
*pHeaderCookie = vCookie; // aligned
WriteAllocCookieUnaligned(pTrailerCookie, vCookie);

return pReturnToCaller;
}

static void zcfree_trash_cookie(void* pCookie)
{
memset(pCookie, 0, sizeof(DOTNET_ALLOC_COOKIE));
}

void z_custom_cfree(voidpf opaque, voidpf ptr)
{
(void)opaque; // unreferenced formal parameter

if (ptr == NULL) { return; } // ok to free nullptr

// Check cookie at beginning

DOTNET_ALLOC_COOKIE* pHeaderCookie = (DOTNET_ALLOC_COOKIE*)((uint8_t*)ptr - DOTNET_ALLOC_HEADER_COOKIE_SIZE_WITH_PADDING);
if (pHeaderCookie->Address != ptr) { goto Fail; }
size_t cbRequested = pHeaderCookie->Size;

// Check cookie at end

uint8_t* pTrailerCookie = (uint8_t*)ptr + cbRequested;
DOTNET_ALLOC_COOKIE vTrailerCookie = ReadAllocCookieUnaligned(pTrailerCookie);
if (vTrailerCookie.Address != ptr) { goto Fail; }
if (vTrailerCookie.Size != cbRequested) { goto Fail; }

// Checks passed - now trash the cookies and free memory

zcfree_trash_cookie(pHeaderCookie);
zcfree_trash_cookie(pTrailerCookie);

free(pHeaderCookie);
return;

Fail:
abort(); // cookie check failed
}
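To make the failure modes concrete, here is a minimal harness (purely illustrative, not part of this change) that exercises the Unix allocator the way zlib would: in-bounds writes leave both cookies intact and the free succeeds, while a single-byte overrun clobbers the trailer cookie and the next z_custom_cfree aborts the process.

#include <string.h>
#include <zlib_allocator.h>

int main(void)
{
    /* 4 * 64 = 256 requested bytes; header and trailer cookies surround them. */
    unsigned char* p = (unsigned char*)z_custom_calloc(NULL, 4, 64);
    if (p == NULL) return 1;

    memset(p, 0xAB, 4 * 64); /* writes within the requested size: both cookies stay valid */
    z_custom_cfree(NULL, p); /* cookie checks pass, cookies are trashed, memory is freed */

    p = (unsigned char*)z_custom_calloc(NULL, 4, 64);
    if (p == NULL) return 1;
    p[4 * 64] = 0xFF;        /* one byte past the end overwrites the trailer cookie */
    z_custom_cfree(NULL, p); /* trailer check fails -> abort() raises SIGABRT */
    return 0;
}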
180 changes: 180 additions & 0 deletions src/native/libs/System.IO.Compression.Native/zlib_allocator_win.c
@@ -0,0 +1,180 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

#include <Windows.h>
#include <heapapi.h>
#include <intsafe.h>
#include <winnt.h>
#include <crtdbg.h> /* _ASSERTE */

#include <string.h>
#include <stdlib.h>
#include <zconf.h>
#include <zlib_allocator.h>

/* A custom allocator for zlib that provides some defense-in-depth over standard malloc / free.
* (Windows-specific version)
*
* 1. In 64-bit processes, we use a custom heap rather than relying on the standard process heap.
* This should cause zlib's buffers to go into a separate address range from the rest of app
* data, making it more difficult for buffer overruns to affect non-zlib-related data structures.
*
* 2. When zlib allocates fixed-length data structures for containing stream metadata, we zero
* the memory before using it, preventing use of uninitialized memory within these structures.
* Ideally we would do this for dynamically-sized buffers as well, but there is a measurable
* perf impact to doing this. Zeroing fixed structures seems like a good trade-off here, since
* these data structures contain most of the metadata used for managing the variable-length
* dynamically allocated buffers.
*
* 3. We put a cookie both before and after any allocated memory, which allows us to detect local
* buffer overruns on the call to free(). The cookie values are enciphered to make it more
* difficult for somebody to guess a correct value.
*
* 4. We trash the aforementioned cookie on free(), which allows us to detect double-free.
*
* If any of these checks fails, the application terminates immediately, optionally triggering a
* crash dump. We use a special code that's easy to search for in Watson.
*/

// Gets the special heap we'll allocate from.
HANDLE GetZlibHeap()
{
#ifdef _WIN64
static HANDLE s_hPublishedHeap = NULL;

// If already initialized, return immediately.
// We don't need a volatile read here since the publish is performed with release semantics.
if (s_hPublishedHeap != NULL) { return s_hPublishedHeap; }

// Attempt to create a new heap. The heap will be dynamically sized.
HANDLE hNewHeap = HeapCreate(0, 0, 0);

if (hNewHeap != NULL)
{
// We created a new heap. Attempt to publish it.
if (InterlockedCompareExchangePointer(&s_hPublishedHeap, hNewHeap, NULL) != NULL)
{
HeapDestroy(hNewHeap); // Somebody published before us. Destroy our heap.
hNewHeap = NULL; // Guard against accidental use later in the method.
}
}
else
{
// If we can't create a new heap, fall back to the process default heap.
InterlockedCompareExchangePointer(&s_hPublishedHeap, GetProcessHeap(), NULL);
}

// Some thread - perhaps us, perhaps somebody else - published the heap. Return it.
// We don't need a volatile read here since the publish is performed with release semantics.
_ASSERTE(s_hPublishedHeap != NULL);
return s_hPublishedHeap;
#else
// We don't want to create a new heap in a 32-bit process because it could end up
// reserving too much of the address space. Instead, fall back to the normal process heap.
return GetProcessHeap();
#endif
}

typedef struct _DOTNET_ALLOC_COOKIE
{
PVOID CookieValue;
union _Size
{
SIZE_T RawValue;
LPVOID EncodedValue;
} Size;
} DOTNET_ALLOC_COOKIE;

// Historically, the Windows memory allocator always returns addresses aligned to some
// particular boundary. We'll make that same guarantee here just in case somebody
// depends on it.
const SIZE_T DOTNET_ALLOC_HEADER_COOKIE_SIZE_WITH_PADDING = (sizeof(DOTNET_ALLOC_COOKIE) + MEMORY_ALLOCATION_ALIGNMENT - 1) & ~((SIZE_T)MEMORY_ALLOCATION_ALIGNMENT - 1);
const SIZE_T DOTNET_ALLOC_TRAILER_COOKIE_SIZE = sizeof(DOTNET_ALLOC_COOKIE);

voidpf z_custom_calloc(voidpf opaque, unsigned items, unsigned size)
{
(void)opaque; // suppress C4100 - unreferenced formal parameter

// If initializing a fixed-size structure, zero the memory.
DWORD dwFlags = (items == 1) ? HEAP_ZERO_MEMORY : 0;

SIZE_T cbRequested;
if (sizeof(items) + sizeof(size) <= sizeof(cbRequested))
{
// multiplication can't overflow; no need for safeint
cbRequested = (SIZE_T)items * (SIZE_T)size;
}
else
{
// multiplication can overflow; go through safeint
if (FAILED(SIZETMult(items, size, &cbRequested))) { return NULL; }
}

// Make sure the actual allocation has enough room for our frontside & backside cookies.
SIZE_T cbActualAllocationSize;
if (FAILED(SIZETAdd(cbRequested, DOTNET_ALLOC_HEADER_COOKIE_SIZE_WITH_PADDING + DOTNET_ALLOC_TRAILER_COOKIE_SIZE, &cbActualAllocationSize))) { return NULL; }

LPVOID pAlloced = HeapAlloc(GetZlibHeap(), dwFlags, cbActualAllocationSize);
if (pAlloced == NULL) { return NULL; } // OOM

// Now set the header & trailer cookies
DOTNET_ALLOC_COOKIE* pHeaderCookie = (DOTNET_ALLOC_COOKIE*)pAlloced;
pHeaderCookie->CookieValue = EncodePointer(&pHeaderCookie->CookieValue);
pHeaderCookie->Size.RawValue = cbRequested;

LPBYTE pReturnToCaller = (LPBYTE)pHeaderCookie + DOTNET_ALLOC_HEADER_COOKIE_SIZE_WITH_PADDING;

UNALIGNED DOTNET_ALLOC_COOKIE* pTrailerCookie = (UNALIGNED DOTNET_ALLOC_COOKIE*)(pReturnToCaller + cbRequested);
pTrailerCookie->CookieValue = EncodePointer(&pTrailerCookie->CookieValue);
pTrailerCookie->Size.EncodedValue = EncodePointer((PVOID)cbRequested);

return pReturnToCaller;
}

FORCEINLINE
void zcfree_trash_cookie(UNALIGNED DOTNET_ALLOC_COOKIE* pCookie)
{
memset(pCookie, 0, sizeof(*pCookie));
pCookie->CookieValue = (PVOID)(SIZE_T)0xDEADBEEF;
}

// Marked noinline to keep it on the call stack during crash reports.
DECLSPEC_NOINLINE
DECLSPEC_NORETURN
void zcfree_cookie_check_failed()
{
__fastfail(FAST_FAIL_HEAP_METADATA_CORRUPTION);
}

void z_custom_cfree(voidpf opaque, voidpf ptr)
{
(void)opaque; // suppress C4100 - unreferenced formal parameter

if (ptr == NULL) { return; } // ok to free nullptr

// Check cookie at beginning and end

DOTNET_ALLOC_COOKIE* pHeaderCookie = (DOTNET_ALLOC_COOKIE*)((LPBYTE)ptr - DOTNET_ALLOC_HEADER_COOKIE_SIZE_WITH_PADDING);
if (DecodePointer(pHeaderCookie->CookieValue) != &pHeaderCookie->CookieValue) { goto Fail; }
SIZE_T cbRequested = pHeaderCookie->Size.RawValue;

UNALIGNED DOTNET_ALLOC_COOKIE* pTrailerCookie = (UNALIGNED DOTNET_ALLOC_COOKIE*)((LPBYTE)ptr + cbRequested);
if (DecodePointer(pTrailerCookie->CookieValue) != &pTrailerCookie->CookieValue) { goto Fail; }
if (DecodePointer(pTrailerCookie->Size.EncodedValue) != (LPVOID)cbRequested) { goto Fail; }

// Checks passed - now trash the cookies and free memory

zcfree_trash_cookie(pHeaderCookie);
zcfree_trash_cookie(pTrailerCookie);

if (!HeapFree(GetZlibHeap(), 0, pHeaderCookie)) { goto Fail; }
return;

Fail:
zcfree_cookie_check_failed();
}
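One Windows-specific detail worth calling out: unlike the Unix version, the cookie fields here are enciphered with EncodePointer, so a stray write would have to reproduce a per-process secret value rather than a predictable address/size pair. A standalone sketch of that round-trip using only the documented EncodePointer/DecodePointer APIs (the size value is illustrative):

#include <Windows.h>
#include <stdio.h>

int main(void)
{
    SIZE_T cbRequested = 0x1000;

    /* What z_custom_calloc stores in the trailer cookie's Size field ... */
    PVOID encoded = EncodePointer((PVOID)cbRequested);

    /* ... and what z_custom_cfree recovers before comparing it to the requested size. */
    SIZE_T decoded = (SIZE_T)DecodePointer(encoded);

    printf("encoded=%p decoded=0x%zx round-trips=%d\n", encoded, decoded, decoded == cbRequested);
    return 0;
}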