 #include <mono/utils/mono-os-mutex.h>
 
 // #define MWPM_LOGGING
+// #define MWPM_STATS
 
 typedef enum {
 	MWPM_UNINITIALIZED = 0,
 	MWPM_INITIALIZING = 1,
 	MWPM_INITIALIZED = 2
 } init_state;
 
+typedef enum {
+	MWPM_MARK_DEAD_PAGES,
+	MWPM_MARK_NEW_PAGES,
+	MWPM_FREE_TO_ALLOCATED,
+	MWPM_FREE_TO_ALLOCATED_ZEROED,
+	MWPM_ALLOCATED_TO_FREE,
+} page_action;
+
+#define is_page_free(state) (state & MWPM_FREE_BIT)
+#define is_page_owned(state) (state & MWPM_STATE_MASK)
+#define is_page_in_use(state) ((state & MWPM_STATE_MASK) == MWPM_ALLOCATED)
+#define get_page_skip_count(state) ((state & MWPM_SKIP_MASK) + 1)
+
+typedef uint8_t mwpm_page_state;
+
 static mono_mutex_t mutex;
 static uint8_t page_table [MWPM_MAX_PAGES];
 static gint32 is_initialized = MWPM_UNINITIALIZED;
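
The new mwpm_page_state byte packs an ownership state and a skip count into eight bits. The MWPM_* mask and state constants live in the page manager's header and are not part of this diff, so the standalone sketch below assumes an illustrative layout (two high state bits, six low skip bits) purely to show how the accessor macros compose:

// Standalone sketch of the page-state byte. All MWPM_* values below are
// assumed for illustration; the real constants live in the header.
#include <stdint.h>
#include <stdio.h>

#define MWPM_FREE_BIT    0x80 // assumed: set on free pages
#define MWPM_META_BIT    0x40 // assumed: "zeroed" when free, "allocated" when not
#define MWPM_STATE_MASK  0xC0 // assumed: two high bits hold the state
#define MWPM_SKIP_MASK   0x3F // assumed: six low bits hold skip data

#define MWPM_EXTERNAL    0x00                            // not ours
#define MWPM_ALLOCATED   MWPM_META_BIT                   // ours, in use
#define MWPM_FREE_DIRTY  MWPM_FREE_BIT                   // ours, free, dirty
#define MWPM_FREE_ZEROED (MWPM_FREE_BIT | MWPM_META_BIT) // ours, free, zeroed

#define is_page_free(state) (state & MWPM_FREE_BIT)
#define is_page_owned(state) (state & MWPM_STATE_MASK)
#define is_page_in_use(state) ((state & MWPM_STATE_MASK) == MWPM_ALLOCATED)
#define get_page_skip_count(state) ((state & MWPM_SKIP_MASK) + 1)

int main (void) {
	// First page of a 5-page FREE_ZEROED run: skip data stores (run length - 1).
	uint8_t state = MWPM_FREE_ZEROED | 4;
	printf ("free=%d owned=%d in_use=%d pages_in_run=%u\n",
		!!is_page_free (state), !!is_page_owned (state),
		!!is_page_in_use (state), (unsigned)get_page_skip_count (state));
	return 0;
}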
@@ -54,49 +70,74 @@ last_page_of_range (void *addr, size_t size) {
 	return first_page + page_count_rounded_up - 1;
 }
 
-// returns the number of pages in the range that were successfully transitioned.
-static uint32_t
-transition_page_states (mwpm_page_state from_state, mwpm_page_state to_state, uint32_t first_page, uint32_t page_count) {
+static inline mwpm_page_state
+encode_page_state (uint8_t bits, uint32_t skip_count) {
+	if (skip_count > MWPM_SKIP_MASK)
+		skip_count = MWPM_SKIP_MASK;
+
+	return (bits & MWPM_STATE_MASK) | (skip_count & MWPM_SKIP_MASK);
+}
+
+static void
+transition_page_states (page_action action, uint32_t first_page, uint32_t page_count) {
 	if (page_count == 0)
-		return 0;
+		return;
 
-	if (first_page >= MWPM_MAX_PAGES)
-		return 0;
+	g_assert (first_page < MWPM_MAX_PAGES);
 
 	uint32_t last_page = first_page + (page_count - 1);
 	g_assert (last_page >= first_page);
 
-	if (last_page >= MWPM_MAX_PAGES)
-		return 0;
+	g_assert (last_page < MWPM_MAX_PAGES);
 
-	uint32_t result = 0;
 	// POSIX specifies that munmap () on an address range that isn't mapped has no
 	// effect, so we need to make sure that it's harmless to try and unmap pages we
 	// don't control. We can't use memset since it might trample UNKNOWN pages.
-	for (uint32_t i = first_page; i <= last_page; i++) {
+	for (uint32_t i = first_page, skip_value = page_count - 1; i <= last_page; i++) {
 		mwpm_page_state page_state = page_table [i];
-		// Normalize skip data
-		if (page_state > MWPM_UNKNOWN)
-			page_state = MWPM_UNKNOWN;
 
-		if (page_state != from_state)
-			continue;
-
-		page_table [i] = to_state;
-		result++;
+		// TODO: Remove the duplication in here
+		switch (action) {
+			case MWPM_MARK_DEAD_PAGES:
+				g_assert (!is_page_owned (page_state));
+				page_table [i] = encode_page_state (MWPM_EXTERNAL, skip_value--);
+				break;
+			case MWPM_MARK_NEW_PAGES:
+				g_assert (!is_page_owned (page_state));
+				page_table [i] = encode_page_state (MWPM_FREE_ZEROED, skip_value--);
+				break;
+			case MWPM_FREE_TO_ALLOCATED:
+				g_assert (is_page_free (page_state));
+				page_table [i] = encode_page_state (MWPM_ALLOCATED, skip_value--);
+				break;
+			case MWPM_FREE_TO_ALLOCATED_ZEROED:
+				g_assert (is_page_free (page_state));
+				page_table [i] = encode_page_state (MWPM_ALLOCATED, skip_value--);
+				if (!(page_state & MWPM_META_BIT))
+					// TODO: Don't recalculate the address from scratch each time
+					memset (address_from_page_index (i), 0, MWPM_PAGE_SIZE);
+				break;
+			case MWPM_ALLOCATED_TO_FREE:
+				// FIXME: Can we generate correct skip_value here? This is used
+				// by munmap, which is valid to call even on pages that are not mapped
+				if (is_page_in_use (page_state))
+					page_table [i] = encode_page_state (MWPM_FREE_DIRTY, 0);
+				break;
+			default:
+				g_assert_not_reached ();
+				break;
+		}
 	}
-
-	return result;
 }
 
 static void
 print_stats () {
-#ifdef MWPM_LOGGING
+#if defined(MWPM_LOGGING) || defined(MWPM_STATS)
 	uint32_t in_use = 0, free = 0, unallocated = 0,
 		max_run = 0, current_run = 0;
 
 	for (uint32_t i = first_controlled_page_index; i <= last_controlled_page_index; i++) {
-		switch (page_table [i]) {
+		switch (page_table [i] & MWPM_STATE_MASK) {
 			case MWPM_ALLOCATED:
 				in_use++;
 				current_run = 0;
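
What the rewrite buys: every marking pass in transition_page_states now stamps a descending skip value across the run it touches (the skip_value-- argument), so a later scan that lands anywhere in the run can jump past the remainder in one step. A toy model of that stamping, reusing the assumed masks from the sketch above:

// Toy model of the descending skip-value stamping in transition_page_states.
// Mask and state values are the assumed ones from the earlier sketch.
#include <stdint.h>
#include <stdio.h>

#define MWPM_STATE_MASK  0xC0
#define MWPM_SKIP_MASK   0x3F
#define MWPM_FREE_ZEROED 0xC0

static uint8_t page_table [16];

static uint8_t
encode_page_state (uint8_t bits, uint32_t skip_count) {
	if (skip_count > MWPM_SKIP_MASK)
		skip_count = MWPM_SKIP_MASK;
	return (bits & MWPM_STATE_MASK) | (skip_count & MWPM_SKIP_MASK);
}

int main (void) {
	uint32_t first_page = 3, page_count = 5;
	// Stamp a 5-page run: stored skip values are 4,3,2,1,0, i.e. from any
	// page in the run, (stored skip + 1) pages remain, including itself.
	for (uint32_t i = first_page, skip_value = page_count - 1; i < first_page + page_count; i++)
		page_table [i] = encode_page_state (MWPM_FREE_ZEROED, skip_value--);
	for (uint32_t i = first_page; i < first_page + page_count; i++)
		printf ("page #%u: byte=0x%02x remaining-in-run=%u\n",
			i, (unsigned)page_table [i], (page_table [i] & MWPM_SKIP_MASK) + 1u);
	return 0;
}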
@@ -125,28 +166,6 @@ print_stats () {
 #endif
 }
 
-static void
-optimize_unknown_pages (uint8_t *start, uint8_t *end) {
-	g_assert (end > start);
-
-	uint32_t first_page = first_page_from_address (start),
-		page_count = page_count_from_size (end - start);
-
-	for (uint32_t i = 0, skip_count = page_count - 1; i < page_count; i++, skip_count--) {
-		uint32_t j = i + first_page, skip_value = MWPM_UNKNOWN + skip_count;
-		if (skip_value > 255)
-			skip_value = 255;
-		g_assert (page_table [j] >= MWPM_UNKNOWN);
-		g_print (
-			"#%u = %u ",
-			j, skip_value
-		);
-		page_table [j] = skip_value;
-	}
-
-	g_print ("\n");
-}
-
 static void *
 acquire_new_pages_initialized (uint32_t page_count) {
 	if (page_count < 1)
@@ -175,7 +194,11 @@ acquire_new_pages_initialized (uint32_t page_count) {
 		recovered_bytes = allocation - prev_waste_start;
 		allocation = prev_waste_start;
 	} else {
-		optimize_unknown_pages (prev_waste_end, allocation);
+		// Update the dead pages that were allocated by someone else via sbrk()
+		// so that they have skip data
+		uint32_t first_dead_page = first_page_from_address (prev_waste_end),
+			dead_page_count = page_count_from_size (allocation - prev_waste_end);
+		transition_page_states (MWPM_MARK_DEAD_PAGES, first_dead_page, dead_page_count);
 	}
 
 	uint8_t *result = allocation;
@@ -205,9 +228,8 @@ acquire_new_pages_initialized (uint32_t page_count) {
 	}
 
 	// g_print ("mwpm allocated %u bytes (%u pages) starting at @%u (%u recovered)\n", (uint32_t)bytes, page_count, (uint32_t)allocation, recovered_bytes);
-	uint32_t pages_transitioned = transition_page_states (MWPM_UNKNOWN, MWPM_FREE_ZEROED, first_page_index, page_count);
+	transition_page_states (MWPM_MARK_NEW_PAGES, first_page_index, page_count);
 	print_stats ();
-	g_assert (pages_transitioned == page_count);
 	last_controlled_page_index = last_page_index;
 	return result;
 }
@@ -217,7 +239,23 @@ free_pages_initialized (uint32_t first_page, uint32_t page_count) {
 	// expected behavior: freeing UNKNOWN pages leaves them unknown.
 	// freeing FREE_ZEROED pages leaves them zeroed.
 	// freeing ALLOCATED or FREE_DIRTY pages makes them FREE_DIRTY.
-	transition_page_states (MWPM_ALLOCATED, MWPM_FREE_DIRTY, first_page, page_count);
+	transition_page_states (MWPM_ALLOCATED_TO_FREE, first_page, page_count);
+}
+
+static inline const char *
+get_state_name (uint8_t state) {
+	switch (state & MWPM_STATE_MASK) {
+		case MWPM_EXTERNAL:
+			return "external";
+		case MWPM_FREE_DIRTY:
+			return "dirty";
+		case MWPM_FREE_ZEROED:
+			return "zeroed";
+		case MWPM_ALLOCATED:
+			return "in use";
+		default:
+			g_assert_not_reached ();
+	}
 }
 
 static uint32_t
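
The comment block above states the free-path contract: only pages currently in use change state, which is what keeps munmap harmless on ranges the page manager never owned. A small self-contained check of those invariants under the same assumed mask values:

// Self-contained check of the free-path invariants, using the assumed masks.
#include <stdint.h>
#include <assert.h>

#define MWPM_STATE_MASK  0xC0
#define MWPM_EXTERNAL    0x00
#define MWPM_ALLOCATED   0x40
#define MWPM_FREE_DIRTY  0x80
#define MWPM_FREE_ZEROED 0xC0

#define is_page_in_use(state) ((state & MWPM_STATE_MASK) == MWPM_ALLOCATED)

int main (void) {
	uint8_t table [4] = { MWPM_EXTERNAL, MWPM_FREE_ZEROED, MWPM_ALLOCATED, MWPM_FREE_DIRTY };
	// MWPM_ALLOCATED_TO_FREE: only in-use pages are touched; skip data is
	// reset to 0 since a correct run length is not known here (see FIXME above).
	for (int i = 0; i < 4; i++)
		if (is_page_in_use (table [i]))
			table [i] = MWPM_FREE_DIRTY;
	assert (table [0] == MWPM_EXTERNAL);    // freeing unknown pages: no-op
	assert (table [1] == MWPM_FREE_ZEROED); // zeroed pages stay zeroed
	assert (table [2] == MWPM_FREE_DIRTY);  // allocated becomes free-dirty
	assert (table [3] == MWPM_FREE_DIRTY);  // dirty stays dirty
	return 0;
}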
@@ -233,22 +271,27 @@ find_n_free_pages_in_range (uint32_t start_scan_where, uint32_t end_scan_where,
 		if (j > last_controlled_page_index)
 			break;
 
+		// TODO: If we find a free page with a skip count in it, that would indicate
+		// that there are N sequential free pages left we can claim without doing
+		// the scan below.
+
 		// Scan backwards from the last candidate page to look for any non-free pages
 		// the first non-free page we find is the next place we will search from.
 		for (; j >= i; j--) {
 			mwpm_page_state page_state = page_table [j];
-			if (page_state > MWPM_UNKNOWN) {
+
+			if (!is_page_free (page_state)) {
 				// Skip multiple pages
-				uint32_t skip_count = page_state - MWPM_UNKNOWN;
+				uint32_t skip_count = get_page_skip_count (page_state);
 				i = j + skip_count;
-				g_print (
-					"scan skipping %u unknown page(s); new page is #%u with state %u\n",
-					skip_count, i, page_table [i]
-				);
-				found_obstruction = 1;
-				break;
-			} else if (page_state >= MWPM_ALLOCATED) {
-				i = j + 1;
+#ifdef MWPM_LOGGING
+				if (skip_count > 1)
+					g_print (
+						"scan skipping %u %s page(s); new page is #%u with state %s\n",
+						skip_count, get_state_name (page_state),
+						i, get_state_name (page_table [i])
+					);
+#endif
 				found_obstruction = 1;
 				break;
 			}
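
The scan no longer special-cases unknown versus allocated pages: any non-free page is an obstruction, and its skip count tells the search where to resume. A minimal standalone model of this backward scan with the same assumed masks (the real find_n_free_pages_in_range also honors last_controlled_page_index and other bookkeeping omitted here):

// Minimal model of the backwards obstruction scan. Any non-free page ends
// the candidate window, and its skip count says how far to jump ahead.
// Masks and states are the assumed illustrative values.
#include <stdint.h>
#include <stdio.h>

#define MWPM_FREE_BIT   0x80
#define MWPM_SKIP_MASK  0x3F
#define MWPM_ALLOCATED  0x40
#define MWPM_FREE_DIRTY 0x80

#define is_page_free(state) (state & MWPM_FREE_BIT)
#define get_page_skip_count(state) ((state & MWPM_SKIP_MASK) + 1)

// Returns the first index >= start of page_count consecutive free pages,
// or UINT32_MAX if no such run exists.
static uint32_t
find_run (const uint8_t *table, uint32_t table_size, uint32_t start, uint32_t page_count) {
	uint32_t i = start;
	while (i + page_count <= table_size) {
		int found_obstruction = 0;
		// Scan backwards from the last candidate page; the first non-free
		// page we find decides where the next search window begins.
		for (uint32_t j = i + page_count; j-- > i;) {
			if (!is_page_free (table [j])) {
				i = j + get_page_skip_count (table [j]);
				found_obstruction = 1;
				break;
			}
		}
		if (!found_obstruction)
			return i;
	}
	return UINT32_MAX;
}

int main (void) {
	// Pages 0-2 allocated (descending skips 2,1,0), pages 3-6 free, page 7 allocated.
	uint8_t table [8] = {
		MWPM_ALLOCATED | 2, MWPM_ALLOCATED | 1, MWPM_ALLOCATED | 0,
		MWPM_FREE_DIRTY, MWPM_FREE_DIRTY, MWPM_FREE_DIRTY, MWPM_FREE_DIRTY,
		MWPM_ALLOCATED | 0
	};
	printf ("run of 3 free pages starts at #%u\n", find_run (table, 8, 0, 3)); // #3
	return 0;
}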
@@ -282,7 +325,7 @@ mwpm_init () {
 	mono_os_mutex_init_recursive (&mutex);
 	// Set the entire page table to 'unknown state'. As we acquire pages from sbrk, we will
 	// set those respective ranges in the table to a known state.
-	memset (page_table, MWPM_UNKNOWN, sizeof (page_table));
+	memset (page_table, MWPM_EXTERNAL, sizeof (page_table));
 	void *first_controlled_page_address = acquire_new_pages_initialized (MWPM_MINIMUM_PAGE_COUNT);
 	g_assert (first_controlled_page_address);
 	first_controlled_page_index = first_page_from_address (first_controlled_page_address);
@@ -339,23 +382,8 @@ mwpm_alloc_range (size_t size, uint8_t zeroed) {
 	if (!result)
 		goto exit;
 
-	uint32_t first_result_page = first_page_from_address (result),
-		zeroed_pages = transition_page_states (MWPM_FREE_ZEROED, MWPM_ALLOCATED, first_result_page, page_count),
-		nonzeroed_pages = 0;
-	// FIXME: Do this in one pass instead of two
-	if (zeroed_pages != page_count) {
-		// g_print ("only %u of %u page(s) were zeroed\n", zeroed_pages, page_count);
-		// If we got here, not all of the pages in our allocation were in FREE_ZEROED state, so we need to
-		// zero at least one of them.
-		if (zeroed) {
-			// g_print ("mwpm zeroing %u bytes at %u\n", size, (uint32_t)result);
-			// FIXME: Only zero the dirty pages instead of the whole region.
-			memset (result, 0, size);
-		}
-	}
-	nonzeroed_pages = transition_page_states (MWPM_FREE_DIRTY, MWPM_ALLOCATED, first_result_page, page_count);
-
-	g_assert ((nonzeroed_pages + zeroed_pages) == page_count);
+	uint32_t first_result_page = first_page_from_address (result);
+	transition_page_states (zeroed ? MWPM_FREE_TO_ALLOCATED_ZEROED : MWPM_FREE_TO_ALLOCATED, first_result_page, page_count);
 
 #ifdef MWPM_LOGGING
 	g_print ("mwpm allocated %u bytes at %u\n", size, (uint32_t)result);
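
Allocation is now a single table pass: the ternary picks MWPM_FREE_TO_ALLOCATED_ZEROED, which memsets only pages whose previous state lacks the zeroed meta bit, instead of the old count-then-zero-the-whole-region fallback. A standalone demo of that selective zeroing; the page size and mask values are stand-ins, since MWPM_PAGE_SIZE and the bit constants are not defined in this diff:

// Demo of one-pass allocate + selective zeroing. Values are stand-ins;
// MWPM_PAGE_SIZE and the masks are not defined in this diff.
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define PAGE_SIZE 64 // stand-in for MWPM_PAGE_SIZE
#define MWPM_SKIP_MASK   0x3F
#define MWPM_META_BIT    0x40 // assumed: set on FREE_ZEROED (and ALLOCATED) pages
#define MWPM_ALLOCATED   0x40
#define MWPM_FREE_DIRTY  0x80
#define MWPM_FREE_ZEROED 0xC0

static uint8_t page_table [4] = {
	MWPM_FREE_ZEROED, MWPM_FREE_DIRTY, MWPM_FREE_ZEROED, MWPM_FREE_DIRTY
};
static uint8_t memory [4][PAGE_SIZE];

int main (void) {
	memset (memory [1], 0xAA, PAGE_SIZE); // dirty contents left by a prior user
	memset (memory [3], 0xBB, PAGE_SIZE);
	int memsets = 0;
	// MWPM_FREE_TO_ALLOCATED_ZEROED: mark allocated with descending skips,
	// but only memset pages that were not already FREE_ZEROED.
	for (uint32_t i = 0, skip_value = 3; i < 4; i++) {
		uint8_t prev = page_table [i];
		page_table [i] = MWPM_ALLOCATED | (skip_value-- & MWPM_SKIP_MASK);
		if (!(prev & MWPM_META_BIT)) {
			memset (memory [i], 0, PAGE_SIZE);
			memsets++;
		}
	}
	printf ("zeroed %d of 4 pages with memset\n", memsets); // prints 2
	return 0;
}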