@@ -278,90 +278,78 @@ impl<Prov: Provenance> ProvenanceMap<Prov> {
 
 /// A partial, owned list of provenance to transfer into another allocation.
 ///
-/// Offsets are already adjusted to the destination allocation.
+/// Offsets are relative to the beginning of the copied range.
 pub struct ProvenanceCopy<Prov> {
-    dest_ptrs: Option<Box<[(Size, Prov)]>>,
-    dest_bytes: Option<Box<[(Size, (Prov, u8))]>>,
+    ptrs: Box<[(Size, Prov)]>,
+    bytes: Box<[(Size, (Prov, u8))]>,
 }
 
 impl<Prov: Provenance> ProvenanceMap<Prov> {
     pub fn prepare_copy(
         &self,
-        src: AllocRange,
-        dest: Size,
-        count: u64,
+        range: AllocRange,
         cx: &impl HasDataLayout,
     ) -> AllocResult<ProvenanceCopy<Prov>> {
-        let shift_offset = move |idx, offset| {
-            // compute offset for current repetition
-            let dest_offset = dest + src.size * idx; // `Size` operations
-            // shift offsets from source allocation to destination allocation
-            (offset - src.start) + dest_offset // `Size` operations
-        };
+        let shift_offset = move |offset| offset - range.start;
         let ptr_size = cx.data_layout().pointer_size();
 
         // # Pointer-sized provenances
         // Get the provenances that are entirely within this range.
         // (Different from `range_get_ptrs` which asks if they overlap the range.)
         // Only makes sense if we are copying at least one pointer worth of bytes.
-        let mut dest_ptrs_box = None;
-        if src.size >= ptr_size {
-            let adjusted_end = Size::from_bytes(src.end().bytes() - (ptr_size.bytes() - 1));
-            let ptrs = self.ptrs.range(src.start..adjusted_end);
-            // If `count` is large, this is rather wasteful -- we are allocating a big array here, which
-            // is mostly filled with redundant information since it's just N copies of the same `Prov`s
-            // at slightly adjusted offsets. The reason we do this is so that in `mark_provenance_range`
-            // we can use `insert_presorted`. That wouldn't work with an `Iterator` that just produces
-            // the right sequence of provenance for all N copies.
-            // Basically, this large array would have to be created anyway in the target allocation.
-            let mut dest_ptrs = Vec::with_capacity(ptrs.len() * (count as usize));
-            for i in 0..count {
-                dest_ptrs
-                    .extend(ptrs.iter().map(|&(offset, reloc)| (shift_offset(i, offset), reloc)));
-            }
-            debug_assert_eq!(dest_ptrs.len(), dest_ptrs.capacity());
-            dest_ptrs_box = Some(dest_ptrs.into_boxed_slice());
+        let mut ptrs_box: Box<[_]> = Box::new([]);
+        if range.size >= ptr_size {
+            let adjusted_end = Size::from_bytes(range.end().bytes() - (ptr_size.bytes() - 1));
+            let ptrs = self.ptrs.range(range.start..adjusted_end);
+            ptrs_box = ptrs.iter().map(|&(offset, reloc)| (shift_offset(offset), reloc)).collect();
         };
 
         // # Byte-sized provenances
         // This includes the existing bytewise provenance in the range, and ptr provenance
         // that overlaps with the begin/end of the range.
-        let mut dest_bytes_box = None;
-        let begin_overlap = self.range_ptrs_get(alloc_range(src.start, Size::ZERO), cx).first();
-        let end_overlap = self.range_ptrs_get(alloc_range(src.end(), Size::ZERO), cx).first();
+        let mut bytes_box: Box<[_]> = Box::new([]);
+        let begin_overlap = self.range_ptrs_get(alloc_range(range.start, Size::ZERO), cx).first();
+        let end_overlap = self.range_ptrs_get(alloc_range(range.end(), Size::ZERO), cx).first();
         // We only need to go here if there is some overlap or some bytewise provenance.
         if begin_overlap.is_some() || end_overlap.is_some() || self.bytes.is_some() {
             let mut bytes: Vec<(Size, (Prov, u8))> = Vec::new();
             // First, if there is a part of a pointer at the start, add that.
             if let Some(entry) = begin_overlap {
                 trace!("start overlapping entry: {entry:?}");
-                // For really small copies, make sure we don't run off the end of the `src` range.
-                let entry_end = cmp::min(entry.0 + ptr_size, src.end());
-                for offset in src.start..entry_end {
-                    bytes.push((offset, (entry.1, (offset - entry.0).bytes() as u8)));
+                // For really small copies, make sure we don't run off the end of the range.
+                let entry_end = cmp::min(entry.0 + ptr_size, range.end());
+                for offset in range.start..entry_end {
+                    bytes.push((shift_offset(offset), (entry.1, (offset - entry.0).bytes() as u8)));
                 }
             } else {
                 trace!("no start overlapping entry");
             }
 
             // Then the main part, bytewise provenance from `self.bytes`.
-            bytes.extend(self.range_bytes_get(src));
+            bytes.extend(
+                self.range_bytes_get(range)
+                    .iter()
+                    .map(|&(offset, reloc)| (shift_offset(offset), reloc)),
+            );
 
             // And finally possibly parts of a pointer at the end.
             if let Some(entry) = end_overlap {
                 trace!("end overlapping entry: {entry:?}");
-                // For really small copies, make sure we don't start before `src` does.
-                let entry_start = cmp::max(entry.0, src.start);
-                for offset in entry_start..src.end() {
+                // For really small copies, make sure we don't start before `range` does.
+                let entry_start = cmp::max(entry.0, range.start);
+                for offset in entry_start..range.end() {
                     if bytes.last().is_none_or(|bytes_entry| bytes_entry.0 < offset) {
                         // The last entry, if it exists, has a lower offset than us, so we
                         // can add it at the end and remain sorted.
-                        bytes.push((offset, (entry.1, (offset - entry.0).bytes() as u8)));
+                        bytes.push((
+                            shift_offset(offset),
+                            (entry.1, (offset - entry.0).bytes() as u8),
+                        ));
                     } else {
                         // There already is an entry for this offset in there! This can happen when the
                         // start and end range checks actually end up hitting the same pointer, so we
                         // already added this in the "pointer at the start" part above.
-                        assert!(entry.0 <= src.start);
+                        assert!(entry.0 <= range.start);
                     }
                 }
             } else {
@@ -372,33 +360,40 @@ impl<Prov: Provenance> ProvenanceMap<Prov> {
             if !bytes.is_empty() && !Prov::OFFSET_IS_ADDR {
                 // FIXME(#146291): We need to ensure that we don't mix different pointers with
                 // the same provenance.
-                return Err(AllocError::ReadPartialPointer(src.start));
+                return Err(AllocError::ReadPartialPointer(range.start));
             }
 
             // And again a buffer for the new list on the target side.
-            let mut dest_bytes = Vec::with_capacity(bytes.len() * (count as usize));
-            for i in 0..count {
-                dest_bytes
-                    .extend(bytes.iter().map(|&(offset, reloc)| (shift_offset(i, offset), reloc)));
-            }
-            debug_assert_eq!(dest_bytes.len(), dest_bytes.capacity());
-            dest_bytes_box = Some(dest_bytes.into_boxed_slice());
+            bytes_box = bytes.into_boxed_slice();
         }
 
-        Ok(ProvenanceCopy { dest_ptrs: dest_ptrs_box, dest_bytes: dest_bytes_box })
+        Ok(ProvenanceCopy { ptrs: ptrs_box, bytes: bytes_box })
     }
 
     /// Applies a provenance copy.
     /// The affected range, as defined in the parameters to `prepare_copy` is expected
     /// to be clear of provenance.
-    pub fn apply_copy(&mut self, copy: ProvenanceCopy<Prov>) {
-        if let Some(dest_ptrs) = copy.dest_ptrs {
-            self.ptrs.insert_presorted(dest_ptrs.into());
+    pub fn apply_copy(&mut self, copy: ProvenanceCopy<Prov>, range: AllocRange, repeat: u64) {
+        let shift_offset = |idx: u64, offset: Size| offset + range.start + idx * range.size;
+        if !copy.ptrs.is_empty() {
+            // We want to call `insert_presorted` only once so that, if possible, the entries
+            // after the range we insert are moved back only once.
+            let chunk_len = copy.ptrs.len() as u64;
+            self.ptrs.insert_presorted((0..chunk_len * repeat).map(|i| {
+                let chunk = i / chunk_len;
+                let (offset, reloc) = copy.ptrs[(i % chunk_len) as usize];
+                (shift_offset(chunk, offset), reloc)
+            }));
         }
-        if let Some(dest_bytes) = copy.dest_bytes
-            && !dest_bytes.is_empty()
-        {
-            self.bytes.get_or_insert_with(Box::default).insert_presorted(dest_bytes.into());
+        if !copy.bytes.is_empty() {
+            let chunk_len = copy.bytes.len() as u64;
+            self.bytes.get_or_insert_with(Box::default).insert_presorted(
+                (0..chunk_len * repeat).map(|i| {
+                    let chunk = i / chunk_len;
+                    let (offset, reloc) = copy.bytes[(i % chunk_len) as usize];
+                    (shift_offset(chunk, offset), reloc)
+                }),
+            );
         }
     }
 }
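
As a quick illustration of the new split, here is a minimal, self-contained sketch of the offset arithmetic only. It is not the rustc implementation: plain u64 offsets stand in for `Size`, small integer ids stand in for `Prov`, a `Vec` stands in for the sorted provenance map, and the pointer-fragment handling at the range boundaries is omitted. The point is that `prepare_copy` now stores range-relative offsets once, and `apply_copy` re-bases them onto the destination and repeats them `repeat` times as one presorted sequence.

// Hypothetical model, not the rustc code: u64 offsets stand in for `Size`,
// u32 ids stand in for `Prov`, and plain Vecs stand in for the sorted maps.

/// Provenance entries with offsets relative to the start of the copied range,
/// as `prepare_copy` now produces them.
struct RelativeCopy {
    ptrs: Vec<(u64, u32)>, // (range-relative offset, provenance id)
}

/// Collect the entries inside `[range_start, range_start + range_size)` and
/// shift them to be range-relative (the role of `shift_offset` in `prepare_copy`).
fn prepare_copy(src_ptrs: &[(u64, u32)], range_start: u64, range_size: u64) -> RelativeCopy {
    RelativeCopy {
        ptrs: src_ptrs
            .iter()
            .filter(|&&(offset, _)| offset >= range_start && offset < range_start + range_size)
            .map(|&(offset, prov)| (offset - range_start, prov))
            .collect(),
    }
}

/// Re-base the prepared entries onto `dest_start` and repeat them `repeat` times,
/// yielding one already-sorted sequence (what the single `insert_presorted` call
/// in `apply_copy` consumes).
fn apply_copy(copy: &RelativeCopy, dest_start: u64, range_size: u64, repeat: u64) -> Vec<(u64, u32)> {
    let chunk_len = copy.ptrs.len() as u64;
    (0..chunk_len * repeat)
        .map(|i| {
            let chunk = i / chunk_len;
            let (offset, prov) = copy.ptrs[(i % chunk_len) as usize];
            (dest_start + chunk * range_size + offset, prov)
        })
        .collect()
}

fn main() {
    // One pointer (id 42) at source offset 18; copy the range [16, 32) to offset 64, three times.
    let copy = prepare_copy(&[(2, 7), (18, 42)], 16, 16);
    assert_eq!(copy.ptrs, vec![(2, 42)]); // stored range-relative, destination-independent
    let dest = apply_copy(&copy, 64, 16, 3);
    assert_eq!(dest, vec![(66, 42), (82, 42), (98, 42)]);
    println!("{dest:?}");
}

Generating all `chunk_len * repeat` entries through one iterator mirrors the comment in the new `apply_copy`: a single `insert_presorted` call means entries after the insertion point are shifted at most once, instead of materializing N adjusted copies up front in `prepare_copy`.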