@@ -54,7 +54,7 @@ unsafe fn load_aligned_partial(src: *const usize, load_sz: usize) -> usize {
             let chunk_sz = core::mem::size_of::<$ty>();
             if (load_sz & chunk_sz) != 0 {
                 // Since we are doing the large reads first, this must still be aligned to `chunk_sz`.
-                *(&raw mut out).byte_add(i).cast::<$ty>() = *src.byte_add(i).cast::<$ty>();
+                *(&raw mut out).wrapping_byte_add(i).cast::<$ty>() = *src.wrapping_byte_add(i).cast::<$ty>();
                 i |= chunk_sz;
             }
         )+};
@@ -83,7 +83,7 @@ unsafe fn load_aligned_end_partial(src: *const usize, load_sz: usize) -> usize {
             if (load_sz & chunk_sz) != 0 {
                 // Since we are doing the small reads first, `start_shift + i` has in the mean
                 // time become aligned to `chunk_sz`.
-                *(&raw mut out).byte_add(start_shift + i).cast::<$ty>() = *src.byte_add(start_shift + i).cast::<$ty>();
+                *(&raw mut out).wrapping_byte_add(start_shift + i).cast::<$ty>() = *src.wrapping_byte_add(start_shift + i).cast::<$ty>();
                 i |= chunk_sz;
             }
         )+};
@@ -137,7 +137,7 @@ pub unsafe fn copy_forward(mut dest: *mut u8, mut src: *const u8, mut n: usize)
         let shift = offset * 8;

         // Realign src
-        let mut src_aligned = src.byte_sub(offset) as *mut usize;
+        let mut src_aligned = src.wrapping_byte_sub(offset) as *mut usize;
         let mut prev_word = load_aligned_end_partial(src_aligned, WORD_SIZE - offset);

         while dest_usize.wrapping_add(1) < dest_end {
@@ -249,7 +249,7 @@ pub unsafe fn copy_backward(dest: *mut u8, src: *const u8, mut n: usize) {
         let shift = offset * 8;

         // Realign src
-        let mut src_aligned = src.byte_sub(offset) as *mut usize;
+        let mut src_aligned = src.wrapping_byte_sub(offset) as *mut usize;
         let mut prev_word = load_aligned_partial(src_aligned, offset);

         while dest_start.wrapping_add(1) < dest_usize {
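
For context (not part of the diff): the change swaps the `byte_add`/`byte_sub` pointer methods for their `wrapping_*` counterparts. The plain variants lower to `offset` and require the computed pointer to stay inside (or one past the end of) the same allocation, while the wrapping variants place no such requirement until the pointer is actually dereferenced. A minimal standalone sketch of that difference, using an arbitrary local buffer (not code from this PR):

fn main() {
    let buf = [0u8; 8];
    let p: *const u8 = buf.as_ptr();

    // `wrapping_byte_add`/`wrapping_byte_sub` are safe, wrapping offsets: the
    // intermediate pointer may point outside the allocation, as long as it is
    // only dereferenced once it is back in bounds.
    let past_end = p.wrapping_byte_add(16);
    let back_in_bounds = past_end.wrapping_byte_sub(16);
    let first = unsafe { *back_in_bounds };
    assert_eq!(first, 0);

    // `byte_add` lowers to `offset`, which requires the result to stay within
    // the same allocation, so the equivalent `p.byte_add(16)` would already be
    // undefined behavior for this 8-byte buffer even without a read.
}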