@@ -41,6 +41,61 @@ unsafe fn read_usize_unaligned(x: *const usize) -> usize {
     core::mem::transmute(x_read)
 }

+/// Load `load_sz` many bytes from `src`, which must be usize-aligned. Acts as if we did a `usize`
+/// read with the out-of-bounds part filled with 0s.
+/// `load_sz` must be strictly less than `WORD_SIZE`.
+#[cfg(not(feature = "mem-unaligned"))]
+#[inline(always)]
+unsafe fn load_aligned_partial(src: *const usize, load_sz: usize) -> usize {
+    let mut i = 0;
+    let mut out = 0usize;
+    macro_rules! load_prefix {
+        ($($ty:ty)+) => {$(
+            let chunk_sz = core::mem::size_of::<$ty>();
+            if (load_sz & chunk_sz) != 0 {
+                // Since we are doing the large reads first, this must still be aligned to `chunk_sz`.
+                *(&raw mut out).byte_add(i).cast::<$ty>() = *src.byte_add(i).cast::<$ty>();
+                i |= chunk_sz;
+            }
+        )+};
+    }
+    // We can read up to 7 bytes here, which is enough for WORD_SIZE of 8
+    // (as we handled the full-word case above).
+    const { assert!(WORD_SIZE <= 8) };
+    load_prefix!(u32 u16 u8);
+    debug_assert!(i == load_sz);
+    out
+}
+
+/// Load `load_sz` many bytes from `src.byte_add(WORD_SIZE - load_sz)`. `src` must be `usize`-aligned.
+/// The bytes are returned as the *last* bytes of the return value, i.e., acts as if we had done
+/// a `usize` read from `src`, with the out-of-bounds part filled with 0s.
+/// `load_sz` must be strictly less than `WORD_SIZE`.
+#[cfg(not(feature = "mem-unaligned"))]
+#[inline(always)]
+unsafe fn load_aligned_end_partial(src: *const usize, load_sz: usize) -> usize {
+    let mut i = 0;
+    let mut out = 0usize;
+    let start_shift = WORD_SIZE - load_sz;
+    macro_rules! load_prefix {
+        ($($ty:ty)+) => {$(
+            let chunk_sz = core::mem::size_of::<$ty>();
+            if (load_sz & chunk_sz) != 0 {
+                // Since we are doing the small reads first, `start_shift + i` has in the meantime
+                // become aligned to `chunk_sz`.
+                *(&raw mut out).byte_add(start_shift + i).cast::<$ty>() = *src.byte_add(start_shift + i).cast::<$ty>();
+                i |= chunk_sz;
+            }
+        )+};
+    }
+    // We can read up to 7 bytes here, which is enough for WORD_SIZE of 8
+    // (as we handled the full-word case above).
+    const { assert!(WORD_SIZE <= 8) };
+    load_prefix!(u8 u16 u32);
+    debug_assert!(i == load_sz);
+    out
+}
+
 #[inline(always)]
 pub unsafe fn copy_forward(mut dest: *mut u8, mut src: *const u8, mut n: usize) {
     #[inline(always)]
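As an aside, the chunk-decomposition idea behind `load_aligned_partial` can be shown outside the patch. The sketch below is not part of the diff; the helper name `partial_load_le`, the fixed 8-byte word, and the safe slice-based reads are purely illustrative assumptions. It mirrors how the `load_prefix!` macro splits a sub-word load into power-of-two chunks, largest first, so that loading 5 bytes becomes one 4-byte read followed by one 1-byte read.

// Illustrative sketch only (hypothetical helper, not part of compiler-builtins):
// mimic the `load_prefix!` decomposition with safe slice reads on a little-endian 8-byte word.
fn partial_load_le(src: &[u8], load_sz: usize) -> u64 {
    assert!(load_sz < 8 && load_sz <= src.len());
    let mut out = [0u8; 8];
    let mut i = 0;
    // Test each power-of-two chunk size, largest first, exactly like `load_prefix!(u32 u16 u8)`.
    for chunk_sz in [4usize, 2, 1] {
        if load_sz & chunk_sz != 0 {
            out[i..i + chunk_sz].copy_from_slice(&src[i..i + chunk_sz]);
            i |= chunk_sz;
        }
    }
    assert_eq!(i, load_sz);
    u64::from_le_bytes(out)
}

fn main() {
    let bytes = [0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88];
    // Loading 5 bytes = one 4-byte chunk + one 1-byte chunk; the remaining bytes stay zero.
    assert_eq!(partial_load_le(&bytes, 5), 0x0000_0055_4433_2211);
    println!("ok");
}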
@@ -66,40 +121,54 @@ pub unsafe fn copy_forward(mut dest: *mut u8, mut src: *const u8, mut n: usize)
         }
     }

+    /// `n` is in units of bytes, but must be a multiple of the word size and must not be 0.
+    /// `src` *must not* be `usize`-aligned.
     #[cfg(not(feature = "mem-unaligned"))]
     #[inline(always)]
     unsafe fn copy_forward_misaligned_words(dest: *mut u8, src: *const u8, n: usize) {
+        debug_assert!(n > 0 && n % WORD_SIZE == 0);
+
         let mut dest_usize = dest as *mut usize;
         let dest_end = dest.wrapping_add(n) as *mut usize;

         // Calculate the misalignment offset and shift needed to reassemble value.
+        // Since `src` is definitely not aligned, `offset` is in the range 1..WORD_SIZE.
         let offset = src as usize & WORD_MASK;
         let shift = offset * 8;

         // Realign src
-        let mut src_aligned = (src as usize & !WORD_MASK) as *mut usize;
-        // This will read (but won't use) bytes out of bound.
-        // cfg needed because not all targets will have atomic loads that can be lowered
-        // (e.g. BPF, MSP430), or provided by an external library (e.g. RV32I)
-        #[cfg(target_has_atomic_load_store = "ptr")]
-        let mut prev_word = core::intrinsics::atomic_load_unordered(src_aligned);
-        #[cfg(not(target_has_atomic_load_store = "ptr"))]
-        let mut prev_word = core::ptr::read_volatile(src_aligned);
+        let mut src_aligned = src.byte_sub(offset) as *mut usize;
+        let mut prev_word = load_aligned_end_partial(src_aligned, WORD_SIZE - offset);

-        while dest_usize < dest_end {
+        while dest_usize.wrapping_add(1) < dest_end {
             src_aligned = src_aligned.wrapping_add(1);
             let cur_word = *src_aligned;
             #[cfg(target_endian = "little")]
-            let resembled = prev_word >> shift | cur_word << (WORD_SIZE * 8 - shift);
+            let reassembled = prev_word >> shift | cur_word << (WORD_SIZE * 8 - shift);
             #[cfg(target_endian = "big")]
-            let resembled = prev_word << shift | cur_word >> (WORD_SIZE * 8 - shift);
+            let reassembled = prev_word << shift | cur_word >> (WORD_SIZE * 8 - shift);
             prev_word = cur_word;

-            *dest_usize = resembled;
+            *dest_usize = reassembled;
             dest_usize = dest_usize.wrapping_add(1);
         }
+
+        // There's one more element left to go, and we can't use the loop for that as on the `src` side,
+        // it is partially out-of-bounds.
+        src_aligned = src_aligned.wrapping_add(1);
+        let cur_word = load_aligned_partial(src_aligned, offset);
+        #[cfg(target_endian = "little")]
+        let reassembled = prev_word >> shift | cur_word << (WORD_SIZE * 8 - shift);
+        #[cfg(target_endian = "big")]
+        let reassembled = prev_word << shift | cur_word >> (WORD_SIZE * 8 - shift);
+        // prev_word does not matter any more
+
+        *dest_usize = reassembled;
+        // dest_usize does not matter any more
     }

+    /// `n` is in units of bytes, but must be a multiple of the word size and must not be 0.
+    /// `src` *must not* be `usize`-aligned.
     #[cfg(feature = "mem-unaligned")]
     #[inline(always)]
     unsafe fn copy_forward_misaligned_words(dest: *mut u8, src: *const u8, n: usize) {
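For readers following the shift arithmetic in `copy_forward_misaligned_words`: each destination word is glued together from two adjacent aligned source words. The sketch below is illustrative only (it assumes a little-endian, 64-bit word and builds the aligned words from a slice instead of raw pointer loads); it checks that, for `offset = 3`, the reassembly expression reproduces the unaligned word starting three bytes into the buffer.

// Illustrative sketch of the little-endian forward reassembly step (assumes a 64-bit word).
fn main() {
    let buf: [u8; 16] = core::array::from_fn(|i| i as u8);
    let offset = 3usize; // pretend `src` starts 3 bytes past an aligned word boundary
    let shift = offset * 8;

    // The two aligned words straddling the unaligned word we want.
    let prev_word = u64::from_le_bytes(buf[0..8].try_into().unwrap());
    let cur_word = u64::from_le_bytes(buf[8..16].try_into().unwrap());

    // Same expression as the `target_endian = "little"` branch in the loop body.
    let reassembled = prev_word >> shift | cur_word << (64 - shift);

    // The result is exactly the unaligned word starting at buf[3].
    assert_eq!(reassembled, u64::from_le_bytes(buf[3..11].try_into().unwrap()));
    println!("{reassembled:#018x}");
}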
@@ -164,40 +233,54 @@ pub unsafe fn copy_backward(dest: *mut u8, src: *const u8, mut n: usize) {
         }
     }

+    /// `n` is in units of bytes, but must be a multiple of the word size and must not be 0.
+    /// `src` *must not* be `usize`-aligned.
     #[cfg(not(feature = "mem-unaligned"))]
     #[inline(always)]
     unsafe fn copy_backward_misaligned_words(dest: *mut u8, src: *const u8, n: usize) {
+        debug_assert!(n > 0 && n % WORD_SIZE == 0);
+
         let mut dest_usize = dest as *mut usize;
-        let dest_start = dest.wrapping_sub(n) as *mut usize;
+        let dest_start = dest.wrapping_sub(n) as *mut usize; // we're moving towards the start

         // Calculate the misalignment offset and shift needed to reassemble value.
+        // Since `src` is definitely not aligned, `offset` is in the range 1..WORD_SIZE.
         let offset = src as usize & WORD_MASK;
         let shift = offset * 8;

-        // Realign src_aligned
-        let mut src_aligned = (src as usize & !WORD_MASK) as *mut usize;
-        // This will read (but won't use) bytes out of bound.
-        // cfg needed because not all targets will have atomic loads that can be lowered
-        // (e.g. BPF, MSP430), or provided by an external library (e.g. RV32I)
-        #[cfg(target_has_atomic_load_store = "ptr")]
-        let mut prev_word = core::intrinsics::atomic_load_unordered(src_aligned);
-        #[cfg(not(target_has_atomic_load_store = "ptr"))]
-        let mut prev_word = core::ptr::read_volatile(src_aligned);
+        // Realign src
+        let mut src_aligned = src.byte_sub(offset) as *mut usize;
+        let mut prev_word = load_aligned_partial(src_aligned, offset);

-        while dest_start < dest_usize {
+        while dest_start.wrapping_add(1) < dest_usize {
             src_aligned = src_aligned.wrapping_sub(1);
             let cur_word = *src_aligned;
             #[cfg(target_endian = "little")]
-            let resembled = prev_word << (WORD_SIZE * 8 - shift) | cur_word >> shift;
+            let reassembled = prev_word << (WORD_SIZE * 8 - shift) | cur_word >> shift;
             #[cfg(target_endian = "big")]
-            let resembled = prev_word >> (WORD_SIZE * 8 - shift) | cur_word << shift;
+            let reassembled = prev_word >> (WORD_SIZE * 8 - shift) | cur_word << shift;
             prev_word = cur_word;

             dest_usize = dest_usize.wrapping_sub(1);
-            *dest_usize = resembled;
+            *dest_usize = reassembled;
         }
+
+        // There's one more element left to go, and we can't use the loop for that as on the `src` side,
+        // it is partially out-of-bounds.
+        src_aligned = src_aligned.wrapping_sub(1);
+        let cur_word = load_aligned_end_partial(src_aligned, WORD_SIZE - offset);
+        #[cfg(target_endian = "little")]
+        let reassembled = prev_word << (WORD_SIZE * 8 - shift) | cur_word >> shift;
+        #[cfg(target_endian = "big")]
+        let reassembled = prev_word >> (WORD_SIZE * 8 - shift) | cur_word << shift;
+        // prev_word does not matter any more
+
+        dest_usize = dest_usize.wrapping_sub(1);
+        *dest_usize = reassembled;
     }

+    /// `n` is in units of bytes, but must be a multiple of the word size and must not be 0.
+    /// `src` *must not* be `usize`-aligned.
     #[cfg(feature = "mem-unaligned")]
     #[inline(always)]
     unsafe fn copy_backward_misaligned_words(dest: *mut u8, src: *const u8, n: usize) {
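The backward copy mirrors the forward case, walking from the end of the region toward the start and shifting in the opposite direction. As a purely illustrative check (again assuming a little-endian, 64-bit word and slice-built words rather than the actual raw-pointer loads), the sketch below evaluates the backward-direction expression for `offset = 3` and confirms it also reconstructs the unaligned word at `buf[3]`.

// Illustrative sketch of the backward little-endian reassembly (assumes a 64-bit word).
fn main() {
    let buf: [u8; 16] = core::array::from_fn(|i| i as u8);
    let offset = 3usize; // the one-past-the-end source pointer is 3 bytes past a word boundary
    let shift = offset * 8;

    // `prev_word` plays the role of `load_aligned_partial(src_aligned, offset)`:
    // only the 3 in-bounds bytes buf[8..11] are loaded, the rest stays zero.
    let mut tail = [0u8; 8];
    tail[..offset].copy_from_slice(&buf[8..8 + offset]);
    let prev_word = u64::from_le_bytes(tail);
    // `cur_word` is the preceding, fully in-bounds aligned word.
    let cur_word = u64::from_le_bytes(buf[0..8].try_into().unwrap());

    // Same expression as the `target_endian = "little"` branch in `copy_backward_misaligned_words`.
    let reassembled = prev_word << (64 - shift) | cur_word >> shift;

    // The last word written equals the unaligned word spanning buf[3..11].
    assert_eq!(reassembled, u64::from_le_bytes(buf[3..11].try_into().unwrap()));
    println!("{reassembled:#018x}");
}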