     }
 }
 
-fn classify_ret<'a, Ty, C>(cx: &C, ret: &mut ArgAbi<'a, Ty>)
+fn classify_ret<'a, Ty, C>(cx: &C, ret: &mut ArgAbi<'a, Ty>, offset: &mut Size)
 where
     Ty: TyAbiInterface<'a, C> + Copy,
     C: HasDataLayout,
@@ -70,92 +70,101 @@ where
         ret.cast_to(Uniform::new(Reg::i64(), size));
     } else {
         ret.make_indirect();
+        *offset += cx.data_layout().pointer_size();
     }
 }
 
-fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>)
+fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>, offset: &mut Size)
 where
     Ty: TyAbiInterface<'a, C> + Copy,
     C: HasDataLayout,
 {
-    if !arg.layout.is_aggregate() {
-        extend_integer_width_mips(arg, 64);
-        return;
-    }
-    if arg.layout.pass_indirectly_in_non_rustic_abis(cx) {
-        arg.make_indirect();
-        return;
-    }
-
     let dl = cx.data_layout();
     let size = arg.layout.size;
     let mut prefix = [None; 8];
     let mut prefix_index = 0;
 
-    match arg.layout.fields {
-        FieldsShape::Primitive => unreachable!(),
-        FieldsShape::Array { .. } => {
-            // Arrays are passed indirectly
-            arg.make_indirect();
-            return;
-        }
-        FieldsShape::Union(_) => {
-            // Unions are always treated as a series of 64-bit integer chunks
-        }
-        FieldsShape::Arbitrary { .. } => {
-            // Structures are split up into a series of 64-bit integer chunks, but any aligned
-            // doubles not part of another aggregate are passed as floats.
-            let mut last_offset = Size::ZERO;
-
-            for i in 0..arg.layout.fields.count() {
-                let field = arg.layout.field(cx, i);
-                let offset = arg.layout.fields.offset(i);
-
-                // We only care about aligned doubles
-                if let BackendRepr::Scalar(scalar) = field.backend_repr {
-                    if scalar.primitive() == Primitive::Float(Float::F64) {
-                        if offset.is_aligned(dl.f64_align) {
-                            // Insert enough integers to cover [last_offset, offset)
-                            assert!(last_offset.is_aligned(dl.f64_align));
-                            for _ in 0..((offset - last_offset).bits() / 64)
-                                .min((prefix.len() - prefix_index) as u64)
-                            {
-                                prefix[prefix_index] = Some(Reg::i64());
-                                prefix_index += 1;
-                            }
+    // Detect need for padding
+    let align = arg.layout.align.abi.max(dl.i64_align).min(dl.i128_align);
+    let pad_i32 = !offset.is_aligned(align);
 
-                            if prefix_index == prefix.len() {
-                                break;
+    if !arg.layout.is_aggregate() {
+        extend_integer_width_mips(arg, 64);
+    } else if arg.layout.pass_indirectly_in_non_rustic_abis(cx) {
+        arg.make_indirect();
+    } else {
+        match arg.layout.fields {
+            FieldsShape::Primitive => unreachable!(),
+            FieldsShape::Array { .. } => {
+                // Arrays are passed indirectly
+                arg.make_indirect();
+            }
+            FieldsShape::Union(_) => {
+                // Unions are always treated as a series of 64-bit integer chunks
+            }
+            FieldsShape::Arbitrary { .. } => {
+                // Structures are split up into a series of 64-bit integer chunks, but any aligned
+                // doubles not part of another aggregate are passed as floats.
+                let mut last_offset = Size::ZERO;
+
+                for i in 0..arg.layout.fields.count() {
+                    let field = arg.layout.field(cx, i);
+                    let offset = arg.layout.fields.offset(i);
+
+                    // We only care about aligned doubles
+                    if let BackendRepr::Scalar(scalar) = field.backend_repr {
+                        if scalar.primitive() == Primitive::Float(Float::F64) {
+                            if offset.is_aligned(dl.f64_align) {
+                                // Insert enough integers to cover [last_offset, offset)
+                                assert!(last_offset.is_aligned(dl.f64_align));
+                                for _ in 0..((offset - last_offset).bits() / 64)
+                                    .min((prefix.len() - prefix_index) as u64)
+                                {
+                                    prefix[prefix_index] = Some(Reg::i64());
+                                    prefix_index += 1;
+                                }
+
+                                if prefix_index == prefix.len() {
+                                    break;
+                                }
+
+                                prefix[prefix_index] = Some(Reg::f64());
+                                prefix_index += 1;
+                                last_offset = offset + Reg::f64().size;
                             }
-
-                            prefix[prefix_index] = Some(Reg::f64());
-                            prefix_index += 1;
-                            last_offset = offset + Reg::f64().size;
                         }
                     }
                 }
             }
-        }
-    };
-
-    // Extract first 8 chunks as the prefix
-    let rest_size = size - Size::from_bytes(8) * prefix_index as u64;
-    arg.cast_to(CastTarget::prefixed(prefix, Uniform::new(Reg::i64(), rest_size)));
+        };
+
+        // Extract first 8 chunks as the prefix
+        let rest_size = size - Size::from_bytes(8) * prefix_index as u64;
+        arg.cast_to_and_pad_i32(
+            CastTarget::prefixed(prefix, Uniform::new(Reg::i64(), rest_size)),
+            pad_i32,
+        );
+    }
+    *offset = offset.align_to(align) + size.align_to(align);
 }
 
 pub(crate) fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
 where
     Ty: TyAbiInterface<'a, C> + Copy,
     C: HasDataLayout,
 {
+    // mips64 argument passing is also affected by the alignment of aggregates.
+    // See mips.rs for how the offset is used.
+    let mut offset = Size::ZERO;
+
     if !fn_abi.ret.is_ignore() {
-        classify_ret(cx, &mut fn_abi.ret);
+        classify_ret(cx, &mut fn_abi.ret, &mut offset);
     }
 
     for arg in fn_abi.args.iter_mut() {
         if arg.is_ignore() {
             continue;
         }
-        classify_arg(cx, arg);
+        classify_arg(cx, arg, &mut offset);
     }
 }
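
The padding arithmetic introduced above is compact, so here is a minimal standalone sketch of what it computes. This is illustration only, not rustc code: `Size` and `Align` are simplified byte-based stand-ins for the `rustc_abi` types, `pads_for` is a hypothetical helper, and the 8/16-byte clamp mirrors `dl.i64_align`/`dl.i128_align` on mips64.

#[derive(Clone, Copy, Debug)]
struct Size(u64); // byte count, standing in for rustc_abi::Size

#[derive(Clone, Copy)]
struct Align(u64); // byte alignment (a power of two), standing in for rustc_abi::Align

impl Size {
    fn is_aligned(self, align: Align) -> bool {
        self.0 % align.0 == 0
    }
    fn align_to(self, align: Align) -> Size {
        Size(self.0.div_ceil(align.0) * align.0)
    }
}

/// Walks (size, alignment) pairs the way compute_abi_info walks the argument
/// list, returning for each argument whether classify_arg would request an
/// i32 pad in front of it.
fn pads_for(args: &[(Size, Align)]) -> Vec<bool> {
    let (i64_align, i128_align) = (Align(8), Align(16));
    let mut offset = Size(0);
    args.iter()
        .map(|&(size, abi_align)| {
            // Mirrors `arg.layout.align.abi.max(dl.i64_align).min(dl.i128_align)`:
            // the effective alignment is clamped to the 8..=16 byte range.
            let align = Align(abi_align.0.clamp(i64_align.0, i128_align.0));
            let pad_i32 = !offset.is_aligned(align);
            // Mirrors `*offset = offset.align_to(align) + size.align_to(align)`.
            offset = Size(offset.align_to(align).0 + size.align_to(align).0);
            pad_i32
        })
        .collect()
}

fn main() {
    // A u64 followed by a 16-byte-aligned aggregate: the second argument
    // would start at offset 8, which is not 16-aligned, so it gets a pad.
    assert_eq!(pads_for(&[(Size(8), Align(8)), (Size(16), Align(16))]), [false, true]);
}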
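
The chunking logic itself is unchanged by this diff, only re-indented into the new `else` branch. A similarly simplified sketch of what the `FieldsShape::Arbitrary` arm produces, with register classes reduced to plain strings and field offsets to byte counts (again illustration, not rustc code):

/// Builds the register prefix the way the Arbitrary arm does: gaps between
/// 8-byte-aligned f64 fields are covered with i64 chunks, each aligned double
/// claims an f64 slot, and the prefix holds at most eight entries.
fn build_prefix(f64_field_offsets: &[u64]) -> Vec<&'static str> {
    let mut prefix = Vec::with_capacity(8);
    let mut last_offset = 0u64;
    for &offset in f64_field_offsets {
        if offset % 8 != 0 {
            continue; // only aligned doubles are special-cased
        }
        // Insert enough i64 chunks to cover [last_offset, offset).
        for _ in 0..(offset - last_offset) / 8 {
            if prefix.len() == 8 {
                return prefix;
            }
            prefix.push("i64");
        }
        if prefix.len() == 8 {
            return prefix;
        }
        prefix.push("f64");
        last_offset = offset + 8;
    }
    prefix
}

fn main() {
    // struct { x: u64, y: f64, z: f64 }: one integer chunk, then two doubles.
    assert_eq!(build_prefix(&[8, 16]), ["i64", "f64", "f64"]);
}

Bytes not covered by these prefix slots end up in the trailing `Uniform::new(Reg::i64(), rest_size)`, which is why `rest_size` subtracts eight bytes per occupied prefix slot.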