@@ -17,6 +17,8 @@ use crate::transform::MirPass;
 use std::iter;
 use std::ops::{Range, RangeFrom};
 
+crate mod cycle;
+
 const INSTR_COST: usize = 5;
 const CALL_PENALTY: usize = 25;
 const LANDINGPAD_PENALTY: usize = 50;
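The new `cycle` submodule carries the call-graph cycle detection used further down via `mir_callgraph_reachable`. The constants are the inliner's cost weights; as a minimal sketch of how such weights typically combine (the helper name and the flat tally are hypothetical, not the actual `should_inline` accounting):

```rust
const INSTR_COST: usize = 5;
const CALL_PENALTY: usize = 25;
const LANDINGPAD_PENALTY: usize = 50;

// Hypothetical tally: plain statements are cheap, calls and landing pads
// weigh more, and the total is compared against a per-callsite threshold.
fn body_cost(statements: usize, calls: usize, landing_pads: usize) -> usize {
    statements * INSTR_COST + calls * CALL_PENALTY + landing_pads * LANDINGPAD_PENALTY
}
```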
@@ -37,6 +39,9 @@ struct CallSite<'tcx> {
 
 impl<'tcx> MirPass<'tcx> for Inline {
     fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+        // If you change this optimization level, also change the level in
+        // `mir_drops_elaborated_and_const_checked` for the call to `mir_inliner_callees`.
+        // Otherwise you will get an ICE about stolen MIR.
         if tcx.sess.opts.debugging_opts.mir_opt_level < 2 {
             return;
         }
@@ -50,6 +55,8 @@ impl<'tcx> MirPass<'tcx> for Inline {
             return;
         }
 
+        let span = trace_span!("inline", body = %tcx.def_path_str(body.source.def_id()));
+        let _guard = span.enter();
         if inline(tcx, body) {
             debug!("running simplify cfg on {:?}", body.source);
             CfgSimplifier::new(body).simplify();
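The added `trace_span!`/`enter()` pair is the `tracing` crate's guard pattern: the span stays entered until the guard is dropped at end of scope. Note the binding is `_guard`, not `_`; a bare `_` would drop the guard immediately and close the span before `inline` runs. A minimal standalone sketch, assuming the `tracing` crate:

```rust
use tracing::{trace, trace_span};

fn demo() {
    let span = trace_span!("inline", body = %"example::main");
    let _guard = span.enter(); // entered until `_guard` drops at end of scope
    // `let _ = span.enter();` would drop the guard on this very line,
    // so nothing below would be recorded inside the span.
    trace!("recorded inside the `inline` span");
}
```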
@@ -90,8 +97,8 @@ struct Inliner<'tcx> {
     codegen_fn_attrs: &'tcx CodegenFnAttrs,
     /// Caller HirId.
     hir_id: hir::HirId,
-    /// Stack of inlined instances.
-    history: Vec<Instance<'tcx>>,
+    /// Stack of inlined Instances.
+    history: Vec<ty::Instance<'tcx>>,
     /// Indicates that the caller body has been modified.
     changed: bool,
 }
@@ -103,13 +110,28 @@ impl Inliner<'tcx> {
                 None => continue,
                 Some(it) => it,
             };
+            let span = trace_span!("process_blocks", %callsite.callee, ?bb);
+            let _guard = span.enter();
+
+            trace!(
+                "checking for self recursion ({:?} vs body_source: {:?})",
+                callsite.callee.def_id(),
+                caller_body.source.def_id()
+            );
+            if callsite.callee.def_id() == caller_body.source.def_id() {
+                debug!("Not inlining a function into itself");
+                continue;
+            }
 
-            if !self.is_mir_available(&callsite.callee, caller_body) {
+            if !self.is_mir_available(callsite.callee, caller_body) {
                 debug!("MIR unavailable {}", callsite.callee);
                 continue;
             }
 
+            let span = trace_span!("instance_mir", %callsite.callee);
+            let instance_mir_guard = span.enter();
             let callee_body = self.tcx.instance_mir(callsite.callee.def);
+            drop(instance_mir_guard);
             if !self.should_inline(callsite, callee_body) {
                 continue;
             }
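The second span around `instance_mir` ends early via an explicit `drop` instead of waiting for end of scope, so only the query itself is attributed to it. The same pattern in isolation (a sketch, assuming `tracing`):

```rust
use tracing::trace_span;

fn fetch() {
    let span = trace_span!("instance_mir");
    let guard = span.enter();
    // ... run the expensive query while the span is entered ...
    drop(guard); // exit the span here rather than at end of scope
    // anything from this point on is no longer inside `instance_mir`
}
```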
@@ -137,28 +159,61 @@ impl Inliner<'tcx> {
         }
     }
 
-    fn is_mir_available(&self, callee: &Instance<'tcx>, caller_body: &Body<'tcx>) -> bool {
-        if let InstanceDef::Item(_) = callee.def {
-            if !self.tcx.is_mir_available(callee.def_id()) {
-                return false;
+    #[instrument(skip(self, caller_body))]
+    fn is_mir_available(&self, callee: Instance<'tcx>, caller_body: &Body<'tcx>) -> bool {
+        match callee.def {
+            InstanceDef::Item(_) => {
+                // If there is no MIR available (either because it was not in metadata or
+                // because it has no MIR because it's an extern function), then the inliner
+                // won't cause cycles on this.
+                if !self.tcx.is_mir_available(callee.def_id()) {
+                    return false;
+                }
             }
+            // These have no own callable MIR.
+            InstanceDef::Intrinsic(_) | InstanceDef::Virtual(..) => return false,
+            // This cannot result in an immediate cycle since the callee MIR is a shim, which does
+            // not get any optimizations run on it. Any subsequent inlining may cause cycles, but we
+            // do not need to catch this here, we can wait until the inliner decides to continue
+            // inlining a second time.
+            InstanceDef::VtableShim(_)
+            | InstanceDef::ReifyShim(_)
+            | InstanceDef::FnPtrShim(..)
+            | InstanceDef::ClosureOnceShim { .. }
+            | InstanceDef::DropGlue(..)
+            | InstanceDef::CloneShim(..) => return true,
+        }
+
+        if self.tcx.is_constructor(callee.def_id()) {
+            trace!("constructors always have MIR");
+            // Constructor functions cannot cause a query cycle.
+            return true;
         }
 
         if let Some(callee_def_id) = callee.def_id().as_local() {
             let callee_hir_id = self.tcx.hir().local_def_id_to_hir_id(callee_def_id);
-            // Avoid a cycle here by using `instance_mir` only if we have
-            // a lower `HirId` than the callee. This ensures that the callee will
-            // not inline us. This trick only works without incremental compilation.
-            // So don't do it if that is enabled. Also avoid inlining into generators,
+            // Avoid inlining into generators,
             // since their `optimized_mir` is used for layout computation, which can
             // create a cycle, even when no attempt is made to inline the function
             // in the other direction.
-            !self.tcx.dep_graph.is_fully_enabled()
+            caller_body.generator_kind.is_none()
+                && (
+                    // Avoid a cycle here by using `instance_mir` only if we have
+                    // a lower `HirId` than the callee. This ensures that the callee will
+                    // not inline us. This trick only works without incremental compilation.
+                    // So don't do it if that is enabled.
+                    !self.tcx.dep_graph.is_fully_enabled()
                 && self.hir_id < callee_hir_id
-                && caller_body.generator_kind.is_none()
+                    // If we know for sure that the function we're calling will itself try to
+                    // call us, then we avoid inlining that function.
+                    || !self.tcx.mir_callgraph_reachable((callee, caller_body.source.def_id().expect_local()))
+                )
         } else {
-            // This cannot result in a cycle since the callee MIR is from another crate
-            // and is already optimized.
+            // This cannot result in an immediate cycle since the callee MIR is from another crate
+            // and is already optimized. Any subsequent inlining may cause cycles, but we do
+            // not need to catch this here, we can wait until the inliner decides to continue
+            // inlining a second time.
+            trace!("functions from other crates always have MIR");
             true
         }
     }
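`mir_callgraph_reachable` is the heart of the change: before relying on the HirId ordering trick, the inliner asks whether the callee can reach the caller through the MIR call graph. The query lives in the new `cycle` module, which is not part of this diff; what follows is only a sketch of the underlying idea as a plain DFS over hypothetical `u32` function ids with a caller-supplied `callees` function, not the actual `ty::Instance`-based implementation:

```rust
use std::collections::HashSet;

// Sketch: does `start` transitively call `target`? If so, inlining `start`
// into `target` risks a query cycle. The `callees` closure is a stand-in
// for the `mir_inliner_callees` query mentioned in the comment above.
fn callgraph_reachable(start: u32, target: u32, callees: &dyn Fn(u32) -> Vec<u32>) -> bool {
    let mut seen = HashSet::new();
    let mut stack = callees(start);
    while let Some(f) = stack.pop() {
        if f == target {
            return true;
        }
        if seen.insert(f) {
            stack.extend(callees(f));
        }
    }
    false
}
```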
@@ -203,8 +258,8 @@ impl Inliner<'tcx> {
         None
     }
 
+    #[instrument(skip(self, callee_body))]
     fn should_inline(&self, callsite: CallSite<'tcx>, callee_body: &Body<'tcx>) -> bool {
-        debug!("should_inline({:?})", callsite);
         let tcx = self.tcx;
 
         if callsite.fn_sig.c_variadic() {
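`#[instrument]` (from `tracing`) replaces the hand-written `debug!("should_inline({:?})", ...)`: it opens a span named after the function and records the remaining arguments as fields, while `skip(...)` leaves out the ones that are noisy or lack a useful `Debug` form. A minimal sketch with hypothetical names:

```rust
use tracing::{debug, instrument};

// `skip(big_input)` keeps the large buffer out of the span's fields;
// the arguments that are recorded must implement `fmt::Debug`.
#[instrument(skip(big_input))]
fn process(id: u32, big_input: &[u8]) -> usize {
    debug!("processing {} bytes", big_input.len());
    big_input.len()
}
```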
@@ -333,7 +388,9 @@ impl Inliner<'tcx> {
                 if let Ok(Some(instance)) =
                     Instance::resolve(self.tcx, self.param_env, def_id, substs)
                 {
-                    if callsite.callee == instance || self.history.contains(&instance) {
+                    if callsite.callee.def_id() == instance.def_id()
+                        || self.history.contains(&instance)
+                    {
                         debug!("`callee` is recursive - not inlining");
                         return false;
                     }
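Comparing `def_id()`s instead of whole `Instance`s widens the recursion check: two different generic instantiations of one function compare unequal as `Instance`s but share a `DefId`. A hedged illustration with hypothetical names:

```rust
// `choose::<i32>` and `choose::<u8>` are distinct `Instance`s of one `DefId`,
// so the old `callsite.callee == instance` test missed this shape of
// recursion, while the new `def_id()` comparison rejects it.
fn choose<T>(flag: bool, value: T) -> T {
    if flag {
        let _ = choose(false, 0u8); // same `DefId`, different substs
    }
    value
}

fn caller() -> i32 {
    choose(true, 0) // inlining this body pulls in another call to `choose`
}
```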