@@ -89,6 +89,7 @@ use rustc_index::{IndexSlice, IndexVec};
8989use rustc_middle:: middle:: region;
9090use rustc_middle:: mir:: * ;
9191use rustc_middle:: thir:: { ExprId , LintLevel } ;
92+ use rustc_middle:: ty:: { self , TyCtxt } ;
9293use rustc_middle:: { bug, span_bug} ;
9394use rustc_session:: lint:: Level ;
9495use rustc_span:: source_map:: Spanned ;
@@ -883,6 +884,22 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
883884 block. unit ( )
884885 }
885886
887+ fn is_async_drop_impl (
888+ tcx : TyCtxt < ' tcx > ,
889+ local_decls : & IndexVec < Local , LocalDecl < ' tcx > > ,
890+ typing_env : ty:: TypingEnv < ' tcx > ,
891+ local : Local ,
892+ ) -> bool {
893+ let ty = local_decls[ local] . ty ;
894+ if ty. is_async_drop ( tcx, typing_env) || ty. is_coroutine ( ) {
895+ return true ;
896+ }
897+ ty. needs_async_drop ( tcx, typing_env)
898+ }
899+ fn is_async_drop ( & self , local : Local ) -> bool {
900+ Self :: is_async_drop_impl ( self . tcx , & self . local_decls , self . typing_env ( ) , local)
901+ }
902+
// NOTE(review): rendered unified diff, not compilable source. The `@@` hunk
// header below elides new-file lines 905-907 (where `needs_cleanup` and
// `is_coroutine` are presumably computed — TODO confirm against the real file),
// so this function is only partially visible here.
886903 fn leave_top_scope ( & mut self , block : BasicBlock ) -> BasicBlock {
887904 // If we are emitting a `drop` statement, we need to have the cached
888905 // diverge cleanup pads ready in case that drop panics.
@@ -891,14 +908,22 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
891908 let unwind_to = if needs_cleanup { self . diverge_cleanup ( ) } else { DropIdx :: MAX } ;
892909
893910 let scope = self . scopes . scopes . last ( ) . expect ( "leave_top_scope called with no scopes" ) ;
// A dropline (coroutine-drop path) is only needed when this scope drops a
// value whose type requires async drop.
911+ let has_async_drops = is_coroutine
912+ && scope. drops . iter ( ) . any ( |v| v. kind == DropKind :: Value && self . is_async_drop ( v. local ) ) ;
913+ let dropline_to = if has_async_drops { Some ( self . diverge_dropline ( ) ) } else { None } ;
// `scope` is re-fetched because `diverge_dropline` takes `&mut self`, which
// invalidates the shared borrow taken above.
914+ let scope = self . scopes . scopes . last ( ) . expect ( "leave_top_scope called with no scopes" ) ;
// Hoisted into a local so the closure below can capture it by value instead
// of borrowing `self` (whose `cfg`/`scopes` fields are passed as `&mut`).
915+ let typing_env = self . typing_env ( ) ;
894916 build_scope_drops (
895917 & mut self . cfg ,
896918 & mut self . scopes . unwind_drops ,
919+ & mut self . scopes . coroutine_drops ,
897920 scope,
898921 block,
899922 unwind_to,
923+ dropline_to,
900924 is_coroutine && needs_cleanup,
901925 self . arg_count ,
// Async-drop predicate in associated-fn form: avoids re-borrowing `self`.
926+ |v : Local | Self :: is_async_drop_impl ( self . tcx , & self . local_decls , typing_env, v) ,
902927 )
903928 . into_block ( )
904929 }
@@ -1314,22 +1339,22 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
13141339 self . scopes . unwind_drops . add_entry_point ( start, next_drop) ;
13151340 }
13161341
1317- /// Sets up a path that performs all required cleanup for dropping a
1318- /// coroutine, starting from the given block that ends in
1319- /// [TerminatorKind::Yield].
1320- ///
1321- /// This path terminates in CoroutineDrop.
1322- pub ( crate ) fn coroutine_drop_cleanup ( & mut self , yield_block : BasicBlock ) {
1342+ /// Returns the [DropIdx] for the innermost drop for dropline (coroutine drop path).
1343+ /// The `DropIdx` will be created if it doesn't already exist.
1344+ fn diverge_dropline ( & mut self ) -> DropIdx {
1345+ // It is okay to use dummy span because the getting scope index on the topmost scope
1346+ // must always succeed.
1347+ self . diverge_dropline_target ( self . scopes . topmost ( ) , DUMMY_SP )
1348+ }
1349+
1350+ /// Similar to diverge_cleanup_target, but for dropline (coroutine drop path)
1351+ fn diverge_dropline_target ( & mut self , target_scope : region:: Scope , span : Span ) -> DropIdx {
1323 1352 debug_assert ! (
// NOTE(review): the interleaved `-` lines below are the *removed* body of the
// old `coroutine_drop_cleanup`; only the `+`/context lines belong here.
1324- matches!(
1325- self . cfg. block_data( yield_block) . terminator( ) . kind,
1326- TerminatorKind :: Yield { .. }
1327- ) ,
1328- "coroutine_drop_cleanup called on block with non-yield terminator."
1353+ self . coroutine. is_some( ) ,
1354+ "diverge_dropline_target is valid only for coroutine"
13291355 ) ;
1330- let ( uncached_scope, mut cached_drop) = self
1331- . scopes
1332- . scopes
// Search only the scopes up to and including the target scope for an
// already-cached dropline entry.
1356+ let target = self . scopes . scope_index ( target_scope, span) ;
1357+ let ( uncached_scope, mut cached_drop) = self . scopes . scopes [ ..=target]
13331358 . iter ( )
13341359 . enumerate ( )
13351360 . rev ( )
// NOTE(review): the hunk boundary below elides new-file lines 1361-1362 —
// presumably the `find_map` closure reading `cached_coroutine_drop_block`;
// TODO confirm against the real file.
@@ -1338,13 +1363,34 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
13381363 } )
13391364 . unwrap_or ( ( 0 , ROOT_NODE ) ) ;
13401365
// Everything up to `target` is already cached: nothing new to add.
1366+ if uncached_scope > target {
1367+ return cached_drop;
1368+ }
1369+
// Append the drops of each not-yet-cached scope to the coroutine drop tree
// and remember the result per scope for future calls.
1370+ for scope in & mut self . scopes . scopes [ uncached_scope..=target] {
13421371 for drop in & scope. drops {
13431372 cached_drop = self . scopes . coroutine_drops . add_drop ( * drop, cached_drop) ;
13441373 }
13451374 scope. cached_coroutine_drop_block = Some ( cached_drop) ;
13461375 }
13471376
1377+ cached_drop
1378+ }
1379+
1380+ /// Sets up a path that performs all required cleanup for dropping a
1381+ /// coroutine, starting from the given block that ends in
1382+ /// [TerminatorKind::Yield].
1383+ ///
1384+ /// This path terminates in CoroutineDrop.
1385+ pub ( crate ) fn coroutine_drop_cleanup ( & mut self , yield_block : BasicBlock ) {
1386+ debug_assert ! (
1387+ matches!(
1388+ self . cfg. block_data( yield_block) . terminator( ) . kind,
1389+ TerminatorKind :: Yield { .. }
1390+ ) ,
1391+ "coroutine_drop_cleanup called on block with non-yield terminator."
1392+ ) ;
1393+ let cached_drop = self . diverge_dropline ( ) ;
13481394 self . scopes . coroutine_drops . add_entry_point ( yield_block, cached_drop) ;
13491395 }
13501396
@@ -1438,18 +1484,26 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
// NOTE(review): rendered unified diff; several `@@` hunk headers below elide
// interior lines of this function, and its tail extends past this view, so
// only the visible fragments are annotated here.
14381484/// * `unwind_to`, describes the drops that would occur at this point in the code if a
14391485/// panic occurred (a subset of the drops in `scope`, since we sometimes elide StorageDead and other
14401486/// instructions on unwinding)
1487+ /// * `dropline_to`, describes the drops that would occur at this point in the code if a
1488+ /// coroutine drop occurred.
14411489/// * `storage_dead_on_unwind`, if true, then we should emit `StorageDead` even when unwinding
14421490/// * `arg_count`, number of MIR local variables corresponding to fn arguments (used to assert that we don't drop those)
1443- fn build_scope_drops < ' tcx > (
// The function is now generic over `F`, the async-drop predicate, so it can
// query local types without borrowing the whole builder.
1491+ fn build_scope_drops < ' tcx , F > (
14441492 cfg : & mut CFG < ' tcx > ,
14451493 unwind_drops : & mut DropTree ,
1494+ coroutine_drops : & mut DropTree ,
14461495 scope : & Scope ,
14471496 block : BasicBlock ,
14481497 unwind_to : DropIdx ,
// `None` when no dropline is needed (non-coroutine, or no async drops).
1498+ dropline_to : Option < DropIdx > ,
14491499 storage_dead_on_unwind : bool ,
14501500 arg_count : usize ,
1451- ) -> BlockAnd < ( ) > {
1452- debug ! ( "build_scope_drops({:?} -> {:?})" , block, scope) ;
1501+ is_async_drop : F ,
1502+ ) -> BlockAnd < ( ) >
1503+ where
1504+ F : Fn ( Local ) -> bool ,
1505+ {
1506+ debug ! ( "build_scope_drops({:?} -> {:?}), dropline_to={:?}" , block, scope, dropline_to) ;
14531507
14541508 // Build up the drops in evaluation order. The end result will
14551509 // look like:
@@ -1482,6 +1536,9 @@ fn build_scope_drops<'tcx>(
14821536 // will branch to `drops[n]`.
14831537 let mut block = block;
14841538
1539+ // `dropline_to` indicates what needs to be dropped should coroutine drop occur.
// Rebound as mutable: it is walked forward in lock-step with `unwind_to`
// as drops are emitted below.
1540+ let mut dropline_to = dropline_to;
1541+
14851542 for drop_data in scope. drops . iter ( ) . rev ( ) {
14861543 let source_info = drop_data. source_info ;
14871544 let local = drop_data. local ;
@@ -1498,6 +1555,12 @@ fn build_scope_drops<'tcx>(
14981555 debug_assert_eq ! ( unwind_drops. drops[ unwind_to] . data. kind, drop_data. kind) ;
14991556 unwind_to = unwind_drops. drops [ unwind_to] . next ;
15001557
// Advance the dropline cursor in the same way the unwind cursor is
// advanced just above; the asserts check the two trees stay in sync.
1558+ if let Some ( idx) = dropline_to {
1559+ debug_assert_eq ! ( coroutine_drops. drops[ idx] . data. local, drop_data. local) ;
1560+ debug_assert_eq ! ( coroutine_drops. drops[ idx] . data. kind, drop_data. kind) ;
1561+ dropline_to = Some ( coroutine_drops. drops [ idx] . next ) ;
1562+ }
1563+
15011564 // If the operand has been moved, and we are not on an unwind
15021565 // path, then don't generate the drop. (We only take this into
15031566 // account for non-unwind paths so as not to disturb the
@@ -1507,6 +1570,12 @@ fn build_scope_drops<'tcx>(
15071570 }
15081571
15091572 unwind_drops. add_entry_point ( block, unwind_to) ;
// Only async-droppable locals get a coroutine-drop edge from this block;
// uses a let-chain (stable since Rust 2024 edition tooling in-tree).
1573+ if let Some ( to) = dropline_to
1574+ && is_async_drop ( local)
1575+ {
1576+ coroutine_drops. add_entry_point ( block, to) ;
1577+ }
1578+
15101579 let next = cfg. start_new_block ( ) ;
15111580 cfg. terminate (
15121581 block,
@@ -1564,6 +1633,11 @@ fn build_scope_drops<'tcx>(
15641633 debug_assert_eq ! ( unwind_drops. drops[ unwind_to] . data. kind, drop_data. kind) ;
15651634 unwind_to = unwind_drops. drops [ unwind_to] . next ;
15661635 }
// Same cursor advance as above, on the StorageDead (storage-kind) path.
1636+ if let Some ( idx) = dropline_to {
1637+ debug_assert_eq ! ( coroutine_drops. drops[ idx] . data. local, drop_data. local) ;
1638+ debug_assert_eq ! ( coroutine_drops. drops[ idx] . data. kind, drop_data. kind) ;
1639+ dropline_to = Some ( coroutine_drops. drops [ idx] . next ) ;
1640+ }
15671641 // Only temps and vars need their storage dead.
15681642 assert ! ( local. index( ) > arg_count) ;
15691643 cfg. push ( block, Statement { source_info, kind : StatementKind :: StorageDead ( local) } ) ;
@@ -1619,6 +1693,39 @@ impl<'a, 'tcx: 'a> Builder<'a, 'tcx> {
// NOTE(review): fragment — the enclosing function's start is elided by the
// diff; only this added dropline-linking section plus its tail are visible.
16191693 }
16201694 }
16211695 }
1696+ // Link the exit drop tree to dropline drop tree (coroutine drop path) for async drops
1697+ if is_coroutine
1698+ && drops. drops . iter ( ) . any ( |DropNode { data, next : _ } | {
1699+ data. kind == DropKind :: Value && self . is_async_drop ( data. local )
1700+ } )
1701+ {
// Mirror every node of the exit tree into the coroutine drop tree, keeping a
// parallel index map (`dropline_indices`) seeded with the dropline target.
1702+ let dropline_target = self . diverge_dropline_target ( else_scope, span) ;
1703+ let mut dropline_indices = IndexVec :: from_elem_n ( dropline_target, 1 ) ;
1704+ for ( drop_idx, drop_data) in drops. drops . iter_enumerated ( ) . skip ( 1 ) {
1705+ match drop_data. data . kind {
// NOTE(review): the two arms are identical except for the extra
// `add_entry_point` in the Value arm — a candidate for merging.
1706+ DropKind :: Storage | DropKind :: ForLint => {
1707+ let coroutine_drop = self
1708+ . scopes
1709+ . coroutine_drops
1710+ . add_drop ( drop_data. data , dropline_indices[ drop_data. next ] ) ;
1711+ dropline_indices. push ( coroutine_drop) ;
1712+ }
1713+ DropKind :: Value => {
1714+ let coroutine_drop = self
1715+ . scopes
1716+ . coroutine_drops
1717+ . add_drop ( drop_data. data , dropline_indices[ drop_data. next ] ) ;
// Async-droppable values additionally become entry points into the
// coroutine drop tree from their materialized block.
// NOTE(review): `blocks[ drop_idx ] . unwrap ( )` assumes the block was
// materialized earlier in this (elided) function — TODO confirm.
1718+ if self . is_async_drop ( drop_data. data . local ) {
1719+ self . scopes . coroutine_drops . add_entry_point (
1720+ blocks[ drop_idx] . unwrap ( ) ,
1721+ dropline_indices[ drop_data. next ] ,
1722+ ) ;
1723+ }
1724+ dropline_indices. push ( coroutine_drop) ;
1725+ }
1726+ }
1727+ }
1728+ }
16221729 blocks[ ROOT_NODE ] . map ( BasicBlock :: unit)
16231730 }
16241731
@@ -1663,9 +1770,11 @@ impl<'a, 'tcx: 'a> Builder<'a, 'tcx> {
// NOTE(review): fragment of a larger (elided) function building the
// coroutine drop tree.
16631770 // to be captured by the coroutine. I'm not sure how important this
16641771 // optimization is, but it is here.
16651772 for ( drop_idx, drop_node) in drops. drops . iter_enumerated ( ) {
1666- if let DropKind :: Value = drop_node. data . kind {
// The added `let Some ( bb )` guard replaces the old unconditional
// `.unwrap ( )` below: drop nodes without a materialized block are now
// skipped instead of panicking.
1773+ if let DropKind :: Value = drop_node. data . kind
1774+ && let Some ( bb) = blocks[ drop_idx]
1775+ {
16671776 debug_assert ! ( drop_node. next < drops. drops. next_index( ) ) ;
1668- drops. entry_points . push ( ( drop_node. next , blocks [ drop_idx ] . unwrap ( ) ) ) ;
1777+ drops. entry_points . push ( ( drop_node. next , bb ) ) ;
16691778 }
16701779 }
16711780 Self :: build_unwind_tree ( cfg, drops, fn_span, resume_block) ;
@@ -1717,6 +1826,8 @@ impl<'tcx> DropTreeBuilder<'tcx> for CoroutineDrop {
// NOTE(review): fragment of `DropTreeBuilder for CoroutineDrop` (starts and
// ends outside this diff hunk). The added arm lets the coroutine drop tree
// link through `TerminatorKind::Drop` terminators as well as `Yield` —
// presumably for async-drop edges; TODO confirm against the real file.
17171826 let term = cfg. block_data_mut ( from) . terminator_mut ( ) ;
17181827 if let TerminatorKind :: Yield { ref mut drop, .. } = term. kind {
17191828 * drop = Some ( to) ;
1829+ } else if let TerminatorKind :: Drop { ref mut drop, .. } = term. kind {
1830+ * drop = Some ( to) ;
17201831 } else {
17211832 span_bug ! (
17221833 term. source_info. span,
0 commit comments