 //! The global cache has to be completely unobservable, while the per-cycle cache may impact
 //! behavior as long as the resulting behavior is still correct.
 use std::cmp::Ordering;
-use std::collections::BTreeMap;
 use std::collections::hash_map::Entry;
+use std::collections::{BTreeMap, btree_map};
 use std::fmt::Debug;
 use std::hash::Hash;
+use std::iter;
 use std::marker::PhantomData;

 use derive_where::derive_where;
@@ -230,13 +231,19 @@ impl AvailableDepth {
     }
 }

+#[derive(Clone, Copy, Debug)]
+struct CycleHead {
+    paths_to_head: PathsToNested,
+    usage_kind: UsageKind,
+}
+
 /// All cycle heads a given goal depends on, ordered by their stack depth.
 ///
 /// We also track all paths from this goal to that head. This is necessary
 /// when rebasing provisional cache results.
 #[derive(Clone, Debug, Default)]
 struct CycleHeads {
-    heads: BTreeMap<StackDepth, PathsToNested>,
+    heads: BTreeMap<StackDepth, CycleHead>,
 }

 impl CycleHeads {
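The new `CycleHead` struct pairs the existing paths-to-head bitset with the usage kind of the head, so both get merged when the same head is recorded twice. Here is a minimal, self-contained sketch of that insert-or-merge behavior (the `Paths`/`Usage` types, their bit values, and the `u32` stack depths are illustrative stand-ins for `PathsToNested`, `UsageKind`, and `StackDepth`, not the actual definitions):

```rust
use std::collections::{BTreeMap, btree_map};

// Paths from the goal to a head, modeled as a tiny bitset.
#[derive(Clone, Copy, Debug, PartialEq)]
struct Paths(u8);
impl std::ops::BitOrAssign for Paths {
    fn bitor_assign(&mut self, rhs: Self) {
        self.0 |= rhs.0;
    }
}

// Whether the head was used as an inductive and/or coinductive cycle root.
#[derive(Clone, Copy, Debug, PartialEq)]
enum Usage {
    Inductive,
    Coinductive,
    Mixed,
}
impl Usage {
    fn merge(self, other: Self) -> Self {
        if self == other { self } else { Usage::Mixed }
    }
}

#[derive(Clone, Copy, Debug)]
struct Head {
    paths_to_head: Paths,
    usage: Usage,
}

#[derive(Default)]
struct Heads {
    heads: BTreeMap<u32, Head>,
}

impl Heads {
    // Mirrors the shape of `CycleHeads::insert`: union the paths, merge the usage.
    fn insert(&mut self, head_index: u32, paths: Paths, usage: Usage) {
        match self.heads.entry(head_index) {
            btree_map::Entry::Vacant(e) => {
                e.insert(Head { paths_to_head: paths, usage });
            }
            btree_map::Entry::Occupied(e) => {
                let head = e.into_mut();
                head.paths_to_head |= paths;
                head.usage = head.usage.merge(usage);
            }
        }
    }
}

fn main() {
    let mut heads = Heads::default();
    heads.insert(0, Paths(0b01), Usage::Inductive);
    heads.insert(0, Paths(0b10), Usage::Coinductive);
    let head = heads.heads[&0];
    assert_eq!(head.paths_to_head, Paths(0b11));
    assert_eq!(head.usage, Usage::Mixed);
}
```

Keeping the usage kind in the map itself is what later lets `update_parent_goal` mark the parent as `has_been_used` while walking the heads, without a separate tagging pass.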
@@ -256,32 +263,32 @@ impl CycleHeads {
         self.heads.first_key_value().map(|(k, _)| *k)
     }

-    fn remove_highest_cycle_head(&mut self) -> PathsToNested {
+    fn remove_highest_cycle_head(&mut self) -> CycleHead {
         let last = self.heads.pop_last();
         last.unwrap().1
     }

-    fn insert(&mut self, head: StackDepth, path_from_entry: impl Into<PathsToNested> + Copy) {
-        *self.heads.entry(head).or_insert(path_from_entry.into()) |= path_from_entry.into();
+    fn insert(
+        &mut self,
+        head_index: StackDepth,
+        path_from_entry: impl Into<PathsToNested> + Copy,
+        usage_kind: UsageKind,
+    ) {
+        match self.heads.entry(head_index) {
+            btree_map::Entry::Vacant(entry) => {
+                entry.insert(CycleHead { paths_to_head: path_from_entry.into(), usage_kind });
+            }
+            btree_map::Entry::Occupied(entry) => {
+                let head = entry.into_mut();
+                head.paths_to_head |= path_from_entry.into();
+                head.usage_kind = head.usage_kind.merge(usage_kind);
+            }
+        }
     }

-    fn iter(&self) -> impl Iterator<Item = (StackDepth, PathsToNested)> + '_ {
+    fn iter(&self) -> impl Iterator<Item = (StackDepth, CycleHead)> + '_ {
         self.heads.iter().map(|(k, v)| (*k, *v))
     }
-
-    /// Update the cycle heads of a goal at depth `this` given the cycle heads
-    /// of a nested goal. This merges the heads after filtering the parent goal
-    /// itself.
-    fn extend_from_child(&mut self, this: StackDepth, step_kind: PathKind, child: &CycleHeads) {
-        for (&head, &path_from_entry) in child.heads.iter() {
-            match head.cmp(&this) {
-                Ordering::Less => {}
-                Ordering::Equal => continue,
-                Ordering::Greater => unreachable!(),
-            }
-            self.insert(head, path_from_entry.extend_with(step_kind));
-        }
-    }
 }

 bitflags::bitflags! {
@@ -487,9 +494,6 @@ impl<X: Cx> EvaluationResult<X> {

 pub struct SearchGraph<D: Delegate<Cx = X>, X: Cx = <D as Delegate>::Cx> {
     root_depth: AvailableDepth,
-    /// The stack of goals currently being computed.
-    ///
-    /// An element is *deeper* in the stack if its index is *lower*.
     stack: Stack<X>,
     /// The provisional cache contains entries for already computed goals which
     /// still depend on goals higher-up in the stack. We don't move them to the
@@ -511,6 +515,7 @@ pub struct SearchGraph<D: Delegate<Cx = X>, X: Cx = <D as Delegate>::Cx> {
 /// cache entry.
 enum UpdateParentGoalCtxt<'a, X: Cx> {
     Ordinary(&'a NestedGoals<X>),
+    CycleOnStack(X::Input),
     ProvisionalCacheHit,
 }

@@ -532,21 +537,42 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
         stack: &mut Stack<X>,
         step_kind_from_parent: PathKind,
         required_depth_for_nested: usize,
-        heads: &CycleHeads,
+        heads: impl Iterator<Item = (StackDepth, CycleHead)>,
         encountered_overflow: bool,
         context: UpdateParentGoalCtxt<'_, X>,
     ) {
-        if let Some(parent_index) = stack.last_index() {
-            let parent = &mut stack[parent_index];
+        if let Some((parent_index, parent)) = stack.last_mut_with_index() {
             parent.required_depth = parent.required_depth.max(required_depth_for_nested + 1);
             parent.encountered_overflow |= encountered_overflow;

-            parent.heads.extend_from_child(parent_index, step_kind_from_parent, heads);
+            for (head_index, head) in heads {
+                match head_index.cmp(&parent_index) {
+                    Ordering::Less => parent.heads.insert(
+                        head_index,
+                        head.paths_to_head.extend_with(step_kind_from_parent),
+                        head.usage_kind,
+                    ),
+                    Ordering::Equal => {
+                        let usage_kind = parent
+                            .has_been_used
+                            .map_or(head.usage_kind, |prev| prev.merge(head.usage_kind));
+                        parent.has_been_used = Some(usage_kind);
+                    }
+                    Ordering::Greater => unreachable!(),
+                }
+            }
             let parent_depends_on_cycle = match context {
                 UpdateParentGoalCtxt::Ordinary(nested_goals) => {
                     parent.nested_goals.extend_from_child(step_kind_from_parent, nested_goals);
                     !nested_goals.is_empty()
                }
+                UpdateParentGoalCtxt::CycleOnStack(head) => {
+                    // We look up provisional cache entries before detecting cycles.
+                    // We therefore can't use a global cache entry if it contains a cycle
+                    // whose head is in the provisional cache.
+                    parent.nested_goals.insert(head, step_kind_from_parent.into());
+                    true
+                }
                 UpdateParentGoalCtxt::ProvisionalCacheHit => true,
             };
             // Once we've got goals which encountered overflow or a cycle,
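The deleted `CycleHeads::extend_from_child` is folded into `update_parent_goal`, which now walks the nested goal's heads itself: a head deeper than the parent is propagated upwards with its path extended by one step, while a head equal to the parent marks the parent itself as a cycle root. A rough sketch of that dispatch with toy types (`Head`, `StackFrame`, and the `u8` bitsets are illustrative stand-ins, not the rustc definitions):

```rust
use std::cmp::Ordering;

#[derive(Clone, Copy, Debug)]
struct Head {
    paths_to_head: u8, // toy path bitset
    usage_kind: u8,    // toy usage bitset
}

#[derive(Default, Debug)]
struct StackFrame {
    heads: Vec<(u32, Head)>,   // heads this frame depends on
    has_been_used: Option<u8>, // set once the frame is itself a cycle head
}

// Extending a path by one step; the real code uses `PathsToNested::extend_with`.
fn extend_with(paths: u8, step: u8) -> u8 {
    paths | step
}

fn update_parent(
    parent_index: u32,
    parent: &mut StackFrame,
    step_from_parent: u8,
    heads: impl Iterator<Item = (u32, Head)>,
) {
    for (head_index, head) in heads {
        match head_index.cmp(&parent_index) {
            // A head deeper in the stack: the parent depends on it too,
            // via one additional step.
            Ordering::Less => parent.heads.push((
                head_index,
                Head {
                    paths_to_head: extend_with(head.paths_to_head, step_from_parent),
                    usage_kind: head.usage_kind,
                },
            )),
            // The parent itself is the head: record its usage instead of
            // adding it to its own heads.
            Ordering::Equal => {
                let usage =
                    parent.has_been_used.map_or(head.usage_kind, |prev| prev | head.usage_kind);
                parent.has_been_used = Some(usage);
            }
            // Heads above the parent have already been popped.
            Ordering::Greater => unreachable!(),
        }
    }
}

fn main() {
    let mut parent = StackFrame::default();
    let heads = [
        (0, Head { paths_to_head: 0b01, usage_kind: 0b01 }),
        (1, Head { paths_to_head: 0b00, usage_kind: 0b10 }),
    ];
    update_parent(1, &mut parent, 0b10, heads.into_iter());
    assert_eq!(parent.heads.len(), 1);            // head 0 was propagated
    assert_eq!(parent.has_been_used, Some(0b10)); // head 1 == parent
}
```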
@@ -674,7 +700,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
             &mut self.stack,
             step_kind_from_parent,
             evaluation_result.required_depth,
-            &evaluation_result.heads,
+            evaluation_result.heads.iter(),
             evaluation_result.encountered_overflow,
             UpdateParentGoalCtxt::Ordinary(&evaluation_result.nested_goals),
         );
@@ -772,7 +798,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
         stack_entry: &StackEntry<X>,
         mut mutate_result: impl FnMut(X::Input, X::Result) -> X::Result,
     ) {
-        let popped_head = self.stack.next_index();
+        let popped_head_index = self.stack.next_index();
         #[allow(rustc::potential_query_instability)]
         self.provisional_cache.retain(|&input, entries| {
             entries.retain_mut(|entry| {
@@ -782,7 +808,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
                     path_from_head,
                     result,
                 } = entry;
-                let ep = if heads.highest_cycle_head() == popped_head {
+                let popped_head = if heads.highest_cycle_head() == popped_head_index {
                     heads.remove_highest_cycle_head()
                 } else {
                     return true;
@@ -796,9 +822,14 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
                 // After rebasing the cycles `hph` will go through `e`. We need
                 // to make sure that for all possible paths, `hep` and `heph`
                 // are equal to `hph`.
-                for (h, ph) in stack_entry.heads.iter() {
-                    let hp =
-                        Self::cycle_path_kind(&self.stack, stack_entry.step_kind_from_parent, h);
+                let ep = popped_head.paths_to_head;
+                for (head_index, head) in stack_entry.heads.iter() {
+                    let ph = head.paths_to_head;
+                    let hp = Self::cycle_path_kind(
+                        &self.stack,
+                        stack_entry.step_kind_from_parent,
+                        head_index,
+                    );

                     // We first validate that all cycles while computing `p` would stay
                     // the same if we were to recompute it as a nested goal of `e`.
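The invariant being checked here is compositional: rebasing the cache entry `p` under the popped entry `e` is only sound if every cycle path from a head `h` to `p` keeps the same kind when rerouted through `e`. A deliberately abstract sketch of that shape (the `PathKind` variants and the `compose` rule are toy stand-ins; the real check works on `PathsToNested` bitsets via `extend_with_paths`):

```rust
// Toy path kinds, ordered so that `min` acts as "weakest step wins".
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
enum PathKind {
    Inductive,
    Unknown,
    Coinductive,
}

// Toy composition rule: a composed path is only as strong as its weakest leg.
fn compose(a: PathKind, b: PathKind) -> PathKind {
    a.min(b)
}

// Rebase check: recomputing `p` as a nested goal of `e` must not change the
// kind of any cycle through a head `h` of `e`. The direct path `h -> p` must
// equal the composed path `h -> e -> p`.
fn can_rebase(hp: PathKind, he: PathKind, ep: PathKind) -> bool {
    hp == compose(he, ep)
}

fn main() {
    use PathKind::*;
    assert!(can_rebase(Inductive, Inductive, Coinductive));
    assert!(!can_rebase(Coinductive, Inductive, Coinductive));
}
```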
@@ -818,7 +849,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
                     // the heads of `e` to make sure that rebasing `e` again also considers
                     // them.
                     let eph = ep.extend_with_paths(ph);
-                    heads.insert(h, eph);
+                    heads.insert(head_index, eph, head.usage_kind);
                 }

                 let Some(head) = heads.opt_highest_cycle_head() else {
@@ -878,11 +909,10 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
                &mut self.stack,
                step_kind_from_parent,
                0,
-                heads,
+                heads.iter(),
                encountered_overflow,
                UpdateParentGoalCtxt::ProvisionalCacheHit,
            );
-            debug_assert!(self.stack[head].has_been_used.is_some());
            debug!(?head, ?path_from_head, "provisional cache hit");
            return Some(result);
        }
@@ -994,12 +1024,12 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {

         // We don't move cycle participants to the global cache, so the
         // cycle heads are always empty.
-        let heads = Default::default();
+        let heads = iter::empty();
         Self::update_parent_goal(
             &mut self.stack,
             step_kind_from_parent,
             required_depth,
-            &heads,
+            heads,
             encountered_overflow,
             UpdateParentGoalCtxt::Ordinary(nested_goals),
         );
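Changing the `heads` parameter from `&CycleHeads` to `impl Iterator<Item = (StackDepth, CycleHead)>` lets each call site pass the natural shape of its data instead of building a dummy `CycleHeads`. A small sketch of the three call-site shapes, with stand-in `u32`/`u8` item types:

```rust
use std::collections::BTreeMap;
use std::iter;

fn take_heads(heads: impl Iterator<Item = (u32, u8)>) -> usize {
    heads.count()
}

fn main() {
    // Global cache hit: cycle heads are always empty.
    assert_eq!(take_heads(iter::empty()), 0);
    // Cycle on the stack: exactly one head.
    assert_eq!(take_heads(iter::once((0, 1))), 1);
    // Finished evaluation / provisional cache hit: iterate a stored map.
    let stored = BTreeMap::from([(0u32, 1u8), (1, 2)]);
    assert_eq!(take_heads(stored.iter().map(|(&k, &v)| (k, v))), 2);
}
```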
@@ -1015,34 +1045,31 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
         input: X::Input,
         step_kind_from_parent: PathKind,
     ) -> Option<X::Result> {
-        let head = self.stack.find(input)?;
+        let head_index = self.stack.find(input)?;
         // We have a nested goal which directly relies on a goal deeper in the stack.
         //
         // We start by tagging all cycle participants, as that's necessary for caching.
         //
         // Finally we can return either the provisional response or the initial response
         // in case we're in the first fixpoint iteration for this goal.
-        let path_kind = Self::cycle_path_kind(&self.stack, step_kind_from_parent, head);
-        debug!(?path_kind, "encountered cycle with depth {head:?}");
-        let usage_kind = UsageKind::Single(path_kind);
-        self.stack[head].has_been_used =
-            Some(self.stack[head].has_been_used.map_or(usage_kind, |prev| prev.merge(usage_kind)));
-
-        // Subtle: when encountering a cyclic goal, we still first checked for overflow,
-        // so we have to update the reached depth.
-        let last_index = self.stack.last_index().unwrap();
-        let last = &mut self.stack[last_index];
-        last.required_depth = last.required_depth.max(1);
-
-        last.nested_goals.insert(input, step_kind_from_parent.into());
-        last.nested_goals.insert(last.input, PathsToNested::EMPTY);
-        if last_index != head {
-            last.heads.insert(head, step_kind_from_parent);
-        }
+        let path_kind = Self::cycle_path_kind(&self.stack, step_kind_from_parent, head_index);
+        debug!(?path_kind, "encountered cycle with depth {head_index:?}");
+        let head = CycleHead {
+            paths_to_head: step_kind_from_parent.into(),
+            usage_kind: UsageKind::Single(path_kind),
+        };
+        Self::update_parent_goal(
+            &mut self.stack,
+            step_kind_from_parent,
+            0,
+            iter::once((head_index, head)),
+            false,
+            UpdateParentGoalCtxt::CycleOnStack(input),
+        );

         // Return the provisional result or, if we're in the first iteration,
         // start with no constraints.
-        if let Some(result) = self.stack[head].provisional_result {
+        if let Some(result) = self.stack[head_index].provisional_result {
             Some(result)
         } else {
             Some(D::initial_provisional_result(cx, path_kind, input))
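For context, the final branch implements the usual fixpoint scheme described in the comments above: the first time a cycle head is hit there is no provisional result yet, so evaluation starts from an initial value, and the head is then re-evaluated until its result stops changing. A toy sketch of that loop over booleans (`initial_provisional_result` and `evaluate` here are simplifications, not the delegate's actual API):

```rust
// Toy starting value for the fixpoint iteration.
fn initial_provisional_result() -> bool {
    false
}

// Stands in for evaluating a goal whose result depends on its own
// provisional result via a cycle.
fn evaluate(provisional: bool) -> bool {
    provisional || true
}

fn main() {
    // First iteration: the cycle head has no provisional result, so the
    // cycle returns the initial provisional result.
    let mut result = evaluate(initial_provisional_result());
    // Re-evaluate with the previous result until it no longer changes.
    loop {
        let next = evaluate(result);
        if next == result {
            break;
        }
        result = next;
    }
    assert!(result);
}
```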