 //! The global cache has to be completely unobservable, while the per-cycle cache may impact
 //! behavior as long as the resulting behavior is still correct.
 use std::cmp::Ordering;
-use std::collections::BTreeMap;
 use std::collections::hash_map::Entry;
+use std::collections::{BTreeMap, btree_map};
 use std::fmt::Debug;
 use std::hash::Hash;
+use std::iter;
 use std::marker::PhantomData;
 
 use derive_where::derive_where;
@@ -230,13 +231,19 @@ impl AvailableDepth {
     }
 }
 
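+/// A single cycle head: the paths from the dependent goal to this head,
+/// together with the ways this head has been used in cycles.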
+#[derive(Clone, Copy, Debug)]
+struct CycleHead {
+    paths_to_head: PathsToNested,
+    usage_kind: UsageKind,
+}
+
 /// All cycle heads a given goal depends on, ordered by their stack depth.
 ///
 /// We also track all paths from this goal to that head. This is necessary
 /// when rebasing provisional cache results.
 #[derive(Clone, Debug, Default)]
 struct CycleHeads {
-    heads: BTreeMap<StackDepth, PathsToNested>,
+    heads: BTreeMap<StackDepth, CycleHead>,
 }
 
 impl CycleHeads {
@@ -256,32 +263,32 @@ impl CycleHeads {
         self.heads.first_key_value().map(|(k, _)| *k)
     }
 
-    fn remove_highest_cycle_head(&mut self) -> PathsToNested {
+    fn remove_highest_cycle_head(&mut self) -> CycleHead {
         let last = self.heads.pop_last();
         last.unwrap().1
     }
 
-    fn insert(&mut self, head: StackDepth, path_from_entry: impl Into<PathsToNested> + Copy) {
-        *self.heads.entry(head).or_insert(path_from_entry.into()) |= path_from_entry.into();
+    fn insert(
+        &mut self,
+        head_index: StackDepth,
+        path_from_entry: impl Into<PathsToNested> + Copy,
+        usage_kind: UsageKind,
+    ) {
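+        // If this head is already tracked, union the paths to it and merge
+        // the usage kinds instead of overwriting the existing entry.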
+        match self.heads.entry(head_index) {
+            btree_map::Entry::Vacant(entry) => {
+                entry.insert(CycleHead { paths_to_head: path_from_entry.into(), usage_kind });
+            }
+            btree_map::Entry::Occupied(entry) => {
+                let head = entry.into_mut();
+                head.paths_to_head |= path_from_entry.into();
+                head.usage_kind = head.usage_kind.merge(usage_kind);
+            }
+        }
     }
 
-    fn iter(&self) -> impl Iterator<Item = (StackDepth, PathsToNested)> + '_ {
+    fn iter(&self) -> impl Iterator<Item = (StackDepth, CycleHead)> + '_ {
         self.heads.iter().map(|(k, v)| (*k, *v))
     }
-
-    /// Update the cycle heads of a goal at depth `this` given the cycle heads
-    /// of a nested goal. This merges the heads after filtering the parent goal
-    /// itself.
-    fn extend_from_child(&mut self, this: StackDepth, step_kind: PathKind, child: &CycleHeads) {
-        for (&head, &path_from_entry) in child.heads.iter() {
-            match head.cmp(&this) {
-                Ordering::Less => {}
-                Ordering::Equal => continue,
-                Ordering::Greater => unreachable!(),
-            }
-            self.insert(head, path_from_entry.extend_with(step_kind));
-        }
-    }
 }
 
 bitflags::bitflags! {
@@ -487,9 +494,6 @@ impl<X: Cx> EvaluationResult<X> {
 
 pub struct SearchGraph<D: Delegate<Cx = X>, X: Cx = <D as Delegate>::Cx> {
     root_depth: AvailableDepth,
-    /// The stack of goals currently being computed.
-    ///
-    /// An element is *deeper* in the stack if its index is *lower*.
     stack: Stack<X>,
     /// The provisional cache contains entries for already computed goals which
     /// still depend on goals higher-up in the stack. We don't move them to the
@@ -511,6 +515,7 @@ pub struct SearchGraph<D: Delegate<Cx = X>, X: Cx = <D as Delegate>::Cx> {
 /// cache entry.
 enum UpdateParentGoalCtxt<'a, X: Cx> {
     Ordinary(&'a NestedGoals<X>),
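+    /// A cycle was encountered whose head is still on the stack.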
+    CycleOnStack(X::Input),
     ProvisionalCacheHit,
 }
 
@@ -532,21 +537,42 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
         stack: &mut Stack<X>,
         step_kind_from_parent: PathKind,
         required_depth_for_nested: usize,
-        heads: &CycleHeads,
+        heads: impl Iterator<Item = (StackDepth, CycleHead)>,
         encountered_overflow: bool,
         context: UpdateParentGoalCtxt<'_, X>,
     ) {
-        if let Some(parent_index) = stack.last_index() {
-            let parent = &mut stack[parent_index];
+        if let Some((parent_index, parent)) = stack.last_mut_with_index() {
             parent.required_depth = parent.required_depth.max(required_depth_for_nested + 1);
             parent.encountered_overflow |= encountered_overflow;
 
-            parent.heads.extend_from_child(parent_index, step_kind_from_parent, heads);
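+            // Propagate the nested goal's cycle heads to the parent. If the
+            // parent is itself one of these heads, track its usage instead.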
+            for (head_index, head) in heads {
+                match head_index.cmp(&parent_index) {
+                    Ordering::Less => parent.heads.insert(
+                        head_index,
+                        head.paths_to_head.extend_with(step_kind_from_parent),
+                        head.usage_kind,
+                    ),
+                    Ordering::Equal => {
+                        let usage_kind = parent
+                            .has_been_used
+                            .map_or(head.usage_kind, |prev| prev.merge(head.usage_kind));
+                        parent.has_been_used = Some(usage_kind);
+                    }
+                    Ordering::Greater => unreachable!(),
+                }
+            }
             let parent_depends_on_cycle = match context {
                 UpdateParentGoalCtxt::Ordinary(nested_goals) => {
                     parent.nested_goals.extend_from_child(step_kind_from_parent, nested_goals);
                     !nested_goals.is_empty()
                 }
+                UpdateParentGoalCtxt::CycleOnStack(head) => {
+                    // We look up provisional cache entries before detecting cycles.
+                    // We therefore can't use a global cache entry if it contains a cycle
+                    // whose head is in the provisional cache.
+                    parent.nested_goals.insert(head, step_kind_from_parent.into());
+                    true
+                }
                 UpdateParentGoalCtxt::ProvisionalCacheHit => true,
             };
             // Once we've got goals which encountered overflow or a cycle,
@@ -674,7 +700,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
             &mut self.stack,
             step_kind_from_parent,
             evaluation_result.required_depth,
-            &evaluation_result.heads,
+            evaluation_result.heads.iter(),
             evaluation_result.encountered_overflow,
             UpdateParentGoalCtxt::Ordinary(&evaluation_result.nested_goals),
         );
@@ -772,7 +798,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
         stack_entry: &StackEntry<X>,
         mut mutate_result: impl FnMut(X::Input, X::Result) -> X::Result,
     ) {
-        let popped_head = self.stack.next_index();
+        let popped_head_index = self.stack.next_index();
         #[allow(rustc::potential_query_instability)]
         self.provisional_cache.retain(|&input, entries| {
             entries.retain_mut(|entry| {
@@ -782,7 +808,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
                     path_from_head,
                     result,
                 } = entry;
-                let ep = if heads.highest_cycle_head() == popped_head_index {
+                let popped_head = if heads.highest_cycle_head() == popped_head_index {
                     heads.remove_highest_cycle_head()
                 } else {
                     return true;
@@ -795,9 +821,14 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
                 //
                 // After rebasing the cycles `hph` will go through `e`. We need to make
                 // sure that for all possible paths `hep`, `heph` is equal to `hph`.
-                for (h, ph) in stack_entry.heads.iter() {
-                    let hp =
-                        Self::cycle_path_kind(&self.stack, stack_entry.step_kind_from_parent, h);
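+                // `ep` are the paths from the cache entry `e` to the popped head `p`,
+                // and `ph` the paths from `p` to the remaining head `h`.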
+                let ep = popped_head.paths_to_head;
+                for (head_index, head) in stack_entry.heads.iter() {
+                    let ph = head.paths_to_head;
+                    let hp = Self::cycle_path_kind(
+                        &self.stack,
+                        stack_entry.step_kind_from_parent,
+                        head_index,
+                    );
 
                     // We first validate that all cycles while computing `p` would stay
                     // the same if we were to recompute it as a nested goal of `e`.
@@ -817,7 +848,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
                     // the heads of `e` to make sure that rebasing `e` again also considers
                     // them.
                     let eph = ep.extend_with_paths(ph);
-                    heads.insert(h, eph);
+                    heads.insert(head_index, eph, head.usage_kind);
                 }
 
                 let Some(head) = heads.opt_highest_cycle_head() else {
@@ -877,11 +908,10 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
             &mut self.stack,
             step_kind_from_parent,
             0,
-            heads,
+            heads.iter(),
             encountered_overflow,
             UpdateParentGoalCtxt::ProvisionalCacheHit,
         );
-        debug_assert!(self.stack[head].has_been_used.is_some());
         debug!(?head, ?path_from_head, "provisional cache hit");
         return Some(result);
     }
@@ -993,12 +1023,12 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
 
         // We don't move cycle participants to the global cache, so the
         // cycle heads are always empty.
-        let heads = Default::default();
+        let heads = iter::empty();
         Self::update_parent_goal(
             &mut self.stack,
             step_kind_from_parent,
             required_depth,
-            &heads,
+            heads,
             encountered_overflow,
             UpdateParentGoalCtxt::Ordinary(nested_goals),
         );
@@ -1014,34 +1044,31 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
         input: X::Input,
         step_kind_from_parent: PathKind,
     ) -> Option<X::Result> {
-        let head = self.stack.find(input)?;
+        let head_index = self.stack.find(input)?;
         // We have a nested goal which directly relies on a goal deeper in the stack.
         //
         // We start by tagging all cycle participants, as that's necessary for caching.
         //
         // Finally we can return either the provisional response or the initial response
         // in case we're in the first fixpoint iteration for this goal.
-        let path_kind = Self::cycle_path_kind(&self.stack, step_kind_from_parent, head);
-        debug!(?path_kind, "encountered cycle with depth {head:?}");
-        let usage_kind = UsageKind::Single(path_kind);
-        self.stack[head].has_been_used =
-            Some(self.stack[head].has_been_used.map_or(usage_kind, |prev| prev.merge(usage_kind)));
-
-        // Subtle: when encountering a cyclic goal, we still first checked for overflow,
-        // so we have to update the reached depth.
-        let last_index = self.stack.last_index().unwrap();
-        let last = &mut self.stack[last_index];
-        last.required_depth = last.required_depth.max(1);
-
-        last.nested_goals.insert(input, step_kind_from_parent.into());
-        last.nested_goals.insert(last.input, PathsToNested::EMPTY);
-        if last_index != head {
-            last.heads.insert(head, step_kind_from_parent);
-        }
+        let path_kind = Self::cycle_path_kind(&self.stack, step_kind_from_parent, head_index);
+        debug!(?path_kind, "encountered cycle with depth {head_index:?}");
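+        // The current goal reaches the head in a single step, so the paths
+        // to the head currently consist of only `step_kind_from_parent`.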
+        let head = CycleHead {
+            paths_to_head: step_kind_from_parent.into(),
+            usage_kind: UsageKind::Single(path_kind),
+        };
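+        // Reuse the shared bookkeeping in `update_parent_goal`: this bumps the
+        // parent's required depth, tracks the cycle head, and records `input`
+        // as a nested goal of the parent.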
+        Self::update_parent_goal(
+            &mut self.stack,
+            step_kind_from_parent,
+            0,
+            iter::once((head_index, head)),
+            false,
+            UpdateParentGoalCtxt::CycleOnStack(input),
+        );
 
         // Return the provisional result or, if we're in the first iteration,
         // start with no constraints.
-        if let Some(result) = self.stack[head].provisional_result {
+        if let Some(result) = self.stack[head_index].provisional_result {
             Some(result)
         } else {
             Some(D::initial_provisional_result(cx, path_kind, input))