From e8cb28b056cf8187168223e139fb339b67e0fa72 Mon Sep 17 00:00:00 2001 From: lcnr Date: Tue, 23 Jul 2024 14:21:52 +0200 Subject: [PATCH 1/7] do not use the global solver cache for proof trees doing so requires overwriting global cache entries and generally adds significant complexity to the solver. This is also only ever done for root goals, so it feels easier to wrap the `evaluate_canonical_goal` in an ordinary query if necessary. --- compiler/rustc_middle/src/arena.rs | 4 - compiler/rustc_middle/src/ty/context.rs | 9 -- .../src/solve/inspect/build.rs | 107 +++--------------- .../src/solve/search_graph.rs | 7 +- .../src/solve/inspect/analyse.rs | 10 +- compiler/rustc_type_ir/src/interner.rs | 12 -- .../src/search_graph/global_cache.rs | 37 ++---- .../rustc_type_ir/src/search_graph/mod.rs | 87 +++++++------- compiler/rustc_type_ir/src/solve/inspect.rs | 4 +- compiler/rustc_type_ir/src/solve/mod.rs | 9 -- 10 files changed, 74 insertions(+), 212 deletions(-) diff --git a/compiler/rustc_middle/src/arena.rs b/compiler/rustc_middle/src/arena.rs index e3d7dff3c66bb..37c10b14054c5 100644 --- a/compiler/rustc_middle/src/arena.rs +++ b/compiler/rustc_middle/src/arena.rs @@ -61,10 +61,6 @@ macro_rules! arena_types { [] dtorck_constraint: rustc_middle::traits::query::DropckConstraint<'tcx>, [] candidate_step: rustc_middle::traits::query::CandidateStep<'tcx>, [] autoderef_bad_ty: rustc_middle::traits::query::MethodAutoderefBadTy<'tcx>, - [] canonical_goal_evaluation: - rustc_type_ir::solve::inspect::CanonicalGoalEvaluationStep< - rustc_middle::ty::TyCtxt<'tcx> - >, [] query_region_constraints: rustc_middle::infer::canonical::QueryRegionConstraints<'tcx>, [] type_op_subtype: rustc_middle::infer::canonical::Canonical<'tcx, diff --git a/compiler/rustc_middle/src/ty/context.rs b/compiler/rustc_middle/src/ty/context.rs index fd41668ae44c8..7509a7584f6db 100644 --- a/compiler/rustc_middle/src/ty/context.rs +++ b/compiler/rustc_middle/src/ty/context.rs @@ -112,8 +112,6 @@ impl<'tcx> Interner for TyCtxt<'tcx> { self.mk_predefined_opaques_in_body(data) } type DefiningOpaqueTypes = &'tcx ty::List; - type CanonicalGoalEvaluationStepRef = - &'tcx solve::inspect::CanonicalGoalEvaluationStep>; type CanonicalVars = CanonicalVarInfos<'tcx>; fn mk_canonical_var_infos(self, infos: &[ty::CanonicalVarInfo]) -> Self::CanonicalVars { self.mk_canonical_var_infos(infos) @@ -282,13 +280,6 @@ impl<'tcx> Interner for TyCtxt<'tcx> { self.debug_assert_args_compatible(def_id, args); } - fn intern_canonical_goal_evaluation_step( - self, - step: solve::inspect::CanonicalGoalEvaluationStep>, - ) -> &'tcx solve::inspect::CanonicalGoalEvaluationStep> { - self.arena.alloc(step) - } - fn mk_type_list_from_iter(self, args: I) -> T::Output where I: Iterator, diff --git a/compiler/rustc_next_trait_solver/src/solve/inspect/build.rs b/compiler/rustc_next_trait_solver/src/solve/inspect/build.rs index 3e266ddac71fd..28953528f6654 100644 --- a/compiler/rustc_next_trait_solver/src/solve/inspect/build.rs +++ b/compiler/rustc_next_trait_solver/src/solve/inspect/build.rs @@ -5,10 +5,9 @@ //! see the comment on [ProofTreeBuilder]. 
use std::marker::PhantomData; -use std::mem; use rustc_type_ir::inherent::*; -use rustc_type_ir::{self as ty, search_graph, Interner}; +use rustc_type_ir::{self as ty, Interner}; use crate::delegate::SolverDelegate; use crate::solve::eval_ctxt::canonical; @@ -96,33 +95,11 @@ impl WipGoalEvaluation { } } -#[derive(derivative::Derivative)] -#[derivative(PartialEq(bound = ""), Eq(bound = ""))] -pub(in crate::solve) enum WipCanonicalGoalEvaluationKind { - Overflow, - CycleInStack, - ProvisionalCacheHit, - Interned { final_revision: I::CanonicalGoalEvaluationStepRef }, -} - -impl std::fmt::Debug for WipCanonicalGoalEvaluationKind { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - Self::Overflow => write!(f, "Overflow"), - Self::CycleInStack => write!(f, "CycleInStack"), - Self::ProvisionalCacheHit => write!(f, "ProvisionalCacheHit"), - Self::Interned { final_revision: _ } => { - f.debug_struct("Interned").finish_non_exhaustive() - } - } - } -} - #[derive(derivative::Derivative)] #[derivative(PartialEq(bound = ""), Eq(bound = ""), Debug(bound = ""))] struct WipCanonicalGoalEvaluation { goal: CanonicalInput, - kind: Option>, + encountered_overflow: bool, /// Only used for uncached goals. After we finished evaluating /// the goal, this is interned and moved into `kind`. final_revision: Option>, @@ -131,25 +108,17 @@ struct WipCanonicalGoalEvaluation { impl WipCanonicalGoalEvaluation { fn finalize(self) -> inspect::CanonicalGoalEvaluation { - // We've already interned the final revision in - // `fn finalize_canonical_goal_evaluation`. - assert!(self.final_revision.is_none()); - let kind = match self.kind.unwrap() { - WipCanonicalGoalEvaluationKind::Overflow => { + inspect::CanonicalGoalEvaluation { + goal: self.goal, + kind: if self.encountered_overflow { + assert!(self.final_revision.is_none()); inspect::CanonicalGoalEvaluationKind::Overflow - } - WipCanonicalGoalEvaluationKind::CycleInStack => { - inspect::CanonicalGoalEvaluationKind::CycleInStack - } - WipCanonicalGoalEvaluationKind::ProvisionalCacheHit => { - inspect::CanonicalGoalEvaluationKind::ProvisionalCacheHit - } - WipCanonicalGoalEvaluationKind::Interned { final_revision } => { + } else { + let final_revision = self.final_revision.unwrap().finalize(); inspect::CanonicalGoalEvaluationKind::Evaluation { final_revision } - } - }; - - inspect::CanonicalGoalEvaluation { goal: self.goal, kind, result: self.result.unwrap() } + }, + result: self.result.unwrap(), + } } } @@ -315,7 +284,7 @@ impl, I: Interner> ProofTreeBuilder { ) -> ProofTreeBuilder { self.nested(|| WipCanonicalGoalEvaluation { goal, - kind: None, + encountered_overflow: false, final_revision: None, result: None, }) @@ -336,11 +305,11 @@ impl, I: Interner> ProofTreeBuilder { } } - pub fn canonical_goal_evaluation_kind(&mut self, kind: WipCanonicalGoalEvaluationKind) { + pub fn canonical_goal_evaluation_overflow(&mut self) { if let Some(this) = self.as_mut() { match this { DebugSolver::CanonicalGoalEvaluation(canonical_goal_evaluation) => { - assert_eq!(canonical_goal_evaluation.kind.replace(kind), None); + canonical_goal_evaluation.encountered_overflow = true; } _ => unreachable!(), }; @@ -554,51 +523,3 @@ impl, I: Interner> ProofTreeBuilder { } } } - -impl search_graph::ProofTreeBuilder for ProofTreeBuilder -where - D: SolverDelegate, - I: Interner, -{ - fn try_apply_proof_tree( - &mut self, - proof_tree: Option, - ) -> bool { - if !self.is_noop() { - if let Some(final_revision) = proof_tree { - let kind = 
WipCanonicalGoalEvaluationKind::Interned { final_revision }; - self.canonical_goal_evaluation_kind(kind); - true - } else { - false - } - } else { - true - } - } - - fn on_provisional_cache_hit(&mut self) { - self.canonical_goal_evaluation_kind(WipCanonicalGoalEvaluationKind::ProvisionalCacheHit); - } - - fn on_cycle_in_stack(&mut self) { - self.canonical_goal_evaluation_kind(WipCanonicalGoalEvaluationKind::CycleInStack); - } - - fn finalize_canonical_goal_evaluation( - &mut self, - tcx: I, - ) -> Option { - self.as_mut().map(|this| match this { - DebugSolver::CanonicalGoalEvaluation(evaluation) => { - let final_revision = mem::take(&mut evaluation.final_revision).unwrap(); - let final_revision = - tcx.intern_canonical_goal_evaluation_step(final_revision.finalize()); - let kind = WipCanonicalGoalEvaluationKind::Interned { final_revision }; - assert_eq!(evaluation.kind.replace(kind), None); - final_revision - } - _ => unreachable!(), - }) - } -} diff --git a/compiler/rustc_next_trait_solver/src/solve/search_graph.rs b/compiler/rustc_next_trait_solver/src/solve/search_graph.rs index fe053a506e712..1f2c65191a618 100644 --- a/compiler/rustc_next_trait_solver/src/solve/search_graph.rs +++ b/compiler/rustc_next_trait_solver/src/solve/search_graph.rs @@ -5,7 +5,7 @@ use rustc_type_ir::search_graph::{self, CycleKind, UsageKind}; use rustc_type_ir::solve::{CanonicalInput, Certainty, QueryResult}; use rustc_type_ir::Interner; -use super::inspect::{self, ProofTreeBuilder}; +use super::inspect::ProofTreeBuilder; use super::FIXPOINT_STEP_LIMIT; use crate::delegate::SolverDelegate; @@ -25,6 +25,9 @@ where const FIXPOINT_STEP_LIMIT: usize = FIXPOINT_STEP_LIMIT; type ProofTreeBuilder = ProofTreeBuilder; + fn inspect_is_noop(inspect: &mut Self::ProofTreeBuilder) -> bool { + inspect.is_noop() + } fn recursion_limit(cx: I) -> usize { cx.recursion_limit() @@ -68,7 +71,7 @@ where inspect: &mut ProofTreeBuilder, input: CanonicalInput, ) -> QueryResult { - inspect.canonical_goal_evaluation_kind(inspect::WipCanonicalGoalEvaluationKind::Overflow); + inspect.canonical_goal_evaluation_overflow(); response_no_constraints(cx, input, Certainty::overflow(true)) } diff --git a/compiler/rustc_trait_selection/src/solve/inspect/analyse.rs b/compiler/rustc_trait_selection/src/solve/inspect/analyse.rs index e8de8457440ff..4e4022830d46e 100644 --- a/compiler/rustc_trait_selection/src/solve/inspect/analyse.rs +++ b/compiler/rustc_trait_selection/src/solve/inspect/analyse.rs @@ -332,13 +332,9 @@ impl<'a, 'tcx> InspectGoal<'a, 'tcx> { pub fn candidates(&'a self) -> Vec> { let mut candidates = vec![]; - let last_eval_step = match self.evaluation_kind { - inspect::CanonicalGoalEvaluationKind::Overflow - | inspect::CanonicalGoalEvaluationKind::CycleInStack - | inspect::CanonicalGoalEvaluationKind::ProvisionalCacheHit => { - warn!("unexpected root evaluation: {:?}", self.evaluation_kind); - return vec![]; - } + let last_eval_step = match &self.evaluation_kind { + // An annoying edge case in case the recursion limit is 0. 
+ inspect::CanonicalGoalEvaluationKind::Overflow => return vec![], inspect::CanonicalGoalEvaluationKind::Evaluation { final_revision } => final_revision, }; diff --git a/compiler/rustc_type_ir/src/interner.rs b/compiler/rustc_type_ir/src/interner.rs index 14ebbb12fe2f0..d8336e3cd6d64 100644 --- a/compiler/rustc_type_ir/src/interner.rs +++ b/compiler/rustc_type_ir/src/interner.rs @@ -11,7 +11,6 @@ use crate::ir_print::IrPrint; use crate::lang_items::TraitSolverLangItem; use crate::relate::Relate; use crate::search_graph; -use crate::solve::inspect::CanonicalGoalEvaluationStep; use crate::solve::{ CanonicalInput, ExternalConstraintsData, PredefinedOpaquesData, QueryResult, SolverMode, }; @@ -63,11 +62,6 @@ pub trait Interner: + Eq + TypeVisitable + SliceLike; - type CanonicalGoalEvaluationStepRef: Copy - + Debug - + Hash - + Eq - + Deref>; type CanonicalVars: Copy + Debug @@ -175,11 +169,6 @@ pub trait Interner: fn debug_assert_args_compatible(self, def_id: Self::DefId, args: Self::GenericArgs); - fn intern_canonical_goal_evaluation_step( - self, - step: CanonicalGoalEvaluationStep, - ) -> Self::CanonicalGoalEvaluationStepRef; - fn mk_type_list_from_iter(self, args: I) -> T::Output where I: Iterator, @@ -388,7 +377,6 @@ impl CollectAndApply for Result { } impl search_graph::Cx for I { - type ProofTree = Option; type Input = CanonicalInput; type Result = QueryResult; diff --git a/compiler/rustc_type_ir/src/search_graph/global_cache.rs b/compiler/rustc_type_ir/src/search_graph/global_cache.rs index 5ccda931f9c5f..796a0ab807646 100644 --- a/compiler/rustc_type_ir/src/search_graph/global_cache.rs +++ b/compiler/rustc_type_ir/src/search_graph/global_cache.rs @@ -3,15 +3,8 @@ use rustc_index::IndexVec; use super::{AvailableDepth, Cx, StackDepth, StackEntry}; use crate::data_structures::{HashMap, HashSet}; -#[derive(derivative::Derivative)] -#[derivative(Debug(bound = ""), Clone(bound = ""), Copy(bound = ""))] -struct QueryData { - result: X::Result, - proof_tree: X::ProofTree, -} - struct Success { - data: X::Tracked>, + result: X::Tracked, additional_depth: usize, } @@ -29,14 +22,13 @@ struct CacheEntry { /// See the doc comment of `StackEntry::cycle_participants` for more /// details. 
nested_goals: HashSet, - with_overflow: HashMap>>, + with_overflow: HashMap>, } #[derive(derivative::Derivative)] #[derivative(Debug(bound = ""))] pub(super) struct CacheData<'a, X: Cx> { pub(super) result: X::Result, - pub(super) proof_tree: X::ProofTree, pub(super) additional_depth: usize, pub(super) encountered_overflow: bool, // FIXME: This is currently unused, but impacts the design @@ -58,20 +50,19 @@ impl GlobalCache { input: X::Input, result: X::Result, - proof_tree: X::ProofTree, dep_node: X::DepNodeIndex, additional_depth: usize, encountered_overflow: bool, nested_goals: &HashSet, ) { - let data = cx.mk_tracked(QueryData { result, proof_tree }, dep_node); + let result = cx.mk_tracked(result, dep_node); let entry = self.map.entry(input).or_default(); entry.nested_goals.extend(nested_goals); if encountered_overflow { - entry.with_overflow.insert(additional_depth, data); + entry.with_overflow.insert(additional_depth, result); } else { - entry.success = Some(Success { data, additional_depth }); + entry.success = Some(Success { result, additional_depth }); } } @@ -93,10 +84,8 @@ impl GlobalCache { if let Some(ref success) = entry.success { if available_depth.cache_entry_is_applicable(success.additional_depth) { - let QueryData { result, proof_tree } = cx.get_tracked(&success.data); return Some(CacheData { - result, - proof_tree, + result: cx.get_tracked(&success.result), additional_depth: success.additional_depth, encountered_overflow: false, nested_goals: &entry.nested_goals, @@ -104,15 +93,11 @@ impl GlobalCache { } } - entry.with_overflow.get(&available_depth.0).map(|e| { - let QueryData { result, proof_tree } = cx.get_tracked(e); - CacheData { - result, - proof_tree, - additional_depth: available_depth.0, - encountered_overflow: true, - nested_goals: &entry.nested_goals, - } + entry.with_overflow.get(&available_depth.0).map(|e| CacheData { + result: cx.get_tracked(e), + additional_depth: available_depth.0, + encountered_overflow: true, + nested_goals: &entry.nested_goals, }) } } diff --git a/compiler/rustc_type_ir/src/search_graph/mod.rs b/compiler/rustc_type_ir/src/search_graph/mod.rs index c2204becdfd71..27b5d76e3be54 100644 --- a/compiler/rustc_type_ir/src/search_graph/mod.rs +++ b/compiler/rustc_type_ir/src/search_graph/mod.rs @@ -21,7 +21,6 @@ mod validate; /// about `Input` and `Result` as they are implementation details /// of the search graph. pub trait Cx: Copy { - type ProofTree: Debug + Copy; type Input: Debug + Eq + Hash + Copy; type Result: Debug + Eq + Hash + Copy; @@ -42,17 +41,12 @@ pub trait Cx: Copy { ) -> R; } -pub trait ProofTreeBuilder { - fn try_apply_proof_tree(&mut self, proof_tree: X::ProofTree) -> bool; - fn on_provisional_cache_hit(&mut self); - fn on_cycle_in_stack(&mut self); - fn finalize_canonical_goal_evaluation(&mut self, cx: X) -> X::ProofTree; -} - pub trait Delegate { type Cx: Cx; const FIXPOINT_STEP_LIMIT: usize; - type ProofTreeBuilder: ProofTreeBuilder; + + type ProofTreeBuilder; + fn inspect_is_noop(inspect: &mut Self::ProofTreeBuilder) -> bool; fn recursion_limit(cx: Self::Cx) -> usize; @@ -363,8 +357,10 @@ impl, X: Cx> SearchGraph { return D::on_stack_overflow(cx, inspect, input); }; - if let Some(result) = self.lookup_global_cache(cx, input, available_depth, inspect) { - return result; + if D::inspect_is_noop(inspect) { + if let Some(result) = self.lookup_global_cache(cx, input, available_depth) { + return result; + } } // Check whether the goal is in the provisional cache. 
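Aside: the hunk above makes the solver consult the global cache only when proof-tree recording is inactive (`D::inspect_is_noop`). A minimal standalone sketch of that gating, not part of the patch and using placeholder `Goal`/`Response` types rather than the solver's real `Input`/`Result`, could look like this:

    use std::collections::HashMap;

    #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
    struct Goal(u32);

    #[derive(Clone, Copy, PartialEq, Eq, Debug)]
    struct Response(bool);

    #[derive(Default)]
    struct Solver {
        global_cache: HashMap<Goal, Response>,
        building_proof_tree: bool,
    }

    impl Solver {
        fn evaluate(&mut self, goal: Goal) -> Response {
            // Only read the cache when no proof tree is being recorded, so
            // cache entries never have to carry (or overwrite) recorded trees.
            if !self.building_proof_tree {
                if let Some(&cached) = self.global_cache.get(&goal) {
                    return cached;
                }
            }
            let result = self.compute(goal);
            // Likewise, only populate the cache for ordinary evaluations.
            if !self.building_proof_tree {
                self.global_cache.insert(goal, result);
            }
            result
        }

        fn compute(&mut self, goal: Goal) -> Response {
            Response(goal.0 % 2 == 0)
        }
    }

    fn main() {
        let mut solver = Solver::default();
        assert_eq!(solver.evaluate(Goal(2)), Response(true));
        solver.building_proof_tree = true;
        // Re-evaluated from scratch; the cache is neither read nor written.
        assert_eq!(solver.evaluate(Goal(2)), Response(true));
    }

Because the cache is bypassed entirely while a proof tree is recorded, cache entries no longer need to store proof trees at all, which is what allows the `ProofTree` associated type and its plumbing to be removed.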
@@ -387,7 +383,6 @@ impl, X: Cx> SearchGraph { // We have a nested goal which is already in the provisional cache, use // its result. We do not provide any usage kind as that should have been // already set correctly while computing the cache entry. - inspect.on_provisional_cache_hit(); Self::tag_cycle_participants(&mut self.stack, None, entry.head); return entry.result; } else if let Some(stack_depth) = cache_entry.stack_depth { @@ -398,8 +393,6 @@ impl, X: Cx> SearchGraph { // // Finally we can return either the provisional response or the initial response // in case we're in the first fixpoint iteration for this goal. - inspect.on_cycle_in_stack(); - let is_coinductive_cycle = Self::stack_coinductive_from(cx, &self.stack, stack_depth); let cycle_kind = if is_coinductive_cycle { CycleKind::Coinductive } else { CycleKind::Inductive }; @@ -454,8 +447,6 @@ impl, X: Cx> SearchGraph { (current_entry, result) }); - let proof_tree = inspect.finalize_canonical_goal_evaluation(cx); - self.update_parent_goal(final_entry.reached_depth, final_entry.encountered_overflow); // We're now done with this goal. In case this goal is involved in a larger cycle @@ -472,28 +463,10 @@ impl, X: Cx> SearchGraph { entry.with_inductive_stack = Some(DetachedEntry { head, result }); } } else { - // When encountering a cycle, both inductive and coinductive, we only - // move the root into the global cache. We also store all other cycle - // participants involved. - // - // We must not use the global cache entry of a root goal if a cycle - // participant is on the stack. This is necessary to prevent unstable - // results. See the comment of `StackEntry::nested_goals` for - // more details. self.provisional_cache.remove(&input); - let additional_depth = final_entry.reached_depth.as_usize() - self.stack.len(); - cx.with_global_cache(self.mode, |cache| { - cache.insert( - cx, - input, - result, - proof_tree, - dep_node, - additional_depth, - final_entry.encountered_overflow, - &final_entry.nested_goals, - ) - }) + if D::inspect_is_noop(inspect) { + self.insert_global_cache(cx, input, final_entry, result, dep_node) + } } self.check_invariants(); @@ -509,25 +482,15 @@ impl, X: Cx> SearchGraph { cx: X, input: X::Input, available_depth: AvailableDepth, - inspect: &mut D::ProofTreeBuilder, ) -> Option { cx.with_global_cache(self.mode, |cache| { let CacheData { result, - proof_tree, additional_depth, encountered_overflow, nested_goals: _, // FIXME: consider nested goals here. } = cache.get(cx, input, &self.stack, available_depth)?; - // If we're building a proof tree and the current cache entry does not - // contain a proof tree, we do not use the entry but instead recompute - // the goal. We simply overwrite the existing entry once we're done, - // caching the proof tree. - if !inspect.try_apply_proof_tree(proof_tree) { - return None; - } - // Update the reached depth of the current goal to make sure // its state is the same regardless of whether we've used the // global cache or not. @@ -538,6 +501,36 @@ impl, X: Cx> SearchGraph { Some(result) }) } + + /// When encountering a cycle, both inductive and coinductive, we only + /// move the root into the global cache. We also store all other cycle + /// participants involved. + /// + /// We must not use the global cache entry of a root goal if a cycle + /// participant is on the stack. This is necessary to prevent unstable + /// results. See the comment of `StackEntry::nested_goals` for + /// more details. 
+ fn insert_global_cache( + &mut self, + cx: X, + input: X::Input, + final_entry: StackEntry, + result: X::Result, + dep_node: X::DepNodeIndex, + ) { + let additional_depth = final_entry.reached_depth.as_usize() - self.stack.len(); + cx.with_global_cache(self.mode, |cache| { + cache.insert( + cx, + input, + result, + dep_node, + additional_depth, + final_entry.encountered_overflow, + &final_entry.nested_goals, + ) + }) + } } enum StepResult { diff --git a/compiler/rustc_type_ir/src/solve/inspect.rs b/compiler/rustc_type_ir/src/solve/inspect.rs index 0733c730064b0..8c88b7cda6eb5 100644 --- a/compiler/rustc_type_ir/src/solve/inspect.rs +++ b/compiler/rustc_type_ir/src/solve/inspect.rs @@ -72,9 +72,7 @@ pub struct CanonicalGoalEvaluation { #[derivative(PartialEq(bound = ""), Eq(bound = ""), Hash(bound = ""), Debug(bound = ""))] pub enum CanonicalGoalEvaluationKind { Overflow, - CycleInStack, - ProvisionalCacheHit, - Evaluation { final_revision: I::CanonicalGoalEvaluationStepRef }, + Evaluation { final_revision: CanonicalGoalEvaluationStep }, } #[derive(derivative::Derivative)] diff --git a/compiler/rustc_type_ir/src/solve/mod.rs b/compiler/rustc_type_ir/src/solve/mod.rs index 2449ac47db6fa..f91ffd53c2b6e 100644 --- a/compiler/rustc_type_ir/src/solve/mod.rs +++ b/compiler/rustc_type_ir/src/solve/mod.rs @@ -385,12 +385,3 @@ impl MaybeCause { } } } - -#[derive(derivative::Derivative)] -#[derivative(PartialEq(bound = ""), Eq(bound = ""), Debug(bound = ""))] -pub struct CacheData { - pub result: QueryResult, - pub proof_tree: Option, - pub additional_depth: usize, - pub encountered_overflow: bool, -} From 49922f7b7c041ea77894b946a61189a8cb445336 Mon Sep 17 00:00:00 2001 From: lcnr Date: Tue, 23 Jul 2024 13:12:23 +0200 Subject: [PATCH 2/7] expand fuzzing support this allows us to only sometimes disable the global cache. --- .../src/solve/search_graph.rs | 9 +++ .../rustc_type_ir/src/search_graph/mod.rs | 56 ++++++++++++++++--- 2 files changed, 56 insertions(+), 9 deletions(-) diff --git a/compiler/rustc_next_trait_solver/src/solve/search_graph.rs b/compiler/rustc_next_trait_solver/src/solve/search_graph.rs index 1f2c65191a618..0994d0e3b3d88 100644 --- a/compiler/rustc_next_trait_solver/src/solve/search_graph.rs +++ b/compiler/rustc_next_trait_solver/src/solve/search_graph.rs @@ -1,3 +1,4 @@ +use std::convert::Infallible; use std::marker::PhantomData; use rustc_type_ir::inherent::*; @@ -22,6 +23,14 @@ where { type Cx = D::Interner; + type ValidationScope = Infallible; + fn enter_validation_scope( + _cx: Self::Cx, + _input: ::Input, + ) -> Option { + None + } + const FIXPOINT_STEP_LIMIT: usize = FIXPOINT_STEP_LIMIT; type ProofTreeBuilder = ProofTreeBuilder; diff --git a/compiler/rustc_type_ir/src/search_graph/mod.rs b/compiler/rustc_type_ir/src/search_graph/mod.rs index 27b5d76e3be54..7e176decf4bd7 100644 --- a/compiler/rustc_type_ir/src/search_graph/mod.rs +++ b/compiler/rustc_type_ir/src/search_graph/mod.rs @@ -43,6 +43,19 @@ pub trait Cx: Copy { pub trait Delegate { type Cx: Cx; + type ValidationScope; + /// Returning `Some` disables the global cache for the current goal. + /// + /// The `ValidationScope` is used when fuzzing the search graph to track + /// for which goals the global cache has been disabled. This is necessary + /// as we may otherwise ignore the global cache entry for some goal `G` + /// only to later use it, failing to detect a cycle goal and potentially + /// changing the result. 
+ fn enter_validation_scope( + cx: Self::Cx, + input: ::Input, + ) -> Option; + const FIXPOINT_STEP_LIMIT: usize; type ProofTreeBuilder; @@ -357,11 +370,21 @@ impl, X: Cx> SearchGraph { return D::on_stack_overflow(cx, inspect, input); }; - if D::inspect_is_noop(inspect) { - if let Some(result) = self.lookup_global_cache(cx, input, available_depth) { - return result; - } - } + let validate_cache = if !D::inspect_is_noop(inspect) { + None + } else if let Some(scope) = D::enter_validation_scope(cx, input) { + // When validating the global cache we need to track the goals for which the + // global cache has been disabled as it may otherwise change the result for + // cyclic goals. We don't care about goals which are not on the current stack + // so it's fine to drop their scope eagerly. + self.lookup_global_cache_untracked(cx, input, available_depth) + .inspect(|expected| debug!(?expected, "validate cache entry")) + .map(|r| (scope, r)) + } else if let Some(result) = self.lookup_global_cache(cx, input, available_depth) { + return result; + } else { + None + }; // Check whether the goal is in the provisional cache. // The provisional result may rely on the path to its cycle roots, @@ -453,6 +476,7 @@ impl, X: Cx> SearchGraph { // do not remove it from the provisional cache and update its provisional result. // We only add the root of cycles to the global cache. if let Some(head) = final_entry.non_root_cycle_participant { + debug_assert!(validate_cache.is_none()); let coinductive_stack = Self::stack_coinductive_from(cx, &self.stack, head); let entry = self.provisional_cache.get_mut(&input).unwrap(); @@ -464,16 +488,29 @@ impl, X: Cx> SearchGraph { } } else { self.provisional_cache.remove(&input); - if D::inspect_is_noop(inspect) { + if let Some((_scope, expected)) = validate_cache { + // Do not try to move a goal into the cache again if we're testing + // the global cache. + assert_eq!(result, expected, "input={input:?}"); + } else if D::inspect_is_noop(inspect) { self.insert_global_cache(cx, input, final_entry, result, dep_node) } } - self.check_invariants(); - result } + fn lookup_global_cache_untracked( + &self, + cx: X, + input: X::Input, + available_depth: AvailableDepth, + ) -> Option { + cx.with_global_cache(self.mode, |cache| { + cache.get(cx, input, &self.stack, available_depth).map(|c| c.result) + }) + } + /// Try to fetch a previously computed result from the global cache, /// making sure to only do so if it would match the result of reevaluating /// this goal. 
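Aside: `enter_validation_scope` enables a fuzzing mode in which a cached result is looked up but deliberately not returned; the goal is recomputed and the two results are compared once evaluation finishes. A rough standalone sketch of that idea, not part of the patch, with a plain `validate_cache` flag standing in for the validation scope and placeholder types:

    use std::collections::HashMap;

    #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
    struct Goal(u32);

    #[derive(Clone, Copy, PartialEq, Eq, Debug)]
    struct Response(u32);

    struct Solver {
        global_cache: HashMap<Goal, Response>,
        validate_cache: bool,
    }

    impl Solver {
        fn evaluate(&mut self, goal: Goal) -> Response {
            // In validation mode, remember the cached result but do not use it.
            let expected = if self.validate_cache {
                self.global_cache.get(&goal).copied()
            } else if let Some(&cached) = self.global_cache.get(&goal) {
                return cached;
            } else {
                None
            };

            let result = self.compute(goal);

            if let Some(expected) = expected {
                // A mismatch means using the cache would have changed the result.
                assert_eq!(result, expected, "goal={goal:?}");
            } else {
                self.global_cache.insert(goal, result);
            }
            result
        }

        fn compute(&mut self, goal: Goal) -> Response {
            Response(goal.0 * 2)
        }
    }

    fn main() {
        let mut solver = Solver { global_cache: HashMap::new(), validate_cache: false };
        assert_eq!(solver.evaluate(Goal(3)), Response(6));
        solver.validate_cache = true;
        // Recomputed and checked against the cached value instead of reusing it.
        assert_eq!(solver.evaluate(Goal(3)), Response(6));
    }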
@@ -497,7 +534,7 @@ impl, X: Cx> SearchGraph { let reached_depth = self.stack.next_index().plus(additional_depth); self.update_parent_goal(reached_depth, encountered_overflow); - debug!("global cache hit"); + debug!(?additional_depth, "global cache hit"); Some(result) }) } @@ -519,6 +556,7 @@ impl, X: Cx> SearchGraph { dep_node: X::DepNodeIndex, ) { let additional_depth = final_entry.reached_depth.as_usize() - self.stack.len(); + debug!(?final_entry, ?result, "insert global cache"); cx.with_global_cache(self.mode, |cache| { cache.insert( cx, From 4955a35a0724a4161045126dfe590a5d0e0da052 Mon Sep 17 00:00:00 2001 From: lcnr Date: Tue, 23 Jul 2024 13:14:19 +0200 Subject: [PATCH 3/7] simplify match + move `debug!` call --- compiler/rustc_type_ir/src/search_graph/mod.rs | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/compiler/rustc_type_ir/src/search_graph/mod.rs b/compiler/rustc_type_ir/src/search_graph/mod.rs index 7e176decf4bd7..39f670faf4ae0 100644 --- a/compiler/rustc_type_ir/src/search_graph/mod.rs +++ b/compiler/rustc_type_ir/src/search_graph/mod.rs @@ -105,6 +105,7 @@ pub enum UsageKind { impl UsageKind { fn merge(self, other: Self) -> Self { match (self, other) { + (UsageKind::Mixed, _) | (_, UsageKind::Mixed) => UsageKind::Mixed, (UsageKind::Single(lhs), UsageKind::Single(rhs)) => { if lhs == rhs { UsageKind::Single(lhs) @@ -112,9 +113,6 @@ impl UsageKind { UsageKind::Mixed } } - (UsageKind::Mixed, UsageKind::Mixed) - | (UsageKind::Mixed, UsageKind::Single(_)) - | (UsageKind::Single(_), UsageKind::Mixed) => UsageKind::Mixed, } } } @@ -459,7 +457,7 @@ impl, X: Cx> SearchGraph { for _ in 0..D::FIXPOINT_STEP_LIMIT { match self.fixpoint_step_in_task(cx, input, inspect, &mut prove_goal) { StepResult::Done(final_entry, result) => return (final_entry, result), - StepResult::HasChanged => debug!("fixpoint changed provisional results"), + StepResult::HasChanged => {} } } @@ -624,6 +622,7 @@ impl, X: Cx> SearchGraph { if D::reached_fixpoint(cx, usage_kind, input, stack_entry.provisional_result, result) { StepResult::Done(stack_entry, result) } else { + debug!(?result, "fixpoint changed provisional results"); let depth = self.stack.push(StackEntry { has_been_used: None, provisional_result: Some(result), From 61851f99c489f4fda35a2170ebb03f9e7f7706f4 Mon Sep 17 00:00:00 2001 From: lcnr Date: Tue, 23 Jul 2024 14:59:01 +0200 Subject: [PATCH 4/7] tracing: debug to trace --- compiler/rustc_type_ir/src/binder.rs | 24 ++++++++---------------- compiler/rustc_type_ir/src/fold.rs | 11 +++++------ 2 files changed, 13 insertions(+), 22 deletions(-) diff --git a/compiler/rustc_type_ir/src/binder.rs b/compiler/rustc_type_ir/src/binder.rs index 17b35a2807a2d..a76c138393545 100644 --- a/compiler/rustc_type_ir/src/binder.rs +++ b/compiler/rustc_type_ir/src/binder.rs @@ -7,7 +7,7 @@ use std::ops::{ControlFlow, Deref}; use rustc_macros::{HashStable_NoContext, TyDecodable, TyEncodable}; #[cfg(feature = "nightly")] use rustc_serialize::Decodable; -use tracing::debug; +use tracing::instrument; use crate::data_structures::SsoHashSet; use crate::fold::{FallibleTypeFolder, TypeFoldable, TypeFolder, TypeSuperFoldable}; @@ -836,28 +836,20 @@ impl<'a, I: Interner> ArgFolder<'a, I> { /// As indicated in the diagram, here the same type `&'a i32` is instantiated once, but in the /// first case we do not increase the De Bruijn index and in the second case we do. The reason /// is that only in the second case have we passed through a fn binder. 
+ #[instrument(level = "trace", skip(self), fields(binders_passed = self.binders_passed), ret)] fn shift_vars_through_binders>(&self, val: T) -> T { - debug!( - "shift_vars(val={:?}, binders_passed={:?}, has_escaping_bound_vars={:?})", - val, - self.binders_passed, - val.has_escaping_bound_vars() - ); - if self.binders_passed == 0 || !val.has_escaping_bound_vars() { - return val; + val + } else { + ty::fold::shift_vars(self.cx, val, self.binders_passed) } - - let result = ty::fold::shift_vars(TypeFolder::cx(self), val, self.binders_passed); - debug!("shift_vars: shifted result = {:?}", result); - - result } fn shift_region_through_binders(&self, region: I::Region) -> I::Region { if self.binders_passed == 0 || !region.has_escaping_bound_vars() { - return region; + region + } else { + ty::fold::shift_region(self.cx, region, self.binders_passed) } - ty::fold::shift_region(self.cx, region, self.binders_passed) } } diff --git a/compiler/rustc_type_ir/src/fold.rs b/compiler/rustc_type_ir/src/fold.rs index a4d8dafb246e5..d9aa678bb7471 100644 --- a/compiler/rustc_type_ir/src/fold.rs +++ b/compiler/rustc_type_ir/src/fold.rs @@ -47,7 +47,7 @@ use rustc_index::{Idx, IndexVec}; use std::mem; -use tracing::debug; +use tracing::instrument; use crate::data_structures::Lrc; use crate::inherent::*; @@ -414,15 +414,14 @@ pub fn shift_region(cx: I, region: I::Region, amount: u32) -> I::Re } } +#[instrument(level = "trace", skip(cx), ret)] pub fn shift_vars(cx: I, value: T, amount: u32) -> T where T: TypeFoldable, { - debug!("shift_vars(value={:?}, amount={})", value, amount); - if amount == 0 || !value.has_escaping_bound_vars() { - return value; + value + } else { + value.fold_with(&mut Shifter::new(cx, amount)) } - - value.fold_with(&mut Shifter::new(cx, amount)) } From a7ce1e1a23792ae0ab1eb2ffb9e102377eea73cf Mon Sep 17 00:00:00 2001 From: lcnr Date: Tue, 23 Jul 2024 23:03:34 +0200 Subject: [PATCH 5/7] move behavior out of shared fn --- .../rustc_type_ir/src/search_graph/mod.rs | 25 +++++++------------ 1 file changed, 9 insertions(+), 16 deletions(-) diff --git a/compiler/rustc_type_ir/src/search_graph/mod.rs b/compiler/rustc_type_ir/src/search_graph/mod.rs index 39f670faf4ae0..3dd1cc301029c 100644 --- a/compiler/rustc_type_ir/src/search_graph/mod.rs +++ b/compiler/rustc_type_ir/src/search_graph/mod.rs @@ -301,17 +301,7 @@ impl, X: Cx> SearchGraph { // We update both the head of this cycle to rerun its evaluation until // we reach a fixpoint and all other cycle participants to make sure that // their result does not get moved to the global cache. - fn tag_cycle_participants( - stack: &mut IndexVec>, - usage_kind: Option, - head: StackDepth, - ) { - if let Some(usage_kind) = usage_kind { - stack[head].has_been_used = - Some(stack[head].has_been_used.map_or(usage_kind, |prev| prev.merge(usage_kind))); - } - debug_assert!(stack[head].has_been_used.is_some()); - + fn tag_cycle_participants(stack: &mut IndexVec>, head: StackDepth) { // The current root of these cycles. Note that this may not be the final // root in case a later goal depends on a goal higher up the stack. let mut current_root = head; @@ -404,7 +394,8 @@ impl, X: Cx> SearchGraph { // We have a nested goal which is already in the provisional cache, use // its result. We do not provide any usage kind as that should have been // already set correctly while computing the cache entry. 
- Self::tag_cycle_participants(&mut self.stack, None, entry.head); + debug_assert!(self.stack[entry.head].has_been_used.is_some()); + Self::tag_cycle_participants(&mut self.stack, entry.head); return entry.result; } else if let Some(stack_depth) = cache_entry.stack_depth { debug!("encountered cycle with depth {stack_depth:?}"); @@ -417,11 +408,13 @@ impl, X: Cx> SearchGraph { let is_coinductive_cycle = Self::stack_coinductive_from(cx, &self.stack, stack_depth); let cycle_kind = if is_coinductive_cycle { CycleKind::Coinductive } else { CycleKind::Inductive }; - Self::tag_cycle_participants( - &mut self.stack, - Some(UsageKind::Single(cycle_kind)), - stack_depth, + let usage_kind = UsageKind::Single(cycle_kind); + self.stack[stack_depth].has_been_used = Some( + self.stack[stack_depth] + .has_been_used + .map_or(usage_kind, |prev| prev.merge(usage_kind)), ); + Self::tag_cycle_participants(&mut self.stack, stack_depth); // Return the provisional result or, if we're in the first iteration, // start with no constraints. From 9fbcf25fd0488e89348803c41a5b65afd342ff72 Mon Sep 17 00:00:00 2001 From: lcnr Date: Tue, 23 Jul 2024 23:08:14 +0200 Subject: [PATCH 6/7] merge impl blocks --- compiler/rustc_type_ir/src/search_graph/mod.rs | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/compiler/rustc_type_ir/src/search_graph/mod.rs b/compiler/rustc_type_ir/src/search_graph/mod.rs index 3dd1cc301029c..a34ca0204050f 100644 --- a/compiler/rustc_type_ir/src/search_graph/mod.rs +++ b/compiler/rustc_type_ir/src/search_graph/mod.rs @@ -117,6 +117,11 @@ impl UsageKind { } } +enum StepResult { + Done(StackEntry, X::Result), + HasChanged, +} + #[derive(Debug, Clone, Copy)] struct AvailableDepth(usize); impl AvailableDepth { @@ -560,14 +565,7 @@ impl, X: Cx> SearchGraph { ) }) } -} -enum StepResult { - Done(StackEntry, X::Result), - HasChanged, -} - -impl, X: Cx> SearchGraph { /// When we encounter a coinductive cycle, we have to fetch the /// result of that cycle while we are still computing it. Because /// of this we continuously recompute the cycle until the result From 4d641096866e81060f59d9eeccadaca85b39c82c Mon Sep 17 00:00:00 2001 From: lcnr Date: Tue, 23 Jul 2024 23:19:29 +0200 Subject: [PATCH 7/7] split provisional cache and stack lookup this makes it easier to maintain and modify going forward. There may be a small performance cost as we now need to access the provisional cache *and* walk through the stack to detect cycles. However, the provisional cache should be mostly empty and the stack should only have a few elements so the performance impact is likely minimal. Given the complexity of the search graph maintainability trumps linear performance improvements. --- .../rustc_type_ir/src/search_graph/mod.rs | 204 +++++++++--------- .../src/search_graph/validate.rs | 18 +- 2 files changed, 104 insertions(+), 118 deletions(-) diff --git a/compiler/rustc_type_ir/src/search_graph/mod.rs b/compiler/rustc_type_ir/src/search_graph/mod.rs index a34ca0204050f..171a50bb3b1fe 100644 --- a/compiler/rustc_type_ir/src/search_graph/mod.rs +++ b/compiler/rustc_type_ir/src/search_graph/mod.rs @@ -224,8 +224,8 @@ struct DetachedEntry { result: X::Result, } -/// Stores the stack depth of a currently evaluated goal *and* already -/// computed results for goals which depend on other goals still on the stack. +/// Stores the provisional result of already computed results for goals which +/// depend on other goals still on the stack. 
/// /// The provisional result may depend on whether the stack above it is inductive /// or coinductive. Because of this, we store separate provisional results for @@ -239,16 +239,13 @@ struct DetachedEntry { #[derive(derivative::Derivative)] #[derivative(Default(bound = ""))] struct ProvisionalCacheEntry { - stack_depth: Option, with_inductive_stack: Option>, with_coinductive_stack: Option>, } impl ProvisionalCacheEntry { fn is_empty(&self) -> bool { - self.stack_depth.is_none() - && self.with_inductive_stack.is_none() - && self.with_coinductive_stack.is_none() + self.with_inductive_stack.is_none() && self.with_coinductive_stack.is_none() } } @@ -379,71 +376,26 @@ impl, X: Cx> SearchGraph { None }; - // Check whether the goal is in the provisional cache. - // The provisional result may rely on the path to its cycle roots, - // so we have to check the path of the current goal matches that of - // the cache entry. - let cache_entry = self.provisional_cache.entry(input).or_default(); - if let Some(entry) = cache_entry - .with_coinductive_stack - .as_ref() - .filter(|p| Self::stack_coinductive_from(cx, &self.stack, p.head)) - .or_else(|| { - cache_entry - .with_inductive_stack - .as_ref() - .filter(|p| !Self::stack_coinductive_from(cx, &self.stack, p.head)) - }) - { - debug!("provisional cache hit"); - // We have a nested goal which is already in the provisional cache, use - // its result. We do not provide any usage kind as that should have been - // already set correctly while computing the cache entry. - debug_assert!(self.stack[entry.head].has_been_used.is_some()); - Self::tag_cycle_participants(&mut self.stack, entry.head); - return entry.result; - } else if let Some(stack_depth) = cache_entry.stack_depth { - debug!("encountered cycle with depth {stack_depth:?}"); - // We have a nested goal which directly relies on a goal deeper in the stack. - // - // We start by tagging all cycle participants, as that's necessary for caching. - // - // Finally we can return either the provisional response or the initial response - // in case we're in the first fixpoint iteration for this goal. - let is_coinductive_cycle = Self::stack_coinductive_from(cx, &self.stack, stack_depth); - let cycle_kind = - if is_coinductive_cycle { CycleKind::Coinductive } else { CycleKind::Inductive }; - let usage_kind = UsageKind::Single(cycle_kind); - self.stack[stack_depth].has_been_used = Some( - self.stack[stack_depth] - .has_been_used - .map_or(usage_kind, |prev| prev.merge(usage_kind)), - ); - Self::tag_cycle_participants(&mut self.stack, stack_depth); - - // Return the provisional result or, if we're in the first iteration, - // start with no constraints. - return if let Some(result) = self.stack[stack_depth].provisional_result { - result - } else { - D::initial_provisional_result(cx, cycle_kind, input) - }; - } else { - // No entry, we push this goal on the stack and try to prove it. 
- let depth = self.stack.next_index(); - let entry = StackEntry { - input, - available_depth, - reached_depth: depth, - non_root_cycle_participant: None, - encountered_overflow: false, - has_been_used: None, - nested_goals: Default::default(), - provisional_result: None, - }; - assert_eq!(self.stack.push(entry), depth); - cache_entry.stack_depth = Some(depth); + if let Some(result) = self.lookup_provisional_cache(cx, input) { + return result; + } + + if let Some(result) = self.check_cycle_on_stack(cx, input) { + return result; + } + + let depth = self.stack.next_index(); + let entry = StackEntry { + input, + available_depth, + reached_depth: depth, + non_root_cycle_participant: None, + encountered_overflow: false, + has_been_used: None, + nested_goals: Default::default(), + provisional_result: None, }; + assert_eq!(self.stack.push(entry), depth); // This is for global caching, so we properly track query dependencies. // Everything that affects the `result` should be performed within this @@ -475,8 +427,7 @@ impl, X: Cx> SearchGraph { debug_assert!(validate_cache.is_none()); let coinductive_stack = Self::stack_coinductive_from(cx, &self.stack, head); - let entry = self.provisional_cache.get_mut(&input).unwrap(); - entry.stack_depth = None; + let entry = self.provisional_cache.entry(input).or_default(); if coinductive_stack { entry.with_coinductive_stack = Some(DetachedEntry { head, result }); } else { @@ -535,35 +486,52 @@ impl, X: Cx> SearchGraph { }) } - /// When encountering a cycle, both inductive and coinductive, we only - /// move the root into the global cache. We also store all other cycle - /// participants involved. - /// - /// We must not use the global cache entry of a root goal if a cycle - /// participant is on the stack. This is necessary to prevent unstable - /// results. See the comment of `StackEntry::nested_goals` for - /// more details. - fn insert_global_cache( - &mut self, - cx: X, - input: X::Input, - final_entry: StackEntry, - result: X::Result, - dep_node: X::DepNodeIndex, - ) { - let additional_depth = final_entry.reached_depth.as_usize() - self.stack.len(); - debug!(?final_entry, ?result, "insert global cache"); - cx.with_global_cache(self.mode, |cache| { - cache.insert( - cx, - input, - result, - dep_node, - additional_depth, - final_entry.encountered_overflow, - &final_entry.nested_goals, - ) - }) + fn lookup_provisional_cache(&mut self, cx: X, input: X::Input) -> Option { + let cache_entry = self.provisional_cache.get(&input)?; + let &DetachedEntry { head, result } = cache_entry + .with_coinductive_stack + .as_ref() + .filter(|p| Self::stack_coinductive_from(cx, &self.stack, p.head)) + .or_else(|| { + cache_entry + .with_inductive_stack + .as_ref() + .filter(|p| !Self::stack_coinductive_from(cx, &self.stack, p.head)) + })?; + + debug!("provisional cache hit"); + // We have a nested goal which is already in the provisional cache, use + // its result. We do not provide any usage kind as that should have been + // already set correctly while computing the cache entry. + Self::tag_cycle_participants(&mut self.stack, head); + debug_assert!(self.stack[head].has_been_used.is_some()); + Some(result) + } + + fn check_cycle_on_stack(&mut self, cx: X, input: X::Input) -> Option { + let (head, _stack_entry) = self.stack.iter_enumerated().find(|(_, e)| e.input == input)?; + debug!("encountered cycle with depth {head:?}"); + // We have a nested goal which directly relies on a goal deeper in the stack. 
+ // + // We start by tagging all cycle participants, as that's necessary for caching. + // + // Finally we can return either the provisional response or the initial response + // in case we're in the first fixpoint iteration for this goal. + let is_coinductive_cycle = Self::stack_coinductive_from(cx, &self.stack, head); + let cycle_kind = + if is_coinductive_cycle { CycleKind::Coinductive } else { CycleKind::Inductive }; + let usage_kind = UsageKind::Single(cycle_kind); + self.stack[head].has_been_used = + Some(self.stack[head].has_been_used.map_or(usage_kind, |prev| prev.merge(usage_kind))); + Self::tag_cycle_participants(&mut self.stack, head); + + // Return the provisional result or, if we're in the first iteration, + // start with no constraints. + if let Some(result) = self.stack[head].provisional_result { + Some(result) + } else { + Some(D::initial_provisional_result(cx, cycle_kind, input)) + } } /// When we encounter a coinductive cycle, we have to fetch the @@ -614,13 +582,43 @@ impl, X: Cx> SearchGraph { StepResult::Done(stack_entry, result) } else { debug!(?result, "fixpoint changed provisional results"); - let depth = self.stack.push(StackEntry { + self.stack.push(StackEntry { has_been_used: None, provisional_result: Some(result), ..stack_entry }); - debug_assert_eq!(self.provisional_cache[&input].stack_depth, Some(depth)); StepResult::HasChanged } } + + /// When encountering a cycle, both inductive and coinductive, we only + /// move the root into the global cache. We also store all other cycle + /// participants involved. + /// + /// We must not use the global cache entry of a root goal if a cycle + /// participant is on the stack. This is necessary to prevent unstable + /// results. See the comment of `StackEntry::nested_goals` for + /// more details. 
+    fn insert_global_cache(
+        &mut self,
+        cx: X,
+        input: X::Input,
+        final_entry: StackEntry<X>,
+        result: X::Result,
+        dep_node: X::DepNodeIndex,
+    ) {
+        let additional_depth = final_entry.reached_depth.as_usize() - self.stack.len();
+        debug!(?final_entry, ?result, "insert global cache");
+        cx.with_global_cache(self.mode, |cache| {
+            cache.insert(
+                cx,
+                input,
+                result,
+                dep_node,
+                additional_depth,
+                final_entry.encountered_overflow,
+                &final_entry.nested_goals,
+            )
+        })
+    }
 }
diff --git a/compiler/rustc_type_ir/src/search_graph/validate.rs b/compiler/rustc_type_ir/src/search_graph/validate.rs
index 1ae806834ba7d..b4802811b0f57 100644
--- a/compiler/rustc_type_ir/src/search_graph/validate.rs
+++ b/compiler/rustc_type_ir/src/search_graph/validate.rs
@@ -23,8 +23,6 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
                 ref nested_goals,
                 provisional_result,
             } = *entry;
-            let cache_entry = provisional_cache.get(&entry.input).unwrap();
-            assert_eq!(cache_entry.stack_depth, Some(depth));
             if let Some(head) = non_root_cycle_participant {
                 assert!(head < depth);
                 assert!(nested_goals.is_empty());
@@ -45,19 +43,9 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
             }
         }
 
-        for (&input, entry) in &self.provisional_cache {
-            let ProvisionalCacheEntry { stack_depth, with_coinductive_stack, with_inductive_stack } =
-                entry;
-            assert!(
-                stack_depth.is_some()
-                    || with_coinductive_stack.is_some()
-                    || with_inductive_stack.is_some()
-            );
-
-            if let &Some(stack_depth) = stack_depth {
-                assert_eq!(stack[stack_depth].input, input);
-            }
-
+        for (&_input, entry) in &self.provisional_cache {
+            let ProvisionalCacheEntry { with_coinductive_stack, with_inductive_stack } = entry;
+            assert!(with_coinductive_stack.is_some() || with_inductive_stack.is_some());
             let check_detached = |detached_entry: &DetachedEntry<X>| {
                 let DetachedEntry { head, result: _ } = *detached_entry;
                 assert_ne!(stack[head].has_been_used, None);
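Aside: after the final patch in this series, cycles are detected by walking the stack of in-progress goals for a matching input, independently of the provisional cache lookup. A small self-contained sketch of that stack walk, not part of the patch and using placeholder types:

    #[derive(Clone, Copy, PartialEq, Eq, Debug)]
    struct Goal(u32);

    #[derive(Clone, Copy, PartialEq, Eq, Debug)]
    struct Response(bool);

    struct StackEntry {
        input: Goal,
        provisional_result: Option<Response>,
    }

    fn check_cycle_on_stack(stack: &[StackEntry], input: Goal) -> Option<Response> {
        // Look for an in-progress goal with the same input anywhere on the stack.
        let entry = stack.iter().find(|e| e.input == input)?;
        // Reuse its provisional result, or fall back to an initial guess in the
        // first fixpoint iteration.
        Some(entry.provisional_result.unwrap_or(Response(true)))
    }

    fn main() {
        let stack = vec![
            StackEntry { input: Goal(0), provisional_result: None },
            StackEntry { input: Goal(1), provisional_result: Some(Response(false)) },
        ];
        assert_eq!(check_cycle_on_stack(&stack, Goal(1)), Some(Response(false)));
        assert_eq!(check_cycle_on_stack(&stack, Goal(0)), Some(Response(true)));
        assert_eq!(check_cycle_on_stack(&stack, Goal(2)), None);
    }

As the commit message notes, the stack is expected to stay small, so the linear scan is an acceptable price for keeping the provisional cache and the cycle check separate.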