@@ -81,12 +81,19 @@ where
     /// the values inferred while solving the instantiated goal.
     /// - `external_constraints`: additional constraints which aren't expressible
     ///   using simple unification of inference variables.
+    ///
+    /// This takes the `shallow_certainty`, which represents whether we're confident
+    /// that the final result of the current goal only depends on the nested goals.
+    ///
+    /// If this is `Certainty::Maybe`, there may still be additional nested goals
+    /// or inference constraints required for this candidate to hold. The candidate
+    /// always requires all already added constraints and nested goals.
     #[instrument(level = "trace", skip(self), ret)]
     pub(in crate::solve) fn evaluate_added_goals_and_make_canonical_response(
         &mut self,
-        certainty: Certainty,
+        shallow_certainty: Certainty,
     ) -> QueryResult<I> {
-        self.inspect.make_canonical_response(certainty);
+        self.inspect.make_canonical_response(shallow_certainty);

         let goals_certainty = self.try_evaluate_added_goals()?;
         assert_eq!(
@@ -103,26 +110,29 @@ where
             NoSolution
         })?;

-        // When normalizing, we've replaced the expected term with an unconstrained
-        // inference variable. This means that we dropped information which could
-        // have been important. We handle this by instead returning the nested goals
-        // to the caller, where they are then handled.
-        //
-        // As we return all ambiguous nested goals, we can ignore the certainty returned
-        // by `try_evaluate_added_goals()`.
-        let (certainty, normalization_nested_goals) = match self.current_goal_kind {
-            CurrentGoalKind::NormalizesTo => {
-                let goals = std::mem::take(&mut self.nested_goals);
-                if goals.is_empty() {
-                    assert!(matches!(goals_certainty, Certainty::Yes));
+        let (certainty, normalization_nested_goals) =
+            match (self.current_goal_kind, shallow_certainty) {
+                // When normalizing, we've replaced the expected term with an unconstrained
+                // inference variable. This means that we dropped information which could
+                // have been important. We handle this by instead returning the nested goals
+                // to the caller, where they are then handled. We only do so if we do not
+                // need to recompute the `NormalizesTo` goal afterwards to avoid repeatedly
+                // uplifting its nested goals. This is the case if the `shallow_certainty` is
+                // `Certainty::Yes`.
+                (CurrentGoalKind::NormalizesTo, Certainty::Yes) => {
+                    let goals = std::mem::take(&mut self.nested_goals);
+                    // As we return all ambiguous nested goals, we can ignore the certainty
+                    // returned by `self.try_evaluate_added_goals()`.
+                    if goals.is_empty() {
+                        assert!(matches!(goals_certainty, Certainty::Yes));
+                    }
+                    (Certainty::Yes, NestedNormalizationGoals(goals))
                 }
-                (certainty, NestedNormalizationGoals(goals))
-            }
-            CurrentGoalKind::Misc | CurrentGoalKind::CoinductiveTrait => {
-                let certainty = certainty.unify_with(goals_certainty);
-                (certainty, NestedNormalizationGoals::empty())
-            }
-        };
+                _ => {
+                    let certainty = shallow_certainty.unify_with(goals_certainty);
+                    (certainty, NestedNormalizationGoals::empty())
+                }
+            };

         if let Certainty::Maybe(cause @ MaybeCause::Overflow { .. }) = certainty {
             // If we have overflow, it's probable that we're substituting a type
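
For readers unfamiliar with the solver internals, here is a minimal, self-contained sketch of the behaviour this change introduces. It is not the actual `EvalCtxt` code: `Certainty`, `CurrentGoalKind`, `NestedGoal`, and `make_response` below are simplified stand-ins (in particular, `Certainty::Maybe` drops its `MaybeCause` payload and nested goals are plain strings). The point it illustrates is that nested goals are only handed back to the caller of a `NormalizesTo` goal when the candidate passes `shallow_certainty == Certainty::Yes`; otherwise the shallow certainty is unified with the certainty of the already-added goals.

```rust
// Simplified stand-ins for the solver types; see the note above.
#[derive(Clone, Copy, Debug, PartialEq)]
enum Certainty {
    Yes,
    Maybe, // the real type carries a `MaybeCause`
}

impl Certainty {
    /// The combined certainty is `Yes` only if both inputs are `Yes`.
    fn unify_with(self, other: Certainty) -> Certainty {
        match (self, other) {
            (Certainty::Yes, Certainty::Yes) => Certainty::Yes,
            _ => Certainty::Maybe,
        }
    }
}

#[derive(Clone, Copy, Debug)]
enum CurrentGoalKind {
    NormalizesTo,
    Misc,
}

// Hypothetical stand-in for a nested goal; in the solver this is a full `Goal`.
type NestedGoal = &'static str;

/// Models the `(current_goal_kind, shallow_certainty)` match from the diff:
/// returns the final certainty plus the nested goals deferred to the caller.
fn make_response(
    goal_kind: CurrentGoalKind,
    shallow_certainty: Certainty,
    goals_certainty: Certainty,
    nested_goals: Vec<NestedGoal>,
) -> (Certainty, Vec<NestedGoal>) {
    match (goal_kind, shallow_certainty) {
        // Only defer nested goals when we won't need to recompute the
        // `NormalizesTo` goal, i.e. when the shallow certainty is `Yes`.
        (CurrentGoalKind::NormalizesTo, Certainty::Yes) => {
            if nested_goals.is_empty() {
                assert_eq!(goals_certainty, Certainty::Yes);
            }
            (Certainty::Yes, nested_goals)
        }
        // Everything else keeps its goals and unifies the certainties.
        _ => (shallow_certainty.unify_with(goals_certainty), Vec::new()),
    }
}

fn main() {
    // A confident `NormalizesTo` candidate hands its ambiguous nested goals to the caller.
    let (certainty, deferred) = make_response(
        CurrentGoalKind::NormalizesTo,
        Certainty::Yes,
        Certainty::Maybe,
        vec!["T: Trait"],
    );
    assert_eq!(certainty, Certainty::Yes);
    assert_eq!(deferred, vec!["T: Trait"]);

    // A candidate that is itself only maybe applicable stays ambiguous and
    // defers nothing, even for `NormalizesTo` goals.
    let (certainty, deferred) = make_response(
        CurrentGoalKind::NormalizesTo,
        Certainty::Maybe,
        Certainty::Maybe,
        vec!["T: Trait"],
    );
    assert_eq!(certainty, Certainty::Maybe);
    assert!(deferred.is_empty());

    // Non-`NormalizesTo` goals never defer their nested goals.
    let (certainty, deferred) =
        make_response(CurrentGoalKind::Misc, Certainty::Yes, Certainty::Maybe, vec![]);
    assert_eq!(certainty, Certainty::Maybe);
    assert!(deferred.is_empty());
}
```

The catch-all arm mirrors the `_` arm in the diff: `Misc` and `CoinductiveTrait` goals, as well as `NormalizesTo` goals whose `shallow_certainty` is `Certainty::Maybe`, all fall through to `unify_with` and return an empty set of deferred goals.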