@@ -8,21 +8,15 @@ use rustc_middle::ty::layout::{LayoutOf as _, ValidityRequirement};
 use rustc_middle::ty::GenericArgsRef;
 use rustc_middle::ty::{Ty, TyCtxt};
 use rustc_middle::{
-    mir::{
-        self,
-        interpret::{
-            Allocation, ConstAllocation, GlobalId, InterpResult, PointerArithmetic, Scalar,
-        },
-        BinOp, ConstValue, NonDivergingIntrinsic,
-    },
+    mir::{self, BinOp, ConstValue, NonDivergingIntrinsic},
     ty::layout::TyAndLayout,
 };
 use rustc_span::symbol::{sym, Symbol};
 use rustc_target::abi::Size;

 use super::{
-    util::ensure_monomorphic_enough, CheckInAllocMsg, ImmTy, InterpCx, Machine, OpTy, PlaceTy,
-    Pointer,
+    util::ensure_monomorphic_enough, Allocation, CheckInAllocMsg, ConstAllocation, GlobalId, ImmTy,
+    InterpCx, InterpResult, Machine, OpTy, PlaceTy, Pointer, PointerArithmetic, Provenance, Scalar,
 };

 use crate::fluent_generated as fluent;
@@ -236,106 +230,125 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             }
             sym::ptr_offset_from | sym::ptr_offset_from_unsigned => {
                 let a = self.read_pointer(&args[0])?;
+                let a_parts = self.ptr_try_get_alloc_id(a);
                 let b = self.read_pointer(&args[1])?;
+                let b_parts = self.ptr_try_get_alloc_id(b);
+
+                // If the pointers' addresses are the same, then different rules apply.
+                let same_addr = match (a_parts, b_parts) {
+                    (Err(a_addr), Err(b_addr)) => a_addr == b_addr,
+                    _ if M::Provenance::OFFSET_IS_ADDR => a.addr() == b.addr(),
+                    _ => {
+                        // We can't determine whether they are the same, so we have to assume they
+                        // are different.
+                        false
+                    }
+                };

-                let usize_layout = self.layout_of(self.tcx.types.usize)?;
-                let isize_layout = self.layout_of(self.tcx.types.isize)?;
-
-                // Get offsets for both that are at least relative to the same base.
-                let (a_offset, b_offset) =
-                    match (self.ptr_try_get_alloc_id(a), self.ptr_try_get_alloc_id(b)) {
-                        (Err(a), Err(b)) => {
-                            // Neither pointer points to an allocation.
-                            // If these are inequal or null, this *will* fail the deref check below.
-                            (a, b)
-                        }
-                        (Err(_), _) | (_, Err(_)) => {
-                            // We managed to find a valid allocation for one pointer, but not the other.
-                            // That means they are definitely not pointing to the same allocation.
-                            throw_ub_custom!(
-                                fluent::const_eval_different_allocations,
-                                name = intrinsic_name,
-                            );
-                        }
-                        (Ok((a_alloc_id, a_offset, _)), Ok((b_alloc_id, b_offset, _))) => {
-                            // Found allocation for both. They must be into the same allocation.
-                            if a_alloc_id != b_alloc_id {
+                if same_addr {
+                    // Distance: 0. No further requirements.
+                    self.write_scalar(Scalar::from_target_usize(0, self), dest)?;
+                } else {
+                    let usize_layout = self.layout_of(self.tcx.types.usize)?;
+                    let isize_layout = self.layout_of(self.tcx.types.isize)?;
+
+                    // The addresses are not the same, so we have to do some extra checks and some maths.
+                    let (a_offset, b_offset) =
+                        match (self.ptr_try_get_alloc_id(a), self.ptr_try_get_alloc_id(b)) {
+                            (Ok((a_alloc_id, a_offset, _)), Ok((b_alloc_id, b_offset, _))) => {
+                                // Found allocation for both. They must be into the same allocation.
+                                if a_alloc_id != b_alloc_id {
+                                    throw_ub_custom!(
+                                        fluent::const_eval_offset_from_different_allocations,
+                                        name = intrinsic_name,
+                                    );
+                                }
+                                // Use these offsets for distance calculation.
+                                (a_offset.bytes(), b_offset.bytes())
+                            }
+                            (Err(_), Err(_)) => {
+                                throw_ub_custom!(
+                                    fluent::const_eval_offset_from_different_integers,
+                                    name = intrinsic_name,
+                                );
+                            }
+                            _ => {
+                                // They don't both have an allocation, and they are also not the same.
+                                // This is never allowed.
+                                throw_ub_custom!(
+                                    fluent::const_eval_offset_from_different_allocations,
+                                    name = intrinsic_name,
+                                );
+                            }
+                        };
+
+                    // Compute distance.
+                    let dist = {
+                        // Addresses are unsigned, so this is a `usize` computation. We have to do the
+                        // overflow check separately anyway.
+                        let (val, overflowed) = {
+                            let a_offset = ImmTy::from_uint(a_offset, usize_layout);
+                            let b_offset = ImmTy::from_uint(b_offset, usize_layout);
+                            self.overflowing_binary_op(BinOp::Sub, &a_offset, &b_offset)?
+                        };
+                        if overflowed {
+                            // a < b
+                            if intrinsic_name == sym::ptr_offset_from_unsigned {
+                                throw_ub_custom!(
+                                    fluent::const_eval_offset_from_unsigned_overflow,
+                                    a_offset = a_offset,
+                                    b_offset = b_offset,
+                                );
+                            }
+                            // The signed form of the intrinsic allows this. If we interpret the
+                            // difference as isize, we'll get the proper signed difference. If that
+                            // seems *positive*, they were more than isize::MAX apart.
+                            let dist = val.to_scalar().to_target_isize(self)?;
+                            if dist >= 0 {
+                                throw_ub_custom!(
+                                    fluent::const_eval_offset_from_underflow,
+                                    name = intrinsic_name,
+                                );
+                            }
+                            dist
+                        } else {
+                            // b >= a
+                            let dist = val.to_scalar().to_target_isize(self)?;
+                            // If converting to isize produced a *negative* result, we had an overflow
+                            // because they were more than isize::MAX apart.
+                            if dist < 0 {
                                 throw_ub_custom!(
-                                    fluent::const_eval_different_allocations,
+                                    fluent::const_eval_offset_from_overflow,
                                     name = intrinsic_name,
                                 );
                             }
-                            // Use these offsets for distance calculation.
-                            (a_offset.bytes(), b_offset.bytes())
+                            dist
                         }
                     };

-                // Compute distance.
-                let dist = {
-                    // Addresses are unsigned, so this is a `usize` computation. We have to do the
-                    // overflow check separately anyway.
-                    let (val, overflowed) = {
-                        let a_offset = ImmTy::from_uint(a_offset, usize_layout);
-                        let b_offset = ImmTy::from_uint(b_offset, usize_layout);
-                        self.overflowing_binary_op(BinOp::Sub, &a_offset, &b_offset)?
-                    };
-                    if overflowed {
-                        // a < b
-                        if intrinsic_name == sym::ptr_offset_from_unsigned {
-                            throw_ub_custom!(
-                                fluent::const_eval_unsigned_offset_from_overflow,
-                                a_offset = a_offset,
-                                b_offset = b_offset,
-                            );
-                        }
-                        // The signed form of the intrinsic allows this. If we interpret the
-                        // difference as isize, we'll get the proper signed difference. If that
-                        // seems *positive*, they were more than isize::MAX apart.
-                        let dist = val.to_scalar().to_target_isize(self)?;
-                        if dist >= 0 {
-                            throw_ub_custom!(
-                                fluent::const_eval_offset_from_underflow,
-                                name = intrinsic_name,
-                            );
-                        }
-                        dist
+                    // Check that the range between them is dereferenceable ("in-bounds or one past the
+                    // end of the same allocation"). This is like the check in ptr_offset_inbounds.
+                    let min_ptr = if dist >= 0 { b } else { a };
+                    self.check_ptr_access(
+                        min_ptr,
+                        Size::from_bytes(dist.unsigned_abs()),
+                        CheckInAllocMsg::OffsetFromTest,
+                    )?;
+
+                    // Perform division by size to compute return value.
+                    let ret_layout = if intrinsic_name == sym::ptr_offset_from_unsigned {
+                        assert!(0 <= dist && dist <= self.target_isize_max());
+                        usize_layout
                     } else {
-                        // b >= a
-                        let dist = val.to_scalar().to_target_isize(self)?;
-                        // If converting to isize produced a *negative* result, we had an overflow
-                        // because they were more than isize::MAX apart.
-                        if dist < 0 {
-                            throw_ub_custom!(
-                                fluent::const_eval_offset_from_overflow,
-                                name = intrinsic_name,
-                            );
-                        }
-                        dist
-                    }
-                };
-
-                // Check that the range between them is dereferenceable ("in-bounds or one past the
-                // end of the same allocation"). This is like the check in ptr_offset_inbounds.
-                let min_ptr = if dist >= 0 { b } else { a };
-                self.check_ptr_access(
-                    min_ptr,
-                    Size::from_bytes(dist.unsigned_abs()),
-                    CheckInAllocMsg::OffsetFromTest,
-                )?;
-
-                // Perform division by size to compute return value.
-                let ret_layout = if intrinsic_name == sym::ptr_offset_from_unsigned {
-                    assert!(0 <= dist && dist <= self.target_isize_max());
-                    usize_layout
-                } else {
-                    assert!(self.target_isize_min() <= dist && dist <= self.target_isize_max());
-                    isize_layout
-                };
-                let pointee_layout = self.layout_of(instance_args.type_at(0))?;
-                // If ret_layout is unsigned, we checked that so is the distance, so we are good.
-                let val = ImmTy::from_int(dist, ret_layout);
-                let size = ImmTy::from_int(pointee_layout.size.bytes(), ret_layout);
-                self.exact_div(&val, &size, dest)?;
+                        assert!(self.target_isize_min() <= dist && dist <= self.target_isize_max());
+                        isize_layout
+                    };
+                    let pointee_layout = self.layout_of(instance_args.type_at(0))?;
+                    // If ret_layout is unsigned, we checked that so is the distance, so we are good.
+                    let val = ImmTy::from_int(dist, ret_layout);
+                    let size = ImmTy::from_int(pointee_layout.size.bytes(), ret_layout);
+                    self.exact_div(&val, &size, dest)?;
+                }
             }

             sym::assert_inhabited
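
A minimal sketch of the behavior this hunk enables (written for this note, not taken from the commit; the constant name and the integer address 8 are illustrative): with the new `same_addr` fast path, `offset_from` between two pointers that share an address is accepted in const evaluation and yields 0, even when neither pointer has a backing allocation.

// Illustration only, assuming the change in this diff: both arguments are the same
// provenance-free integer pointer, so the const evaluator hits the Err/Err same-address
// arm and returns a distance of 0 instead of rejecting the dangling pointer.
const SAME_ADDR_DIST: isize = {
    let p = 8 as *const u8; // integer pointer, no allocation behind it
    // SAFETY: both arguments have the same address, the case this change always allows.
    unsafe { p.offset_from(p) }
};

fn main() {
    assert_eq!(SAME_ADDR_DIST, 0);
}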