diff --git a/compiler/rustc_codegen_llvm/src/lib.rs b/compiler/rustc_codegen_llvm/src/lib.rs index 3310fe4f43f83..a7654f434c647 100644 --- a/compiler/rustc_codegen_llvm/src/lib.rs +++ b/compiler/rustc_codegen_llvm/src/lib.rs @@ -354,8 +354,12 @@ impl CodegenBackend for LlvmCodegenBackend { } fn replaced_intrinsics(&self) -> Vec { - let mut will_not_use_fallback = - vec![sym::unchecked_funnel_shl, sym::unchecked_funnel_shr, sym::carrying_mul_add]; + let mut will_not_use_fallback = vec![ + sym::unchecked_funnel_shl, + sym::unchecked_funnel_shr, + sym::carrying_mul_add, + sym::layout_of_val, + ]; if llvm_util::get_version() >= (22, 0, 0) { will_not_use_fallback.push(sym::carryless_mul); diff --git a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs index f5ee9406f4bf1..ebf516c242c8a 100644 --- a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs +++ b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs @@ -1,4 +1,4 @@ -use rustc_abi::WrappingRange; +use rustc_abi::{Align, FIRST_VARIANT, FieldIdx, WrappingRange}; use rustc_middle::mir::SourceInfo; use rustc_middle::ty::{self, Ty, TyCtxt}; use rustc_middle::{bug, span_bug}; @@ -7,7 +7,7 @@ use rustc_span::sym; use rustc_target::spec::Arch; use super::FunctionCx; -use super::operand::OperandRef; +use super::operand::{OperandRef, OperandRefBuilder}; use super::place::PlaceRef; use crate::common::{AtomicRmwBinOp, SynchronizationScope}; use crate::errors::InvalidMonomorphization; @@ -149,17 +149,39 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { sym::va_start => bx.va_start(args[0].immediate()), sym::va_end => bx.va_end(args[0].immediate()), - sym::size_of_val => { + sym::size_of_val | sym::align_of_val | sym::layout_of_val => { let tp_ty = fn_args.type_at(0); let (_, meta) = args[0].val.pointer_parts(); - let (llsize, _) = size_of_val::size_and_align_of_dst(bx, tp_ty, meta); - llsize - } - sym::align_of_val => { - let tp_ty = fn_args.type_at(0); - let (_, 
meta) = args[0].val.pointer_parts(); - let (_, llalign) = size_of_val::size_and_align_of_dst(bx, tp_ty, meta); - llalign + let (llsize, llalign) = size_of_val::size_and_align_of_dst(bx, tp_ty, meta); + match name { + sym::size_of_val => llsize, + sym::align_of_val => llalign, + sym::layout_of_val => { + // The builder insulates us from in-memory order, but double-check declared order + debug_assert!({ + let layout_adt = result.layout.ty.ty_adt_def().unwrap(); + let layout_fields = layout_adt.variant(FIRST_VARIANT).fields.as_slice(); + if let [size, align] = &layout_fields.raw + && size.name == sym::size + && align.name == sym::align + { + true + } else { + false + } + }); + + let mut builder = OperandRefBuilder::<'_, Bx::Value>::new(result.layout); + builder.insert_imm(FieldIdx::from_u32(0), llsize); + builder.insert_imm(FieldIdx::from_u32(1), llalign); + let val = builder.build(bx.cx()).val; + // the match can only return a single `Bx::Value`, + // so we need to do the store and return. + val.store(bx, result); + return Ok(()); + } + _ => bug!(), + } } sym::vtable_size | sym::vtable_align => { let vtable = args[0].immediate(); @@ -179,9 +201,14 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { let size_bound = bx.data_layout().ptr_sized_integer().signed_max() as u128; bx.range_metadata(value, WrappingRange { start: 0, end: size_bound }); } - // Alignment is always nonzero. + // Alignment is always a power of two, thus 1..=0x800…000, + // but also bounded by the maximum we support in type layout. 
sym::vtable_align => { - bx.range_metadata(value, WrappingRange { start: 1, end: !0 }) + let align_bound = u128::min( + bx.data_layout().ptr_sized_integer().signed_max() as u128 + 1, + Align::MAX.bytes().into(), + ); + bx.range_metadata(value, WrappingRange { start: 1, end: align_bound }) } _ => {} } diff --git a/compiler/rustc_codegen_ssa/src/size_of_val.rs b/compiler/rustc_codegen_ssa/src/size_of_val.rs index e1bd8014d7a2f..b09bdb63814d4 100644 --- a/compiler/rustc_codegen_ssa/src/size_of_val.rs +++ b/compiler/rustc_codegen_ssa/src/size_of_val.rs @@ -1,6 +1,6 @@ //! Computing the size and alignment of a value. -use rustc_abi::WrappingRange; +use rustc_abi::{Align, WrappingRange}; use rustc_hir::LangItem; use rustc_middle::bug; use rustc_middle::ty::print::{with_no_trimmed_paths, with_no_visible_paths}; @@ -36,8 +36,10 @@ pub fn size_and_align_of_dst<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( // Size is always <= isize::MAX. let size_bound = bx.data_layout().ptr_sized_integer().signed_max() as u128; bx.range_metadata(size, WrappingRange { start: 0, end: size_bound }); - // Alignment is always nonzero. - bx.range_metadata(align, WrappingRange { start: 1, end: !0 }); + // Alignment is always a power of two, thus 1..=0x800…000, + // but also bounded by the maximum we support in type layout. + let align_bound = u128::min(size_bound + 1, Align::MAX.bytes().into()); + bx.range_metadata(align, WrappingRange { start: 1, end: align_bound }); (size, align) } @@ -157,7 +159,12 @@ pub fn size_and_align_of_dst<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( // Furthermore, `align >= unsized_align`, and therefore we only need to do: // let full_size = (unsized_offset_unadjusted + unsized_size).align_to(full_align); - let full_size = bx.add(unsized_offset_unadjusted, unsized_size); + // This is the size *before* rounding up, which cannot exceed the size *after* + // rounding up, which itself cannot exceed `isize::MAX`. 
Thus the addition + // itself cannot overflow `isize::MAX`, let alone `usize::MAX`. + // (The range attribute from loading the size from the vtable is enough to prove + // `nuw`, but not `nsw`, which we only know from Rust's layout rules.) + let full_size = bx.unchecked_suadd(unsized_offset_unadjusted, unsized_size); // Issue #27023: must add any necessary padding to `size` // (to make it a multiple of `align`) before returning it. diff --git a/compiler/rustc_hir_analysis/src/check/intrinsic.rs b/compiler/rustc_hir_analysis/src/check/intrinsic.rs index 5669b6793add7..6a4bf3a32eed8 100644 --- a/compiler/rustc_hir_analysis/src/check/intrinsic.rs +++ b/compiler/rustc_hir_analysis/src/check/intrinsic.rs @@ -296,6 +296,9 @@ pub(crate) fn check_intrinsic_type( sym::size_of_val | sym::align_of_val => { (1, 0, vec![Ty::new_imm_ptr(tcx, param(0))], tcx.types.usize) } + sym::layout_of_val => { + (1, 0, vec![Ty::new_imm_ptr(tcx, param(0))], tcx.ty_alloc_layout(span)) + } sym::offset_of => (1, 0, vec![tcx.types.u32, tcx.types.u32], tcx.types.usize), sym::rustc_peek => (1, 0, vec![param(0)], param(0)), sym::caller_location => (0, 0, vec![], tcx.caller_location_ty()), diff --git a/compiler/rustc_middle/src/ty/context.rs b/compiler/rustc_middle/src/ty/context.rs index 430890d5a42d8..4429403737073 100644 --- a/compiler/rustc_middle/src/ty/context.rs +++ b/compiler/rustc_middle/src/ty/context.rs @@ -1079,6 +1079,12 @@ impl<'tcx> TyCtxt<'tcx> { self.type_of(ordering_enum).no_bound_vars().unwrap() } + /// Gets a `Ty` representing the [`LangItem::AllocLayout`] + pub fn ty_alloc_layout(self, span: Span) -> Ty<'tcx> { + let layout_did = self.require_lang_item(hir::LangItem::AllocLayout, span); + self.type_of(layout_did).no_bound_vars().unwrap() + } + /// Obtain the given diagnostic item's `DefId`. Use `is_diagnostic_item` if you just want to /// compare against another `DefId`, since `is_diagnostic_item` is cheaper. 
pub fn get_diagnostic_item(self, name: Symbol) -> Option<DefId> { diff --git a/compiler/rustc_span/src/symbol.rs b/compiler/rustc_span/src/symbol.rs index e6ad39928b304..d601a5752dd67 100644 --- a/compiler/rustc_span/src/symbol.rs +++ b/compiler/rustc_span/src/symbol.rs @@ -1347,6 +1347,7 @@ symbols! { large_assignments, last, lateout, + layout_of_val, lazy_normalization_consts, lazy_type_alias, le, diff --git a/library/core/src/alloc/layout.rs b/library/core/src/alloc/layout.rs index 4bffdd17696fb..b50d6051ff44b 100644 --- a/library/core/src/alloc/layout.rs +++ b/library/core/src/alloc/layout.rs @@ -5,7 +5,7 @@ // Your performance intuition is useless. Run perf. use crate::error::Error; -use crate::intrinsics::{unchecked_add, unchecked_mul, unchecked_sub}; +use crate::intrinsics::{self, unchecked_add, unchecked_mul, unchecked_sub}; use crate::mem::SizedTypeProperties; use crate::ptr::{Alignment, NonNull}; use crate::{assert_unsafe_precondition, fmt, mem}; @@ -26,6 +26,9 @@ use crate::{assert_unsafe_precondition, fmt, mem}; /// requirements, or use the more lenient `Allocator` interface.) #[stable(feature = "alloc_layout", since = "1.28.0")] #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] +// BEWARE! The implementation of the `layout_of_val` intrinsic is coupled to the +// declared order of these fields. As a reminder, you'll also get a (debug-only) +// ICE if you change their names, though you can easily update that expectation. #[lang = "alloc_layout"] pub struct Layout { // size of the requested block of memory, measured in bytes. @@ -210,14 +213,23 @@ impl Layout { /// Produces layout describing a record that could be used to /// allocate backing structure for `T` (which could be a trait /// or other unsized type like a slice). 
+ /// + /// # Examples + /// + /// ``` + /// use std::alloc::Layout; + /// + /// let array = [1_u8, 2, 3]; + /// assert_eq!(Layout::for_value::<[u8]>(&array), Layout::from_size_align(3, 1).unwrap()); + /// ``` #[stable(feature = "alloc_layout", since = "1.28.0")] #[rustc_const_stable(feature = "const_alloc_layout", since = "1.85.0")] #[must_use] #[inline] pub const fn for_value(t: &T) -> Self { - let (size, alignment) = (size_of_val(t), Alignment::of_val(t)); - // SAFETY: see rationale in `new` for why this is using the unsafe variant - unsafe { Layout::from_size_alignment_unchecked(size, alignment) } + // SAFETY: val is a reference, so if it's to a DST it has valid metadata. + // (And if `T` is sized there's no requirements on the pointer.) + unsafe { Layout::for_value_raw(t) } } /// Produces layout describing a record that could be used to @@ -247,14 +259,36 @@ impl Layout { /// /// [trait object]: ../../book/ch17-02-trait-objects.html /// [extern type]: ../../unstable-book/language-features/extern-types.html + /// + /// # Examples + /// + /// ``` + /// #![feature(layout_for_ptr)] + /// + /// use std::alloc::Layout; + /// use std::ptr; + /// + /// let arbitrary = ptr::without_provenance::<[u16; 3]>(123456); + /// assert_eq!( + /// // SAFETY: for a sized pointee, the function is always sound. + /// unsafe { Layout::for_value_raw(arbitrary) }, + /// Layout::from_size_align(6, 2).unwrap(), + /// ); + /// + /// let slice = ptr::slice_from_raw_parts(arbitrary, 789); + /// assert_eq!( + /// // SAFETY: with a slice pointee, this is sound because the length + /// // is short enough that size in bytes doesn't overflow isize::MAX. 
+ /// unsafe { Layout::for_value_raw(slice) }, + /// Layout::from_size_align(6 * 789, 2).unwrap(), + /// ); + /// ``` #[unstable(feature = "layout_for_ptr", issue = "69835")] #[must_use] #[inline] - pub const unsafe fn for_value_raw(t: *const T) -> Self { + pub const unsafe fn for_value_raw(ptr: *const T) -> Self { // SAFETY: we pass along the prerequisites of these functions to the caller - let (size, alignment) = unsafe { (mem::size_of_val_raw(t), Alignment::of_val_raw(t)) }; - // SAFETY: see rationale in `new` for why this is using the unsafe variant - unsafe { Layout::from_size_alignment_unchecked(size, alignment) } + unsafe { intrinsics::layout_of_val(ptr) } } /// Creates a `NonNull` that is dangling, but well-aligned for this Layout. diff --git a/library/core/src/intrinsics/mod.rs b/library/core/src/intrinsics/mod.rs index 9d5f49c88295a..d1be1d9e55f9a 100644 --- a/library/core/src/intrinsics/mod.rs +++ b/library/core/src/intrinsics/mod.rs @@ -53,6 +53,7 @@ issue = "none" )] +use crate::alloc::Layout; use crate::ffi::va_list::{VaArgSafe, VaList}; use crate::marker::{ConstParamTy, DiscriminantKind, PointeeSized, Tuple}; use crate::{mem, ptr}; @@ -2864,6 +2865,26 @@ pub const unsafe fn size_of_val(ptr: *const T) -> usize; #[rustc_intrinsic_const_stable_indirect] pub const unsafe fn align_of_val(ptr: *const T) -> usize; +/// The size and alignment of the referenced value in bytes. +/// +/// The stabilized version of this intrinsic is [`Layout::for_value_raw`]. +/// +/// # Safety +/// +/// See [`Layout::for_value_raw`] for safety conditions. +#[rustc_nounwind] +#[unstable(feature = "core_intrinsics", issue = "none")] +#[rustc_intrinsic] +// This adds no semantics or UB atop just calling `size_of_val`+`align_of_val`. 
+#[miri::intrinsic_fallback_is_spec] +pub const unsafe fn layout_of_val(ptr: *const T) -> Layout { + // SAFETY: we pass along the prerequisites of these functions to the caller + let (size, align) = unsafe { (size_of_val(ptr), align_of_val(ptr)) }; + // SAFETY: The size and alignment of a valid allocation (or type) + // always meet the requirements of `Layout`. + unsafe { Layout::from_size_align_unchecked(size, align) } +} + /// Compute the type information of a concrete type. /// It can only be called at compile time, the backends do /// not implement it. diff --git a/tests/codegen-llvm/dst-vtable-align-nonzero.rs b/tests/codegen-llvm/dst-vtable-align-nonzero.rs index 1404bd64f500c..2eee91876683c 100644 --- a/tests/codegen-llvm/dst-vtable-align-nonzero.rs +++ b/tests/codegen-llvm/dst-vtable-align-nonzero.rs @@ -64,4 +64,4 @@ pub unsafe fn align_load_from_vtable_align_intrinsic(x: &dyn Trait) -> usize { core::intrinsics::vtable_align(vtable) } -// CHECK: [[RANGE_META]] = !{[[USIZE]] 1, [[USIZE]] 0} +// CHECK: [[RANGE_META]] = !{[[USIZE]] 1, [[USIZE]] [[#0x20000001]] diff --git a/tests/codegen-llvm/intrinsics/layout_of_val.rs b/tests/codegen-llvm/intrinsics/layout_of_val.rs new file mode 100644 index 0000000000000..a03af9c355669 --- /dev/null +++ b/tests/codegen-llvm/intrinsics/layout_of_val.rs @@ -0,0 +1,77 @@ +//@ compile-flags: -Copt-level=3 -C no-prepopulate-passes -Z inline-mir +//@ only-64bit (so I don't need to worry about usize) +//@ needs-deterministic-layouts + +// Note that the layout algorithm currently puts the align before the size, +// because the *type* for the size doesn't have a niche. This test may need +// to be updated if the in-memory field order of `Layout` ever changes. 
+ +#![crate_type = "lib"] +#![feature(core_intrinsics)] + +use std::alloc::Layout; +use std::intrinsics::layout_of_val; + +// CHECK-LABEL: @thin_metadata( +#[no_mangle] +pub unsafe fn thin_metadata(ptr: *const [u32; 2]) -> Layout { + // CHECK: [[LAYOUT:%.+]] = alloca [16 x i8], align 8 + // CHECK-NOT: load + // CHECK-NOT: store + // CHECK: store i64 4, ptr [[LAYOUT]], align 8 + // CHECK-NEXT: [[SIZEP:%.+]] = getelementptr inbounds i8, ptr [[LAYOUT]], i64 8 + // CHECK-NEXT: store i64 8, ptr [[SIZEP]], align 8 + // CHECK-NOT: store + layout_of_val(ptr) +} + +// CHECK-LABEL: @slice_metadata(ptr noundef %ptr.0, i64 noundef %ptr.1) +#[no_mangle] +pub unsafe fn slice_metadata(ptr: *const [u32]) -> Layout { + // CHECK: [[LAYOUT:%.+]] = alloca [16 x i8], align 8 + // CHECK-NOT: load + // CHECK-NOT: store + // CHECK: [[BYTES:%.+]] = mul nuw nsw i64 %ptr.1, 4 + // CHECK-NEXT: store i64 4, ptr [[LAYOUT]], align 8 + // CHECK-NEXT: [[SIZEP:%.+]] = getelementptr inbounds i8, ptr [[LAYOUT]], i64 8 + // CHECK-NEXT: store i64 [[BYTES]], ptr [[SIZEP]], align 8 + // CHECK-NOT: store + layout_of_val(ptr) +} + +pub struct WithTail([u32; 3], T); + +// CHECK-LABEL: @dst_metadata +// CHECK-SAME: (ptr noundef %ptr.0, ptr{{.+}}%ptr.1) +#[no_mangle] +pub unsafe fn dst_metadata(ptr: *const WithTail) -> Layout { + // CHECK: [[LAYOUT:%.+]] = alloca [16 x i8], align 8 + // CHECK-NOT: load + // CHECK-NOT: store + // CHECK: [[DST_SIZEP:%.+]] = getelementptr inbounds i8, ptr %ptr.1, i64 8 + // CHECK-NEXT: [[DST_SIZE:%.+]] = load i64, ptr [[DST_SIZEP]], align 8, + // CHECK-SAME: !range [[SIZE_RANGE:.+]], !invariant.load + // CHECK-NEXT: [[DST_ALIGNP:%.+]] = getelementptr inbounds i8, ptr %ptr.1, i64 16 + // CHECK-NEXT: [[DST_ALIGN:%.+]] = load i64, ptr [[DST_ALIGNP]], align 8, + // CHECK-SAME: !range [[ALIGN_RANGE:!.+]], !invariant.load + + // CHECK-NEXT: [[STRUCT_MORE:%.+]] = icmp ugt i64 4, [[DST_ALIGN]] + // CHECK-NEXT: [[ALIGN:%.+]] = select i1 [[STRUCT_MORE]], i64 4, i64 [[DST_ALIGN]] + + // 
CHECK-NEXT: [[MINSIZE:%.+]] = add nuw nsw i64 12, [[DST_SIZE]] + // CHECK-NEXT: [[ALIGN_M1:%.+]] = sub i64 [[ALIGN]], 1 + // CHECK-NEXT: [[MAXSIZE:%.+]] = add i64 [[MINSIZE]], [[ALIGN_M1]] + // CHECK-NEXT: [[ALIGN_NEG:%.+]] = sub i64 0, [[ALIGN]] + // CHECK-NEXT: [[SIZE:%.+]] = and i64 [[MAXSIZE]], [[ALIGN_NEG]] + + // CHECK-NEXT: store i64 [[ALIGN]], ptr [[LAYOUT]], align 8 + // CHECK-NEXT: [[LAYOUT_SIZEP:%.+]] = getelementptr inbounds i8, ptr [[LAYOUT]], i64 8 + // CHECK-NEXT: store i64 [[SIZE]], ptr [[LAYOUT_SIZEP]], align 8 + // CHECK-NOT: store + layout_of_val(ptr) +} + +// CHECK-LABEL: declare + +// CHECK: [[SIZE_RANGE]] = !{i64 0, i64 -[[#0x8000000000000000]] +// CHECK: [[ALIGN_RANGE]] = !{i64 1, i64 [[#0x20000001]] diff --git a/tests/mir-opt/pre-codegen/drop_boxed_slice.generic_in_place.PreCodegen.after.32bit.panic-abort.mir b/tests/mir-opt/pre-codegen/drop_boxed_slice.generic_in_place.PreCodegen.after.32bit.panic-abort.mir index f8e575f490b0c..5e606817826fe 100644 --- a/tests/mir-opt/pre-codegen/drop_boxed_slice.generic_in_place.PreCodegen.after.32bit.panic-abort.mir +++ b/tests/mir-opt/pre-codegen/drop_boxed_slice.generic_in_place.PreCodegen.after.32bit.panic-abort.mir @@ -8,33 +8,36 @@ fn generic_in_place(_1: *mut Box<[T]>) -> () { let _2: std::ptr::NonNull<[T]>; let mut _3: *mut [T]; let mut _4: *const [T]; + let mut _6: usize; let _9: (); scope 3 { + let _5: std::alloc::Layout; scope 4 { - scope 17 (inlined Layout::size) { + scope 8 (inlined Layout::size) { } - scope 18 (inlined std::ptr::Unique::<[T]>::cast::) { - scope 19 (inlined NonNull::<[T]>::cast::) { - scope 20 (inlined NonNull::<[T]>::as_ptr) { + scope 9 (inlined std::ptr::Unique::<[T]>::cast::) { + scope 10 (inlined NonNull::<[T]>::cast::) { + scope 11 (inlined NonNull::<[T]>::as_ptr) { } } } - scope 21 (inlined as From>>::from) { - scope 22 (inlined std::ptr::Unique::::as_non_null_ptr) { + scope 12 (inlined as From>>::from) { + scope 13 (inlined std::ptr::Unique::::as_non_null_ptr) { } } - 
scope 23 (inlined ::deallocate) { - scope 24 (inlined std::alloc::Global::deallocate_impl) { - scope 25 (inlined std::alloc::Global::deallocate_impl_runtime) { - let mut _8: *mut u8; - scope 26 (inlined Layout::size) { + scope 14 (inlined ::deallocate) { + scope 15 (inlined std::alloc::Global::deallocate_impl) { + scope 16 (inlined std::alloc::Global::deallocate_impl_runtime) { + let mut _7: *mut u8; + scope 17 (inlined Layout::size) { } - scope 27 (inlined NonNull::::as_ptr) { + scope 18 (inlined NonNull::::as_ptr) { } - scope 28 (inlined std::alloc::dealloc) { - scope 29 (inlined Layout::size) { + scope 19 (inlined std::alloc::dealloc) { + let mut _8: std::ptr::Alignment; + scope 20 (inlined Layout::size) { } - scope 30 (inlined Layout::alignment) { + scope 21 (inlined Layout::alignment) { } } } @@ -46,63 +49,44 @@ fn generic_in_place(_1: *mut Box<[T]>) -> () { } } scope 7 (inlined Layout::for_value_raw::<[T]>) { - let mut _5: usize; - let mut _7: std::ptr::Alignment; - scope 8 { - scope 16 (inlined #[track_caller] Layout::from_size_alignment_unchecked) { - } - } - scope 9 (inlined size_of_val_raw::<[T]>) { - } - scope 10 (inlined std::ptr::Alignment::of_val_raw::<[T]>) { - let _6: usize; - scope 11 { - scope 13 (inlined #[track_caller] std::ptr::Alignment::new_unchecked) { - scope 14 (inlined core::ub_checks::check_language_ub) { - scope 15 (inlined core::ub_checks::check_language_ub::runtime) { - } - } - } - } - scope 12 (inlined align_of_val_raw::<[T]>) { - } - } } } } } bb0: { + StorageLive(_5); StorageLive(_2); _2 = copy (((*_1).0: std::ptr::Unique<[T]>).0: std::ptr::NonNull<[T]>); StorageLive(_4); _3 = copy _2 as *mut [T] (Transmute); _4 = copy _2 as *const [T] (Transmute); - _5 = std::intrinsics::size_of_val::<[T]>(move _4) -> [return: bb1, unwind unreachable]; + _5 = layout_of_val::<[T]>(move _4) -> [return: bb1, unwind unreachable]; } bb1: { - StorageLive(_6); - _6 = const ::ALIGN; - _7 = copy _6 as std::ptr::Alignment (Transmute); - StorageDead(_6); 
StorageDead(_4); - switchInt(copy _5) -> [0: bb4, otherwise: bb2]; + _6 = copy (_5.0: usize); + switchInt(copy _6) -> [0: bb4, otherwise: bb2]; } bb2: { + StorageLive(_7); + _7 = copy _3 as *mut u8 (PtrToPtr); StorageLive(_8); - _8 = copy _3 as *mut u8 (PtrToPtr); - _9 = alloc::alloc::__rust_dealloc(move _8, move _5, move _7) -> [return: bb3, unwind unreachable]; + _8 = copy (_5.1: std::ptr::Alignment); + _9 = alloc::alloc::__rust_dealloc(move _7, move _6, move _8) -> [return: bb3, unwind unreachable]; } bb3: { StorageDead(_8); + StorageDead(_7); goto -> bb4; } bb4: { StorageDead(_2); + StorageDead(_5); return; } } diff --git a/tests/mir-opt/pre-codegen/drop_boxed_slice.generic_in_place.PreCodegen.after.32bit.panic-unwind.mir b/tests/mir-opt/pre-codegen/drop_boxed_slice.generic_in_place.PreCodegen.after.32bit.panic-unwind.mir index f8e575f490b0c..5e606817826fe 100644 --- a/tests/mir-opt/pre-codegen/drop_boxed_slice.generic_in_place.PreCodegen.after.32bit.panic-unwind.mir +++ b/tests/mir-opt/pre-codegen/drop_boxed_slice.generic_in_place.PreCodegen.after.32bit.panic-unwind.mir @@ -8,33 +8,36 @@ fn generic_in_place(_1: *mut Box<[T]>) -> () { let _2: std::ptr::NonNull<[T]>; let mut _3: *mut [T]; let mut _4: *const [T]; + let mut _6: usize; let _9: (); scope 3 { + let _5: std::alloc::Layout; scope 4 { - scope 17 (inlined Layout::size) { + scope 8 (inlined Layout::size) { } - scope 18 (inlined std::ptr::Unique::<[T]>::cast::) { - scope 19 (inlined NonNull::<[T]>::cast::) { - scope 20 (inlined NonNull::<[T]>::as_ptr) { + scope 9 (inlined std::ptr::Unique::<[T]>::cast::) { + scope 10 (inlined NonNull::<[T]>::cast::) { + scope 11 (inlined NonNull::<[T]>::as_ptr) { } } } - scope 21 (inlined as From>>::from) { - scope 22 (inlined std::ptr::Unique::::as_non_null_ptr) { + scope 12 (inlined as From>>::from) { + scope 13 (inlined std::ptr::Unique::::as_non_null_ptr) { } } - scope 23 (inlined ::deallocate) { - scope 24 (inlined std::alloc::Global::deallocate_impl) { - scope 25 
(inlined std::alloc::Global::deallocate_impl_runtime) { - let mut _8: *mut u8; - scope 26 (inlined Layout::size) { + scope 14 (inlined ::deallocate) { + scope 15 (inlined std::alloc::Global::deallocate_impl) { + scope 16 (inlined std::alloc::Global::deallocate_impl_runtime) { + let mut _7: *mut u8; + scope 17 (inlined Layout::size) { } - scope 27 (inlined NonNull::::as_ptr) { + scope 18 (inlined NonNull::::as_ptr) { } - scope 28 (inlined std::alloc::dealloc) { - scope 29 (inlined Layout::size) { + scope 19 (inlined std::alloc::dealloc) { + let mut _8: std::ptr::Alignment; + scope 20 (inlined Layout::size) { } - scope 30 (inlined Layout::alignment) { + scope 21 (inlined Layout::alignment) { } } } @@ -46,63 +49,44 @@ fn generic_in_place(_1: *mut Box<[T]>) -> () { } } scope 7 (inlined Layout::for_value_raw::<[T]>) { - let mut _5: usize; - let mut _7: std::ptr::Alignment; - scope 8 { - scope 16 (inlined #[track_caller] Layout::from_size_alignment_unchecked) { - } - } - scope 9 (inlined size_of_val_raw::<[T]>) { - } - scope 10 (inlined std::ptr::Alignment::of_val_raw::<[T]>) { - let _6: usize; - scope 11 { - scope 13 (inlined #[track_caller] std::ptr::Alignment::new_unchecked) { - scope 14 (inlined core::ub_checks::check_language_ub) { - scope 15 (inlined core::ub_checks::check_language_ub::runtime) { - } - } - } - } - scope 12 (inlined align_of_val_raw::<[T]>) { - } - } } } } } bb0: { + StorageLive(_5); StorageLive(_2); _2 = copy (((*_1).0: std::ptr::Unique<[T]>).0: std::ptr::NonNull<[T]>); StorageLive(_4); _3 = copy _2 as *mut [T] (Transmute); _4 = copy _2 as *const [T] (Transmute); - _5 = std::intrinsics::size_of_val::<[T]>(move _4) -> [return: bb1, unwind unreachable]; + _5 = layout_of_val::<[T]>(move _4) -> [return: bb1, unwind unreachable]; } bb1: { - StorageLive(_6); - _6 = const ::ALIGN; - _7 = copy _6 as std::ptr::Alignment (Transmute); - StorageDead(_6); StorageDead(_4); - switchInt(copy _5) -> [0: bb4, otherwise: bb2]; + _6 = copy (_5.0: usize); + 
switchInt(copy _6) -> [0: bb4, otherwise: bb2]; } bb2: { + StorageLive(_7); + _7 = copy _3 as *mut u8 (PtrToPtr); StorageLive(_8); - _8 = copy _3 as *mut u8 (PtrToPtr); - _9 = alloc::alloc::__rust_dealloc(move _8, move _5, move _7) -> [return: bb3, unwind unreachable]; + _8 = copy (_5.1: std::ptr::Alignment); + _9 = alloc::alloc::__rust_dealloc(move _7, move _6, move _8) -> [return: bb3, unwind unreachable]; } bb3: { StorageDead(_8); + StorageDead(_7); goto -> bb4; } bb4: { StorageDead(_2); + StorageDead(_5); return; } } diff --git a/tests/mir-opt/pre-codegen/drop_boxed_slice.generic_in_place.PreCodegen.after.64bit.panic-abort.mir b/tests/mir-opt/pre-codegen/drop_boxed_slice.generic_in_place.PreCodegen.after.64bit.panic-abort.mir index f8e575f490b0c..5e606817826fe 100644 --- a/tests/mir-opt/pre-codegen/drop_boxed_slice.generic_in_place.PreCodegen.after.64bit.panic-abort.mir +++ b/tests/mir-opt/pre-codegen/drop_boxed_slice.generic_in_place.PreCodegen.after.64bit.panic-abort.mir @@ -8,33 +8,36 @@ fn generic_in_place(_1: *mut Box<[T]>) -> () { let _2: std::ptr::NonNull<[T]>; let mut _3: *mut [T]; let mut _4: *const [T]; + let mut _6: usize; let _9: (); scope 3 { + let _5: std::alloc::Layout; scope 4 { - scope 17 (inlined Layout::size) { + scope 8 (inlined Layout::size) { } - scope 18 (inlined std::ptr::Unique::<[T]>::cast::) { - scope 19 (inlined NonNull::<[T]>::cast::) { - scope 20 (inlined NonNull::<[T]>::as_ptr) { + scope 9 (inlined std::ptr::Unique::<[T]>::cast::) { + scope 10 (inlined NonNull::<[T]>::cast::) { + scope 11 (inlined NonNull::<[T]>::as_ptr) { } } } - scope 21 (inlined as From>>::from) { - scope 22 (inlined std::ptr::Unique::::as_non_null_ptr) { + scope 12 (inlined as From>>::from) { + scope 13 (inlined std::ptr::Unique::::as_non_null_ptr) { } } - scope 23 (inlined ::deallocate) { - scope 24 (inlined std::alloc::Global::deallocate_impl) { - scope 25 (inlined std::alloc::Global::deallocate_impl_runtime) { - let mut _8: *mut u8; - scope 26 (inlined 
Layout::size) { + scope 14 (inlined ::deallocate) { + scope 15 (inlined std::alloc::Global::deallocate_impl) { + scope 16 (inlined std::alloc::Global::deallocate_impl_runtime) { + let mut _7: *mut u8; + scope 17 (inlined Layout::size) { } - scope 27 (inlined NonNull::::as_ptr) { + scope 18 (inlined NonNull::::as_ptr) { } - scope 28 (inlined std::alloc::dealloc) { - scope 29 (inlined Layout::size) { + scope 19 (inlined std::alloc::dealloc) { + let mut _8: std::ptr::Alignment; + scope 20 (inlined Layout::size) { } - scope 30 (inlined Layout::alignment) { + scope 21 (inlined Layout::alignment) { } } } @@ -46,63 +49,44 @@ fn generic_in_place(_1: *mut Box<[T]>) -> () { } } scope 7 (inlined Layout::for_value_raw::<[T]>) { - let mut _5: usize; - let mut _7: std::ptr::Alignment; - scope 8 { - scope 16 (inlined #[track_caller] Layout::from_size_alignment_unchecked) { - } - } - scope 9 (inlined size_of_val_raw::<[T]>) { - } - scope 10 (inlined std::ptr::Alignment::of_val_raw::<[T]>) { - let _6: usize; - scope 11 { - scope 13 (inlined #[track_caller] std::ptr::Alignment::new_unchecked) { - scope 14 (inlined core::ub_checks::check_language_ub) { - scope 15 (inlined core::ub_checks::check_language_ub::runtime) { - } - } - } - } - scope 12 (inlined align_of_val_raw::<[T]>) { - } - } } } } } bb0: { + StorageLive(_5); StorageLive(_2); _2 = copy (((*_1).0: std::ptr::Unique<[T]>).0: std::ptr::NonNull<[T]>); StorageLive(_4); _3 = copy _2 as *mut [T] (Transmute); _4 = copy _2 as *const [T] (Transmute); - _5 = std::intrinsics::size_of_val::<[T]>(move _4) -> [return: bb1, unwind unreachable]; + _5 = layout_of_val::<[T]>(move _4) -> [return: bb1, unwind unreachable]; } bb1: { - StorageLive(_6); - _6 = const ::ALIGN; - _7 = copy _6 as std::ptr::Alignment (Transmute); - StorageDead(_6); StorageDead(_4); - switchInt(copy _5) -> [0: bb4, otherwise: bb2]; + _6 = copy (_5.0: usize); + switchInt(copy _6) -> [0: bb4, otherwise: bb2]; } bb2: { + StorageLive(_7); + _7 = copy _3 as *mut u8 
(PtrToPtr); StorageLive(_8); - _8 = copy _3 as *mut u8 (PtrToPtr); - _9 = alloc::alloc::__rust_dealloc(move _8, move _5, move _7) -> [return: bb3, unwind unreachable]; + _8 = copy (_5.1: std::ptr::Alignment); + _9 = alloc::alloc::__rust_dealloc(move _7, move _6, move _8) -> [return: bb3, unwind unreachable]; } bb3: { StorageDead(_8); + StorageDead(_7); goto -> bb4; } bb4: { StorageDead(_2); + StorageDead(_5); return; } } diff --git a/tests/mir-opt/pre-codegen/drop_boxed_slice.generic_in_place.PreCodegen.after.64bit.panic-unwind.mir b/tests/mir-opt/pre-codegen/drop_boxed_slice.generic_in_place.PreCodegen.after.64bit.panic-unwind.mir index f8e575f490b0c..5e606817826fe 100644 --- a/tests/mir-opt/pre-codegen/drop_boxed_slice.generic_in_place.PreCodegen.after.64bit.panic-unwind.mir +++ b/tests/mir-opt/pre-codegen/drop_boxed_slice.generic_in_place.PreCodegen.after.64bit.panic-unwind.mir @@ -8,33 +8,36 @@ fn generic_in_place(_1: *mut Box<[T]>) -> () { let _2: std::ptr::NonNull<[T]>; let mut _3: *mut [T]; let mut _4: *const [T]; + let mut _6: usize; let _9: (); scope 3 { + let _5: std::alloc::Layout; scope 4 { - scope 17 (inlined Layout::size) { + scope 8 (inlined Layout::size) { } - scope 18 (inlined std::ptr::Unique::<[T]>::cast::) { - scope 19 (inlined NonNull::<[T]>::cast::) { - scope 20 (inlined NonNull::<[T]>::as_ptr) { + scope 9 (inlined std::ptr::Unique::<[T]>::cast::) { + scope 10 (inlined NonNull::<[T]>::cast::) { + scope 11 (inlined NonNull::<[T]>::as_ptr) { } } } - scope 21 (inlined as From>>::from) { - scope 22 (inlined std::ptr::Unique::::as_non_null_ptr) { + scope 12 (inlined as From>>::from) { + scope 13 (inlined std::ptr::Unique::::as_non_null_ptr) { } } - scope 23 (inlined ::deallocate) { - scope 24 (inlined std::alloc::Global::deallocate_impl) { - scope 25 (inlined std::alloc::Global::deallocate_impl_runtime) { - let mut _8: *mut u8; - scope 26 (inlined Layout::size) { + scope 14 (inlined ::deallocate) { + scope 15 (inlined 
std::alloc::Global::deallocate_impl) { + scope 16 (inlined std::alloc::Global::deallocate_impl_runtime) { + let mut _7: *mut u8; + scope 17 (inlined Layout::size) { } - scope 27 (inlined NonNull::::as_ptr) { + scope 18 (inlined NonNull::::as_ptr) { } - scope 28 (inlined std::alloc::dealloc) { - scope 29 (inlined Layout::size) { + scope 19 (inlined std::alloc::dealloc) { + let mut _8: std::ptr::Alignment; + scope 20 (inlined Layout::size) { } - scope 30 (inlined Layout::alignment) { + scope 21 (inlined Layout::alignment) { } } } @@ -46,63 +49,44 @@ fn generic_in_place(_1: *mut Box<[T]>) -> () { } } scope 7 (inlined Layout::for_value_raw::<[T]>) { - let mut _5: usize; - let mut _7: std::ptr::Alignment; - scope 8 { - scope 16 (inlined #[track_caller] Layout::from_size_alignment_unchecked) { - } - } - scope 9 (inlined size_of_val_raw::<[T]>) { - } - scope 10 (inlined std::ptr::Alignment::of_val_raw::<[T]>) { - let _6: usize; - scope 11 { - scope 13 (inlined #[track_caller] std::ptr::Alignment::new_unchecked) { - scope 14 (inlined core::ub_checks::check_language_ub) { - scope 15 (inlined core::ub_checks::check_language_ub::runtime) { - } - } - } - } - scope 12 (inlined align_of_val_raw::<[T]>) { - } - } } } } } bb0: { + StorageLive(_5); StorageLive(_2); _2 = copy (((*_1).0: std::ptr::Unique<[T]>).0: std::ptr::NonNull<[T]>); StorageLive(_4); _3 = copy _2 as *mut [T] (Transmute); _4 = copy _2 as *const [T] (Transmute); - _5 = std::intrinsics::size_of_val::<[T]>(move _4) -> [return: bb1, unwind unreachable]; + _5 = layout_of_val::<[T]>(move _4) -> [return: bb1, unwind unreachable]; } bb1: { - StorageLive(_6); - _6 = const ::ALIGN; - _7 = copy _6 as std::ptr::Alignment (Transmute); - StorageDead(_6); StorageDead(_4); - switchInt(copy _5) -> [0: bb4, otherwise: bb2]; + _6 = copy (_5.0: usize); + switchInt(copy _6) -> [0: bb4, otherwise: bb2]; } bb2: { + StorageLive(_7); + _7 = copy _3 as *mut u8 (PtrToPtr); StorageLive(_8); - _8 = copy _3 as *mut u8 (PtrToPtr); - _9 = 
alloc::alloc::__rust_dealloc(move _8, move _5, move _7) -> [return: bb3, unwind unreachable]; + _8 = copy (_5.1: std::ptr::Alignment); + _9 = alloc::alloc::__rust_dealloc(move _7, move _6, move _8) -> [return: bb3, unwind unreachable]; } bb3: { StorageDead(_8); + StorageDead(_7); goto -> bb4; } bb4: { StorageDead(_2); + StorageDead(_5); return; } } diff --git a/tests/mir-opt/pre-codegen/drop_boxed_slice.rs b/tests/mir-opt/pre-codegen/drop_boxed_slice.rs index ae10cfb0b1713..0679958512687 100644 --- a/tests/mir-opt/pre-codegen/drop_boxed_slice.rs +++ b/tests/mir-opt/pre-codegen/drop_boxed_slice.rs @@ -8,9 +8,9 @@ pub unsafe fn generic_in_place(ptr: *mut Box<[T]>) { // CHECK-LABEL: fn generic_in_place(_1: *mut Box<[T]>) // CHECK: (inlined as Drop>::drop) - // CHECK: [[SIZE:_.+]] = std::intrinsics::size_of_val::<[T]> - // CHECK: [[ALIGN:_.+]] = const ::ALIGN; - // CHECK: [[B:_.+]] = copy [[ALIGN]] as std::ptr::Alignment (Transmute); - // CHECK: = alloc::alloc::__rust_dealloc({{.+}}, move [[SIZE]], move [[B]]) -> + // CHECK: [[LAYOUT:_.+]] = layout_of_val::<[T]> + // CHECK: [[SIZE:_.+]] = copy ([[LAYOUT]].0: usize); + // CHECK: [[ALIGN:_.+]] = copy ([[LAYOUT]].1: std::ptr::Alignment); + // CHECK: = alloc::alloc::__rust_dealloc({{.+}}, move [[SIZE]], move [[ALIGN]]) -> std::ptr::drop_in_place(ptr) }