
Allow enum and union literals to also create SSA values #138759


Merged: 2 commits, Jul 5, 2025
39 changes: 38 additions & 1 deletion compiler/rustc_codegen_ssa/src/mir/operand.rs
@@ -571,6 +571,13 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
pub(crate) fn builder(
layout: TyAndLayout<'tcx>,
) -> Option<OperandRef<'tcx, Result<V, abi::Scalar>>> {
// Uninhabited types are weird, because for example `Result<!, !>`
// shows up as `FieldsShape::Primitive` and we need to be able to write
// a field into `(u32, !)`. We'll do that in an `alloca` instead.
if layout.uninhabited {
return None;
}

let val = match layout.backend_repr {
BackendRepr::Memory { .. } if layout.is_zst() => OperandValue::ZeroSized,
BackendRepr::Scalar(s) => OperandValue::Immediate(Err(s)),
@@ -640,16 +647,46 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, Result<V, abi::Scalar>> {
}
}

/// Insert the immediate value `imm` for field `f` in the *type itself*,
/// rather than into one of the variants.
///
/// Most things want [`OperandRef::insert_field`] instead, but this one is
/// necessary for writing things like enum tags that aren't in any variant.
pub(super) fn insert_imm(&mut self, f: FieldIdx, imm: V) {
let field_offset = self.layout.fields.offset(f.as_usize());
let is_zero_offset = field_offset == Size::ZERO;
match &mut self.val {
OperandValue::Immediate(val @ Err(_)) if is_zero_offset => {
*val = Ok(imm);
}
OperandValue::Pair(fst @ Err(_), _) if is_zero_offset => {
*fst = Ok(imm);
}
OperandValue::Pair(_, snd @ Err(_)) if !is_zero_offset => {
*snd = Ok(imm);
}
_ => bug!("Tried to insert {imm:?} into field {f:?} of {self:?}"),
}
}

/// After having set all necessary fields, this converts the
/// `OperandValue<Result<V, _>>` (as obtained from [`OperandRef::builder`])
/// to the normal `OperandValue<V>`.
///
/// ICEs if any required fields were not set.
pub fn build(&self) -> OperandRef<'tcx, V> {
pub fn build(&self, cx: &impl CodegenMethods<'tcx, Value = V>) -> OperandRef<'tcx, V> {
let OperandRef { val, layout } = *self;

// For something like `Option::<u32>::None`, it's expected that the
// payload scalar will not actually have been set, so this converts
// unset scalars to corresponding `undef` values so long as the scalar
// from the layout allows uninit.
let unwrap = |r: Result<V, abi::Scalar>| match r {
Ok(v) => v,
Err(s) if s.is_uninit_valid() => {
let bty = cx.type_from_scalar(s);
cx.const_undef(bty)
}
Comment on lines +686 to +689

Member:

hm. for some reason this feels like it will bite my nose.

Member (Author):

The undef part here is important because aggregating `None::<u32>` doesn't set the second field in the `ScalarPair`.

Member:

Ah, lovely.

Member (Author):

The good news is that with the `is_uninit_valid` guard this will still ICE if you miss a field in `(u32, u32)` or similar.

Err(_) => bug!("OperandRef::build called while fields are missing {self:?}"),
};
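
To make the discussion above concrete, here is a surface-level sketch in plain Rust; the layout comments are assumptions about a typical 64-bit target, matching what the new codegen tests in this PR check rather than anything this snippet verifies.

fn none_u32() -> Option<u32> {
    // Only the tag scalar of the ScalarPair gets written; the payload scalar
    // is never set, so `build` fills it with `undef` (compare `make_none_u16`
    // in the new test below, which checks for `{ i16 0, i16 undef }`).
    None
}

fn full_pair(x: u32) -> (u32, u32) {
    // Both scalars must be written here: `u32` does not admit uninit bits, so
    // a missing field would hit the `bug!` ICE path instead of becoming undef.
    (x, x)
}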

128 changes: 82 additions & 46 deletions compiler/rustc_codegen_ssa/src/mir/place.rs
@@ -1,4 +1,6 @@
use rustc_abi::{Align, BackendRepr, FieldsShape, Size, TagEncoding, VariantIdx, Variants};
use rustc_abi::{
Align, BackendRepr, FieldIdx, FieldsShape, Size, TagEncoding, VariantIdx, Variants,
};
use rustc_middle::mir::PlaceTy;
use rustc_middle::mir::interpret::Scalar;
use rustc_middle::ty::layout::{HasTyCtxt, HasTypingEnv, LayoutOf, TyAndLayout};
@@ -239,53 +241,17 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
bx: &mut Bx,
variant_index: VariantIdx,
) {
if self.layout.for_variant(bx.cx(), variant_index).is_uninhabited() {
// We play it safe by using a well-defined `abort`, but we could go for immediate UB
// if that turns out to be helpful.
bx.abort();
return;
}
match self.layout.variants {
Variants::Empty => unreachable!("we already handled uninhabited types"),
Variants::Single { index } => assert_eq!(index, variant_index),

Variants::Multiple { tag_encoding: TagEncoding::Direct, tag_field, .. } => {
let ptr = self.project_field(bx, tag_field.as_usize());
let to =
self.layout.ty.discriminant_for_variant(bx.tcx(), variant_index).unwrap().val;
bx.store_to_place(
bx.cx().const_uint_big(bx.cx().backend_type(ptr.layout), to),
ptr.val,
);
match codegen_tag_value(bx.cx(), variant_index, self.layout) {
Err(UninhabitedVariantError) => {
// We play it safe by using a well-defined `abort`, but we could go for immediate UB
// if that turns out to be helpful.
bx.abort();
}
Variants::Multiple {
tag_encoding:
TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start },
tag_field,
..
} => {
if variant_index != untagged_variant {
let niche = self.project_field(bx, tag_field.as_usize());
let niche_llty = bx.cx().immediate_backend_type(niche.layout);
let BackendRepr::Scalar(scalar) = niche.layout.backend_repr else {
bug!("expected a scalar placeref for the niche");
};
// We are supposed to compute `niche_value.wrapping_add(niche_start)` wrapping
// around the `niche`'s type.
// The easiest way to do that is to do wrapping arithmetic on `u128` and then
// masking off any extra bits that occur because we did the arithmetic with too many bits.
let niche_value = variant_index.as_u32() - niche_variants.start().as_u32();
let niche_value = (niche_value as u128).wrapping_add(niche_start);
let niche_value = niche_value & niche.layout.size.unsigned_int_max();

let niche_llval = bx.cx().scalar_to_backend(
Scalar::from_uint(niche_value, niche.layout.size),
scalar,
niche_llty,
);
OperandValue::Immediate(niche_llval).store(bx, niche);
}
Ok(Some((tag_field, imm))) => {
let tag_place = self.project_field(bx, tag_field.as_usize());
OperandValue::Immediate(imm).store(bx, tag_place);
}
Ok(None) => {}
}
}

@@ -471,3 +437,73 @@ fn round_up_const_value_to_alignment<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
let offset = bx.and(neg_value, align_minus_1);
bx.add(value, offset)
}

/// Calculates the value that needs to be stored to mark the discriminant.
///
/// This might be `None` for a `struct` or a niched variant (like `Some(&3)`).
///
/// If it's `Some`, it returns the value to store and the field in which to
/// store it. Note that this value is *not* the same as the discriminant, in
/// general, as it might be a niche value or have a different size.
///
/// It might also be an `Err` because the variant is uninhabited.
pub(super) fn codegen_tag_value<'tcx, V>(
cx: &impl CodegenMethods<'tcx, Value = V>,
variant_index: VariantIdx,
layout: TyAndLayout<'tcx>,
) -> Result<Option<(FieldIdx, V)>, UninhabitedVariantError> {
// By checking uninhabited-ness first we don't need to worry about types
// like `(u32, !)` which are single-variant but weird.
if layout.for_variant(cx, variant_index).is_uninhabited() {
return Err(UninhabitedVariantError);
}

Ok(match layout.variants {
Variants::Empty => unreachable!("we already handled uninhabited types"),
Variants::Single { index } => {
assert_eq!(index, variant_index);
None
}

Variants::Multiple { tag_encoding: TagEncoding::Direct, tag_field, .. } => {
let discr = layout.ty.discriminant_for_variant(cx.tcx(), variant_index);
let to = discr.unwrap().val;
let tag_layout = layout.field(cx, tag_field.as_usize());
let tag_llty = cx.immediate_backend_type(tag_layout);
let imm = cx.const_uint_big(tag_llty, to);
Some((tag_field, imm))
}
Variants::Multiple {
tag_encoding: TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start },
tag_field,
..
} => {
if variant_index != untagged_variant {
let niche_layout = layout.field(cx, tag_field.as_usize());
let niche_llty = cx.immediate_backend_type(niche_layout);
let BackendRepr::Scalar(scalar) = niche_layout.backend_repr else {
bug!("expected a scalar placeref for the niche");
};
// We are supposed to compute `niche_value.wrapping_add(niche_start)` wrapping
// around the `niche`'s type.
// The easiest way to do that is to do wrapping arithmetic on `u128` and then
// masking off any extra bits that occur because we did the arithmetic with too many bits.
let niche_value = variant_index.as_u32() - niche_variants.start().as_u32();
let niche_value = (niche_value as u128).wrapping_add(niche_start);
let niche_value = niche_value & niche_layout.size.unsigned_int_max();

let niche_llval = cx.scalar_to_backend(
Scalar::from_uint(niche_value, niche_layout.size),
scalar,
niche_llty,
);
Some((tag_field, niche_llval))
} else {
None
}
}
Comment on lines +481 to +503

Member:

note to self: mostly just a move from line 242

})
}

#[derive(Debug)]
pub(super) struct UninhabitedVariantError;
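
For reference, the niche arithmetic in `codegen_tag_value` can be sketched as standalone Rust. This is an illustration, not rustc-internal code; the parameter names mirror the layout fields, and the concrete numbers for `Option<bool>` come from the `make_none_bool` codegen test added later in this PR.

fn niche_tag_value(
    variant_index: u32,
    niche_variants_start: u32,
    niche_start: u128,
    niche_size_bytes: u64,
) -> u128 {
    // Compute `niche_value.wrapping_add(niche_start)` in u128, then mask off
    // any bits beyond the niche's size, as the comment in the code describes.
    let relative = (variant_index - niche_variants_start) as u128;
    let unmasked = relative.wrapping_add(niche_start);
    let mask = if niche_size_bytes >= 16 {
        u128::MAX
    } else {
        (1u128 << (niche_size_bytes * 8)) - 1
    };
    unmasked & mask
}

fn main() {
    // `Option<bool>`: `None` (variant 0) is the niched variant, the niche
    // range starts at variant 0, and the niche itself starts at 2, so the
    // stored tag is 2, matching `ret i8 2` in `make_none_bool`.
    assert_eq!(niche_tag_value(0, 0, 2, 1), 2);
}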
49 changes: 29 additions & 20 deletions compiler/rustc_codegen_ssa/src/mir/rvalue.rs
@@ -10,7 +10,7 @@ use rustc_span::{DUMMY_SP, Span};
use tracing::{debug, instrument};

use super::operand::{OperandRef, OperandValue};
use super::place::PlaceRef;
use super::place::{PlaceRef, codegen_tag_value};
use super::{FunctionCx, LocalRef};
use crate::common::IntPredicate;
use crate::traits::*;
@@ -694,7 +694,14 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
mir::Rvalue::Use(ref operand) => self.codegen_operand(bx, operand),
mir::Rvalue::Repeat(..) => bug!("{rvalue:?} in codegen_rvalue_operand"),
mir::Rvalue::Aggregate(_, ref fields) => {
mir::Rvalue::Aggregate(ref kind, ref fields) => {
let (variant_index, active_field_index) = match **kind {
mir::AggregateKind::Adt(_, variant_index, _, _, active_field_index) => {
(variant_index, active_field_index)
}
_ => (FIRST_VARIANT, None),
};

let ty = rvalue.ty(self.mir, self.cx.tcx());
let ty = self.monomorphize(ty);
let layout = self.cx.layout_of(ty);
@@ -706,10 +713,27 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
};
for (field_idx, field) in fields.iter_enumerated() {
let op = self.codegen_operand(bx, field);
builder.insert_field(bx, FIRST_VARIANT, field_idx, op);
let fi = active_field_index.unwrap_or(field_idx);
builder.insert_field(bx, variant_index, fi, op);
}

builder.build()
let tag_result = codegen_tag_value(self.cx, variant_index, layout);
match tag_result {
Err(super::place::UninhabitedVariantError) => {
// Like codegen_set_discr we use a sound abort, but could
// potentially `unreachable` or just return the poison for
// more optimizability, if that turns out to be helpful.
bx.abort();
let val = OperandValue::poison(bx, layout);
OperandRef { val, layout }
}
Ok(maybe_tag_value) => {
if let Some((tag_field, tag_imm)) = maybe_tag_value {
builder.insert_imm(tag_field, tag_imm);
}
builder.build(bx.cx())
}
}
}
mir::Rvalue::ShallowInitBox(ref operand, content_ty) => {
let operand = self.codegen_operand(bx, operand);
@@ -1037,28 +1061,13 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// Arrays are always aggregates, so it's not worth checking anything here.
// (If it's really `[(); N]` or `[T; 0]` and we use the place path, fine.)
mir::Rvalue::Repeat(..) => false,
mir::Rvalue::Aggregate(ref kind, _) => {
let allowed_kind = match **kind {
// This always produces a `ty::RawPtr`, so will be Immediate or Pair
mir::AggregateKind::RawPtr(..) => true,
mir::AggregateKind::Array(..) => false,
mir::AggregateKind::Tuple => true,
mir::AggregateKind::Adt(def_id, ..) => {
let adt_def = self.cx.tcx().adt_def(def_id);
adt_def.is_struct() && !adt_def.repr().simd()
}
mir::AggregateKind::Closure(..) => true,
// FIXME: Can we do this for simple coroutines too?
mir::AggregateKind::Coroutine(..) | mir::AggregateKind::CoroutineClosure(..) => false,
};
allowed_kind && {
Comment on lines -1040 to -1054

Member (Author):

As an update from previous revisions of this PR, note that we no longer need to check the `AggregateKind` here at all. The layout check is enough to cover all the possibilities.

mir::Rvalue::Aggregate(..) => {
let ty = rvalue.ty(self.mir, self.cx.tcx());
let ty = self.monomorphize(ty);
let layout = self.cx.spanned_layout_of(ty, span);
OperandRef::<Bx::Value>::builder(layout).is_some()
}
}
}

// (*) this is only true if the type is suitable
}
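
As a rough illustration of what the layout-based check accepts, consider the two functions below; the comments state expectations under this PR, not CHECK-verified output.

fn ssa_friendly(x: u32) -> Option<u32> {
    // `Option<u32>` is a ScalarPair, so `OperandRef::builder` succeeds and the
    // aggregate is built from SSA values rather than through an alloca.
    Some(x)
}

fn still_in_memory(x: u32) -> [u32; 4] {
    // An array has a memory layout, so the builder path is not taken and this
    // still goes through a stack slot.
    [x; 4]
}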
6 changes: 4 additions & 2 deletions tests/codegen/align-struct.rs
@@ -15,9 +15,11 @@ pub struct Nested64 {
d: i8,
}

// This has the extra field in B to ensure it's not ScalarPair,
// and thus that the test actually emits it via memory, not `insertvalue`.
pub enum Enum4 {
A(i32),
B(i32),
B(i32, i32),
}

pub enum Enum64 {
@@ -54,7 +56,7 @@ pub fn nested64(a: Align64, b: i32, c: i32, d: i8) -> Nested64 {
// CHECK-LABEL: @enum4
#[no_mangle]
pub fn enum4(a: i32) -> Enum4 {
// CHECK: %e4 = alloca [8 x i8], align 4
// CHECK: %e4 = alloca [12 x i8], align 4
let e4 = Enum4::A(a);
e4
}
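
A rough illustration of the ScalarPair distinction relied on above; the sizes are what one would expect on a 64-bit target and are asserted here rather than quoted from the PR.

enum PairLike {
    A(i32),
    B(i32), // tag plus a single i32 payload: representable as a ScalarPair
}

enum MemoryLike {
    A(i32),
    B(i32, i32), // tag plus two i32 payload words: no longer a ScalarPair
}

fn main() {
    assert_eq!(std::mem::size_of::<PairLike>(), 8);
    // 12 bytes matches the `[12 x i8]` alloca in the updated CHECK line.
    assert_eq!(std::mem::size_of::<MemoryLike>(), 12);
}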
6 changes: 3 additions & 3 deletions tests/codegen/common_prim_int_ptr.rs
@@ -11,9 +11,9 @@
#[no_mangle]
pub fn insert_int(x: usize) -> Result<usize, Box<()>> {
// CHECK: start:
// CHECK-NEXT: inttoptr i{{[0-9]+}} %x to ptr
// CHECK-NEXT: insertvalue
// CHECK-NEXT: ret { i{{[0-9]+}}, ptr }
// CHECK-NEXT: %[[WO_PROV:.+]] = getelementptr i8, ptr null, [[USIZE:i[0-9]+]] %x
// CHECK-NEXT: %[[R:.+]] = insertvalue { [[USIZE]], ptr } { [[USIZE]] 0, ptr poison }, ptr %[[WO_PROV]], 1
// CHECK-NEXT: ret { [[USIZE]], ptr } %[[R]]
Ok(x)
}
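
The `getelementptr i8, ptr null` in the new CHECK lines is the provenance-free way of turning an integer into a pointer. Assuming the stabilized strict-provenance API, the same idea at the Rust surface level looks roughly like this:

fn usize_to_ptr(x: usize) -> *const () {
    // A pointer with address `x` and no provenance, which is what the
    // GEP-from-null pattern expresses at the LLVM level.
    std::ptr::without_provenance(x)
}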

129 changes: 129 additions & 0 deletions tests/codegen/enum/enum-aggregate.rs
@@ -0,0 +1,129 @@
//@ compile-flags: -Copt-level=0 -Cno-prepopulate-passes
//@ min-llvm-version: 19
//@ only-64bit

#![crate_type = "lib"]

use std::cmp::Ordering;
use std::num::NonZero;
use std::ptr::NonNull;

#[no_mangle]
fn make_some_bool(x: bool) -> Option<bool> {
// CHECK-LABEL: i8 @make_some_bool(i1 zeroext %x)
// CHECK-NEXT: start:
// CHECK-NEXT: %[[WIDER:.+]] = zext i1 %x to i8
// CHECK-NEXT: ret i8 %[[WIDER]]
Some(x)
}

#[no_mangle]
fn make_none_bool() -> Option<bool> {
// CHECK-LABEL: i8 @make_none_bool()
// CHECK-NEXT: start:
// CHECK-NEXT: ret i8 2
None
}

#[no_mangle]
fn make_some_ordering(x: Ordering) -> Option<Ordering> {
// CHECK-LABEL: i8 @make_some_ordering(i8 %x)
// CHECK-NEXT: start:
// CHECK-NEXT: ret i8 %x
Some(x)
}

#[no_mangle]
fn make_some_u16(x: u16) -> Option<u16> {
// CHECK-LABEL: { i16, i16 } @make_some_u16(i16 %x)
// CHECK-NEXT: start:
// CHECK-NEXT: %0 = insertvalue { i16, i16 } { i16 1, i16 poison }, i16 %x, 1
// CHECK-NEXT: ret { i16, i16 } %0
Some(x)
}

#[no_mangle]
fn make_none_u16() -> Option<u16> {
// CHECK-LABEL: { i16, i16 } @make_none_u16()
// CHECK-NEXT: start:
// CHECK-NEXT: ret { i16, i16 } { i16 0, i16 undef }
None
}

#[no_mangle]
fn make_some_nzu32(x: NonZero<u32>) -> Option<NonZero<u32>> {
// CHECK-LABEL: i32 @make_some_nzu32(i32 %x)
// CHECK-NEXT: start:
// CHECK-NEXT: ret i32 %x
Some(x)
}

#[no_mangle]
fn make_ok_ptr(x: NonNull<u16>) -> Result<NonNull<u16>, usize> {
// CHECK-LABEL: { i64, ptr } @make_ok_ptr(ptr %x)
// CHECK-NEXT: start:
// CHECK-NEXT: %0 = insertvalue { i64, ptr } { i64 0, ptr poison }, ptr %x, 1
// CHECK-NEXT: ret { i64, ptr } %0
Ok(x)
}

#[no_mangle]
fn make_ok_int(x: usize) -> Result<usize, NonNull<u16>> {
// CHECK-LABEL: { i64, ptr } @make_ok_int(i64 %x)
// CHECK-NEXT: start:
// CHECK-NEXT: %[[NOPROV:.+]] = getelementptr i8, ptr null, i64 %x
// CHECK-NEXT: %[[R:.+]] = insertvalue { i64, ptr } { i64 0, ptr poison }, ptr %[[NOPROV]], 1
// CHECK-NEXT: ret { i64, ptr } %[[R]]
Ok(x)
}

#[no_mangle]
fn make_some_ref(x: &u16) -> Option<&u16> {
// CHECK-LABEL: ptr @make_some_ref(ptr align 2 %x)
// CHECK-NEXT: start:
// CHECK-NEXT: ret ptr %x
Some(x)
}

#[no_mangle]
fn make_none_ref<'a>() -> Option<&'a u16> {
// CHECK-LABEL: ptr @make_none_ref()
// CHECK-NEXT: start:
// CHECK-NEXT: ret ptr null
None
}

#[inline(never)]
fn make_err_generic<E>(e: E) -> Result<u32, E> {
// CHECK-LABEL: define{{.+}}make_err_generic
// CHECK-NEXT: start:
// CHECK-NEXT: call void @llvm.trap()
// CHECK-NEXT: ret i32 poison
Err(e)
}

#[no_mangle]
fn make_uninhabited_err_indirectly(n: Never) -> Result<u32, Never> {
// CHECK-LABEL: i32 @make_uninhabited_err_indirectly()
// CHECK-NEXT: start:
// CHECK-NEXT: call{{.+}}make_err_generic
make_err_generic(n)
}

#[no_mangle]
fn make_fully_uninhabited_result(v: u32, n: Never) -> Result<(u32, Never), (Never, u32)> {
// We don't try to do this in SSA form since the whole type is uninhabited.

// CHECK-LABEL: { i32, i32 } @make_fully_uninhabited_result(i32 %v)
// CHECK: %[[ALLOC_V:.+]] = alloca [4 x i8]
// CHECK: %[[RET:.+]] = alloca [8 x i8]
// CHECK: store i32 %v, ptr %[[ALLOC_V]]
// CHECK: %[[TEMP_V:.+]] = load i32, ptr %[[ALLOC_V]]
// CHECK: %[[INNER:.+]] = getelementptr inbounds i8, ptr %[[RET]]
// CHECK: store i32 %[[TEMP_V]], ptr %[[INNER]]
// CHECK: call void @llvm.trap()
// CHECK: unreachable
Ok((v, n))
}

enum Never {}
5 changes: 2 additions & 3 deletions tests/codegen/set-discriminant-invalid.rs
@@ -16,10 +16,9 @@ impl IntoError<Error> for Api {
type Source = ApiError;
// CHECK-LABEL: @into_error
// CHECK: llvm.trap()
// Also check the next two instructions to make sure we do not match against `trap`
// Also check the next instruction to make sure we do not match against `trap`
// elsewhere in the code.
// CHECK-NEXT: load
// CHECK-NEXT: ret
// CHECK-NEXT: ret i8 poison
#[no_mangle]
fn into_error(self, error: Self::Source) -> Error {
Error::Api { source: error }
85 changes: 85 additions & 0 deletions tests/codegen/union-aggregate.rs
@@ -0,0 +1,85 @@
//@ compile-flags: -Copt-level=0 -Cno-prepopulate-passes
//@ min-llvm-version: 19
//@ only-64bit

#![crate_type = "lib"]
#![feature(transparent_unions)]

#[repr(transparent)]
union MU<T: Copy> {
uninit: (),
value: T,
}

use std::cmp::Ordering;
use std::num::NonZero;
use std::ptr::NonNull;

#[no_mangle]
fn make_mu_bool(x: bool) -> MU<bool> {
// CHECK-LABEL: i8 @make_mu_bool(i1 zeroext %x)
// CHECK-NEXT: start:
// CHECK-NEXT: %[[WIDER:.+]] = zext i1 %x to i8
// CHECK-NEXT: ret i8 %[[WIDER]]
MU { value: x }
}

#[no_mangle]
fn make_mu_bool_uninit() -> MU<bool> {
// CHECK-LABEL: i8 @make_mu_bool_uninit()
// CHECK-NEXT: start:
// CHECK-NEXT: ret i8 undef
MU { uninit: () }
}

#[no_mangle]
fn make_mu_ref(x: &u16) -> MU<&u16> {
// CHECK-LABEL: ptr @make_mu_ref(ptr align 2 %x)
// CHECK-NEXT: start:
// CHECK-NEXT: ret ptr %x
MU { value: x }
}

#[no_mangle]
fn make_mu_ref_uninit<'a>() -> MU<&'a u16> {
// CHECK-LABEL: ptr @make_mu_ref_uninit()
// CHECK-NEXT: start:
// CHECK-NEXT: ret ptr undef
MU { uninit: () }
}

#[no_mangle]
fn make_mu_str(x: &str) -> MU<&str> {
// CHECK-LABEL: { ptr, i64 } @make_mu_str(ptr align 1 %x.0, i64 %x.1)
// CHECK-NEXT: start:
// CHECK-NEXT: %0 = insertvalue { ptr, i64 } poison, ptr %x.0, 0
// CHECK-NEXT: %1 = insertvalue { ptr, i64 } %0, i64 %x.1, 1
// CHECK-NEXT: ret { ptr, i64 } %1
MU { value: x }
}

#[no_mangle]
fn make_mu_str_uninit<'a>() -> MU<&'a str> {
// CHECK-LABEL: { ptr, i64 } @make_mu_str_uninit()
// CHECK-NEXT: start:
// CHECK-NEXT: ret { ptr, i64 } undef
MU { uninit: () }
}

#[no_mangle]
fn make_mu_pair(x: (u8, u32)) -> MU<(u8, u32)> {
// CHECK-LABEL: { i8, i32 } @make_mu_pair(i8 %x.0, i32 %x.1)
// CHECK-NEXT: start:
// CHECK-NEXT: %0 = insertvalue { i8, i32 } poison, i8 %x.0, 0
// CHECK-NEXT: %1 = insertvalue { i8, i32 } %0, i32 %x.1, 1
// CHECK-NEXT: ret { i8, i32 } %1
MU { value: x }
}

#[no_mangle]
fn make_mu_pair_uninit() -> MU<(u8, u32)> {
// CHECK-LABEL: { i8, i32 } @make_mu_pair_uninit()
// CHECK-NEXT: start:
// CHECK-NEXT: ret { i8, i32 } undef
MU { uninit: () }
}
10 changes: 8 additions & 2 deletions tests/ui/sanitizer/memory-eager.rs
@@ -8,8 +8,14 @@
//
//@ run-fail
//@ error-pattern: MemorySanitizer: use-of-uninitialized-value
//@ error-pattern: Uninitialized value was created by an allocation
//@ error-pattern: in the stack frame
//@ [optimized]error-pattern: Uninitialized value was created by an allocation
//@ [optimized]error-pattern: in the stack frame
//
// FIXME: the unoptimized case actually has that text in its output too, per
// <https://github.com/rust-lang/rust/pull/138759#issuecomment-3037186707>,
// but for some reason it doesn't seem to be picked up. For now we don't check
// for that part; the test still verifies that MemorySanitizer reported a use
// of an uninitialized value, which is the critical part.
//
// This test case intentionally limits the usage of the std,
// since it will be linked with an uninstrumented version of it.