diff --git a/Cargo.lock b/Cargo.lock
index e91116ade9edd..0691cddaa0b76 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -3410,11 +3410,9 @@ name = "rustc_codegen_ssa"
version = "0.0.0"
dependencies = [
 "ar_archive_writer",
- "arrayvec",
 "bitflags",
 "bstr",
 "cc",
- "either",
 "itertools",
 "libc",
 "object 0.36.7",
diff --git a/compiler/rustc_abi/src/layout.rs b/compiler/rustc_abi/src/layout.rs
index 7bffeaf4cc9e2..e169cb1517107 100644
--- a/compiler/rustc_abi/src/layout.rs
+++ b/compiler/rustc_abi/src/layout.rs
@@ -758,7 +758,7 @@ impl LayoutCalculator {
                        niche_variants,
                        niche_start,
                    },
-                    tag_field: 0,
+                    tag_field: FieldIdx::new(0),
                    variants: IndexVec::new(),
                },
                fields: FieldsShape::Arbitrary {
@@ -1072,7 +1072,7 @@ impl LayoutCalculator {
            variants: Variants::Multiple {
                tag,
                tag_encoding: TagEncoding::Direct,
-                tag_field: 0,
+                tag_field: FieldIdx::new(0),
                variants: IndexVec::new(),
            },
            fields: FieldsShape::Arbitrary {
diff --git a/compiler/rustc_abi/src/layout/coroutine.rs b/compiler/rustc_abi/src/layout/coroutine.rs
index 27e704d538c83..2b22276d4aed7 100644
--- a/compiler/rustc_abi/src/layout/coroutine.rs
+++ b/compiler/rustc_abi/src/layout/coroutine.rs
@@ -158,7 +158,7 @@ pub(super) fn layout<
    // Build a prefix layout, including "promoting" all ineligible
    // locals as part of the prefix. We compute the layout of all of
    // these fields at once to get optimal packing.
-    let tag_index = prefix_layouts.len();
+    let tag_index = prefix_layouts.next_index();
    // `variant_fields` already accounts for the reserved variants, so no need to add them.
    let max_discr = (variant_fields.len() - 1) as u128;
@@ -187,7 +187,7 @@
    // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
    // "outer" and "promoted" fields respectively.
-    let b_start = FieldIdx::new(tag_index + 1);
+    let b_start = tag_index.plus(1);
    let offsets_b = IndexVec::from_raw(offsets.raw.split_off(b_start.index()));
    let offsets_a = offsets;
diff --git a/compiler/rustc_abi/src/lib.rs b/compiler/rustc_abi/src/lib.rs
index 3f28dc7a1efae..eaaaf472ac05f 100644
--- a/compiler/rustc_abi/src/lib.rs
+++ b/compiler/rustc_abi/src/lib.rs
@@ -1571,7 +1571,7 @@ pub enum Variants<FieldIdx: Idx, VariantIdx: Idx> {
    Multiple {
        tag: Scalar,
        tag_encoding: TagEncoding<VariantIdx>,
-        tag_field: usize,
+        tag_field: FieldIdx,
        variants: IndexVec<VariantIdx, LayoutData<FieldIdx, VariantIdx>>,
    },
}
diff --git a/compiler/rustc_codegen_cranelift/src/discriminant.rs b/compiler/rustc_codegen_cranelift/src/discriminant.rs
index 4d0d5dc60eba6..a08b0e0cbfc59 100644
--- a/compiler/rustc_codegen_cranelift/src/discriminant.rs
+++ b/compiler/rustc_codegen_cranelift/src/discriminant.rs
@@ -28,7 +28,7 @@ pub(crate) fn codegen_set_discriminant<'tcx>(
            tag_encoding: TagEncoding::Direct,
            variants: _,
        } => {
-            let ptr = place.place_field(fx, FieldIdx::new(tag_field));
+            let ptr = place.place_field(fx, tag_field);
            let to = layout.ty.discriminant_for_variant(fx.tcx, variant_index).unwrap().val;
            let to = match ptr.layout().ty.kind() {
                ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
@@ -53,7 +53,7 @@ pub(crate) fn codegen_set_discriminant<'tcx>(
            variants: _,
        } => {
            if variant_index != untagged_variant {
-                let niche = place.place_field(fx, FieldIdx::new(tag_field));
+                let niche = place.place_field(fx, tag_field);
                let niche_type = fx.clif_type(niche.layout().ty).unwrap();
                let niche_value = variant_index.as_u32() - niche_variants.start().as_u32();
                let niche_value = (niche_value as u128).wrapping_add(niche_start);
@@ -118,7 +118,7 @@ pub(crate) fn codegen_get_discriminant<'tcx>(
    let cast_to = fx.clif_type(dest_layout.ty).unwrap();
    // Read the tag/niche-encoded discriminant from memory.
-    let tag = value.value_field(fx, FieldIdx::new(tag_field));
+    let tag = value.value_field(fx, tag_field);
    let tag = tag.load_scalar(fx);
    // Decode the discriminant (specifically if it's niche-encoded).
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs
index 07075be55fa1f..06b72d395d41f 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs
@@ -1,7 +1,7 @@
use std::borrow::Cow;
use libc::c_uint;
-use rustc_abi::{Align, Endian, Size, TagEncoding, VariantIdx, Variants};
+use rustc_abi::{Align, Endian, FieldIdx, Size, TagEncoding, VariantIdx, Variants};
use rustc_codegen_ssa::debuginfo::type_names::compute_debuginfo_type_name;
use rustc_codegen_ssa::debuginfo::{tag_base_type, wants_c_like_enum_debuginfo};
use rustc_codegen_ssa::traits::{ConstCodegenMethods, MiscCodegenMethods};
@@ -401,7 +401,7 @@ fn build_union_fields_for_enum<'ll, 'tcx>(
    enum_type_and_layout: TyAndLayout<'tcx>,
    enum_type_di_node: &'ll DIType,
    variant_indices: impl Iterator<Item = VariantIdx> + Clone,
-    tag_field: usize,
+    tag_field: FieldIdx,
    untagged_variant_index: Option<VariantIdx>,
) -> SmallVec<&'ll DIType> {
    let tag_base_type = tag_base_type(cx.tcx, enum_type_and_layout);
@@ -806,7 +806,7 @@ fn build_union_fields_for_direct_tag_enum_or_coroutine<'ll, 'tcx>(
    variant_field_infos: &[VariantFieldInfo<'ll>],
    discr_type_di_node: &'ll DIType,
    tag_base_type: Ty<'tcx>,
-    tag_field: usize,
+    tag_field: FieldIdx,
    untagged_variant_index: Option<VariantIdx>,
    di_flags: DIFlags,
) -> SmallVec<&'ll DIType> {
@@ -859,7 +859,7 @@ fn build_union_fields_for_direct_tag_enum_or_coroutine<'ll, 'tcx>(
    }));
    assert_eq!(
-        cx.size_and_align_of(enum_type_and_layout.field(cx, tag_field).ty),
+        cx.size_and_align_of(enum_type_and_layout.field(cx, tag_field.as_usize()).ty),
        cx.size_and_align_of(self::tag_base_type(cx.tcx, enum_type_and_layout))
    );
@@ -876,7 +876,7 @@ fn build_union_fields_for_direct_tag_enum_or_coroutine<'ll, 'tcx>(
        Endian::Big => (8, 0),
    };
-    let tag_field_offset = enum_type_and_layout.fields.offset(tag_field).bytes();
+    let tag_field_offset = enum_type_and_layout.fields.offset(tag_field.as_usize()).bytes();
    let lo_offset = Size::from_bytes(tag_field_offset + lo_offset);
    let hi_offset = Size::from_bytes(tag_field_offset + hi_offset);
@@ -906,8 +906,8 @@ fn build_union_fields_for_direct_tag_enum_or_coroutine<'ll, 'tcx>(
            cx,
            enum_type_di_node,
            TAG_FIELD_NAME,
-            enum_type_and_layout.field(cx, tag_field),
-            enum_type_and_layout.fields.offset(tag_field),
+            enum_type_and_layout.field(cx, tag_field.as_usize()),
+            enum_type_and_layout.fields.offset(tag_field.as_usize()),
            di_flags,
            tag_base_type_di_node,
            None,
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs
index bfd131cfd3dbb..68dec007f81a5 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs
@@ -375,7 +375,7 @@ fn build_discr_member_di_node<'ll, 'tcx>(
            file,
            UNKNOWN_LINE_NUMBER,
            layout,
-            enum_or_coroutine_type_and_layout.fields.offset(tag_field),
+            enum_or_coroutine_type_and_layout.fields.offset(tag_field.as_usize()),
            DIFlags::FlagArtificial,
            ty,
        ))
diff --git a/compiler/rustc_codegen_ssa/Cargo.toml b/compiler/rustc_codegen_ssa/Cargo.toml
index caa1db8274d97..141e1b80295df 100644
--- a/compiler/rustc_codegen_ssa/Cargo.toml
+++ b/compiler/rustc_codegen_ssa/Cargo.toml
@@ -6,13 +6,11 @@ edition = "2024"
[dependencies]
# tidy-alphabetical-start
ar_archive_writer = "0.4.2"
-arrayvec = { version = "0.7", default-features = false }
bitflags = "2.4.1"
bstr = "1.11.3"
# Pinned so `cargo update` bumps don't cause breakage. Please also update the
# `cc` in `rustc_llvm` if you update the `cc` here.
cc = "=1.2.16"
-either = "1.5.0"
itertools = "0.12"
pathdiff = "0.2.0"
regex = "1.4"
diff --git a/compiler/rustc_codegen_ssa/src/mir/operand.rs b/compiler/rustc_codegen_ssa/src/mir/operand.rs
index 7e355b6406aed..c88aa56f64934 100644
--- a/compiler/rustc_codegen_ssa/src/mir/operand.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/operand.rs
@@ -1,14 +1,15 @@
use std::fmt;
-use arrayvec::ArrayVec;
-use either::Either;
use rustc_abi as abi;
-use rustc_abi::{Align, BackendRepr, FIRST_VARIANT, Primitive, Size, TagEncoding, Variants};
+use rustc_abi::{
+    Align, BackendRepr, FIRST_VARIANT, FieldIdx, Primitive, Size, TagEncoding, VariantIdx, Variants,
+};
use rustc_middle::mir::interpret::{Pointer, Scalar, alloc_range};
use rustc_middle::mir::{self, ConstValue};
use rustc_middle::ty::Ty;
use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
use rustc_middle::{bug, span_bug};
+use rustc_session::config::OptLevel;
use tracing::{debug, instrument};
use super::place::{PlaceRef, PlaceValue};
@@ -62,31 +63,6 @@ pub enum OperandValue<V> {
}
impl<V: CodegenObject> OperandValue<V> {
-    /// If this is ZeroSized/Immediate/Pair, return an array of the 0/1/2 values.
-    /// If this is Ref, return the place.
-    #[inline]
-    pub(crate) fn immediates_or_place(self) -> Either<ArrayVec<V, 2>, PlaceValue<V>> {
-        match self {
-            OperandValue::ZeroSized => Either::Left(ArrayVec::new()),
-            OperandValue::Immediate(a) => Either::Left(ArrayVec::from_iter([a])),
-            OperandValue::Pair(a, b) => Either::Left([a, b].into()),
-            OperandValue::Ref(p) => Either::Right(p),
-        }
-    }
-
-    /// Given an array of 0/1/2 immediate values, return ZeroSized/Immediate/Pair.
-    #[inline]
-    pub(crate) fn from_immediates(immediates: ArrayVec<V, 2>) -> Self {
-        let mut it = immediates.into_iter();
-        let Some(a) = it.next() else {
-            return OperandValue::ZeroSized;
-        };
-        let Some(b) = it.next() else {
-            return OperandValue::Immediate(a);
-        };
-        OperandValue::Pair(a, b)
-    }
-
    /// Treat this value as a pointer and return the data pointer and
    /// optional metadata as backend values.
    ///
@@ -461,10 +437,10 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
        let tag_op = match self.val {
            OperandValue::ZeroSized => bug!(),
            OperandValue::Immediate(_) | OperandValue::Pair(_, _) => {
-                self.extract_field(fx, bx, tag_field)
+                self.extract_field(fx, bx, tag_field.as_usize())
            }
            OperandValue::Ref(place) => {
-                let tag = place.with_type(self.layout).project_field(bx, tag_field);
+                let tag = place.with_type(self.layout).project_field(bx, tag_field.as_usize());
                bx.load_operand(tag)
            }
        };
@@ -559,6 +535,123 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
            }
        }
    }
+
+    pub(crate) fn builder(layout: TyAndLayout<'tcx>) -> OperandRef<'tcx, Result<V, abi::Scalar>> {
+        let val = match layout.backend_repr {
+            BackendRepr::Memory { .. } if layout.is_zst() => OperandValue::ZeroSized,
+            BackendRepr::Scalar(s) => OperandValue::Immediate(Err(s)),
+            BackendRepr::ScalarPair(a, b) => OperandValue::Pair(Err(a), Err(b)),
+            _ => bug!("Cannot use type in operand builder: {layout:?}"),
+        };
+        OperandRef { val, layout }
+    }
+
+    pub(crate) fn supports_builder(layout: TyAndLayout<'tcx>) -> bool {
+        match layout.backend_repr {
+            BackendRepr::Memory { .. } if layout.is_zst() => true,
+            BackendRepr::Scalar(_) | BackendRepr::ScalarPair(_, _) => true,
+            _ => false,
+        }
+    }
+}
+
+impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, Result<V, abi::Scalar>> {
+    pub(crate) fn insert_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+        &mut self,
+        bx: &mut Bx,
+        v: VariantIdx,
+        f: FieldIdx,
+        operand: OperandRef<'tcx, V>,
+    ) {
+        let (expect_zst, is_zero_offset) = if let abi::FieldsShape::Primitive = self.layout.fields {
+            // Don't ask for field layout for primitives, because that will panic.
+            if !self.layout.uninhabited {
+                // Real primitives only have one variant, but weird types like
+                // `Result<!, !>` turn out to also be "Primitive", and dead code
+                // like `Err(never)` needs to not ICE.
+                assert_eq!(v, FIRST_VARIANT);
+            }
+            let first_field = f == FieldIdx::ZERO;
+            (self.layout.is_zst() || !first_field, first_field)
+        } else {
+            let variant_layout = self.layout.for_variant(bx.cx(), v);
+            let field_layout = variant_layout.field(bx.cx(), f.as_usize());
+            let field_offset = variant_layout.fields.offset(f.as_usize());
+            (field_layout.is_zst(), field_offset == Size::ZERO)
+        };
+
+        let mut update = |tgt: &mut Result<V, abi::Scalar>, src, from_scalar| {
+            let from_bty = bx.cx().type_from_scalar(from_scalar);
+            let to_scalar = tgt.unwrap_err();
+            let to_bty = bx.cx().type_from_scalar(to_scalar);
+            let imm = transmute_immediate(bx, src, from_scalar, from_bty, to_scalar, to_bty);
+            *tgt = Ok(imm);
+        };
+
+        match (operand.val, operand.layout.backend_repr) {
+            (OperandValue::ZeroSized, _) if expect_zst => {}
+            (OperandValue::Immediate(v), BackendRepr::Scalar(from_scalar)) => match &mut self.val {
+                OperandValue::Immediate(val @ Err(_)) if is_zero_offset => {
+                    update(val, v, from_scalar);
+                }
+                OperandValue::Pair(fst @ Err(_), _) if is_zero_offset => {
+                    update(fst, v, from_scalar);
+                }
+                OperandValue::Pair(_, snd @ Err(_)) if !is_zero_offset => {
+                    update(snd, v, from_scalar);
+                }
+                _ => bug!("Tried to insert {operand:?} into {v:?}.{f:?} of {self:?}"),
+            },
+            (OperandValue::Pair(a, b), BackendRepr::ScalarPair(from_sa, from_sb)) => {
+                match &mut self.val {
+                    OperandValue::Pair(fst @ Err(_), snd @ Err(_)) => {
+                        update(fst, a, from_sa);
+                        update(snd, b, from_sb);
+                    }
+                    _ => bug!("Tried to insert {operand:?} into {v:?}.{f:?} of {self:?}"),
+                }
+            }
+            _ => bug!("Unsupported operand {operand:?} inserting into {v:?}.{f:?} of {self:?}"),
+        }
+    }
+
+    pub(super) fn insert_imm(&mut self, f: FieldIdx, imm: V) {
+        let field_offset = self.layout.fields.offset(f.as_usize());
+        let is_zero_offset = field_offset == Size::ZERO;
+        match &mut self.val {
+            OperandValue::Immediate(val @ Err(_)) if is_zero_offset => {
+                *val = Ok(imm);
+            }
+            OperandValue::Pair(fst @ Err(_), _) if is_zero_offset => {
+                *fst = Ok(imm);
+            }
+            OperandValue::Pair(_, snd @ Err(_)) if !is_zero_offset => {
+                *snd = Ok(imm);
+            }
+            _ => bug!("Tried to insert {imm:?} into field {f:?} of {self:?}"),
+        }
+    }
+
+    pub fn finalize(&self, cx: &impl CodegenMethods<'tcx, Value = V>) -> OperandRef<'tcx, V> {
+        let OperandRef { val, layout } = *self;
+
+        let unwrap = |r: Result<V, abi::Scalar>| match r {
+            Ok(v) => v,
+            Err(s) if s.is_uninit_valid() => {
+                let bty = cx.type_from_scalar(s);
+                cx.const_undef(bty)
+            }
+            Err(_) => bug!("OperandRef::finalize called while fields are missing {self:?}"),
+        };
+
+        let val = match val {
+            OperandValue::ZeroSized => OperandValue::ZeroSized,
+            OperandValue::Immediate(v) => OperandValue::Immediate(unwrap(v)),
+            OperandValue::Pair(a, b) => OperandValue::Pair(unwrap(a), unwrap(b)),
+            OperandValue::Ref(_) => bug!(),
+        };
+        OperandRef { val, layout }
+    }
}
impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
@@ -808,3 +901,93 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
        }
    }
}
+
+/// Transmutes one of the immediates from an [`OperandValue::Immediate`]
+/// or an [`OperandValue::Pair`] to an immediate of the target type.
+///
+/// `to_backend_ty` must be the *non*-immediate backend type (so it will be
+/// `i8`, not `i1`, for `bool`-like types.)
+pub(super) fn transmute_immediate<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+    bx: &mut Bx,
+    mut imm: Bx::Value,
+    from_scalar: abi::Scalar,
+    from_backend_ty: Bx::Type,
+    to_scalar: abi::Scalar,
+    to_backend_ty: Bx::Type,
+) -> Bx::Value {
+    assert_eq!(from_scalar.size(bx.cx()), to_scalar.size(bx.cx()));
+
+    // While optimizations will remove no-op transmutes, they might still be
+    // there in debug or things that aren't no-op in MIR because they change
+    // the Rust type but not the underlying layout/niche.
+    if from_scalar == to_scalar && from_backend_ty == to_backend_ty {
+        return imm;
+    }
+
+    use abi::Primitive::*;
+    imm = bx.from_immediate(imm);
+
+    // If we have a scalar, we must already know its range. Either
+    //
+    // 1) It's a parameter with `range` parameter metadata,
+    // 2) It's something we `load`ed with `!range` metadata, or
+    // 3) After a transmute we `assume`d the range (see below).
+    //
+    // That said, last time we tried removing this, it didn't actually help
+    // the rustc-perf results, so might as well keep doing it
+    //
+    assume_scalar_range(bx, imm, from_scalar, from_backend_ty);
+
+    imm = match (from_scalar.primitive(), to_scalar.primitive()) {
+        (Int(..) | Float(_), Int(..) | Float(_)) => bx.bitcast(imm, to_backend_ty),
+        (Pointer(..), Pointer(..)) => bx.pointercast(imm, to_backend_ty),
+        (Int(..), Pointer(..)) => bx.ptradd(bx.const_null(bx.type_ptr()), imm),
+        (Pointer(..), Int(..)) => {
+            // FIXME: this exposes the provenance, which shouldn't be necessary.
+            bx.ptrtoint(imm, to_backend_ty)
+        }
+        (Float(_), Pointer(..)) => {
+            let int_imm = bx.bitcast(imm, bx.cx().type_isize());
+            bx.ptradd(bx.const_null(bx.type_ptr()), int_imm)
+        }
+        (Pointer(..), Float(_)) => {
+            // FIXME: this exposes the provenance, which shouldn't be necessary.
+            let int_imm = bx.ptrtoint(imm, bx.cx().type_isize());
+            bx.bitcast(int_imm, to_backend_ty)
+        }
+    };
+
+    // This `assume` remains important for cases like (a conceptual)
+    //    transmute::<char, u32>(x) == 0
+    // since it's never passed to something with parameter metadata (especially
+    // after MIR inlining) so the only way to tell the backend about the
+    // constraint that the `transmute` introduced is to `assume` it.
+    assume_scalar_range(bx, imm, to_scalar, to_backend_ty);
+
+    imm = bx.to_immediate_scalar(imm, to_scalar);
+    imm
+}
+
+pub(super) fn assume_scalar_range<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+    bx: &mut Bx,
+    imm: Bx::Value,
+    scalar: abi::Scalar,
+    backend_ty: Bx::Type,
+) {
+    if matches!(bx.cx().sess().opts.optimize, OptLevel::No) || scalar.is_always_valid(bx.cx()) {
+        return;
+    }
+
+    match scalar.primitive() {
+        abi::Primitive::Int(..) => {
+            let range = scalar.valid_range(bx.cx());
+            bx.assume_integer_range(imm, backend_ty, range);
+        }
+        abi::Primitive::Pointer(abi::AddressSpace::DATA)
+            if !scalar.valid_range(bx.cx()).contains(0) =>
+        {
+            bx.assume_nonnull(imm);
+        }
+        abi::Primitive::Pointer(..) | abi::Primitive::Float(..) => {}
+    }
+}
diff --git a/compiler/rustc_codegen_ssa/src/mir/place.rs b/compiler/rustc_codegen_ssa/src/mir/place.rs
index 31db7fa9a1888..7306640ab62dc 100644
--- a/compiler/rustc_codegen_ssa/src/mir/place.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/place.rs
@@ -1,4 +1,6 @@
-use rustc_abi::{Align, BackendRepr, FieldsShape, Size, TagEncoding, VariantIdx, Variants};
+use rustc_abi::{
+    Align, BackendRepr, FieldIdx, FieldsShape, Size, TagEncoding, VariantIdx, Variants,
+};
use rustc_middle::mir::PlaceTy;
use rustc_middle::mir::interpret::Scalar;
use rustc_middle::ty::layout::{HasTyCtxt, HasTypingEnv, LayoutOf, TyAndLayout};
@@ -239,53 +241,17 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
        bx: &mut Bx,
        variant_index: VariantIdx,
    ) {
-        if self.layout.for_variant(bx.cx(), variant_index).is_uninhabited() {
-            // We play it safe by using a well-defined `abort`, but we could go for immediate UB
-            // if that turns out to be helpful.
-            bx.abort();
-            return;
-        }
-        match self.layout.variants {
-            Variants::Empty => unreachable!("we already handled uninhabited types"),
-            Variants::Single { index } => assert_eq!(index, variant_index),
-
-            Variants::Multiple { tag_encoding: TagEncoding::Direct, tag_field, .. } => {
-                let ptr = self.project_field(bx, tag_field);
-                let to =
-                    self.layout.ty.discriminant_for_variant(bx.tcx(), variant_index).unwrap().val;
-                bx.store_to_place(
-                    bx.cx().const_uint_big(bx.cx().backend_type(ptr.layout), to),
-                    ptr.val,
-                );
+        match codegen_tagged_field_value(bx.cx(), variant_index, self.layout) {
+            Err(UninhabitedVariantError) => {
+                // We play it safe by using a well-defined `abort`, but we could go for immediate UB
+                // if that turns out to be helpful.
+                bx.abort();
            }
-            Variants::Multiple {
-                tag_encoding:
-                    TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start },
-                tag_field,
-                ..
-            } => {
-                if variant_index != untagged_variant {
-                    let niche = self.project_field(bx, tag_field);
-                    let niche_llty = bx.cx().immediate_backend_type(niche.layout);
-                    let BackendRepr::Scalar(scalar) = niche.layout.backend_repr else {
-                        bug!("expected a scalar placeref for the niche");
-                    };
-                    // We are supposed to compute `niche_value.wrapping_add(niche_start)` wrapping
-                    // around the `niche`'s type.
-                    // The easiest way to do that is to do wrapping arithmetic on `u128` and then
-                    // masking off any extra bits that occur because we did the arithmetic with too many bits.
-                    let niche_value = variant_index.as_u32() - niche_variants.start().as_u32();
-                    let niche_value = (niche_value as u128).wrapping_add(niche_start);
-                    let niche_value = niche_value & niche.layout.size.unsigned_int_max();
-
-                    let niche_llval = bx.cx().scalar_to_backend(
-                        Scalar::from_uint(niche_value, niche.layout.size),
-                        scalar,
-                        niche_llty,
-                    );
-                    OperandValue::Immediate(niche_llval).store(bx, niche);
-                }
+            Ok(Some((tag_field, imm))) => {
+                let tag_place = self.project_field(bx, tag_field.as_usize());
+                OperandValue::Immediate(imm).store(bx, tag_place);
            }
+            Ok(None) => {}
        }
    }
@@ -471,3 +437,73 @@ fn round_up_const_value_to_alignment<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    let offset = bx.and(neg_value, align_minus_1);
    bx.add(value, offset)
}
+
+/// Calculates the value that needs to be stored to mark the discriminant.
+///
+/// This might be `None` for a `struct` or a niched variant (like `Some(&3)`).
+///
+/// If it's `Some`, it returns the value to store and the field in which to
+/// store it. Note that this value is *not* the same as the discriminant, in
+/// general, as it might be a niche value or have a different size.
+///
+/// It might also be an `Err` because the variant is uninhabited.
+pub(super) fn codegen_tagged_field_value<'tcx, V>(
+    cx: &impl CodegenMethods<'tcx, Value = V>,
+    variant_index: VariantIdx,
+    layout: TyAndLayout<'tcx>,
+) -> Result<Option<(FieldIdx, V)>, UninhabitedVariantError> {
+    // By checking uninhabited-ness first we don't need to worry about types
+    // like `(u32, !)` which are single-variant but weird.
+    if layout.for_variant(cx, variant_index).is_uninhabited() {
+        return Err(UninhabitedVariantError);
+    }
+
+    Ok(match layout.variants {
+        Variants::Empty => unreachable!("we already handled uninhabited types"),
+        Variants::Single { index } => {
+            assert_eq!(index, variant_index);
+            None
+        }
+
+        Variants::Multiple { tag_encoding: TagEncoding::Direct, tag_field, .. } => {
+            let discr = layout.ty.discriminant_for_variant(cx.tcx(), variant_index);
+            let to = discr.unwrap().val;
+            let tag_layout = layout.field(cx, tag_field.as_usize());
+            let tag_llty = cx.immediate_backend_type(tag_layout);
+            let imm = cx.const_uint_big(tag_llty, to);
+            Some((tag_field, imm))
+        }
+        Variants::Multiple {
+            tag_encoding: TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start },
+            tag_field,
+            ..
+        } => {
+            if variant_index != untagged_variant {
+                let niche_layout = layout.field(cx, tag_field.as_usize());
+                let niche_llty = cx.immediate_backend_type(niche_layout);
+                let BackendRepr::Scalar(scalar) = niche_layout.backend_repr else {
+                    bug!("expected a scalar placeref for the niche");
+                };
+                // We are supposed to compute `niche_value.wrapping_add(niche_start)` wrapping
+                // around the `niche`'s type.
+                // The easiest way to do that is to do wrapping arithmetic on `u128` and then
+                // masking off any extra bits that occur because we did the arithmetic with too many bits.
+                let niche_value = variant_index.as_u32() - niche_variants.start().as_u32();
+                let niche_value = (niche_value as u128).wrapping_add(niche_start);
+                let niche_value = niche_value & niche_layout.size.unsigned_int_max();
+
+                let niche_llval = cx.scalar_to_backend(
+                    Scalar::from_uint(niche_value, niche_layout.size),
+                    scalar,
+                    niche_llty,
+                );
+                Some((tag_field, niche_llval))
+            } else {
+                None
+            }
+        }
+    })
+}
+
+#[derive(Debug)]
+pub(super) struct UninhabitedVariantError;
diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
index 0fe6a17473592..00f15b8fda88a 100644
--- a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
@@ -1,7 +1,6 @@
use std::assert_matches::assert_matches;
-use arrayvec::ArrayVec;
-use rustc_abi::{self as abi, FIRST_VARIANT, FieldIdx};
+use rustc_abi::{self as abi, FIRST_VARIANT};
use rustc_middle::ty::adjustment::PointerCoercion;
use rustc_middle::ty::layout::{HasTyCtxt, HasTypingEnv, LayoutOf, TyAndLayout};
use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
@@ -10,8 +9,8 @@
use rustc_session::config::OptLevel;
use rustc_span::{DUMMY_SP, Span};
use tracing::{debug, instrument};
-use super::operand::{OperandRef, OperandValue};
-use super::place::PlaceRef;
+use super::operand::{OperandRef, OperandValue, assume_scalar_range, transmute_immediate};
+use super::place::{PlaceRef, codegen_tagged_field_value};
use super::{FunctionCx, LocalRef};
use crate::common::IntPredicate;
use crate::traits::*;
@@ -261,7 +260,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
            {
                let from_backend_ty = bx.backend_type(operand.layout);
                let to_backend_ty = bx.backend_type(cast);
-                Some(OperandValue::Immediate(self.transmute_immediate(
+                Some(OperandValue::Immediate(transmute_immediate(
                    bx,
                    imm,
                    from_scalar,
@@ -286,8 +285,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                let out_a_ibty = bx.scalar_pair_element_backend_type(cast, 0, false);
                let out_b_ibty = bx.scalar_pair_element_backend_type(cast, 1, false);
                Some(OperandValue::Pair(
-                    self.transmute_immediate(bx, imm_a, in_a, in_a_ibty, out_a, out_a_ibty),
-                    self.transmute_immediate(bx, imm_b, in_b, in_b_ibty, out_b, out_b_ibty),
+                    transmute_immediate(bx, imm_a, in_a, in_a_ibty, out_a, out_a_ibty),
+                    transmute_immediate(bx, imm_b, in_b, in_b_ibty, out_b, out_b_ibty),
                ))
            } else {
                None
@@ -301,7 +300,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
    ///
    /// Returns `None` if the cast is not possible.
    fn cast_immediate(
-        &self,
        bx: &mut Bx,
        mut imm: Bx::Value,
        from_scalar: abi::Scalar,
@@ -315,7 +313,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
        // valid ranges. For example, `char`s are passed as just `i32`, with no
        // way for LLVM to know that they're 0x10FFFF at most. Thus we assume
        // the range of the input value too, not just the output range.
-        self.assume_scalar_range(bx, imm, from_scalar, from_backend_ty);
+        assume_scalar_range(bx, imm, from_scalar, from_backend_ty);
        imm = match (from_scalar.primitive(), to_scalar.primitive()) {
            (Int(_, is_signed), Int(..)) => bx.intcast(imm, to_backend_ty, is_signed),
@@ -348,98 +346,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
        Some(imm)
    }
-    /// Transmutes one of the immediates from an [`OperandValue::Immediate`]
-    /// or an [`OperandValue::Pair`] to an immediate of the target type.
-    ///
-    /// `to_backend_ty` must be the *non*-immediate backend type (so it will be
-    /// `i8`, not `i1`, for `bool`-like types.)
-    fn transmute_immediate(
-        &self,
-        bx: &mut Bx,
-        mut imm: Bx::Value,
-        from_scalar: abi::Scalar,
-        from_backend_ty: Bx::Type,
-        to_scalar: abi::Scalar,
-        to_backend_ty: Bx::Type,
-    ) -> Bx::Value {
-        assert_eq!(from_scalar.size(self.cx), to_scalar.size(self.cx));
-
-        // While optimizations will remove no-op transmutes, they might still be
-        // there in debug or things that aren't no-op in MIR because they change
-        // the Rust type but not the underlying layout/niche.
-        if from_scalar == to_scalar && from_backend_ty == to_backend_ty {
-            return imm;
-        }
-
-        use abi::Primitive::*;
-        imm = bx.from_immediate(imm);
-
-        // If we have a scalar, we must already know its range. Either
-        //
-        // 1) It's a parameter with `range` parameter metadata,
-        // 2) It's something we `load`ed with `!range` metadata, or
-        // 3) After a transmute we `assume`d the range (see below).
-        //
-        // That said, last time we tried removing this, it didn't actually help
-        // the rustc-perf results, so might as well keep doing it
-        //
-        self.assume_scalar_range(bx, imm, from_scalar, from_backend_ty);
-
-        imm = match (from_scalar.primitive(), to_scalar.primitive()) {
-            (Int(..) | Float(_), Int(..) | Float(_)) => bx.bitcast(imm, to_backend_ty),
-            (Pointer(..), Pointer(..)) => bx.pointercast(imm, to_backend_ty),
-            (Int(..), Pointer(..)) => bx.ptradd(bx.const_null(bx.type_ptr()), imm),
-            (Pointer(..), Int(..)) => {
-                // FIXME: this exposes the provenance, which shouldn't be necessary.
-                bx.ptrtoint(imm, to_backend_ty)
-            }
-            (Float(_), Pointer(..)) => {
-                let int_imm = bx.bitcast(imm, bx.cx().type_isize());
-                bx.ptradd(bx.const_null(bx.type_ptr()), int_imm)
-            }
-            (Pointer(..), Float(_)) => {
-                // FIXME: this exposes the provenance, which shouldn't be necessary.
-                let int_imm = bx.ptrtoint(imm, bx.cx().type_isize());
-                bx.bitcast(int_imm, to_backend_ty)
-            }
-        };
-
-        // This `assume` remains important for cases like (a conceptual)
-        //    transmute::<char, u32>(x) == 0
-        // since it's never passed to something with parameter metadata (especially
-        // after MIR inlining) so the only way to tell the backend about the
-        // constraint that the `transmute` introduced is to `assume` it.
-        self.assume_scalar_range(bx, imm, to_scalar, to_backend_ty);
-
-        imm = bx.to_immediate_scalar(imm, to_scalar);
-        imm
-    }
-
-    fn assume_scalar_range(
-        &self,
-        bx: &mut Bx,
-        imm: Bx::Value,
-        scalar: abi::Scalar,
-        backend_ty: Bx::Type,
-    ) {
-        if matches!(self.cx.sess().opts.optimize, OptLevel::No) || scalar.is_always_valid(self.cx) {
-            return;
-        }
-
-        match scalar.primitive() {
-            abi::Primitive::Int(..) => {
-                let range = scalar.valid_range(self.cx);
-                bx.assume_integer_range(imm, backend_ty, range);
-            }
-            abi::Primitive::Pointer(abi::AddressSpace::DATA)
-                if !scalar.valid_range(self.cx).contains(0) =>
-            {
-                bx.assume_nonnull(imm);
-            }
-            abi::Primitive::Pointer(..) | abi::Primitive::Float(..) => {}
-        }
-    }
-
    pub(crate) fn codegen_rvalue_unsized(
        &mut self,
        bx: &mut Bx,
@@ -578,7 +484,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
            bug!("Found {cast_kind:?} for operand {cast:?}");
        };
-        self.cast_immediate(bx, imm, from_scalar, from_backend_ty, to_scalar, to_backend_ty)
+        Self::cast_immediate(bx, imm, from_scalar, from_backend_ty, to_scalar, to_backend_ty)
            .map(OperandValue::Immediate)
            .unwrap_or_else(|| {
                bug!("Unsupported cast of {operand:?} to {cast:?}");
@@ -776,45 +682,42 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
            }
            mir::Rvalue::Use(ref operand) => self.codegen_operand(bx, operand),
            mir::Rvalue::Repeat(..) => bug!("{rvalue:?} in codegen_rvalue_operand"),
-            mir::Rvalue::Aggregate(_, ref fields) => {
+            mir::Rvalue::Aggregate(ref kind, ref fields) => {
+                let (variant_index, active_field_index) = match **kind {
+                    mir::AggregateKind::Adt(_, variant_index, _, _, active_field_index) => {
+                        (variant_index, active_field_index)
+                    }
+                    _ => (FIRST_VARIANT, None),
+                };
+
                let ty = rvalue.ty(self.mir, self.cx.tcx());
                let ty = self.monomorphize(ty);
                let layout = self.cx.layout_of(ty);
-                // `rvalue_creates_operand` has arranged that we only get here if
-                // we can build the aggregate immediate from the field immediates.
-                let mut inputs = ArrayVec::<Bx::Value, 2>::new();
-                let mut input_scalars = ArrayVec::<abi::Scalar, 2>::new();
-                for field_idx in layout.fields.index_by_increasing_offset() {
-                    let field_idx = FieldIdx::from_usize(field_idx);
-                    let op = self.codegen_operand(bx, &fields[field_idx]);
-                    let values = op.val.immediates_or_place().left_or_else(|p| {
-                        bug!("Field {field_idx:?} is {p:?} making {layout:?}");
-                    });
-                    let scalars = self.value_kind(op.layout).scalars().unwrap();
-                    assert_eq!(values.len(), scalars.len());
-                    inputs.extend(values);
-                    input_scalars.extend(scalars);
+                let mut builder = OperandRef::builder(layout);
+                for (field_idx, field) in fields.iter_enumerated() {
+                    let op = self.codegen_operand(bx, field);
+                    let fi = active_field_index.unwrap_or(field_idx);
+                    builder.insert_field(bx, variant_index, fi, op);
                }
-                let output_scalars = self.value_kind(layout).scalars().unwrap();
-                itertools::izip!(&mut inputs, input_scalars, output_scalars).for_each(
-                    |(v, in_s, out_s)| {
-                        if in_s != out_s {
-                            // We have to be really careful about bool here, because
-                            // `(bool,)` stays i1 but `Cell<bool>` becomes i8.
-                            *v = bx.from_immediate(*v);
-                            *v = bx.to_immediate_scalar(*v, out_s);
+                let tag_result = codegen_tagged_field_value(self.cx, variant_index, layout);
+                match tag_result {
+                    Err(super::place::UninhabitedVariantError) => {
+                        // Like codegen_set_discr we use a sound abort, but could
+                        // potentially `unreachable` or just return the poison for
+                        // more optimizability, if that turns out to be helpful.
+                        bx.abort();
+                        let val = OperandValue::poison(bx, layout);
+                        OperandRef { val, layout }
+                    }
+                    Ok(maybe_tag_value) => {
+                        if let Some((tag_field, tag_imm)) = maybe_tag_value {
+                            builder.insert_imm(tag_field, tag_imm);
                        }
-                    },
-                );
-
-                let val = OperandValue::from_immediates(inputs);
-                assert!(
-                    val.is_expected_variant_for_type(self.cx, layout),
-                    "Made wrong variant {val:?} for type {layout:?}",
-                );
-                OperandRef { val, layout }
+                        builder.finalize(bx.cx())
+                    }
+                }
            }
            mir::Rvalue::ShallowInitBox(ref operand, content_ty) => {
                let operand = self.codegen_operand(bx, operand);
@@ -1145,19 +1048,16 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
            mir::AggregateKind::RawPtr(..) => true,
            mir::AggregateKind::Array(..) => false,
            mir::AggregateKind::Tuple => true,
-            mir::AggregateKind::Adt(def_id, ..) => {
-                let adt_def = self.cx.tcx().adt_def(def_id);
-                adt_def.is_struct() && !adt_def.repr().simd()
-            }
+            mir::AggregateKind::Adt(..) => true,
            mir::AggregateKind::Closure(..) => true,
            // FIXME: Can we do this for simple coroutines too?
            mir::AggregateKind::Coroutine(..) | mir::AggregateKind::CoroutineClosure(..) => false,
        };
        allowed_kind && {
-            let ty = rvalue.ty(self.mir, self.cx.tcx());
-            let ty = self.monomorphize(ty);
+            let ty = rvalue.ty(self.mir, self.cx.tcx());
+            let ty = self.monomorphize(ty);
            let layout = self.cx.spanned_layout_of(ty, span);
-            !self.cx.is_backend_ref(layout)
+            OperandRef::<Bx::Value>::supports_builder(layout)
        }
    }
}
@@ -1200,14 +1100,3 @@ enum OperandValueKind {
    Pair(abi::Scalar, abi::Scalar),
    ZeroSized,
}
-
-impl OperandValueKind {
-    fn scalars(self) -> Option<ArrayVec<abi::Scalar, 2>> {
-        Some(match self {
-            OperandValueKind::ZeroSized => ArrayVec::new(),
-            OperandValueKind::Immediate(a) => ArrayVec::from_iter([a]),
-            OperandValueKind::Pair(a, b) => [a, b].into(),
-            OperandValueKind::Ref => return None,
-        })
-    }
-}
diff --git a/compiler/rustc_codegen_ssa/src/traits/type_.rs b/compiler/rustc_codegen_ssa/src/traits/type_.rs
index 32d9f27d32d34..e4d80b0b47b5e 100644
--- a/compiler/rustc_codegen_ssa/src/traits/type_.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/type_.rs
@@ -1,4 +1,4 @@
-use rustc_abi::{AddressSpace, Float, Integer, Reg};
+use rustc_abi::{AddressSpace, Float, Integer, Primitive, Reg, Scalar};
use rustc_middle::bug;
use rustc_middle::ty::Ty;
use rustc_middle::ty::layout::{HasTyCtxt, HasTypingEnv, TyAndLayout};
@@ -73,6 +73,24 @@ pub trait DerivedTypeCodegenMethods<'tcx>:
        }
    }
+    fn type_from_primitive(&self, p: Primitive) -> Self::Type {
+        use Primitive::*;
+        match p {
+            Int(i, _) => self.type_from_integer(i),
+            Float(f) => self.type_from_float(f),
+            Pointer(address_space) => self.type_ptr_ext(address_space),
+        }
+    }
+
+    fn type_from_scalar(&self, s: Scalar) -> Self::Type {
+        // `MaybeUninit` being `repr(transparent)` somewhat implies that the type
+        // of a scalar has to be the type of its primitive (which is true in LLVM,
+        // where noundef is a parameter attribute or metadata) but if we ever get
+        // a backend where that's no longer true, every use of this will need to
+        // be carefully scrutinized and re-evaluated.
+        self.type_from_primitive(s.primitive())
+    }
+
    fn type_needs_drop(&self, ty: Ty<'tcx>) -> bool {
        ty.needs_drop(self.tcx(), self.typing_env())
    }
diff --git a/compiler/rustc_const_eval/src/interpret/discriminant.rs b/compiler/rustc_const_eval/src/interpret/discriminant.rs
index 2f0b1cb6d1ee5..020cd65d75dca 100644
--- a/compiler/rustc_const_eval/src/interpret/discriminant.rs
+++ b/compiler/rustc_const_eval/src/interpret/discriminant.rs
@@ -1,6 +1,6 @@
//! Functions for reading and writing discriminants of multi-variant layouts (enums and coroutines).
-use rustc_abi::{self as abi, TagEncoding, VariantIdx, Variants};
+use rustc_abi::{self as abi, FieldIdx, TagEncoding, VariantIdx, Variants};
use rustc_middle::ty::layout::{LayoutOf, PrimitiveExt, TyAndLayout};
use rustc_middle::ty::{self, CoroutineArgsExt, ScalarInt, Ty};
use rustc_middle::{mir, span_bug};
@@ -26,7 +26,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
                // No need to validate that the discriminant here because the
                // `TyAndLayout::for_variant()` call earlier already checks the
                // variant is valid.
-                let tag_dest = self.project_field(dest, tag_field)?;
+                let tag_dest = self.project_field(dest, tag_field.as_usize())?;
                self.write_scalar(tag, &tag_dest)
            }
            None => {
@@ -96,7 +96,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
        let tag_layout = self.layout_of(tag_scalar_layout.primitive().to_int_ty(*self.tcx))?;
        // Read tag and sanity-check `tag_layout`.
-        let tag_val = self.read_immediate(&self.project_field(op, tag_field)?)?;
+        let tag_val = self.read_immediate(&self.project_field(op, tag_field.as_usize())?)?;
        assert_eq!(tag_layout.size, tag_val.layout.size);
        assert_eq!(tag_layout.backend_repr.is_signed(), tag_val.layout.backend_repr.is_signed());
        trace!("tag value: {}", tag_val);
@@ -231,7 +231,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
        &self,
        layout: TyAndLayout<'tcx>,
        variant_index: VariantIdx,
-    ) -> InterpResult<'tcx, Option<(ScalarInt, usize)>> {
+    ) -> InterpResult<'tcx, Option<(ScalarInt, FieldIdx)>> {
        // Layout computation excludes uninhabited variants from consideration.
        // Therefore, there's no way to represent those variants in the given layout.
        // Essentially, uninhabited variants do not have a tag that corresponds to their
diff --git a/compiler/rustc_const_eval/src/interpret/validity.rs b/compiler/rustc_const_eval/src/interpret/validity.rs
index eb3f552cd2787..c0d83afbf6dbc 100644
--- a/compiler/rustc_const_eval/src/interpret/validity.rs
+++ b/compiler/rustc_const_eval/src/interpret/validity.rs
@@ -294,7 +294,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
        // First, check if we are projecting to a variant.
        match layout.variants {
            Variants::Multiple { tag_field, .. } => {
-                if tag_field == field {
+                if tag_field.as_usize() == field {
                    return match layout.ty.kind() {
                        ty::Adt(def, ..) if def.is_enum() => PathElem::EnumTag,
                        ty::Coroutine(..) => PathElem::CoroutineTag,
diff --git a/compiler/rustc_middle/src/ty/layout.rs b/compiler/rustc_middle/src/ty/layout.rs
index ebb6a8c08a54c..838edc4c2bfc2 100644
--- a/compiler/rustc_middle/src/ty/layout.rs
+++ b/compiler/rustc_middle/src/ty/layout.rs
@@ -934,7 +934,7 @@ where
                        .unwrap(),
                ),
                Variants::Multiple { tag, tag_field, .. } => {
-                    if i == tag_field {
+                    if FieldIdx::from_usize(i) == tag_field {
                        return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
                    }
                    TyMaybeWithLayout::Ty(args.as_coroutine().prefix_tys()[i])
@@ -1060,8 +1060,10 @@ where
                        tag_field,
                        variants,
                        ..
-                    } if variants.len() == 2 && this.fields.offset(*tag_field) == offset => {
-                        let tagged_variant = if untagged_variant.as_u32() == 0 {
+                    } if variants.len() == 2
+                        && this.fields.offset(tag_field.as_usize()) == offset =>
+                    {
+                        let tagged_variant = if *untagged_variant == VariantIdx::ZERO {
                            VariantIdx::from_u32(1)
                        } else {
                            VariantIdx::from_u32(0)
diff --git a/compiler/rustc_smir/src/rustc_smir/convert/abi.rs b/compiler/rustc_smir/src/rustc_smir/convert/abi.rs
index 62cbab9b723cc..6f8af501f0c48 100644
--- a/compiler/rustc_smir/src/rustc_smir/convert/abi.rs
+++ b/compiler/rustc_smir/src/rustc_smir/convert/abi.rs
@@ -172,7 +172,7 @@ impl<'tcx> Stable<'tcx> for rustc_abi::Variants(
        // However, if the discriminant is placed past the end of the variant, then we need
        // to factor in the size of the discriminant manually. This really should be refactored
        // better, but this "works" for now.
-        if layout.fields.offset(tag_field) >= variant_size {
+        if layout.fields.offset(tag_field.as_usize()) >= variant_size {
            variant_size += match tag_encoding {
                TagEncoding::Direct => tag.size(cx),
                _ => Size::ZERO,
diff --git a/tests/codegen/align-struct.rs b/tests/codegen/align-struct.rs
index cc65b08a9223c..cb5a0a729062d 100644
--- a/tests/codegen/align-struct.rs
+++ b/tests/codegen/align-struct.rs
@@ -13,9 +13,11 @@ pub struct Nested64 {
    d: i8,
}
+// This has the extra field in B to ensure it's not ScalarPair,
+// and thus that the test actually emits it via memory, not `insertvalue`.
pub enum Enum4 {
    A(i32),
-    B(i32),
+    B(i32, i32),
}
pub enum Enum64 {
@@ -52,7 +54,7 @@ pub fn nested64(a: Align64, b: i32, c: i32, d: i8) -> Nested64 {
// CHECK-LABEL: @enum4
#[no_mangle]
pub fn enum4(a: i32) -> Enum4 {
-    // CHECK: %e4 = alloca [8 x i8], align 4
+    // CHECK: %e4 = alloca [12 x i8], align 4
    let e4 = Enum4::A(a);
    e4
}
diff --git a/tests/codegen/common_prim_int_ptr.rs b/tests/codegen/common_prim_int_ptr.rs
index a1d7a125f324c..53716adccbf21 100644
--- a/tests/codegen/common_prim_int_ptr.rs
+++ b/tests/codegen/common_prim_int_ptr.rs
@@ -11,9 +11,9 @@
#[no_mangle]
pub fn insert_int(x: usize) -> Result> {
    // CHECK: start:
-    // CHECK-NEXT: inttoptr i{{[0-9]+}} %x to ptr
-    // CHECK-NEXT: insertvalue
-    // CHECK-NEXT: ret { i{{[0-9]+}}, ptr }
+    // CHECK-NEXT: %[[WO_PROV:.+]] = getelementptr i8, ptr null, [[USIZE:i[0-9]+]] %x
+    // CHECK-NEXT: %[[R:.+]] = insertvalue { [[USIZE]], ptr } { [[USIZE]] 0, ptr poison }, ptr %[[WO_PROV]], 1
+    // CHECK-NEXT: ret { [[USIZE]], ptr } %[[R]]
    Ok(x)
}
diff --git a/tests/codegen/enum/enum-aggregate.rs b/tests/codegen/enum/enum-aggregate.rs
new file mode 100644
index 0000000000000..c028bde14d7d3
--- /dev/null
+++ b/tests/codegen/enum/enum-aggregate.rs
@@ -0,0 +1,94 @@
+//@ compile-flags: -Copt-level=0 -Cno-prepopulate-passes
+//@ min-llvm-version: 19
+//@ only-64bit
+
+#![crate_type = "lib"]
+
+use std::cmp::Ordering;
+use std::num::NonZero;
+use std::ptr::NonNull;
+
+#[no_mangle]
+fn make_some_bool(x: bool) -> Option<bool> {
+    // CHECK-LABEL: i8 @make_some_bool(i1 zeroext %x)
+    // CHECK-NEXT: start:
+    // CHECK-NEXT: %[[WIDER:.+]] = zext i1 %x to i8
+    // CHECK-NEXT: ret i8 %[[WIDER]]
+    Some(x)
+}
+
+#[no_mangle]
+fn make_none_bool() -> Option<bool> {
+    // CHECK-LABEL: i8 @make_none_bool()
+    // CHECK-NEXT: start:
+    // CHECK-NEXT: ret i8 2
+    None
+}
+
+#[no_mangle]
+fn make_some_ordering(x: Ordering) -> Option<Ordering> {
+    // CHECK-LABEL: i8 @make_some_ordering(i8 %x)
+    // CHECK-NEXT: start:
+    // CHECK-NEXT: ret i8 %x
+    Some(x)
+}
+
+#[no_mangle]
+fn make_some_u16(x: u16) -> Option<u16> {
+    // CHECK-LABEL: { i16, i16 } @make_some_u16(i16 %x)
+    // CHECK-NEXT: start:
+    // CHECK-NEXT: %0 = insertvalue { i16, i16 } { i16 1, i16 poison }, i16 %x, 1
+    // CHECK-NEXT: ret { i16, i16 } %0
+    Some(x)
+}
+
+#[no_mangle]
+fn make_none_u16() -> Option<u16> {
+    // CHECK-LABEL: { i16, i16 } @make_none_u16()
+    // CHECK-NEXT: start:
+    // CHECK-NEXT: ret { i16, i16 } { i16 0, i16 undef }
+    None
+}
+
+#[no_mangle]
+fn make_some_nzu32(x: NonZero<u32>) -> Option<NonZero<u32>> {
+    // CHECK-LABEL: i32 @make_some_nzu32(i32 %x)
+    // CHECK-NEXT: start:
+    // CHECK-NEXT: ret i32 %x
+    Some(x)
+}
+
+#[no_mangle]
+fn make_ok_ptr(x: NonNull<u16>) -> Result<NonNull<u16>, usize> {
+    // CHECK-LABEL: { i64, ptr } @make_ok_ptr(ptr %x)
+    // CHECK-NEXT: start:
+    // CHECK-NEXT: %0 = insertvalue { i64, ptr } { i64 0, ptr poison }, ptr %x, 1
+    // CHECK-NEXT: ret { i64, ptr } %0
+    Ok(x)
+}
+
+#[no_mangle]
+fn make_ok_int(x: usize) -> Result<usize, NonNull<u16>> {
+    // CHECK-LABEL: { i64, ptr } @make_ok_int(i64 %x)
+    // CHECK-NEXT: start:
+    // CHECK-NEXT: %[[NOPROV:.+]] = getelementptr i8, ptr null, i64 %x
+    // CHECK-NEXT: %[[R:.+]] = insertvalue { i64, ptr } { i64 0, ptr poison }, ptr %[[NOPROV]], 1
+    // CHECK-NEXT: ret { i64, ptr } %[[R]]
+    Ok(x)
+}
+
+#[no_mangle]
+fn make_some_ref(x: &u16) -> Option<&u16> {
+    // CHECK-LABEL: ptr @make_some_ref(ptr align 2 %x)
+    // CHECK-NEXT: start:
+    // CHECK-NEXT: ret ptr %x
+    Some(x)
+}
+
+#[no_mangle]
+fn make_none_ref<'a>() -> Option<&'a u16> {
+    // CHECK-LABEL: ptr @make_none_ref()
+    // CHECK-NEXT: start:
+    // CHECK-NEXT: ret ptr null
+    None
+}
diff --git a/tests/codegen/set-discriminant-invalid.rs b/tests/codegen/set-discriminant-invalid.rs
index 0b7cb14880c98..dd584ef1c1420 100644
--- a/tests/codegen/set-discriminant-invalid.rs
+++ b/tests/codegen/set-discriminant-invalid.rs
@@ -16,10 +16,9 @@ impl IntoError for Api {
    type Source = ApiError;
    // CHECK-LABEL: @into_error
    // CHECK: llvm.trap()
-    // Also check the next two instructions to make sure we do not match against `trap`
+    // Also check the next instruction to make sure we do not match against `trap`
    // elsewhere in the code.
-    // CHECK-NEXT: load
-    // CHECK-NEXT: ret
+    // CHECK-NEXT: ret i8 poison
    #[no_mangle]
    fn into_error(self, error: Self::Source) -> Error {
        Error::Api { source: error }
diff --git a/tests/codegen/union-aggregate.rs b/tests/codegen/union-aggregate.rs
new file mode 100644
index 0000000000000..3c6053379fa3b
--- /dev/null
+++ b/tests/codegen/union-aggregate.rs
@@ -0,0 +1,85 @@
+//@ compile-flags: -Copt-level=0 -Cno-prepopulate-passes
+//@ min-llvm-version: 19
+//@ only-64bit
+
+#![crate_type = "lib"]
+#![feature(transparent_unions)]
+
+#[repr(transparent)]
+union MU<T: Copy> {
+    uninit: (),
+    value: T,
+}
+
+use std::cmp::Ordering;
+use std::num::NonZero;
+use std::ptr::NonNull;
+
+#[no_mangle]
+fn make_mu_bool(x: bool) -> MU<bool> {
+    // CHECK-LABEL: i8 @make_mu_bool(i1 zeroext %x)
+    // CHECK-NEXT: start:
+    // CHECK-NEXT: %[[WIDER:.+]] = zext i1 %x to i8
+    // CHECK-NEXT: ret i8 %[[WIDER]]
+    MU { value: x }
+}
+
+#[no_mangle]
+fn make_mu_bool_uninit() -> MU<bool> {
+    // CHECK-LABEL: i8 @make_mu_bool_uninit()
+    // CHECK-NEXT: start:
+    // CHECK-NEXT: ret i8 undef
+    MU { uninit: () }
+}
+
+#[no_mangle]
+fn make_mu_ref(x: &u16) -> MU<&u16> {
+    // CHECK-LABEL: ptr @make_mu_ref(ptr align 2 %x)
+    // CHECK-NEXT: start:
+    // CHECK-NEXT: ret ptr %x
+    MU { value: x }
+}
+
+#[no_mangle]
+fn make_mu_ref_uninit<'a>() -> MU<&'a u16> {
+    // CHECK-LABEL: ptr @make_mu_ref_uninit()
+    // CHECK-NEXT: start:
+    // CHECK-NEXT: ret ptr undef
+    MU { uninit: () }
+}
+
+#[no_mangle]
+fn make_mu_str(x: &str) -> MU<&str> {
+    // CHECK-LABEL: { ptr, i64 } @make_mu_str(ptr align 1 %x.0, i64 %x.1)
+    // CHECK-NEXT: start:
+    // CHECK-NEXT: %0 = insertvalue { ptr, i64 } poison, ptr %x.0, 0
+    // CHECK-NEXT: %1 = insertvalue { ptr, i64 } %0, i64 %x.1, 1
+    // CHECK-NEXT: ret { ptr, i64 } %1
+    MU { value: x }
+}
+
+#[no_mangle]
+fn make_mu_str_uninit<'a>() -> MU<&'a str> {
+    // CHECK-LABEL: { ptr, i64 } @make_mu_str_uninit()
+    // CHECK-NEXT: start:
+    // CHECK-NEXT: ret { ptr, i64 } undef
+    MU { uninit: () }
+}
+
+#[no_mangle]
+fn make_mu_pair(x: (u8, u32)) -> MU<(u8, u32)> {
+    // CHECK-LABEL: { i8, i32 } @make_mu_pair(i8 %x.0, i32 %x.1)
+    // CHECK-NEXT: start:
+    // CHECK-NEXT: %0 = insertvalue { i8, i32 } poison, i8 %x.0, 0
+    // CHECK-NEXT: %1 = insertvalue { i8, i32 } %0, i32 %x.1, 1
+    // CHECK-NEXT: ret { i8, i32 } %1
+    MU { value: x }
+}
+
+#[no_mangle]
+fn make_mu_pair_uninit() -> MU<(u8, u32)> {
+    // CHECK-LABEL: { i8, i32 } @make_mu_pair_uninit()
+    // CHECK-NEXT: start:
+    // CHECK-NEXT: ret { i8, i32 } undef
+    MU { uninit: () }
+}
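
For readers who want the shape of the new `OperandRef::builder` / `insert_field` / `insert_imm` / `finalize` flow in isolation, here is a small self-contained model. It is not rustc code: the `Slot`/`PairBuilder` types below are invented stand-ins for the patch's `Result<V, abi::Scalar>` slots, and `0xDEAD` stands in for the backend `undef` value that `finalize` emits when uninitialized bits are valid for a still-missing scalar.

```rust
// Standalone sketch of the Err(expected scalar) -> Ok(value) slot filling used by
// the aggregate builder in this patch. Names and values here are illustrative only.

#[derive(Clone, Copy, Debug, PartialEq)]
enum Slot {
    Missing { uninit_ok: bool }, // plays the role of Err(abi::Scalar)
    Filled(u64),                 // plays the role of Ok(V)
}

struct PairBuilder {
    slots: [Slot; 2],
}

impl PairBuilder {
    fn new(uninit_ok: [bool; 2]) -> Self {
        // Like OperandRef::builder: every scalar slot starts out "missing".
        Self {
            slots: [
                Slot::Missing { uninit_ok: uninit_ok[0] },
                Slot::Missing { uninit_ok: uninit_ok[1] },
            ],
        }
    }

    fn insert(&mut self, index: usize, value: u64) {
        // Like insert_field / insert_imm: a slot may only be written while missing.
        match self.slots[index] {
            Slot::Missing { .. } => self.slots[index] = Slot::Filled(value),
            Slot::Filled(_) => panic!("slot {index} written twice"),
        }
    }

    fn finalize(self) -> [u64; 2] {
        // Like finalize: a slot that was never written becomes "undef" (0xDEAD here),
        // but only if uninitialized data is valid for it; otherwise it is a bug.
        self.slots.map(|s| match s {
            Slot::Filled(v) => v,
            Slot::Missing { uninit_ok: true } => 0xDEAD,
            Slot::Missing { uninit_ok: false } => panic!("required field was never set"),
        })
    }
}

fn main() {
    // Something shaped like None::<u16>: the tag slot is written, the payload slot
    // stays missing and falls back to "undef", mirroring the make_none_u16 test
    // above, which expects `ret { i16, i16 } { i16 0, i16 undef }`.
    let mut b = PairBuilder::new([false, true]);
    b.insert(0, 0); // tag, like builder.insert_imm(tag_field, tag_imm)
    assert_eq!(b.finalize(), [0, 0xDEAD]);
}
```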
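The niche arithmetic that `codegen_tagged_field_value` (and the unchanged cranelift path) performs can also be read on its own. The following is a hedged, self-contained rendition of just that computation; the function name and parameters are mine, and the byte-size mask stands in for `niche.layout.size.unsigned_int_max()` from the patch.

```rust
/// Computes the value stored in the niche field for a niche-encoded variant:
/// wrapping_add on u128, then mask off the bits that don't fit the niche's size,
/// as described by the comment in the diff.
fn niche_tag_value(
    variant_index: u32,
    niche_variants_start: u32,
    niche_start: u128,
    niche_size_bytes: u64, // stand-in for niche.layout.size
) -> u128 {
    let mask = if niche_size_bytes >= 16 {
        u128::MAX
    } else {
        (1u128 << (niche_size_bytes * 8)) - 1
    };
    let relative = variant_index - niche_variants_start;
    (relative as u128).wrapping_add(niche_start) & mask
}

fn main() {
    // Option<NonZero<u32>>-style layout: None is the only niche variant
    // (relative index 0) and niche_start is 0, so None is stored as 0.
    assert_eq!(niche_tag_value(0, 0, 0, 4), 0);
    // A hypothetical 1-byte niche starting at 2: the variant one past the
    // start of the niche range is stored as 3.
    assert_eq!(niche_tag_value(3, 2, 2, 1), 3);
}
```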