From 09a384643ee4188fd1e837ab25856a7e8ff62d07 Mon Sep 17 00:00:00 2001
From: hkalbasi
Date: Fri, 28 Oct 2022 18:45:55 +0330
Subject: [PATCH 1/3] make rustc_target usable outside of rustc
---
compiler/rustc_index/Cargo.toml | 8 +-
compiler/rustc_index/src/lib.rs | 17 +--
compiler/rustc_index/src/vec.rs | 5 +
compiler/rustc_middle/src/arena.rs | 2 +-
compiler/rustc_middle/src/ty/context.rs | 4 +-
compiler/rustc_middle/src/ty/layout.rs | 2 +-
compiler/rustc_target/Cargo.toml | 23 ++--
compiler/rustc_target/src/abi/mod.rs | 127 +++++++++++++++++------
compiler/rustc_target/src/lib.rs | 18 ++--
compiler/rustc_ty_utils/src/layout.rs | 26 ++---
src/librustdoc/html/render/print_item.rs | 8 +-
11 files changed, 163 insertions(+), 77 deletions(-)
diff --git a/compiler/rustc_index/Cargo.toml b/compiler/rustc_index/Cargo.toml
index d8ea5aa80b87a..e1cda5a9edda3 100644
--- a/compiler/rustc_index/Cargo.toml
+++ b/compiler/rustc_index/Cargo.toml
@@ -7,6 +7,10 @@ edition = "2021"
[dependencies]
arrayvec = { version = "0.7", default-features = false }
-rustc_serialize = { path = "../rustc_serialize" }
-rustc_macros = { path = "../rustc_macros" }
+rustc_serialize = { path = "../rustc_serialize", optional = true }
+rustc_macros = { path = "../rustc_macros", optional = true }
smallvec = "1.8.1"
+
+[features]
+default = ["nightly"]
+nightly = ["rustc_serialize", "rustc_macros"]
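Cargo's implicit features do the wiring here: every optional dependency defines a feature of the same name, so `nightly` simply turns both on, and source can be gated on the dependency-named features directly, as the lib.rs and vec.rs hunks below do. A minimal sketch of that consumption pattern (both items are the real ones gated later in this patch):

    // Compiled only when the rustc_serialize dependency (and hence its
    // implicit feature) is enabled, e.g. via the default `nightly` feature.
    #[cfg(feature = "rustc_serialize")]
    use rustc_serialize::{Decodable, Encodable};

    #[cfg(feature = "rustc_macros")]
    pub use rustc_macros::newtype_index;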
diff --git a/compiler/rustc_index/src/lib.rs b/compiler/rustc_index/src/lib.rs
index 23a4c1f069662..03d8ee139188d 100644
--- a/compiler/rustc_index/src/lib.rs
+++ b/compiler/rustc_index/src/lib.rs
@@ -1,17 +1,20 @@
#![deny(rustc::untranslatable_diagnostic)]
#![deny(rustc::diagnostic_outside_of_impl)]
-#![feature(allow_internal_unstable)]
-#![feature(extend_one)]
-#![feature(min_specialization)]
-#![feature(new_uninit)]
-#![feature(step_trait)]
-#![feature(stmt_expr_attributes)]
-#![feature(test)]
+#![cfg_attr(feature = "nightly", feature(allow_internal_unstable))]
+#![cfg_attr(feature = "nightly", feature(extend_one))]
+#![cfg_attr(feature = "nightly", feature(min_specialization))]
+#![cfg_attr(feature = "nightly", feature(new_uninit))]
+#![cfg_attr(feature = "nightly", feature(step_trait))]
+#![cfg_attr(feature = "nightly", feature(stmt_expr_attributes))]
+#![cfg_attr(feature = "nightly", feature(test))]
+#[cfg(feature = "nightly")]
pub mod bit_set;
+#[cfg(feature = "nightly")]
pub mod interval;
pub mod vec;
+#[cfg(feature = "rustc_macros")]
pub use rustc_macros::newtype_index;
/// Type size assertion. The first argument is a type and the second argument is its expected size.
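With `default-features = false`, only the `vec` module remains and `newtype_index!` is unavailable (it lives in `rustc_macros`), so a stable-channel consumer must implement `Idx` by hand. A minimal sketch, with `FieldIdx` as an illustrative name:

    use rustc_index::vec::{Idx, IndexVec};

    #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
    struct FieldIdx(u32);

    impl Idx for FieldIdx {
        fn new(idx: usize) -> Self {
            FieldIdx(idx as u32)
        }
        fn index(self) -> usize {
            self.0 as usize
        }
    }

    fn demo() {
        let mut offsets: IndexVec<FieldIdx, u64> = IndexVec::new();
        let first = offsets.push(0); // push returns the newly created index
        assert_eq!(offsets[first], 0);
    }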
diff --git a/compiler/rustc_index/src/vec.rs b/compiler/rustc_index/src/vec.rs
index 1519258c79430..39aa27a23c1d2 100644
--- a/compiler/rustc_index/src/vec.rs
+++ b/compiler/rustc_index/src/vec.rs
@@ -1,3 +1,4 @@
+#[cfg(feature = "rustc_serialize")]
use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
use std::fmt;
@@ -61,12 +62,14 @@ pub struct IndexVec<I: Idx, T> {
// not the phantom data.
unsafe impl Send for IndexVec where T: Send {}
+#[cfg(feature = "rustc_serialize")]
impl<S: Encoder, I: Idx, T: Encodable<S>> Encodable<S> for IndexVec<I, T> {
fn encode(&self, s: &mut S) {
Encodable::encode(&self.raw, s);
}
}
+#[cfg(feature = "rustc_serialize")]
impl<D: Decoder, I: Idx, T: Decodable<D>> Decodable<D> for IndexVec<I, T> {
fn decode(d: &mut D) -> Self {
IndexVec { raw: Decodable::decode(d), _marker: PhantomData }
@@ -359,11 +362,13 @@ impl<I: Idx, T> Extend<T> for IndexVec<I, T> {
}
#[inline]
+ #[cfg(feature = "nightly")]
fn extend_one(&mut self, item: T) {
self.raw.push(item);
}
#[inline]
+ #[cfg(feature = "nightly")]
fn extend_reserve(&mut self, additional: usize) {
self.raw.reserve(additional);
}
diff --git a/compiler/rustc_middle/src/arena.rs b/compiler/rustc_middle/src/arena.rs
index f8aae86fe3dc3..7bd4b6c0c2767 100644
--- a/compiler/rustc_middle/src/arena.rs
+++ b/compiler/rustc_middle/src/arena.rs
@@ -6,7 +6,7 @@
macro_rules! arena_types {
($macro:path) => (
$macro!([
- [] layout: rustc_target::abi::LayoutS<'tcx>,
+ [] layout: rustc_target::abi::LayoutS<rustc_target::abi::VariantIdx>,
[] fn_abi: rustc_target::abi::call::FnAbi<'tcx, rustc_middle::ty::Ty<'tcx>>,
// AdtDef are interned and compared by address
[decode] adt_def: rustc_middle::ty::AdtDefData,
diff --git a/compiler/rustc_middle/src/ty/context.rs b/compiler/rustc_middle/src/ty/context.rs
index 26d30308ed371..f298e44e08936 100644
--- a/compiler/rustc_middle/src/ty/context.rs
+++ b/compiler/rustc_middle/src/ty/context.rs
@@ -148,7 +148,7 @@ pub struct CtxtInterners<'tcx> {
const_: InternedSet<'tcx, ConstS<'tcx>>,
const_allocation: InternedSet<'tcx, Allocation>,
bound_variable_kinds: InternedSet<'tcx, List<ty::BoundVariableKind>>,
- layout: InternedSet<'tcx, LayoutS<'tcx>>,
+ layout: InternedSet<'tcx, LayoutS<VariantIdx>>,
adt_def: InternedSet<'tcx, AdtDefData>,
}
@@ -2244,7 +2244,7 @@ direct_interners! {
region: mk_region(RegionKind<'tcx>): Region -> Region<'tcx>,
const_: mk_const_internal(ConstS<'tcx>): Const -> Const<'tcx>,
const_allocation: intern_const_alloc(Allocation): ConstAllocation -> ConstAllocation<'tcx>,
- layout: intern_layout(LayoutS<'tcx>): Layout -> Layout<'tcx>,
+ layout: intern_layout(LayoutS<VariantIdx>): Layout -> Layout<'tcx>,
adt_def: intern_adt_def(AdtDefData): AdtDef -> AdtDef<'tcx>,
}
diff --git a/compiler/rustc_middle/src/ty/layout.rs b/compiler/rustc_middle/src/ty/layout.rs
index c74d6bc3774a2..fea2aa8cbf821 100644
--- a/compiler/rustc_middle/src/ty/layout.rs
+++ b/compiler/rustc_middle/src/ty/layout.rs
@@ -610,7 +610,7 @@ where
})
}
- Variants::Multiple { ref variants, .. } => variants[variant_index],
+ Variants::Multiple { ref variants, .. } => cx.tcx().intern_layout(variants[variant_index].clone()),
};
assert_eq!(*layout.variants(), Variants::Single { index: variant_index });
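This call-site change follows from the representation change made in abi/mod.rs below: `Variants::Multiple` now stores variant layouts by value rather than as interned handles, so producing a `Layout<'tcx>` for one variant means cloning the stored `LayoutS` and interning the clone. Roughly:

    // before: variants: IndexVec<VariantIdx, Layout<'tcx>>        (Copy handle)
    // after:  variants: IndexVec<VariantIdx, LayoutS<VariantIdx>> (by value)
    //
    // hence: cx.tcx().intern_layout(variants[variant_index].clone())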
diff --git a/compiler/rustc_target/Cargo.toml b/compiler/rustc_target/Cargo.toml
index fc37fdb1c43ca..58eb4f69c44f9 100644
--- a/compiler/rustc_target/Cargo.toml
+++ b/compiler/rustc_target/Cargo.toml
@@ -7,9 +7,20 @@ edition = "2021"
bitflags = "1.2.1"
tracing = "0.1"
serde_json = "1.0.59"
-rustc_data_structures = { path = "../rustc_data_structures" }
-rustc_feature = { path = "../rustc_feature" }
-rustc_index = { path = "../rustc_index" }
-rustc_macros = { path = "../rustc_macros" }
-rustc_serialize = { path = "../rustc_serialize" }
-rustc_span = { path = "../rustc_span" }
+rustc_data_structures = { path = "../rustc_data_structures", optional = true }
+rustc_feature = { path = "../rustc_feature", optional = true }
+rustc_index = { path = "../rustc_index", default-features = false }
+rustc_macros = { path = "../rustc_macros", optional = true }
+rustc_serialize = { path = "../rustc_serialize", optional = true }
+rustc_span = { path = "../rustc_span", optional = true }
+
+[features]
+default = ["nightly"]
+nightly = [
+ "rustc_data_structures",
+ "rustc_feature",
+ "rustc_index/nightly",
+ "rustc_macros",
+ "rustc_serialize",
+ "rustc_span",
+]
\ No newline at end of file
diff --git a/compiler/rustc_target/src/abi/mod.rs b/compiler/rustc_target/src/abi/mod.rs
index decbefc2f7c62..fa6af2ed7f3a0 100644
--- a/compiler/rustc_target/src/abi/mod.rs
+++ b/compiler/rustc_target/src/abi/mod.rs
@@ -2,23 +2,29 @@ pub use Integer::*;
pub use Primitive::*;
use crate::json::{Json, ToJson};
+#[cfg(feature = "nightly")]
use crate::spec::Target;
use std::convert::{TryFrom, TryInto};
use std::fmt;
+#[cfg(feature = "nightly")]
use std::iter::Step;
use std::num::{NonZeroUsize, ParseIntError};
use std::ops::{Add, AddAssign, Deref, Mul, RangeInclusive, Sub};
use std::str::FromStr;
+#[cfg(feature = "nightly")]
use rustc_data_structures::intern::Interned;
use rustc_index::vec::{Idx, IndexVec};
+#[cfg(feature = "nightly")]
use rustc_macros::HashStable_Generic;
+#[cfg(feature = "nightly")]
pub mod call;
/// Parsed [Data layout](https://llvm.org/docs/LangRef.html#data-layout)
/// for a target, which contains everything needed to compute layouts.
+#[derive(Debug, PartialEq, Eq)]
pub struct TargetDataLayout {
pub endian: Endian,
pub i1_align: AbiAndPrefAlign,
@@ -80,6 +86,7 @@ pub enum TargetDataLayoutErrors<'a> {
}
impl TargetDataLayout {
+ #[cfg(feature = "nightly")]
pub fn parse<'a>(target: &'a Target) -> Result<TargetDataLayout, TargetDataLayoutErrors<'a>> {
// Parse an address space index from a string.
let parse_address_space = |s: &'a str, cause: &'a str| {
@@ -248,7 +255,7 @@ impl HasDataLayout for TargetDataLayout {
}
/// Endianness of the target, which must match cfg(target-endian).
-#[derive(Copy, Clone, PartialEq)]
+#[derive(Copy, Clone, PartialEq, Eq)]
pub enum Endian {
Little,
Big,
@@ -288,8 +295,8 @@ impl ToJson for Endian {
}
/// Size of a type in bytes.
-#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Encodable, Decodable)]
-#[derive(HashStable_Generic)]
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
+#[cfg_attr(feature = "nightly", derive(Encodable, Decodable, HashStable_Generic))]
pub struct Size {
raw: u64,
}
@@ -466,6 +473,7 @@ impl AddAssign for Size {
}
}
+#[cfg(feature = "nightly")]
impl Step for Size {
#[inline]
fn steps_between(start: &Self, end: &Self) -> Option<usize> {
@@ -504,8 +512,8 @@ impl Step for Size {
}
/// Alignment of a type in bytes (always a power of two).
-#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Encodable, Decodable)]
-#[derive(HashStable_Generic)]
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
+#[cfg_attr(feature = "nightly", derive(Encodable, Decodable, HashStable_Generic))]
pub struct Align {
pow2: u8,
}
@@ -588,7 +596,8 @@ impl Align {
/// A pair of alignments, ABI-mandated and preferred.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
-#[derive(HashStable_Generic)]
+#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
+
pub struct AbiAndPrefAlign {
pub abi: Align,
pub pref: Align,
@@ -612,7 +621,9 @@ impl AbiAndPrefAlign {
}
/// Integers, also used for enum discriminants.
-#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, HashStable_Generic)]
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
+#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
+
pub enum Integer {
I8,
I16,
@@ -710,7 +721,8 @@ impl Integer {
}
/// Fundamental unit of memory access and layout.
-#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
+#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum Primitive {
/// The `bool` is the signedness of the `Integer` type.
///
@@ -777,7 +789,7 @@ impl Primitive {
///
/// This is intended specifically to mirror LLVM’s `!range` metadata semantics.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
-#[derive(HashStable_Generic)]
+#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub struct WrappingRange {
pub start: u128,
pub end: u128,
@@ -834,7 +846,7 @@ impl fmt::Debug for WrappingRange {
/// Information about one scalar component of a Rust type.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
-#[derive(HashStable_Generic)]
+#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum Scalar {
Initialized {
value: Primitive,
@@ -924,7 +936,8 @@ impl Scalar {
}
/// Describes how the fields of a type are located in memory.
-#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
+#[derive(PartialEq, Eq, Hash, Clone, Debug)]
+#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum FieldsShape {
/// Scalar primitives and `!`, which never have fields.
Primitive,
@@ -1058,7 +1071,9 @@ impl AddressSpace {
/// Describes how values of the type are passed by target ABIs,
/// in terms of categories of C types there are ABI rules for.
-#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
+#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
+
pub enum Abi {
Uninhabited,
Scalar(Scalar),
@@ -1113,16 +1128,18 @@ impl Abi {
}
}
+#[cfg(feature = "nightly")]
rustc_index::newtype_index! {
pub struct VariantIdx {
derive [HashStable_Generic]
}
}
-#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
-pub enum Variants<'a> {
+#[derive(PartialEq, Eq, Hash, Clone, Debug)]
+#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
+pub enum Variants<V: Idx> {
/// Single enum variants, structs/tuples, unions, and all non-ADTs.
- Single { index: VariantIdx },
+ Single { index: V },
/// Enum-likes with more than one inhabited variant: each variant comes with
/// a *discriminant* (usually the same as the variant index but the user can
@@ -1132,14 +1149,15 @@ pub enum Variants<'a> {
/// For enums, the tag is the sole field of the layout.
Multiple {
tag: Scalar,
- tag_encoding: TagEncoding,
+ tag_encoding: TagEncoding<V>,
tag_field: usize,
- variants: IndexVec<VariantIdx, Layout<'a>>,
+ variants: IndexVec<V, LayoutS<V>>,
},
}
-#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
-pub enum TagEncoding {
+#[derive(PartialEq, Eq, Hash, Clone, Debug)]
+#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
+pub enum TagEncoding<V: Idx> {
/// The tag directly stores the discriminant, but possibly with a smaller layout
/// (so converting the tag to the discriminant can require sign extension).
Direct,
@@ -1155,13 +1173,15 @@ pub enum TagEncoding {
/// `None` has a null pointer for the second tuple field, and
/// `Some` is the identity function (with a non-null reference).
Niche {
- untagged_variant: VariantIdx,
- niche_variants: RangeInclusive<VariantIdx>,
+ untagged_variant: V,
+ #[cfg(feature = "nightly")]
+ niche_variants: RangeInclusive<V>,
niche_start: u128,
},
}
-#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
+#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub struct Niche {
pub offset: Size,
pub value: Primitive,
@@ -1244,8 +1264,9 @@ impl Niche {
}
}
-#[derive(PartialEq, Eq, Hash, HashStable_Generic)]
-pub struct LayoutS<'a> {
+#[derive(PartialEq, Eq, Hash, Clone)]
+#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
+pub struct LayoutS<V: Idx> {
/// Says where the fields are located within the layout.
pub fields: FieldsShape,
@@ -1256,7 +1277,7 @@ pub struct LayoutS<'a> {
///
/// To access all fields of this layout, both `fields` and the fields of the active variant
/// must be taken into account.
- pub variants: Variants<'a>,
+ pub variants: Variants<V>,
/// The `abi` defines how this data is passed between functions, and it defines
/// value restrictions via `valid_range`.
@@ -1275,13 +1296,13 @@ pub struct LayoutS<'a> {
pub size: Size,
}
-impl<'a> LayoutS<'a> {
+impl<V: Idx> LayoutS<V> {
pub fn scalar<C: HasDataLayout>(cx: &C, scalar: Scalar) -> Self {
let largest_niche = Niche::from_scalar(cx, Size::ZERO, scalar);
let size = scalar.size(cx);
let align = scalar.align(cx);
LayoutS {
- variants: Variants::Single { index: VariantIdx::new(0) },
+ variants: Variants::Single { index: V::new(0) },
fields: FieldsShape::Primitive,
abi: Abi::Scalar(scalar),
largest_niche,
@@ -1289,9 +1310,39 @@ impl<'a> LayoutS<'a> {
align,
}
}
+
+ #[inline]
+ pub fn fields(&self) -> &FieldsShape {
+ &self.fields
+ }
+
+ #[inline]
+ pub fn variants(&self) -> &Variants<V> {
+ &self.variants
+ }
+
+ #[inline]
+ pub fn abi(&self) -> Abi {
+ self.abi
+ }
+
+ #[inline]
+ pub fn largest_niche(&self) -> Option<Niche> {
+ self.largest_niche
+ }
+
+ #[inline]
+ pub fn align(&self) -> AbiAndPrefAlign {
+ self.align
+ }
+
+ #[inline]
+ pub fn size(&self) -> Size {
+ self.size
+ }
}
-impl<'a> fmt::Debug for LayoutS<'a> {
+impl<V: Idx> fmt::Debug for LayoutS<V> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// This is how `Layout` used to print before it become
// `Interned`. We print it like this to avoid having to update
@@ -1308,10 +1359,12 @@ impl<'a> fmt::Debug for LayoutS<'a> {
}
}
+#[cfg(feature = "nightly")]
#[derive(Copy, Clone, PartialEq, Eq, Hash, HashStable_Generic)]
#[rustc_pass_by_value]
-pub struct Layout<'a>(pub Interned<'a, LayoutS<'a>>);
+pub struct Layout<'a>(pub Interned<'a, LayoutS<VariantIdx>>);
+#[cfg(feature = "nightly")]
impl<'a> fmt::Debug for Layout<'a> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// See comment on `<LayoutS as Debug>::fmt` above.
@@ -1319,12 +1372,13 @@ impl<'a> fmt::Debug for Layout<'a> {
}
}
+#[cfg(feature = "nightly")]
impl<'a> Layout<'a> {
pub fn fields(self) -> &'a FieldsShape {
&self.0.0.fields
}
- pub fn variants(self) -> &'a Variants<'a> {
+ pub fn variants(self) -> &'a Variants<VariantIdx> {
&self.0.0.variants
}
@@ -1352,15 +1406,18 @@ impl<'a> Layout<'a> {
/// to that obtained from `layout_of(ty)`, as we need to produce
/// layouts for which Rust types do not exist, such as enum variants
/// or synthetic fields of enums (i.e., discriminants) and fat pointers.
-#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, HashStable_Generic)]
+#[cfg(feature = "nightly")]
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
+#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub struct TyAndLayout<'a, Ty> {
pub ty: Ty,
pub layout: Layout<'a>,
}
+#[cfg(feature = "nightly")]
impl<'a, Ty> Deref for TyAndLayout<'a, Ty> {
- type Target = &'a LayoutS<'a>;
- fn deref(&self) -> &&'a LayoutS<'a> {
+ type Target = &'a LayoutS<VariantIdx>;
+ fn deref(&self) -> &&'a LayoutS<VariantIdx> {
&self.layout.0.0
}
}
@@ -1402,6 +1459,7 @@ pub enum InitKind {
/// Trait that needs to be implemented by the higher-level type representation
/// (e.g. `rustc_middle::ty::Ty`), to provide `rustc_target::abi` functionality.
+#[cfg(feature = "nightly")]
pub trait TyAbiInterface<'a, C>: Sized {
fn ty_and_layout_for_variant(
this: TyAndLayout<'a, Self>,
@@ -1420,6 +1478,7 @@ pub trait TyAbiInterface<'a, C>: Sized {
fn is_unit(this: TyAndLayout<'a, Self>) -> bool;
}
+#[cfg(feature = "nightly")]
impl<'a, Ty> TyAndLayout<'a, Ty> {
pub fn for_variant<C>(self, cx: &C, variant_index: VariantIdx) -> Self
where
@@ -1489,7 +1548,7 @@ impl<'a, Ty> TyAndLayout<'a, Ty> {
}
}
-impl<'a, Ty> TyAndLayout<'a, Ty> {
+impl<V: Idx> LayoutS<V> {
/// Returns `true` if the layout corresponds to an unsized type.
pub fn is_unsized(&self) -> bool {
self.abi.is_unsized()
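Taken together, these hunks replace the interner-tied `LayoutS<'a>`/`Variants<'a>` with types generic over a variant-index `V: Idx`, so layouts can be built and inspected without a `TyCtxt`. A sketch of what this enables for an out-of-tree consumer, with `MyVariantIdx` as an illustrative stand-in for rustc's `VariantIdx`:

    use rustc_index::vec::Idx;
    use rustc_target::abi::LayoutS;

    #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
    struct MyVariantIdx(u32);

    impl Idx for MyVariantIdx {
        fn new(idx: usize) -> Self {
            MyVariantIdx(idx as u32)
        }
        fn index(self) -> usize {
            self.0 as usize
        }
    }

    // The accessors added above mirror the interned Layout<'a> API, so the
    // same code shape works against a plain &LayoutS<V> on stable:
    fn describe(layout: &LayoutS<MyVariantIdx>) -> String {
        format!(
            "size={}B align={}B variants={:?}",
            layout.size().bytes(),
            layout.align().abi.bytes(),
            layout.variants(),
        )
    }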
diff --git a/compiler/rustc_target/src/lib.rs b/compiler/rustc_target/src/lib.rs
index aaba0d7f093ae..1065980a26acc 100644
--- a/compiler/rustc_target/src/lib.rs
+++ b/compiler/rustc_target/src/lib.rs
@@ -8,13 +8,13 @@
//! LLVM.
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
-#![feature(assert_matches)]
-#![feature(associated_type_bounds)]
-#![feature(exhaustive_patterns)]
-#![feature(min_specialization)]
-#![feature(never_type)]
-#![feature(rustc_attrs)]
-#![feature(step_trait)]
+#![cfg_attr(feature = "nightly", feature(assert_matches))]
+#![cfg_attr(feature = "nightly", feature(associated_type_bounds))]
+#![cfg_attr(feature = "nightly", feature(exhaustive_patterns))]
+#![cfg_attr(feature = "nightly", feature(min_specialization))]
+#![cfg_attr(feature = "nightly", feature(never_type))]
+#![cfg_attr(feature = "nightly", feature(rustc_attrs))]
+#![cfg_attr(feature = "nightly", feature(step_trait))]
#![deny(rustc::untranslatable_diagnostic)]
#![deny(rustc::diagnostic_outside_of_impl)]
@@ -22,14 +22,18 @@ use std::iter::FromIterator;
use std::path::{Path, PathBuf};
#[macro_use]
+#[cfg(feature = "nightly")]
extern crate rustc_macros;
#[macro_use]
+#[cfg(feature = "nightly")]
extern crate tracing;
pub mod abi;
+#[cfg(feature = "nightly")]
pub mod asm;
pub mod json;
+#[cfg(feature = "nightly")]
pub mod spec;
#[cfg(test)]
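After this gating, a build without `nightly` exposes only the `abi` and `json` modules (and within `abi`, not the `call` submodule or `TargetDataLayout::parse`, which need `spec::Target`). A small sketch of what stays usable on stable:

    use rustc_target::abi::{Align, Endian, Size};

    fn demo() {
        let size = Size::from_bytes(12);
        let align = Align::from_bytes(4).unwrap(); // must be a power of two
        assert_eq!(size.align_to(align).bytes(), 12);
        assert!(Endian::Little != Endian::Big); // Eq was derived above
    }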
diff --git a/compiler/rustc_ty_utils/src/layout.rs b/compiler/rustc_ty_utils/src/layout.rs
index 07af3dc516478..5e77ad4054a56 100644
--- a/compiler/rustc_ty_utils/src/layout.rs
+++ b/compiler/rustc_ty_utils/src/layout.rs
@@ -89,7 +89,11 @@ fn invert_mapping(map: &[u32]) -> Vec<u32> {
inverse
}
-fn scalar_pair<'tcx>(cx: &LayoutCx<'tcx, TyCtxt<'tcx>>, a: Scalar, b: Scalar) -> LayoutS<'tcx> {
+fn scalar_pair<'tcx>(
+ cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
+ a: Scalar,
+ b: Scalar,
+) -> LayoutS<VariantIdx> {
let dl = cx.data_layout();
let b_align = b.align(dl);
let align = a.align(dl).max(b_align).max(dl.aggregate_align);
@@ -122,7 +126,7 @@ fn univariant_uninterned<'tcx>(
fields: &[TyAndLayout<'_>],
repr: &ReprOptions,
kind: StructKind,
-) -> Result<LayoutS<'tcx>, LayoutError<'tcx>> {
+) -> Result<LayoutS<VariantIdx>, LayoutError<'tcx>> {
let dl = cx.data_layout();
let pack = repr.pack;
if pack.is_some() && repr.align.is_some() {
@@ -864,13 +868,13 @@ fn layout_of_uncached<'tcx>(
// variant layouts, so we can't store them in the
// overall LayoutS. Store the overall LayoutS
// and the variant LayoutSs here until then.
- struct TmpLayout<'tcx> {
- layout: LayoutS<'tcx>,
- variants: IndexVec<VariantIdx, LayoutS<'tcx>>,
+ struct TmpLayout {
+ layout: LayoutS<VariantIdx>,
+ variants: IndexVec<VariantIdx, LayoutS<VariantIdx>>,
}
let calculate_niche_filling_layout =
- || -> Result<Option<TmpLayout<'tcx>>, LayoutError<'tcx>> {
+ || -> Result<Option<TmpLayout>, LayoutError<'tcx>> {
diff --git a/src/librustdoc/html/render/print_item.rs b/src/librustdoc/html/render/print_item.rs
"
);
w.write_str("Size: ");
- write_size_of_layout(w, ty_layout.layout, 0);
+ write_size_of_layout(w, &ty_layout.layout.0, 0);
writeln!(w, "
");
if let Variants::Multiple { variants, tag, tag_encoding, .. } =
&ty_layout.layout.variants()
@@ -1953,7 +1953,7 @@ fn document_type_layout(w: &mut Buffer, cx: &Context<'_>, ty_def_id: DefId) {
for (index, layout) in variants.iter_enumerated() {
let name = adt.variant(index).name;
write!(w, "{name}
: ", name = name);
- write_size_of_layout(w, *layout, tag_size);
+ write_size_of_layout(w, layout, tag_size);
writeln!(w, "");
}
w.write_str("");
From 27fb904d680996fe48e04aef65d4d655bdab843b Mon Sep 17 00:00:00 2001
From: hkalbasi
Date: Tue, 1 Nov 2022 19:50:30 +0330
Subject: [PATCH 2/3] move some layout logic to rustc_target::abi::layout
---
Cargo.lock | 3 +
compiler/rustc_hir_analysis/src/collect.rs | 4 +-
compiler/rustc_lint/src/types.rs | 6 +-
compiler/rustc_middle/src/ty/adt.rs | 6 +-
compiler/rustc_middle/src/ty/layout.rs | 31 +-
compiler/rustc_middle/src/ty/mod.rs | 208 ++--
compiler/rustc_middle/src/ty/util.rs | 23 +-
compiler/rustc_target/Cargo.toml | 4 +-
compiler/rustc_target/src/abi/layout.rs | 943 +++++++++++++++++
compiler/rustc_target/src/abi/mod.rs | 151 ++-
compiler/rustc_traits/Cargo.toml | 1 +
compiler/rustc_traits/src/chalk/db.rs | 30 +-
compiler/rustc_ty_utils/src/layout.rs | 972 +-----------------
.../src/casts/cast_possible_truncation.rs | 5 +-
src/tools/clippy/clippy_lints/src/lib.rs | 1 -
15 files changed, 1231 insertions(+), 1157 deletions(-)
create mode 100644 compiler/rustc_target/src/abi/layout.rs
diff --git a/Cargo.lock b/Cargo.lock
index c987bf44ec00d..13d37c9337535 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4281,6 +4281,8 @@ name = "rustc_target"
version = "0.0.0"
dependencies = [
"bitflags",
+ "rand 0.8.5",
+ "rand_xoshiro",
"rustc_data_structures",
"rustc_feature",
"rustc_index",
@@ -4336,6 +4338,7 @@ dependencies = [
"rustc_infer",
"rustc_middle",
"rustc_span",
+ "rustc_target",
"rustc_trait_selection",
"smallvec",
"tracing",
diff --git a/compiler/rustc_hir_analysis/src/collect.rs b/compiler/rustc_hir_analysis/src/collect.rs
index 5d63d90f304b1..6bdd551145994 100644
--- a/compiler/rustc_hir_analysis/src/collect.rs
+++ b/compiler/rustc_hir_analysis/src/collect.rs
@@ -32,8 +32,8 @@ use rustc_middle::hir::nested_filter;
use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs};
use rustc_middle::mir::mono::Linkage;
use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::repr_options_of_def;
use rustc_middle::ty::util::{Discr, IntTypeExt};
-use rustc_middle::ty::ReprOptions;
use rustc_middle::ty::{self, AdtKind, Const, DefIdTree, IsSuggestable, Ty, TyCtxt};
use rustc_session::lint;
use rustc_session::parse::feature_err;
@@ -860,7 +860,7 @@ fn adt_def<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> ty::AdtDef<'tcx> {
bug!();
};
- let repr = ReprOptions::new(tcx, def_id.to_def_id());
+ let repr = repr_options_of_def(tcx, def_id.to_def_id());
let (kind, variants) = match item.kind {
ItemKind::Enum(ref def, _) => {
let mut distance_from_explicit = 0;
diff --git a/compiler/rustc_lint/src/types.rs b/compiler/rustc_lint/src/types.rs
index afc568f3a505b..fadd47eed723c 100644
--- a/compiler/rustc_lint/src/types.rs
+++ b/compiler/rustc_lint/src/types.rs
@@ -12,7 +12,7 @@ use rustc_middle::ty::{self, AdtKind, DefIdTree, Ty, TyCtxt, TypeSuperVisitable,
use rustc_span::source_map;
use rustc_span::symbol::sym;
use rustc_span::{Span, Symbol};
-use rustc_target::abi::{Abi, WrappingRange};
+use rustc_target::abi::{Abi, Size, WrappingRange};
use rustc_target::abi::{Integer, TagEncoding, Variants};
use rustc_target::spec::abi::Abi as SpecAbi;
@@ -225,11 +225,11 @@ fn report_bin_hex_error(
cx: &LateContext<'_>,
expr: &hir::Expr<'_>,
ty: attr::IntType,
+ size: Size,
repr_str: String,
val: u128,
negative: bool,
) {
- let size = Integer::from_attr(&cx.tcx, ty).size();
cx.struct_span_lint(
OVERFLOWING_LITERALS,
expr.span,
@@ -352,6 +352,7 @@ fn lint_int_literal<'tcx>(
cx,
e,
attr::IntType::SignedInt(ty::ast_int_ty(t)),
+ Integer::from_int_ty(cx, t).size(),
repr_str,
v,
negative,
@@ -437,6 +438,7 @@ fn lint_uint_literal<'tcx>(
cx,
e,
attr::IntType::UnsignedInt(ty::ast_uint_ty(t)),
+ Integer::from_uint_ty(cx, t).size(),
repr_str,
lit_val,
false,
diff --git a/compiler/rustc_middle/src/ty/adt.rs b/compiler/rustc_middle/src/ty/adt.rs
index 6b6aa40a1604f..d3d667f68407f 100644
--- a/compiler/rustc_middle/src/ty/adt.rs
+++ b/compiler/rustc_middle/src/ty/adt.rs
@@ -14,7 +14,7 @@ use rustc_index::vec::{Idx, IndexVec};
use rustc_query_system::ich::StableHashingContext;
use rustc_session::DataTypeKind;
use rustc_span::symbol::sym;
-use rustc_target::abi::VariantIdx;
+use rustc_target::abi::{ReprOptions, VariantIdx};
use std::cell::RefCell;
use std::cmp::Ordering;
@@ -22,9 +22,7 @@ use std::hash::{Hash, Hasher};
use std::ops::Range;
use std::str;
-use super::{
- Destructor, FieldDef, GenericPredicates, ReprOptions, Ty, TyCtxt, VariantDef, VariantDiscr,
-};
+use super::{Destructor, FieldDef, GenericPredicates, Ty, TyCtxt, VariantDef, VariantDiscr};
bitflags! {
#[derive(HashStable, TyEncodable, TyDecodable)]
diff --git a/compiler/rustc_middle/src/ty/layout.rs b/compiler/rustc_middle/src/ty/layout.rs
index fea2aa8cbf821..488fd567846a3 100644
--- a/compiler/rustc_middle/src/ty/layout.rs
+++ b/compiler/rustc_middle/src/ty/layout.rs
@@ -1,8 +1,6 @@
use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use crate::ty::normalize_erasing_regions::NormalizationError;
use crate::ty::{self, ReprOptions, Ty, TyCtxt, TypeVisitable};
-use rustc_ast as ast;
-use rustc_attr as attr;
use rustc_errors::{DiagnosticBuilder, Handler, IntoDiagnostic};
use rustc_hir as hir;
use rustc_hir::def_id::DefId;
@@ -20,7 +18,6 @@ use std::ops::Bound;
pub trait IntegerExt {
fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>;
- fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer;
fn from_uint_ty<C: HasDataLayout>(cx: &C, uty: ty::UintTy) -> Integer;
fn repr_discr<'tcx>(
@@ -49,22 +46,6 @@ impl IntegerExt for Integer {
}
}
- /// Gets the Integer type from an attr::IntType.
- fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
- let dl = cx.data_layout();
-
- match ity {
- attr::SignedInt(ast::IntTy::I8) | attr::UnsignedInt(ast::UintTy::U8) => I8,
- attr::SignedInt(ast::IntTy::I16) | attr::UnsignedInt(ast::UintTy::U16) => I16,
- attr::SignedInt(ast::IntTy::I32) | attr::UnsignedInt(ast::UintTy::U32) => I32,
- attr::SignedInt(ast::IntTy::I64) | attr::UnsignedInt(ast::UintTy::U64) => I64,
- attr::SignedInt(ast::IntTy::I128) | attr::UnsignedInt(ast::UintTy::U128) => I128,
- attr::SignedInt(ast::IntTy::Isize) | attr::UnsignedInt(ast::UintTy::Usize) => {
- dl.ptr_sized_integer()
- }
- }
- }
-
fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer {
match ity {
ty::IntTy::I8 => I8,
@@ -237,6 +218,18 @@ pub struct LayoutCx<'tcx, C> {
pub param_env: ty::ParamEnv<'tcx>,
}
+impl<'tcx> LayoutCalculator for LayoutCx<'tcx, TyCtxt<'tcx>> {
+ type TargetDataLayoutRef = &'tcx TargetDataLayout;
+
+ fn delay_bug(&self, txt: &str) {
+ self.tcx.sess.delay_span_bug(DUMMY_SP, txt);
+ }
+
+ fn current_data_layout(&self) -> Self::TargetDataLayoutRef {
+ &self.tcx.data_layout
+ }
+}
+
/// Type size "skeleton", i.e., the only information determining a type's size.
/// While this is conservative, (aside from constant sizes, only pointers,
/// newtypes thereof and null pointer optimized enums are allowed), it is
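This impl is all rustc itself has to supply for the moved code: the new `rustc_target::abi::layout` module is written entirely against the `LayoutCalculator` trait. A hypothetical out-of-tree implementation, assuming the module is exported as `rustc_target::abi::layout` and using `Arc` because `Arc<T>: Borrow<T>`:

    use std::sync::Arc;

    use rustc_target::abi::layout::LayoutCalculator;
    use rustc_target::abi::TargetDataLayout;

    struct MyLayoutCx {
        dl: Arc<TargetDataLayout>,
    }

    impl LayoutCalculator for MyLayoutCx {
        type TargetDataLayoutRef = Arc<TargetDataLayout>;

        fn delay_bug(&self, txt: &str) {
            // rustc files a delayed ICE here; a standalone consumer can log.
            eprintln!("layout error: {txt}");
        }

        fn current_data_layout(&self) -> Self::TargetDataLayoutRef {
            self.dl.clone()
        }
    }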
diff --git a/compiler/rustc_middle/src/ty/mod.rs b/compiler/rustc_middle/src/ty/mod.rs
index 0458c4abd3d4c..e3421ab9ce0bf 100644
--- a/compiler/rustc_middle/src/ty/mod.rs
+++ b/compiler/rustc_middle/src/ty/mod.rs
@@ -48,7 +48,8 @@ use rustc_session::cstore::CrateStoreDyn;
use rustc_span::hygiene::MacroKind;
use rustc_span::symbol::{kw, sym, Ident, Symbol};
use rustc_span::{ExpnId, Span};
-use rustc_target::abi::{Align, VariantIdx};
+use rustc_target::abi::{Align, Integer, IntegerType, VariantIdx};
+pub use rustc_target::abi::{ReprFlags, ReprOptions};
pub use subst::*;
pub use vtable::*;
@@ -1994,161 +1995,76 @@ impl Hash for FieldDef {
}
}
-bitflags! {
- #[derive(TyEncodable, TyDecodable, Default, HashStable)]
- pub struct ReprFlags: u8 {
- const IS_C = 1 << 0;
- const IS_SIMD = 1 << 1;
- const IS_TRANSPARENT = 1 << 2;
- // Internal only for now. If true, don't reorder fields.
- const IS_LINEAR = 1 << 3;
- // If true, the type's layout can be randomized using
- // the seed stored in `ReprOptions.layout_seed`
- const RANDOMIZE_LAYOUT = 1 << 4;
- // Any of these flags being set prevent field reordering optimisation.
- const IS_UNOPTIMISABLE = ReprFlags::IS_C.bits
- | ReprFlags::IS_SIMD.bits
- | ReprFlags::IS_LINEAR.bits;
- }
-}
-
-/// Represents the repr options provided by the user,
-#[derive(Copy, Clone, Debug, Eq, PartialEq, TyEncodable, TyDecodable, Default, HashStable)]
-pub struct ReprOptions {
- pub int: Option<attr::IntType>,
- pub align: Option<Align>,
- pub pack: Option<Align>,
- pub flags: ReprFlags,
- /// The seed to be used for randomizing a type's layout
- ///
- /// Note: This could technically be a `[u8; 16]` (a `u128`) which would
- /// be the "most accurate" hash as it'd encompass the item and crate
- /// hash without loss, but it does pay the price of being larger.
- /// Everything's a tradeoff, a `u64` seed should be sufficient for our
- /// purposes (primarily `-Z randomize-layout`)
- pub field_shuffle_seed: u64,
-}
-
-impl ReprOptions {
- pub fn new(tcx: TyCtxt<'_>, did: DefId) -> ReprOptions {
- let mut flags = ReprFlags::empty();
- let mut size = None;
- let mut max_align: Option<Align> = None;
- let mut min_pack: Option<Align> = None;
-
- // Generate a deterministically-derived seed from the item's path hash
- // to allow for cross-crate compilation to actually work
- let mut field_shuffle_seed = tcx.def_path_hash(did).0.to_smaller_hash();
-
- // If the user defined a custom seed for layout randomization, xor the item's
- // path hash with the user defined seed, this will allowing determinism while
- // still allowing users to further randomize layout generation for e.g. fuzzing
- if let Some(user_seed) = tcx.sess.opts.unstable_opts.layout_seed {
- field_shuffle_seed ^= user_seed;
- }
-
- for attr in tcx.get_attrs(did, sym::repr) {
- for r in attr::parse_repr_attr(&tcx.sess, attr) {
- flags.insert(match r {
- attr::ReprC => ReprFlags::IS_C,
- attr::ReprPacked(pack) => {
- let pack = Align::from_bytes(pack as u64).unwrap();
- min_pack = Some(if let Some(min_pack) = min_pack {
- min_pack.min(pack)
- } else {
- pack
- });
- ReprFlags::empty()
- }
- attr::ReprTransparent => ReprFlags::IS_TRANSPARENT,
- attr::ReprSimd => ReprFlags::IS_SIMD,
- attr::ReprInt(i) => {
- size = Some(i);
- ReprFlags::empty()
- }
- attr::ReprAlign(align) => {
- max_align = max_align.max(Some(Align::from_bytes(align as u64).unwrap()));
- ReprFlags::empty()
- }
- });
- }
- }
+pub fn repr_options_of_def(tcx: TyCtxt<'_>, did: DefId) -> ReprOptions {
+ let mut flags = ReprFlags::empty();
+ let mut size = None;
+ let mut max_align: Option<Align> = None;
+ let mut min_pack: Option<Align> = None;
- // If `-Z randomize-layout` was enabled for the type definition then we can
- // consider performing layout randomization
- if tcx.sess.opts.unstable_opts.randomize_layout {
- flags.insert(ReprFlags::RANDOMIZE_LAYOUT);
- }
+ // Generate a deterministically-derived seed from the item's path hash
+ // to allow for cross-crate compilation to actually work
+ let mut field_shuffle_seed = tcx.def_path_hash(did).0.to_smaller_hash();
- // This is here instead of layout because the choice must make it into metadata.
- if !tcx.consider_optimizing(|| format!("Reorder fields of {:?}", tcx.def_path_str(did))) {
- flags.insert(ReprFlags::IS_LINEAR);
- }
-
- Self { int: size, align: max_align, pack: min_pack, flags, field_shuffle_seed }
- }
-
- #[inline]
- pub fn simd(&self) -> bool {
- self.flags.contains(ReprFlags::IS_SIMD)
+ // If the user defined a custom seed for layout randomization, xor the item's
+ // path hash with the user defined seed, this will allowing determinism while
+ // still allowing users to further randomize layout generation for e.g. fuzzing
+ if let Some(user_seed) = tcx.sess.opts.unstable_opts.layout_seed {
+ field_shuffle_seed ^= user_seed;
}
- #[inline]
- pub fn c(&self) -> bool {
- self.flags.contains(ReprFlags::IS_C)
- }
-
- #[inline]
- pub fn packed(&self) -> bool {
- self.pack.is_some()
- }
-
- #[inline]
- pub fn transparent(&self) -> bool {
- self.flags.contains(ReprFlags::IS_TRANSPARENT)
- }
-
- #[inline]
- pub fn linear(&self) -> bool {
- self.flags.contains(ReprFlags::IS_LINEAR)
- }
-
- /// Returns the discriminant type, given these `repr` options.
- /// This must only be called on enums!
- pub fn discr_type(&self) -> attr::IntType {
- self.int.unwrap_or(attr::SignedInt(ast::IntTy::Isize))
- }
-
- /// Returns `true` if this `#[repr()]` should inhabit "smart enum
- /// layout" optimizations, such as representing `Foo<&T>` as a
- /// single pointer.
- pub fn inhibit_enum_layout_opt(&self) -> bool {
- self.c() || self.int.is_some()
- }
-
- /// Returns `true` if this `#[repr()]` should inhibit struct field reordering
- /// optimizations, such as with `repr(C)`, `repr(packed(1))`, or `repr(<int>)`.
- pub fn inhibit_struct_field_reordering_opt(&self) -> bool {
- if let Some(pack) = self.pack {
- if pack.bytes() == 1 {
- return true;
- }
+ for attr in tcx.get_attrs(did, sym::repr) {
+ for r in attr::parse_repr_attr(&tcx.sess, attr) {
+ flags.insert(match r {
+ attr::ReprC => ReprFlags::IS_C,
+ attr::ReprPacked(pack) => {
+ let pack = Align::from_bytes(pack as u64).unwrap();
+ min_pack =
+ Some(if let Some(min_pack) = min_pack { min_pack.min(pack) } else { pack });
+ ReprFlags::empty()
+ }
+ attr::ReprTransparent => ReprFlags::IS_TRANSPARENT,
+ attr::ReprSimd => ReprFlags::IS_SIMD,
+ attr::ReprInt(i) => {
+ size = Some(match i {
+ attr::IntType::SignedInt(x) => match x {
+ ast::IntTy::Isize => IntegerType::Pointer(true),
+ ast::IntTy::I8 => IntegerType::Fixed(Integer::I8, true),
+ ast::IntTy::I16 => IntegerType::Fixed(Integer::I16, true),
+ ast::IntTy::I32 => IntegerType::Fixed(Integer::I32, true),
+ ast::IntTy::I64 => IntegerType::Fixed(Integer::I64, true),
+ ast::IntTy::I128 => IntegerType::Fixed(Integer::I128, true),
+ },
+ attr::IntType::UnsignedInt(x) => match x {
+ ast::UintTy::Usize => IntegerType::Pointer(false),
+ ast::UintTy::U8 => IntegerType::Fixed(Integer::I8, false),
+ ast::UintTy::U16 => IntegerType::Fixed(Integer::I16, false),
+ ast::UintTy::U32 => IntegerType::Fixed(Integer::I32, false),
+ ast::UintTy::U64 => IntegerType::Fixed(Integer::I64, false),
+ ast::UintTy::U128 => IntegerType::Fixed(Integer::I128, false),
+ },
+ });
+ ReprFlags::empty()
+ }
+ attr::ReprAlign(align) => {
+ max_align = max_align.max(Some(Align::from_bytes(align as u64).unwrap()));
+ ReprFlags::empty()
+ }
+ });
}
-
- self.flags.intersects(ReprFlags::IS_UNOPTIMISABLE) || self.int.is_some()
}
- /// Returns `true` if this type is valid for reordering and `-Z randomize-layout`
- /// was enabled for its declaration crate
- pub fn can_randomize_type_layout(&self) -> bool {
- !self.inhibit_struct_field_reordering_opt()
- && self.flags.contains(ReprFlags::RANDOMIZE_LAYOUT)
+ // If `-Z randomize-layout` was enabled for the type definition then we can
+ // consider performing layout randomization
+ if tcx.sess.opts.unstable_opts.randomize_layout {
+ flags.insert(ReprFlags::RANDOMIZE_LAYOUT);
}
- /// Returns `true` if this `#[repr()]` should inhibit union ABI optimisations.
- pub fn inhibit_union_abi_opt(&self) -> bool {
- self.c()
+ // This is here instead of layout because the choice must make it into metadata.
+ if !tcx.consider_optimizing(|| format!("Reorder fields of {:?}", tcx.def_path_str(did))) {
+ flags.insert(ReprFlags::IS_LINEAR);
}
+
+ ReprOptions { int: size, align: max_align, pack: min_pack, flags, field_shuffle_seed }
}
impl<'tcx> FieldDef {
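`ReprOptions` itself moves to `rustc_target` (re-exported above), but its constructor cannot: it reads attributes through `TyCtxt`, on which `rustc_target` must not depend, so construction becomes the free function `repr_options_of_def` while the `attr::IntType` field is replaced by the target-level `IntegerType`. A sketch of the new discriminant-type encoding:

    use rustc_target::abi::IntegerType;

    // #[repr(i16)]   => IntegerType::Fixed(Integer::I16, /* signed */ true)
    // #[repr(usize)] => IntegerType::Pointer(/* signed */ false)
    fn is_signed(ity: IntegerType) -> bool {
        match ity {
            IntegerType::Pointer(signed) => signed,
            IntegerType::Fixed(_, signed) => signed,
        }
    }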
diff --git a/compiler/rustc_middle/src/ty/util.rs b/compiler/rustc_middle/src/ty/util.rs
index f72e236eda133..6561c4c278d0e 100644
--- a/compiler/rustc_middle/src/ty/util.rs
+++ b/compiler/rustc_middle/src/ty/util.rs
@@ -8,8 +8,6 @@ use crate::ty::{
};
use crate::ty::{GenericArgKind, SubstsRef};
use rustc_apfloat::Float as _;
-use rustc_ast as ast;
-use rustc_attr::{self as attr, SignedInt, UnsignedInt};
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_errors::ErrorGuaranteed;
@@ -19,7 +17,7 @@ use rustc_hir::def_id::DefId;
use rustc_index::bit_set::GrowableBitSet;
use rustc_macros::HashStable;
use rustc_span::{sym, DUMMY_SP};
-use rustc_target::abi::{Integer, Size, TargetDataLayout};
+use rustc_target::abi::{Integer, IntegerType, Size, TargetDataLayout};
use rustc_target::spec::abi::Abi;
use smallvec::SmallVec;
use std::{fmt, iter};
@@ -104,21 +102,12 @@ pub trait IntTypeExt {
fn initial_discriminant<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Discr<'tcx>;
}
-impl IntTypeExt for attr::IntType {
+impl IntTypeExt for IntegerType {
fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
- match *self {
- SignedInt(ast::IntTy::I8) => tcx.types.i8,
- SignedInt(ast::IntTy::I16) => tcx.types.i16,
- SignedInt(ast::IntTy::I32) => tcx.types.i32,
- SignedInt(ast::IntTy::I64) => tcx.types.i64,
- SignedInt(ast::IntTy::I128) => tcx.types.i128,
- SignedInt(ast::IntTy::Isize) => tcx.types.isize,
- UnsignedInt(ast::UintTy::U8) => tcx.types.u8,
- UnsignedInt(ast::UintTy::U16) => tcx.types.u16,
- UnsignedInt(ast::UintTy::U32) => tcx.types.u32,
- UnsignedInt(ast::UintTy::U64) => tcx.types.u64,
- UnsignedInt(ast::UintTy::U128) => tcx.types.u128,
- UnsignedInt(ast::UintTy::Usize) => tcx.types.usize,
+ match self {
+ IntegerType::Pointer(true) => tcx.types.isize,
+ IntegerType::Pointer(false) => tcx.types.usize,
+ IntegerType::Fixed(i, s) => i.to_ty(tcx, *s),
}
}
diff --git a/compiler/rustc_target/Cargo.toml b/compiler/rustc_target/Cargo.toml
index 58eb4f69c44f9..f2e21078b446b 100644
--- a/compiler/rustc_target/Cargo.toml
+++ b/compiler/rustc_target/Cargo.toml
@@ -6,6 +6,8 @@ edition = "2021"
[dependencies]
bitflags = "1.2.1"
tracing = "0.1"
+rand = "0.8.4"
+rand_xoshiro = "0.6.0"
serde_json = "1.0.59"
rustc_data_structures = { path = "../rustc_data_structures", optional = true }
rustc_feature = { path = "../rustc_feature", optional = true }
@@ -23,4 +25,4 @@ nightly = [
"rustc_macros",
"rustc_serialize",
"rustc_span",
-]
\ No newline at end of file
+]
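The new `rand`/`rand_xoshiro` dependencies come along because `-Z randomize-layout`'s field shuffling moves here with the univariant code (see the `Xoshiro128StarStar` use in the new file below). The shuffle stays deterministic per type; roughly:

    use rand::{seq::SliceRandom, SeedableRng};
    use rand_xoshiro::Xoshiro128StarStar;

    // field_shuffle_seed is derived from the item's path hash, so a given
    // type is shuffled the same way in every compilation.
    fn shuffle_field_order(order: &mut [u32], field_shuffle_seed: u64) {
        let mut rng = Xoshiro128StarStar::seed_from_u64(field_shuffle_seed);
        order.shuffle(&mut rng);
    }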
diff --git a/compiler/rustc_target/src/abi/layout.rs b/compiler/rustc_target/src/abi/layout.rs
new file mode 100644
index 0000000000000..cf4843e9d6cb5
--- /dev/null
+++ b/compiler/rustc_target/src/abi/layout.rs
@@ -0,0 +1,943 @@
+use super::*;
+use std::{
+ borrow::Borrow,
+ cmp,
+ fmt::Debug,
+ iter,
+ ops::{Bound, Deref},
+};
+
+use rand::{seq::SliceRandom, SeedableRng};
+use rand_xoshiro::Xoshiro128StarStar;
+
+use tracing::debug;
+
+// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
+// This is used to go between `memory_index` (source field order to memory order)
+// and `inverse_memory_index` (memory order to source field order).
+// See also `FieldsShape::Arbitrary::memory_index` for more details.
+// FIXME(eddyb) build a better abstraction for permutations, if possible.
+fn invert_mapping(map: &[u32]) -> Vec<u32> {
+ let mut inverse = vec![0; map.len()];
+ for i in 0..map.len() {
+ inverse[map[i] as usize] = i as u32;
+ }
+ inverse
+}
+
+pub trait LayoutCalculator {
+ type TargetDataLayoutRef: Borrow<TargetDataLayout>;
+
+ fn delay_bug(&self, txt: &str);
+ fn current_data_layout(&self) -> Self::TargetDataLayoutRef;
+
+ fn scalar_pair<V: Idx>(&self, a: Scalar, b: Scalar) -> LayoutS<V> {
+ let dl = self.current_data_layout();
+ let dl = dl.borrow();
+ let b_align = b.align(dl);
+ let align = a.align(dl).max(b_align).max(dl.aggregate_align);
+ let b_offset = a.size(dl).align_to(b_align.abi);
+ let size = (b_offset + b.size(dl)).align_to(align.abi);
+
+ // HACK(nox): We iter on `b` and then `a` because `max_by_key`
+ // returns the last maximum.
+ let largest_niche = Niche::from_scalar(dl, b_offset, b)
+ .into_iter()
+ .chain(Niche::from_scalar(dl, Size::ZERO, a))
+ .max_by_key(|niche| niche.available(dl));
+
+ LayoutS {
+ variants: Variants::Single { index: V::new(0) },
+ fields: FieldsShape::Arbitrary {
+ offsets: vec![Size::ZERO, b_offset],
+ memory_index: vec![0, 1],
+ },
+ abi: Abi::ScalarPair(a, b),
+ largest_niche,
+ align,
+ size,
+ }
+ }
+
+ fn univariant<'a, V: Idx, F: Deref<Target = &'a LayoutS<V>> + Debug>(
+ &self,
+ dl: &TargetDataLayout,
+ fields: &[F],
+ repr: &ReprOptions,
+ kind: StructKind,
+ ) -> Option<LayoutS<V>> {
+ let pack = repr.pack;
+ let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
+ let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
+ let optimize = !repr.inhibit_struct_field_reordering_opt();
+ if optimize {
+ let end =
+ if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
+ let optimizing = &mut inverse_memory_index[..end];
+ let effective_field_align = |f: &F| {
+ if let Some(pack) = pack {
+ // return the packed alignment in bytes
+ f.align.abi.min(pack).bytes()
+ } else {
+ // returns log2(effective-align).
+ // This is ok since `pack` applies to all fields equally.
+ // The calculation assumes that size is an integer multiple of align, except for ZSTs.
+ //
+ // group [u8; 4] with align-4 or [u8; 6] with align-2 fields
+ f.align.abi.bytes().max(f.size.bytes()).trailing_zeros() as u64
+ }
+ };
+
+ // If `-Z randomize-layout` was enabled for the type definition we can shuffle
+ // the field ordering to try and catch some code making assumptions about layouts
+ // we don't guarantee
+ if repr.can_randomize_type_layout() {
+ // `ReprOptions.layout_seed` is a deterministic seed that we can use to
+ // randomize field ordering with
+ let mut rng = Xoshiro128StarStar::seed_from_u64(repr.field_shuffle_seed);
+
+ // Shuffle the ordering of the fields
+ optimizing.shuffle(&mut rng);
+
+ // Otherwise we just leave things alone and actually optimize the type's fields
+ } else {
+ match kind {
+ StructKind::AlwaysSized | StructKind::MaybeUnsized => {
+ optimizing.sort_by_key(|&x| {
+ // Place ZSTs first to avoid "interesting offsets",
+ // especially with only one or two non-ZST fields.
+ // Then place largest alignments first, largest niches within an alignment group last
+ let f = &fields[x as usize];
+ let niche_size = f.largest_niche.map_or(0, |n| n.available(dl));
+ (!f.is_zst(), cmp::Reverse(effective_field_align(f)), niche_size)
+ });
+ }
+
+ StructKind::Prefixed(..) => {
+ // Sort in ascending alignment so that the layout stays optimal
+ // regardless of the prefix.
+ // And put the largest niche in an alignment group at the end
+ // so it can be used as discriminant in jagged enums
+ optimizing.sort_by_key(|&x| {
+ let f = &fields[x as usize];
+ let niche_size = f.largest_niche.map_or(0, |n| n.available(dl));
+ (effective_field_align(f), niche_size)
+ });
+ }
+ }
+
+ // FIXME(Kixiron): We can always shuffle fields within a given alignment class
+ // regardless of the status of `-Z randomize-layout`
+ }
+ }
+ // inverse_memory_index holds field indices by increasing memory offset.
+ // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
+ // We now write field offsets to the corresponding offset slot;
+ // field 5 with offset 0 puts 0 in offsets[5].
+ // At the bottom of this function, we invert `inverse_memory_index` to
+ // produce `memory_index` (see `invert_mapping`).
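+ // Worked example: if inverse_memory_index = [1, 2, 0] (memory order is
+ // field 1, then field 2, then field 0), invert_mapping yields
+ // memory_index = [2, 0, 1]: source field 0 sits third in memory,
+ // field 1 first, field 2 second.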
+ let mut sized = true;
+ let mut offsets = vec![Size::ZERO; fields.len()];
+ let mut offset = Size::ZERO;
+ let mut largest_niche = None;
+ let mut largest_niche_available = 0;
+ if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
+ let prefix_align =
+ if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
+ align = align.max(AbiAndPrefAlign::new(prefix_align));
+ offset = prefix_size.align_to(prefix_align);
+ }
+ for &i in &inverse_memory_index {
+ let field = &fields[i as usize];
+ if !sized {
+ self.delay_bug(&format!(
+ "univariant: field #{} comes after unsized field",
+ offsets.len(),
+ ));
+ }
+
+ if field.is_unsized() {
+ sized = false;
+ }
+
+ // Invariant: offset < dl.obj_size_bound() <= 1<<61
+ let field_align = if let Some(pack) = pack {
+ field.align.min(AbiAndPrefAlign::new(pack))
+ } else {
+ field.align
+ };
+ offset = offset.align_to(field_align.abi);
+ align = align.max(field_align);
+
+ debug!("univariant offset: {:?} field: {:#?}", offset, field);
+ offsets[i as usize] = offset;
+
+ if let Some(mut niche) = field.largest_niche {
+ let available = niche.available(dl);
+ if available > largest_niche_available {
+ largest_niche_available = available;
+ niche.offset += offset;
+ largest_niche = Some(niche);
+ }
+ }
+
+ offset = offset.checked_add(field.size, dl)?;
+ }
+ if let Some(repr_align) = repr.align {
+ align = align.max(AbiAndPrefAlign::new(repr_align));
+ }
+ debug!("univariant min_size: {:?}", offset);
+ let min_size = offset;
+ // As stated above, inverse_memory_index holds field indices by increasing offset.
+ // This makes it an already-sorted view of the offsets vec.
+ // To invert it, consider:
+ // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
+ // Field 5 would be the first element, so memory_index is i:
+ // Note: if we didn't optimize, it's already right.
+ let memory_index =
+ if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };
+ let size = min_size.align_to(align.abi);
+ let mut abi = Abi::Aggregate { sized };
+ // Unpack newtype ABIs and find scalar pairs.
+ if sized && size.bytes() > 0 {
+ // All other fields must be ZSTs.
+ let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
+
+ match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
+ // We have exactly one non-ZST field.
+ (Some((i, field)), None, None) => {
+ // Field fills the struct and it has a scalar or scalar pair ABI.
+ if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
+ {
+ match field.abi {
+ // For plain scalars, or vectors of them, we can't unpack
+ // newtypes for `#[repr(C)]`, as that affects C ABIs.
+ Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
+ abi = field.abi;
+ }
+ // But scalar pairs are Rust-specific and get
+ // treated as aggregates by C ABIs anyway.
+ Abi::ScalarPair(..) => {
+ abi = field.abi;
+ }
+ _ => {}
+ }
+ }
+ }
+
+ // Two non-ZST fields, and they're both scalars.
+ (Some((i, a)), Some((j, b)), None) => {
+ match (a.abi, b.abi) {
+ (Abi::Scalar(a), Abi::Scalar(b)) => {
+ // Order by the memory placement, not source order.
+ let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
+ ((i, a), (j, b))
+ } else {
+ ((j, b), (i, a))
+ };
+ let pair = self.scalar_pair::<V>(a, b);
+ let pair_offsets = match pair.fields {
+ FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
+ assert_eq!(memory_index, &[0, 1]);
+ offsets
+ }
+ _ => panic!(),
+ };
+ if offsets[i] == pair_offsets[0]
+ && offsets[j] == pair_offsets[1]
+ && align == pair.align
+ && size == pair.size
+ {
+ // We can use `ScalarPair` only when it matches our
+ // already computed layout (including `#[repr(C)]`).
+ abi = pair.abi;
+ }
+ }
+ _ => {}
+ }
+ }
+
+ _ => {}
+ }
+ }
+ if fields.iter().any(|f| f.abi.is_uninhabited()) {
+ abi = Abi::Uninhabited;
+ }
+ Some(LayoutS {
+ variants: Variants::Single { index: V::new(0) },
+ fields: FieldsShape::Arbitrary { offsets, memory_index },
+ abi,
+ largest_niche,
+ align,
+ size,
+ })
+ }
+
+ fn layout_of_never_type<V: Idx>(&self) -> LayoutS<V> {
+ let dl = self.current_data_layout();
+ let dl = dl.borrow();
+ LayoutS {
+ variants: Variants::Single { index: V::new(0) },
+ fields: FieldsShape::Primitive,
+ abi: Abi::Uninhabited,
+ largest_niche: None,
+ align: dl.i8_align,
+ size: Size::ZERO,
+ }
+ }
+
+ fn layout_of_struct_or_enum<'a, V: Idx, F: Deref<Target = &'a LayoutS<V>> + Debug>(
+ &self,
+ repr: &ReprOptions,
+ variants: &IndexVec<V, Vec<F>>,
+ is_enum: bool,
+ is_unsafe_cell: bool,
+ scalar_valid_range: (Bound<u128>, Bound<u128>),
+ discr_range_of_repr: impl Fn(i128, i128) -> (Integer, bool),
+ discriminants: impl Iterator<Item = (V, i128)>,
+ niche_optimize_enum: bool,
+ always_sized: bool,
+ ) -> Option<LayoutS<V>> {
+ let dl = self.current_data_layout();
+ let dl = dl.borrow();
+
+ let scalar_unit = |value: Primitive| {
+ let size = value.size(dl);
+ assert!(size.bits() <= 128);
+ Scalar::Initialized { value, valid_range: WrappingRange::full(size) }
+ };
+
+ // A variant is absent if it's uninhabited and only has ZST fields.
+ // Present uninhabited variants only require space for their fields,
+ // but *not* an encoding of the discriminant (e.g., a tag value).
+ // See issue #49298 for more details on the need to leave space
+ // for non-ZST uninhabited data (mostly partial initialization).
+ let absent = |fields: &[F]| {
+ let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
+ let is_zst = fields.iter().all(|f| f.is_zst());
+ uninhabited && is_zst
+ };
+ let (present_first, present_second) = {
+ let mut present_variants = variants
+ .iter_enumerated()
+ .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
+ (present_variants.next(), present_variants.next())
+ };
+ let present_first = match present_first {
+ Some(present_first) => present_first,
+ // Uninhabited because it has no variants, or only absent ones.
+ None if is_enum => {
+ return Some(self.layout_of_never_type());
+ }
+ // If it's a struct, still compute a layout so that we can still compute the
+ // field offsets.
+ None => V::new(0),
+ };
+
+ let is_struct = !is_enum ||
+ // Only one variant is present.
+ (present_second.is_none() &&
+ // Representation optimizations are allowed.
+ !repr.inhibit_enum_layout_opt());
+ if is_struct {
+ // Struct, or univariant enum equivalent to a struct.
+ // (Typechecking will reject discriminant-sizing attrs.)
+
+ let v = present_first;
+ let kind = if is_enum || variants[v].is_empty() {
+ StructKind::AlwaysSized
+ } else {
+ if !always_sized { StructKind::MaybeUnsized } else { StructKind::AlwaysSized }
+ };
+
+ let mut st = self.univariant(dl, &variants[v], &repr, kind)?;
+ st.variants = Variants::Single { index: v };
+
+ if is_unsafe_cell {
+ let hide_niches = |scalar: &mut _| match scalar {
+ Scalar::Initialized { value, valid_range } => {
+ *valid_range = WrappingRange::full(value.size(dl))
+ }
+ // Already doesn't have any niches
+ Scalar::Union { .. } => {}
+ };
+ match &mut st.abi {
+ Abi::Uninhabited => {}
+ Abi::Scalar(scalar) => hide_niches(scalar),
+ Abi::ScalarPair(a, b) => {
+ hide_niches(a);
+ hide_niches(b);
+ }
+ Abi::Vector { element, count: _ } => hide_niches(element),
+ Abi::Aggregate { sized: _ } => {}
+ }
+ st.largest_niche = None;
+ return Some(st);
+ }
+
+ let (start, end) = scalar_valid_range;
+ match st.abi {
+ Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
+ // the asserts ensure that we are not using the
+ // `#[rustc_layout_scalar_valid_range(n)]`
+ // attribute to widen the range of anything as that would probably
+ // result in UB somewhere
+ // FIXME(eddyb) the asserts are probably not needed,
+ // as larger validity ranges would result in missed
+ // optimizations, *not* wrongly assuming the inner
+ // value is valid. e.g. unions enlarge validity ranges,
+ // because the values may be uninitialized.
+ if let Bound::Included(start) = start {
+ // FIXME(eddyb) this might be incorrect - it doesn't
+ // account for wrap-around (end < start) ranges.
+ let valid_range = scalar.valid_range_mut();
+ assert!(valid_range.start <= start);
+ valid_range.start = start;
+ }
+ if let Bound::Included(end) = end {
+ // FIXME(eddyb) this might be incorrect - it doesn't
+ // account for wrap-around (end < start) ranges.
+ let valid_range = scalar.valid_range_mut();
+ assert!(valid_range.end >= end);
+ valid_range.end = end;
+ }
+
+ // Update `largest_niche` if we have introduced a larger niche.
+ let niche = Niche::from_scalar(dl, Size::ZERO, *scalar);
+ if let Some(niche) = niche {
+ match st.largest_niche {
+ Some(largest_niche) => {
+ // Replace the existing niche even if they're equal,
+ // because this one is at a lower offset.
+ if largest_niche.available(dl) <= niche.available(dl) {
+ st.largest_niche = Some(niche);
+ }
+ }
+ None => st.largest_niche = Some(niche),
+ }
+ }
+ }
+ _ => assert!(
+ start == Bound::Unbounded && end == Bound::Unbounded,
+ "nonscalar layout for layout_scalar_valid_range type: {:#?}",
+ st,
+ ),
+ }
+
+ return Some(st);
+ }
+
+ // At this point, we have handled all unions and
+ // structs. (We have also handled univariant enums
+ // that allow representation optimization.)
+ assert!(is_enum);
+
+ // Until we've decided whether to use the tagged or
+ // niche filling LayoutS, we don't want to intern the
+ // variant layouts, so we can't store them in the
+ // overall LayoutS. Store the overall LayoutS
+ // and the variant LayoutSs here until then.
+ struct TmpLayout<V: Idx> {
+ layout: LayoutS<V>,
+ variants: IndexVec<V, LayoutS<V>>,
+ }
+
+ let calculate_niche_filling_layout = || -> Option<TmpLayout<V>> {
+ if niche_optimize_enum {
+ return None;
+ }
+
+ if variants.len() < 2 {
+ return None;
+ }
+
+ let mut align = dl.aggregate_align;
+ let mut variant_layouts = variants
+ .iter_enumerated()
+ .map(|(j, v)| {
+ let mut st = self.univariant(dl, v, &repr, StructKind::AlwaysSized)?;
+ st.variants = Variants::Single { index: j };
+
+ align = align.max(st.align);
+
+ Some(st)
+ })
+ .collect::<Option<IndexVec<V, _>>>()?;