diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 4dcc9556965ba..4e59982909732 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -21,6 +21,8 @@ name: CI
- auto
- try
- try-perf
+ - automation/bors/try
+ - automation/bors/try-merge
- master
pull_request:
branches:
@@ -264,9 +266,6 @@ jobs:
- name: test-various
os: ubuntu-20.04-8core-32gb
env: {}
- - name: wasm32
- os: ubuntu-20.04-8core-32gb
- env: {}
- name: x86_64-gnu
os: ubuntu-20.04-4core-16gb
env: {}
@@ -542,7 +541,7 @@ jobs:
ARTIFACTS_AWS_ACCESS_KEY_ID: AKIA46X5W6CZN24CBO55
AWS_REGION: us-west-1
CACHE_DOMAIN: ci-caches.rust-lang.org
- if: "github.event_name == 'push' && (github.ref == 'refs/heads/try' || github.ref == 'refs/heads/try-perf') && github.repository == 'rust-lang-ci/rust'"
+ if: "github.event_name == 'push' && (((github.ref == 'refs/heads/try' || github.ref == 'refs/heads/try-perf') && github.repository == 'rust-lang-ci/rust') || ((github.ref == 'refs/heads/automation/bors/try') && github.repository == 'rust-lang/rust'))"
strategy:
matrix:
include:
diff --git a/.reuse/dep5 b/.reuse/dep5
index 5546a7cf39112..245ed2659f91f 100644
--- a/.reuse/dep5
+++ b/.reuse/dep5
@@ -25,6 +25,7 @@ Files: compiler/*
README.md
RELEASES.md
rustfmt.toml
+ rust-bors.toml
triagebot.toml
x
x.ps1
diff --git a/Cargo.lock b/Cargo.lock
index 7f91d12a419c1..60a8f77c07d7c 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2009,9 +2009,9 @@ checksum = "453ad9f582a441959e5f0d088b02ce04cfe8d51a8eaf077f12ac6d3e94164ca6"
[[package]]
name = "jemalloc-sys"
-version = "0.5.3+5.3.0-patched"
+version = "0.5.4+5.3.0-patched"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f9bd5d616ea7ed58b571b2e209a65759664d7fb021a0819d7a790afc67e47ca1"
+checksum = "ac6c1946e1cea1788cbfde01c993b52a10e2da07f4bac608228d1bed20bfebf2"
dependencies = [
"cc",
"libc",
@@ -4473,6 +4473,7 @@ dependencies = [
"rustc_data_structures",
"rustc_feature",
"rustc_fs_util",
+ "rustc_index",
"rustc_macros",
"rustc_serialize",
"rustc_span",
diff --git a/README.md b/README.md
index 8a6c559b0b312..f0c45f341d812 100644
--- a/README.md
+++ b/README.md
@@ -116,7 +116,7 @@ See [the rustc-dev-guide for more info][sysllvm].
#### Configure and Make
This project provides a configure script and makefile (the latter of which just
-invokes `x.py`). `./configure` is the recommended way to programatically
+invokes `x.py`). `./configure` is the recommended way to programmatically
generate a `config.toml`. `make` is not recommended (we suggest using `x.py`
directly), but it is supported and we try not to break it unnecessarily.
diff --git a/RELEASES.md b/RELEASES.md
index d390f2b7f5ea8..e261294a032fd 100644
--- a/RELEASES.md
+++ b/RELEASES.md
@@ -1,3 +1,120 @@
+Version 1.73.0 (2023-10-05)
+==========================
+
+
+
+Language
+--------
+
+- [Uplift `clippy::fn_null_check` lint as `useless_ptr_null_checks`.](https://github.com/rust-lang/rust/pull/111717/)
+- [Make `noop_method_call` warn by default.](https://github.com/rust-lang/rust/pull/111916/)
+- [Support interpolated block for `try` and `async` in macros.](https://github.com/rust-lang/rust/pull/112953/)
+- [Make `unconditional_recursion` lint detect recursive drops.](https://github.com/rust-lang/rust/pull/113902/)
+- [Future compatibility warning for some impls being incorrectly considered not overlapping.](https://github.com/rust-lang/rust/pull/114023/)
+- [The `invalid_reference_casting` lint is now **deny-by-default** (instead of allow-by-default)](https://github.com/rust-lang/rust/pull/112431)
+
+
+
+Compiler
+--------
+
+- [Write version information in a `.comment` section like GCC/Clang.](https://github.com/rust-lang/rust/pull/97550/)
+- [Add documentation on v0 symbol mangling.](https://github.com/rust-lang/rust/pull/97571/)
+- [Stabilize `extern "thiscall"` and `"thiscall-unwind"` ABIs.](https://github.com/rust-lang/rust/pull/114562/)
+- [Only check outlives goals on impl compared to trait.](https://github.com/rust-lang/rust/pull/109356/)
+- [Infer type in irrefutable slice patterns with fixed length as array.](https://github.com/rust-lang/rust/pull/113199/)
+- [Discard default auto trait impls if explicit ones exist.](https://github.com/rust-lang/rust/pull/113312/)
+- Add several new tier 3 targets:
+ - [`aarch64-unknown-teeos`](https://github.com/rust-lang/rust/pull/113480/)
+ - [`csky-unknown-linux-gnuabiv2`](https://github.com/rust-lang/rust/pull/113658/)
+ - [`riscv64-linux-android`](https://github.com/rust-lang/rust/pull/112858/)
+ - [`riscv64gc-unknown-hermit`](https://github.com/rust-lang/rust/pull/114004/)
+ - [`x86_64-unikraft-linux-musl`](https://github.com/rust-lang/rust/pull/113411/)
+ - [`x86_64-unknown-linux-ohos`](https://github.com/rust-lang/rust/pull/113061/)
+- [Add `wasm32-wasi-preview1-threads` as a tier 2 target.](https://github.com/rust-lang/rust/pull/112922/)
+
+Refer to Rust's [platform support page][platform-support-doc]
+for more information on Rust's tiered platform support.
+
+
+
+Libraries
+---------
+
+- [Add `Read`, `Write` and `Seek` impls for `Arc`.](https://github.com/rust-lang/rust/pull/94748/)
+- [Merge functionality of `io::Sink` into `io::Empty`.](https://github.com/rust-lang/rust/pull/98154/)
+- [Implement `RefUnwindSafe` for `Backtrace`](https://github.com/rust-lang/rust/pull/100455/)
+- [Make `ExitStatus` implement `Default`](https://github.com/rust-lang/rust/pull/106425/)
+- [`impl SliceIndex<str> for (Bound<usize>, Bound<usize>)`](https://github.com/rust-lang/rust/pull/111081/)
+- [Change default panic handler message format.](https://github.com/rust-lang/rust/pull/112849/)
+- [Cleaner `assert_eq!` & `assert_ne!` panic messages.](https://github.com/rust-lang/rust/pull/111071/)
+- [Correct the (deprecated) Android `stat` struct definitions.](https://github.com/rust-lang/rust/pull/113130/)
+
+
+
+Stabilized APIs
+---------------
+
+- [Unsigned `{integer}::div_ceil`](https://doc.rust-lang.org/stable/std/primitive.u32.html#method.div_ceil)
+- [Unsigned `{integer}::next_multiple_of`](https://doc.rust-lang.org/stable/std/primitive.u32.html#method.next_multiple_of)
+- [Unsigned `{integer}::checked_next_multiple_of`](https://doc.rust-lang.org/stable/std/primitive.u32.html#method.checked_next_multiple_of)
+- [`std::ffi::FromBytesUntilNulError`](https://doc.rust-lang.org/stable/std/ffi/struct.FromBytesUntilNulError.html)
+- [`std::os::unix::fs::chown`](https://doc.rust-lang.org/stable/std/os/unix/fs/fn.chown.html)
+- [`std::os::unix::fs::fchown`](https://doc.rust-lang.org/stable/std/os/unix/fs/fn.fchown.html)
+- [`std::os::unix::fs::lchown`](https://doc.rust-lang.org/stable/std/os/unix/fs/fn.lchown.html)
+- [`LocalKey::<Cell<T>>::get`](https://doc.rust-lang.org/stable/std/thread/struct.LocalKey.html#method.get)
+- [`LocalKey::<Cell<T>>::set`](https://doc.rust-lang.org/stable/std/thread/struct.LocalKey.html#method.set)
+- [`LocalKey::<Cell<T>>::take`](https://doc.rust-lang.org/stable/std/thread/struct.LocalKey.html#method.take)
+- [`LocalKey::<Cell<T>>::replace`](https://doc.rust-lang.org/stable/std/thread/struct.LocalKey.html#method.replace)
+- [`LocalKey::<RefCell<T>>::with_borrow`](https://doc.rust-lang.org/stable/std/thread/struct.LocalKey.html#method.with_borrow)
+- [`LocalKey::<RefCell<T>>::with_borrow_mut`](https://doc.rust-lang.org/stable/std/thread/struct.LocalKey.html#method.with_borrow_mut)
+- [`LocalKey::<RefCell<T>>::set`](https://doc.rust-lang.org/stable/std/thread/struct.LocalKey.html#method.set-1)
+- [`LocalKey::<RefCell<T>>::take`](https://doc.rust-lang.org/stable/std/thread/struct.LocalKey.html#method.take-1)
+- [`LocalKey::<RefCell<T>>::replace`](https://doc.rust-lang.org/stable/std/thread/struct.LocalKey.html#method.replace-1)
+
+These APIs are now stable in const contexts:
+
+- [`rc::Weak::new`](https://doc.rust-lang.org/stable/alloc/rc/struct.Weak.html#method.new)
+- [`sync::Weak::new`](https://doc.rust-lang.org/stable/alloc/sync/struct.Weak.html#method.new)
+- [`NonNull::as_ref`](https://doc.rust-lang.org/stable/core/ptr/struct.NonNull.html#method.as_ref)
+
+
+
+Cargo
+-----
+
+- [Encode URL params correctly for `SourceId` in `Cargo.lock`.](https://github.com/rust-lang/cargo/pull/12280/)
+- [Bail out an error when using `cargo::` in custom build script.](https://github.com/rust-lang/cargo/pull/12332/)
+
+
+
+Misc
+----
+
+
+
+Compatibility Notes
+-------------------
+
+- [Update the minimum external LLVM to 15.](https://github.com/rust-lang/rust/pull/114148/)
+- [Check for non-defining uses of return position `impl Trait`.](https://github.com/rust-lang/rust/pull/112842/)
+
+
+
+Internal Changes
+----------------
+
+These changes do not affect any public interfaces of Rust, but they represent
+significant improvements to the performance or internals of rustc and related
+tools.
+
+- [Remove LLVM pointee types, supporting only opaque pointers.](https://github.com/rust-lang/rust/pull/105545/)
+- [Port PGO/LTO/BOLT optimized build pipeline to Rust.](https://github.com/rust-lang/rust/pull/112235/)
+- [Replace in-tree `rustc_apfloat` with the new version of the crate.](https://github.com/rust-lang/rust/pull/113843/)
+- [Update to LLVM 17.](https://github.com/rust-lang/rust/pull/114048/)
+- [Add `internal_features` lint for internal unstable features.](https://github.com/rust-lang/rust/pull/108955/)
+- [Mention style for new syntax in tracking issue template.](https://github.com/rust-lang/rust/pull/113586/)
+
Version 1.72.1 (2023-09-19)
===========================
diff --git a/compiler/rustc_abi/Cargo.toml b/compiler/rustc_abi/Cargo.toml
index 48b199cb8eed9..c43fd745e8f5b 100644
--- a/compiler/rustc_abi/Cargo.toml
+++ b/compiler/rustc_abi/Cargo.toml
@@ -15,7 +15,9 @@ rustc_serialize = { path = "../rustc_serialize", optional = true }
[features]
default = ["nightly", "randomize"]
-randomize = ["rand", "rand_xoshiro"]
+randomize = ["rand", "rand_xoshiro", "nightly"]
+# rust-analyzer depends on this crate and we therefore require it to be built on a stable toolchain
+# without depending on rustc_data_structures, rustc_macros and rustc_serialize
nightly = [
"rustc_data_structures",
"rustc_index/nightly",
diff --git a/compiler/rustc_abi/src/layout.rs b/compiler/rustc_abi/src/layout.rs
index 0706dc18f0ec6..00d862ca27b7b 100644
--- a/compiler/rustc_abi/src/layout.rs
+++ b/compiler/rustc_abi/src/layout.rs
@@ -1,21 +1,27 @@
-use super::*;
-use std::fmt::Write;
+use std::fmt::{self, Write};
+use std::ops::Deref;
use std::{borrow::Borrow, cmp, iter, ops::Bound};
-#[cfg(feature = "randomize")]
-use rand::{seq::SliceRandom, SeedableRng};
-#[cfg(feature = "randomize")]
-use rand_xoshiro::Xoshiro128StarStar;
-
+use rustc_index::Idx;
use tracing::debug;
+use crate::{
+ Abi, AbiAndPrefAlign, Align, FieldsShape, IndexSlice, IndexVec, Integer, LayoutS, Niche,
+ NonZeroUsize, Primitive, ReprOptions, Scalar, Size, StructKind, TagEncoding, TargetDataLayout,
+ Variants, WrappingRange,
+};
+
pub trait LayoutCalculator {
type TargetDataLayoutRef: Borrow;
fn delay_bug(&self, txt: String);
fn current_data_layout(&self) -> Self::TargetDataLayoutRef;
- fn scalar_pair(&self, a: Scalar, b: Scalar) -> LayoutS {
+ fn scalar_pair(
+ &self,
+ a: Scalar,
+ b: Scalar,
+ ) -> LayoutS {
let dl = self.current_data_layout();
let dl = dl.borrow();
let b_align = b.align(dl);
@@ -31,7 +37,7 @@ pub trait LayoutCalculator {
.max_by_key(|niche| niche.available(dl));
LayoutS {
- variants: Variants::Single { index: FIRST_VARIANT },
+ variants: Variants::Single { index: VariantIdx::new(0) },
fields: FieldsShape::Arbitrary {
offsets: [Size::ZERO, b_offset].into(),
memory_index: [0, 1].into(),
@@ -45,40 +51,45 @@ pub trait LayoutCalculator {
}
}
- fn univariant(
+ fn univariant<
+ 'a,
+ FieldIdx: Idx,
+ VariantIdx: Idx,
+ F: Deref> + fmt::Debug,
+ >(
&self,
dl: &TargetDataLayout,
- fields: &IndexSlice>,
+ fields: &IndexSlice,
repr: &ReprOptions,
kind: StructKind,
- ) -> Option {
+ ) -> Option> {
let layout = univariant(self, dl, fields, repr, kind, NicheBias::Start);
- // Enums prefer niches close to the beginning or the end of the variants so that other (smaller)
- // data-carrying variants can be packed into the space after/before the niche.
+ // Enums prefer niches close to the beginning or the end of the variants so that other
+ // (smaller) data-carrying variants can be packed into the space after/before the niche.
// If the default field ordering does not give us a niche at the front then we do a second
- // run and bias niches to the right and then check which one is closer to one of the struct's
- // edges.
+ // run and bias niches to the right and then check which one is closer to one of the
+ // struct's edges.
if let Some(layout) = &layout {
// Don't try to calculate an end-biased layout for unsizable structs,
// otherwise we could end up with different layouts for
- // Foo and Foo which would break unsizing
+ // Foo and Foo which would break unsizing.
if !matches!(kind, StructKind::MaybeUnsized) {
if let Some(niche) = layout.largest_niche {
let head_space = niche.offset.bytes();
- let niche_length = niche.value.size(dl).bytes();
- let tail_space = layout.size.bytes() - head_space - niche_length;
+ let niche_len = niche.value.size(dl).bytes();
+ let tail_space = layout.size.bytes() - head_space - niche_len;
- // This may end up doing redundant work if the niche is already in the last field
- // (e.g. a trailing bool) and there is tail padding. But it's non-trivial to get
- // the unpadded size so we try anyway.
+ // This may end up doing redundant work if the niche is already in the last
+ // field (e.g. a trailing bool) and there is tail padding. But it's non-trivial
+ // to get the unpadded size so we try anyway.
if fields.len() > 1 && head_space != 0 && tail_space > 0 {
let alt_layout = univariant(self, dl, fields, repr, kind, NicheBias::End)
.expect("alt layout should always work");
- let niche = alt_layout
+ let alt_niche = alt_layout
.largest_niche
.expect("alt layout should have a niche like the regular one");
- let alt_head_space = niche.offset.bytes();
- let alt_niche_len = niche.value.size(dl).bytes();
+ let alt_head_space = alt_niche.offset.bytes();
+ let alt_niche_len = alt_niche.value.size(dl).bytes();
let alt_tail_space =
alt_layout.size.bytes() - alt_head_space - alt_niche_len;
@@ -93,7 +104,7 @@ pub trait LayoutCalculator {
alt_layout: {}\n",
layout.size.bytes(),
head_space,
- niche_length,
+ niche_len,
tail_space,
alt_head_space,
alt_niche_len,
@@ -114,11 +125,13 @@ pub trait LayoutCalculator {
layout
}
- fn layout_of_never_type(&self) -> LayoutS {
+ fn layout_of_never_type(
+ &self,
+ ) -> LayoutS {
let dl = self.current_data_layout();
let dl = dl.borrow();
LayoutS {
- variants: Variants::Single { index: FIRST_VARIANT },
+ variants: Variants::Single { index: VariantIdx::new(0) },
fields: FieldsShape::Primitive,
abi: Abi::Uninhabited,
largest_niche: None,
@@ -129,10 +142,15 @@ pub trait LayoutCalculator {
}
}
- fn layout_of_struct_or_enum(
+ fn layout_of_struct_or_enum<
+ 'a,
+ FieldIdx: Idx,
+ VariantIdx: Idx,
+ F: Deref> + fmt::Debug,
+ >(
&self,
repr: &ReprOptions,
- variants: &IndexSlice>>,
+ variants: &IndexSlice>,
is_enum: bool,
is_unsafe_cell: bool,
scalar_valid_range: (Bound, Bound),
@@ -140,7 +158,7 @@ pub trait LayoutCalculator {
discriminants: impl Iterator- ,
dont_niche_optimize_enum: bool,
always_sized: bool,
- ) -> Option {
+ ) -> Option> {
let dl = self.current_data_layout();
let dl = dl.borrow();
@@ -155,11 +173,11 @@ pub trait LayoutCalculator {
// but *not* an encoding of the discriminant (e.g., a tag value).
// See issue #49298 for more details on the need to leave space
// for non-ZST uninhabited data (mostly partial initialization).
- let absent = |fields: &IndexSlice>| {
- let uninhabited = fields.iter().any(|f| f.abi().is_uninhabited());
+ let absent = |fields: &IndexSlice| {
+ let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
// We cannot ignore alignment; that might lead us to entirely discard a variant and
// produce an enum that is less aligned than it should be!
- let is_1zst = fields.iter().all(|f| f.0.is_1zst());
+ let is_1zst = fields.iter().all(|f| f.is_1zst());
uninhabited && is_1zst
};
let (present_first, present_second) = {
@@ -176,7 +194,7 @@ pub trait LayoutCalculator {
}
// If it's a struct, still compute a layout so that we can still compute the
// field offsets.
- None => FIRST_VARIANT,
+ None => VariantIdx::new(0),
};
let is_struct = !is_enum ||
@@ -279,12 +297,12 @@ pub trait LayoutCalculator {
// variant layouts, so we can't store them in the
// overall LayoutS. Store the overall LayoutS
// and the variant LayoutSs here until then.
- struct TmpLayout {
- layout: LayoutS,
- variants: IndexVec,
+ struct TmpLayout {
+ layout: LayoutS,
+ variants: IndexVec>,
}
- let calculate_niche_filling_layout = || -> Option {
+ let calculate_niche_filling_layout = || -> Option> {
if dont_niche_optimize_enum {
return None;
}
@@ -322,13 +340,14 @@ pub trait LayoutCalculator {
let niche_variants = all_indices.clone().find(|v| needs_disc(*v)).unwrap()
..=all_indices.rev().find(|v| needs_disc(*v)).unwrap();
- let count = niche_variants.size_hint().1.unwrap() as u128;
+ let count =
+ (niche_variants.end().index() as u128 - niche_variants.start().index() as u128) + 1;
// Find the field with the largest niche
let (field_index, niche, (niche_start, niche_scalar)) = variants[largest_variant_index]
.iter()
.enumerate()
- .filter_map(|(j, field)| Some((j, field.largest_niche()?)))
+ .filter_map(|(j, field)| Some((j, field.largest_niche?)))
.max_by_key(|(_, niche)| niche.available(dl))
.and_then(|(j, niche)| Some((j, niche, niche.reserve(dl, count)?)))?;
let niche_offset =
@@ -443,7 +462,7 @@ pub trait LayoutCalculator {
let discr_type = repr.discr_type();
let bits = Integer::from_attr(dl, discr_type).size().bits();
for (i, mut val) in discriminants {
- if variants[i].iter().any(|f| f.abi().is_uninhabited()) {
+ if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
continue;
}
if discr_type.is_signed() {
@@ -484,7 +503,7 @@ pub trait LayoutCalculator {
if repr.c() {
for fields in variants {
for field in fields {
- prefix_align = prefix_align.max(field.align().abi);
+ prefix_align = prefix_align.max(field.align.abi);
}
}
}
@@ -503,9 +522,9 @@ pub trait LayoutCalculator {
// Find the first field we can't move later
// to make room for a larger discriminant.
for field_idx in st.fields.index_by_increasing_offset() {
- let field = &field_layouts[FieldIdx::from_usize(field_idx)];
- if !field.0.is_1zst() {
- start_align = start_align.min(field.align().abi);
+ let field = &field_layouts[FieldIdx::new(field_idx)];
+ if !field.is_1zst() {
+ start_align = start_align.min(field.align.abi);
break;
}
}
@@ -587,7 +606,7 @@ pub trait LayoutCalculator {
let tag_mask = ity.size().unsigned_int_max();
let tag = Scalar::Initialized {
- value: Int(ity, signed),
+ value: Primitive::Int(ity, signed),
valid_range: WrappingRange {
start: (min as u128 & tag_mask),
end: (max as u128 & tag_mask),
@@ -612,7 +631,7 @@ pub trait LayoutCalculator {
};
// We skip *all* ZST here and later check if we are good in terms of alignment.
// This lets us handle some cases involving aligned ZST.
- let mut fields = iter::zip(field_layouts, offsets).filter(|p| !p.0.0.is_zst());
+ let mut fields = iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst());
let (field, offset) = match (fields.next(), fields.next()) {
(None, None) => {
common_prim_initialized_in_all_variants = false;
@@ -624,7 +643,7 @@ pub trait LayoutCalculator {
break;
}
};
- let prim = match field.abi() {
+ let prim = match field.abi {
Abi::Scalar(scalar) => {
common_prim_initialized_in_all_variants &=
matches!(scalar, Scalar::Initialized { .. });
@@ -655,7 +674,7 @@ pub trait LayoutCalculator {
// Common prim might be uninit.
Scalar::Union { value: prim }
};
- let pair = self.scalar_pair(tag, prim_scalar);
+ let pair = self.scalar_pair::(tag, prim_scalar);
let pair_offsets = match pair.fields {
FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
assert_eq!(memory_index.raw, [0, 1]);
@@ -663,8 +682,8 @@ pub trait LayoutCalculator {
}
_ => panic!(),
};
- if pair_offsets[FieldIdx::from_u32(0)] == Size::ZERO
- && pair_offsets[FieldIdx::from_u32(1)] == *offset
+ if pair_offsets[FieldIdx::new(0)] == Size::ZERO
+ && pair_offsets[FieldIdx::new(1)] == *offset
&& align == pair.align
&& size == pair.size
{
@@ -684,7 +703,8 @@ pub trait LayoutCalculator {
// Also do not overwrite any already existing "clever" ABIs.
if variant.fields.count() > 0 && matches!(variant.abi, Abi::Aggregate { .. }) {
variant.abi = abi;
- // Also need to bump up the size and alignment, so that the entire value fits in here.
+ // Also need to bump up the size and alignment, so that the entire value fits
+ // in here.
variant.size = cmp::max(variant.size, size);
variant.align.abi = cmp::max(variant.align.abi, align.abi);
}
@@ -720,8 +740,9 @@ pub trait LayoutCalculator {
// pick the layout with the larger niche; otherwise,
// pick tagged as it has simpler codegen.
use cmp::Ordering::*;
- let niche_size =
- |tmp_l: &TmpLayout| tmp_l.layout.largest_niche.map_or(0, |n| n.available(dl));
+ let niche_size = |tmp_l: &TmpLayout| {
+ tmp_l.layout.largest_niche.map_or(0, |n| n.available(dl))
+ };
match (tl.layout.size.cmp(&nl.layout.size), niche_size(&tl).cmp(&niche_size(&nl))) {
(Greater, _) => nl,
(Equal, Less) => nl,
@@ -741,11 +762,16 @@ pub trait LayoutCalculator {
Some(best_layout.layout)
}
- fn layout_of_union(
+ fn layout_of_union<
+ 'a,
+ FieldIdx: Idx,
+ VariantIdx: Idx,
+ F: Deref> + fmt::Debug,
+ >(
&self,
repr: &ReprOptions,
- variants: &IndexSlice>>,
- ) -> Option {
+ variants: &IndexSlice>,
+ ) -> Option> {
let dl = self.current_data_layout();
let dl = dl.borrow();
let mut align = if repr.pack.is_some() { dl.i8_align } else { dl.aggregate_align };
@@ -762,24 +788,24 @@ pub trait LayoutCalculator {
};
let mut size = Size::ZERO;
- let only_variant = &variants[FIRST_VARIANT];
+ let only_variant = &variants[VariantIdx::new(0)];
for field in only_variant {
- if field.0.is_unsized() {
+ if field.is_unsized() {
self.delay_bug("unsized field in union".to_string());
}
- align = align.max(field.align());
- max_repr_align = max_repr_align.max(field.max_repr_align());
- size = cmp::max(size, field.size());
+ align = align.max(field.align);
+ max_repr_align = max_repr_align.max(field.max_repr_align);
+ size = cmp::max(size, field.size);
- if field.0.is_zst() {
+ if field.is_zst() {
// Nothing more to do for ZST fields
continue;
}
if let Ok(common) = common_non_zst_abi_and_align {
// Discard valid range information and allow undef
- let field_abi = field.abi().to_union();
+ let field_abi = field.abi.to_union();
if let Some((common_abi, common_align)) = common {
if common_abi != field_abi {
@@ -790,15 +816,14 @@ pub trait LayoutCalculator {
// have the same alignment
if !matches!(common_abi, Abi::Aggregate { .. }) {
assert_eq!(
- common_align,
- field.align().abi,
+ common_align, field.align.abi,
"non-Aggregate field with matching ABI but differing alignment"
);
}
}
} else {
// First non-ZST field: record its ABI and alignment
- common_non_zst_abi_and_align = Ok(Some((field_abi, field.align().abi)));
+ common_non_zst_abi_and_align = Ok(Some((field_abi, field.align.abi)));
}
}
}
@@ -830,7 +855,7 @@ pub trait LayoutCalculator {
};
Some(LayoutS {
- variants: Variants::Single { index: FIRST_VARIANT },
+ variants: Variants::Single { index: VariantIdx::new(0) },
fields: FieldsShape::Union(NonZeroUsize::new(only_variant.len())?),
abi,
largest_niche: None,
@@ -848,14 +873,19 @@ enum NicheBias {
End,
}
-fn univariant(
+fn univariant<
+ 'a,
+ FieldIdx: Idx,
+ VariantIdx: Idx,
+ F: Deref> + fmt::Debug,
+>(
this: &(impl LayoutCalculator + ?Sized),
dl: &TargetDataLayout,
- fields: &IndexSlice>,
+ fields: &IndexSlice,
repr: &ReprOptions,
kind: StructKind,
niche_bias: NicheBias,
-) -> Option {
+) -> Option> {
let pack = repr.pack;
let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
let mut max_repr_align = repr.align;
@@ -868,15 +898,18 @@ fn univariant(
// If `-Z randomize-layout` was enabled for the type definition we can shuffle
// the field ordering to try and catch some code making assumptions about layouts
- // we don't guarantee
+ // we don't guarantee.
if repr.can_randomize_type_layout() && cfg!(feature = "randomize") {
#[cfg(feature = "randomize")]
{
- // `ReprOptions.layout_seed` is a deterministic seed that we can use to
- // randomize field ordering with
- let mut rng = Xoshiro128StarStar::seed_from_u64(repr.field_shuffle_seed.as_u64());
-
- // Shuffle the ordering of the fields
+ use rand::{seq::SliceRandom, SeedableRng};
+ // `ReprOptions.layout_seed` is a deterministic seed we can use to randomize field
+ // ordering.
+ let mut rng = rand_xoshiro::Xoshiro128StarStar::seed_from_u64(
+ repr.field_shuffle_seed.as_u64(),
+ );
+
+ // Shuffle the ordering of the fields.
optimizing.shuffle(&mut rng);
}
// Otherwise we just leave things alone and actually optimize the type's fields
@@ -884,35 +917,34 @@ fn univariant(
// To allow unsizing `&Foo` -> `&Foo`, the layout of the struct must
// not depend on the layout of the tail.
let max_field_align =
- fields_excluding_tail.iter().map(|f| f.align().abi.bytes()).max().unwrap_or(1);
+ fields_excluding_tail.iter().map(|f| f.align.abi.bytes()).max().unwrap_or(1);
let largest_niche_size = fields_excluding_tail
.iter()
- .filter_map(|f| f.largest_niche())
+ .filter_map(|f| f.largest_niche)
.map(|n| n.available(dl))
.max()
.unwrap_or(0);
- // Calculates a sort key to group fields by their alignment or possibly some size-derived
- // pseudo-alignment.
- let alignment_group_key = |layout: Layout<'_>| {
+ // Calculates a sort key to group fields by their alignment or possibly some
+ // size-derived pseudo-alignment.
+ let alignment_group_key = |layout: &F| {
if let Some(pack) = pack {
- // return the packed alignment in bytes
- layout.align().abi.min(pack).bytes()
+ // Return the packed alignment in bytes.
+ layout.align.abi.min(pack).bytes()
} else {
- // returns log2(effective-align).
- // This is ok since `pack` applies to all fields equally.
- // The calculation assumes that size is an integer multiple of align, except for ZSTs.
- //
- let align = layout.align().abi.bytes();
- let size = layout.size().bytes();
- let niche_size = layout.largest_niche().map(|n| n.available(dl)).unwrap_or(0);
- // group [u8; 4] with align-4 or [u8; 6] with align-2 fields
+ // Returns `log2(effective-align)`. This is ok since `pack` applies to all
+ // fields equally. The calculation assumes that size is an integer multiple of
+ // align, except for ZSTs.
+ let align = layout.align.abi.bytes();
+ let size = layout.size.bytes();
+ let niche_size = layout.largest_niche.map(|n| n.available(dl)).unwrap_or(0);
+ // Group [u8; 4] with align-4 or [u8; 6] with align-2 fields.
let size_as_align = align.max(size).trailing_zeros();
let size_as_align = if largest_niche_size > 0 {
match niche_bias {
- // Given `A(u8, [u8; 16])` and `B(bool, [u8; 16])` we want to bump the array
- // to the front in the first case (for aligned loads) but keep the bool in front
- // in the second case for its niches.
+ // Given `A(u8, [u8; 16])` and `B(bool, [u8; 16])` we want to bump the
+ // array to the front in the first case (for aligned loads) but keep
+ // the bool in front in the second case for its niches.
NicheBias::Start => max_field_align.trailing_zeros().min(size_as_align),
// When moving niches towards the end of the struct then for
// A((u8, u8, u8, bool), (u8, bool, u8)) we want to keep the first tuple
@@ -931,18 +963,18 @@ fn univariant(
match kind {
StructKind::AlwaysSized | StructKind::MaybeUnsized => {
- // Currently `LayoutS` only exposes a single niche so sorting is usually sufficient
- // to get one niche into the preferred position. If it ever supported multiple niches
- // then a more advanced pick-and-pack approach could provide better results.
- // But even for the single-niche cache it's not optimal. E.g. for
- // A(u32, (bool, u8), u16) it would be possible to move the bool to the front
- // but it would require packing the tuple together with the u16 to build a 4-byte
- // group so that the u32 can be placed after it without padding. This kind
- // of packing can't be achieved by sorting.
+ // Currently `LayoutS` only exposes a single niche so sorting is usually
+ // sufficient to get one niche into the preferred position. If it ever
+ // supported multiple niches then a more advanced pick-and-pack approach could
+ // provide better results. But even for the single-niche cache it's not
+ // optimal. E.g. for A(u32, (bool, u8), u16) it would be possible to move the
+ // bool to the front but it would require packing the tuple together with the
+ // u16 to build a 4-byte group so that the u32 can be placed after it without
+ // padding. This kind of packing can't be achieved by sorting.
optimizing.sort_by_key(|&x| {
- let f = fields[x];
- let field_size = f.size().bytes();
- let niche_size = f.largest_niche().map_or(0, |n| n.available(dl));
+ let f = &fields[x];
+ let field_size = f.size.bytes();
+ let niche_size = f.largest_niche.map_or(0, |n| n.available(dl));
let niche_size_key = match niche_bias {
// large niche first
NicheBias::Start => !niche_size,
@@ -950,8 +982,8 @@ fn univariant(
NicheBias::End => niche_size,
};
let inner_niche_offset_key = match niche_bias {
- NicheBias::Start => f.largest_niche().map_or(0, |n| n.offset.bytes()),
- NicheBias::End => f.largest_niche().map_or(0, |n| {
+ NicheBias::Start => f.largest_niche.map_or(0, |n| n.offset.bytes()),
+ NicheBias::End => f.largest_niche.map_or(0, |n| {
!(field_size - n.value.size(dl).bytes() - n.offset.bytes())
}),
};
@@ -975,8 +1007,8 @@ fn univariant(
// And put the largest niche in an alignment group at the end
// so it can be used as discriminant in jagged enums
optimizing.sort_by_key(|&x| {
- let f = fields[x];
- let niche_size = f.largest_niche().map_or(0, |n| n.available(dl));
+ let f = &fields[x];
+ let niche_size = f.largest_niche.map_or(0, |n| n.available(dl));
(alignment_group_key(f), niche_size)
});
}
@@ -1012,24 +1044,24 @@ fn univariant(
));
}
- if field.0.is_unsized() {
+ if field.is_unsized() {
sized = false;
}
// Invariant: offset < dl.obj_size_bound() <= 1<<61
let field_align = if let Some(pack) = pack {
- field.align().min(AbiAndPrefAlign::new(pack))
+ field.align.min(AbiAndPrefAlign::new(pack))
} else {
- field.align()
+ field.align
};
offset = offset.align_to(field_align.abi);
align = align.max(field_align);
- max_repr_align = max_repr_align.max(field.max_repr_align());
+ max_repr_align = max_repr_align.max(field.max_repr_align);
debug!("univariant offset: {:?} field: {:#?}", offset, field);
offsets[i] = offset;
- if let Some(mut niche) = field.largest_niche() {
+ if let Some(mut niche) = field.largest_niche {
let available = niche.available(dl);
// Pick up larger niches.
let prefer_new_niche = match niche_bias {
@@ -1044,7 +1076,7 @@ fn univariant(
}
}
- offset = offset.checked_add(field.size(), dl)?;
+ offset = offset.checked_add(field.size, dl)?;
}
// The unadjusted ABI alignment does not include repr(align), but does include repr(pack).
@@ -1068,7 +1100,7 @@ fn univariant(
inverse_memory_index.invert_bijective_mapping()
} else {
debug_assert!(inverse_memory_index.iter().copied().eq(fields.indices()));
- inverse_memory_index.into_iter().map(FieldIdx::as_u32).collect()
+ inverse_memory_index.into_iter().map(|it| it.index() as u32).collect()
};
let size = min_size.align_to(align.abi);
let mut layout_of_single_non_zst_field = None;
@@ -1077,7 +1109,7 @@ fn univariant(
if sized && size.bytes() > 0 {
// We skip *all* ZST here and later check if we are good in terms of alignment.
// This lets us handle some cases involving aligned ZST.
- let mut non_zst_fields = fields.iter_enumerated().filter(|&(_, f)| !f.0.is_zst());
+ let mut non_zst_fields = fields.iter_enumerated().filter(|&(_, f)| !f.is_zst());
match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
// We have exactly one non-ZST field.
@@ -1085,18 +1117,17 @@ fn univariant(
layout_of_single_non_zst_field = Some(field);
// Field fills the struct and it has a scalar or scalar pair ABI.
- if offsets[i].bytes() == 0 && align.abi == field.align().abi && size == field.size()
- {
- match field.abi() {
+ if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size {
+ match field.abi {
// For plain scalars, or vectors of them, we can't unpack
// newtypes for `#[repr(C)]`, as that affects C ABIs.
Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
- abi = field.abi();
+ abi = field.abi;
}
// But scalar pairs are Rust-specific and get
// treated as aggregates by C ABIs anyway.
Abi::ScalarPair(..) => {
- abi = field.abi();
+ abi = field.abi;
}
_ => {}
}
@@ -1105,7 +1136,7 @@ fn univariant(
// Two non-ZST fields, and they're both scalars.
(Some((i, a)), Some((j, b)), None) => {
- match (a.abi(), b.abi()) {
+ match (a.abi, b.abi) {
(Abi::Scalar(a), Abi::Scalar(b)) => {
// Order by the memory placement, not source order.
let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
@@ -1113,7 +1144,7 @@ fn univariant(
} else {
((j, b), (i, a))
};
- let pair = this.scalar_pair(a, b);
+ let pair = this.scalar_pair::<FieldIdx, VariantIdx>(a, b);
let pair_offsets = match pair.fields {
FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
assert_eq!(memory_index.raw, [0, 1]);
@@ -1121,8 +1152,8 @@ fn univariant(
}
_ => panic!(),
};
- if offsets[i] == pair_offsets[FieldIdx::from_usize(0)]
- && offsets[j] == pair_offsets[FieldIdx::from_usize(1)]
+ if offsets[i] == pair_offsets[FieldIdx::new(0)]
+ && offsets[j] == pair_offsets[FieldIdx::new(1)]
&& align == pair.align
&& size == pair.size
{
@@ -1138,13 +1169,13 @@ fn univariant(
_ => {}
}
}
- if fields.iter().any(|f| f.abi().is_uninhabited()) {
+ if fields.iter().any(|f| f.abi.is_uninhabited()) {
abi = Abi::Uninhabited;
}
let unadjusted_abi_align = if repr.transparent() {
match layout_of_single_non_zst_field {
- Some(l) => l.unadjusted_abi_align(),
+ Some(l) => l.unadjusted_abi_align,
None => {
// `repr(transparent)` with all ZST fields.
align.abi
@@ -1155,7 +1186,7 @@ fn univariant(
};
Some(LayoutS {
- variants: Variants::Single { index: FIRST_VARIANT },
+ variants: Variants::Single { index: VariantIdx::new(0) },
fields: FieldsShape::Arbitrary { offsets, memory_index },
abi,
largest_niche,
@@ -1166,17 +1197,22 @@ fn univariant(
})
}
-fn format_field_niches(
- layout: &LayoutS,
- fields: &IndexSlice<FieldIdx, Layout<'_>>,
+fn format_field_niches<
+ 'a,
+ FieldIdx: Idx,
+ VariantIdx: Idx,
+ F: Deref<Target = &'a LayoutS<FieldIdx, VariantIdx>> + fmt::Debug,
+>(
+ layout: &LayoutS<FieldIdx, VariantIdx>,
+ fields: &IndexSlice<FieldIdx, F>,
dl: &TargetDataLayout,
) -> String {
let mut s = String::new();
for i in layout.fields.index_by_increasing_offset() {
let offset = layout.fields.offset(i);
- let f = fields[i.into()];
- write!(s, "[o{}a{}s{}", offset.bytes(), f.align().abi.bytes(), f.size().bytes()).unwrap();
- if let Some(n) = f.largest_niche() {
+ let f = &fields[FieldIdx::new(i)];
+ write!(s, "[o{}a{}s{}", offset.bytes(), f.align.abi.bytes(), f.size.bytes()).unwrap();
+ if let Some(n) = f.largest_niche {
write!(
s,
" n{}b{}s{}",
diff --git a/compiler/rustc_abi/src/lib.rs b/compiler/rustc_abi/src/lib.rs
index b30ff058a3092..45b3e76cca69a 100644
--- a/compiler/rustc_abi/src/lib.rs
+++ b/compiler/rustc_abi/src/lib.rs
@@ -1,23 +1,22 @@
-#![cfg_attr(feature = "nightly", feature(step_trait, rustc_attrs, min_specialization))]
+#![cfg_attr(feature = "nightly", feature(step_trait))]
#![cfg_attr(feature = "nightly", allow(internal_features))]
use std::fmt;
-#[cfg(feature = "nightly")]
-use std::iter::Step;
use std::num::{NonZeroUsize, ParseIntError};
use std::ops::{Add, AddAssign, Mul, RangeInclusive, Sub};
use std::str::FromStr;
use bitflags::bitflags;
-use rustc_data_structures::intern::Interned;
-use rustc_data_structures::stable_hasher::Hash64;
+use rustc_index::{Idx, IndexSlice, IndexVec};
+
#[cfg(feature = "nightly")]
use rustc_data_structures::stable_hasher::StableOrd;
-use rustc_index::{IndexSlice, IndexVec};
#[cfg(feature = "nightly")]
use rustc_macros::HashStable_Generic;
#[cfg(feature = "nightly")]
use rustc_macros::{Decodable, Encodable};
+#[cfg(feature = "nightly")]
+use std::iter::Step;
mod layout;
@@ -28,9 +27,6 @@ pub use layout::LayoutCalculator;
/// instead of implementing everything in `rustc_middle`.
pub trait HashStableContext {}
-use Integer::*;
-use Primitive::*;
-
bitflags! {
#[derive(Default)]
#[cfg_attr(feature = "nightly", derive(Encodable, Decodable, HashStable_Generic))]
@@ -53,10 +49,11 @@ bitflags! {
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
#[cfg_attr(feature = "nightly", derive(Encodable, Decodable, HashStable_Generic))]
pub enum IntegerType {
- /// Pointer sized integer type, i.e. isize and usize. The field shows signedness, that
- /// is, `Pointer(true)` is isize.
+ /// Pointer-sized integer type, i.e. `isize` and `usize`. The field shows signedness, e.g.
+ /// `Pointer(true)` means `isize`.
Pointer(bool),
- /// Fix sized integer type, e.g. i8, u32, i128 The bool field shows signedness, `Fixed(I8, false)` means `u8`
+ /// Fixed-sized integer type, e.g. `i8`, `u32`, `i128`. The bool field shows signedness, e.g.
+ /// `Fixed(I8, false)` means `u8`.
Fixed(Integer, bool),
}
@@ -69,7 +66,7 @@ impl IntegerType {
}
}
-/// Represents the repr options provided by the user,
+/// Represents the repr options provided by the user.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Default)]
#[cfg_attr(feature = "nightly", derive(Encodable, Decodable, HashStable_Generic))]
pub struct ReprOptions {
@@ -77,6 +74,7 @@ pub struct ReprOptions {
pub align: Option,
pub pack: Option,
pub flags: ReprFlags,
+ #[cfg(feature = "randomize")]
/// The seed to be used for randomizing a type's layout
///
/// Note: This could technically be a `Hash128` which would
@@ -84,7 +82,7 @@ pub struct ReprOptions {
/// hash without loss, but it does pay the price of being larger.
/// Everything's a tradeoff, a 64-bit seed should be sufficient for our
/// purposes (primarily `-Z randomize-layout`)
- pub field_shuffle_seed: Hash64,
+ pub field_shuffle_seed: rustc_data_structures::stable_hasher::Hash64,
}
impl ReprOptions {
@@ -139,7 +137,7 @@ impl ReprOptions {
}
/// Returns `true` if this type is valid for reordering and `-Z randomize-layout`
- /// was enabled for its declaration crate
+ /// was enabled for its declaration crate.
pub fn can_randomize_type_layout(&self) -> bool {
!self.inhibit_struct_field_reordering_opt()
&& self.flags.contains(ReprFlags::RANDOMIZE_LAYOUT)
@@ -217,7 +215,8 @@ pub enum TargetDataLayoutErrors<'a> {
}
impl TargetDataLayout {
- /// Parse data layout from an [llvm data layout string](https://llvm.org/docs/LangRef.html#data-layout)
+ /// Parse data layout from an
+ /// [llvm data layout string](https://llvm.org/docs/LangRef.html#data-layout)
///
/// This function doesn't fill `c_enum_min_size` and it will always be `I32` since it can not be
/// determined from llvm string.
@@ -242,10 +241,11 @@ impl TargetDataLayout {
};
// Parse a size string.
- let size = |s: &'a str, cause: &'a str| parse_bits(s, "size", cause).map(Size::from_bits);
+ let parse_size =
+ |s: &'a str, cause: &'a str| parse_bits(s, "size", cause).map(Size::from_bits);
// Parse an alignment string.
- let align = |s: &[&'a str], cause: &'a str| {
+ let parse_align = |s: &[&'a str], cause: &'a str| {
if s.is_empty() {
return Err(TargetDataLayoutErrors::MissingAlignment { cause });
}
@@ -269,22 +269,22 @@ impl TargetDataLayout {
[p] if p.starts_with('P') => {
dl.instruction_address_space = parse_address_space(&p[1..], "P")?
}
- ["a", ref a @ ..] => dl.aggregate_align = align(a, "a")?,
- ["f32", ref a @ ..] => dl.f32_align = align(a, "f32")?,
- ["f64", ref a @ ..] => dl.f64_align = align(a, "f64")?,
+ ["a", ref a @ ..] => dl.aggregate_align = parse_align(a, "a")?,
+ ["f32", ref a @ ..] => dl.f32_align = parse_align(a, "f32")?,
+ ["f64", ref a @ ..] => dl.f64_align = parse_align(a, "f64")?,
// FIXME(erikdesjardins): we should be parsing nonzero address spaces
// this will require replacing TargetDataLayout::{pointer_size,pointer_align}
// with e.g. `fn pointer_size_in(AddressSpace)`
[p @ "p", s, ref a @ ..] | [p @ "p0", s, ref a @ ..] => {
- dl.pointer_size = size(s, p)?;
- dl.pointer_align = align(a, p)?;
+ dl.pointer_size = parse_size(s, p)?;
+ dl.pointer_align = parse_align(a, p)?;
}
[s, ref a @ ..] if s.starts_with('i') => {
let Ok(bits) = s[1..].parse::<u64>() else {
- size(&s[1..], "i")?; // For the user error.
+ parse_size(&s[1..], "i")?; // For the user error.
continue;
};
- let a = align(a, s)?;
+ let a = parse_align(a, s)?;
match bits {
1 => dl.i1_align = a,
8 => dl.i8_align = a,
@@ -301,8 +301,8 @@ impl TargetDataLayout {
}
}
[s, ref a @ ..] if s.starts_with('v') => {
- let v_size = size(&s[1..], "v")?;
- let a = align(a, s)?;
+ let v_size = parse_size(&s[1..], "v")?;
+ let a = parse_align(a, s)?;
if let Some(v) = dl.vector_align.iter_mut().find(|v| v.0 == v_size) {
v.1 = a;
continue;
@@ -339,6 +339,7 @@ impl TargetDataLayout {
#[inline]
pub fn ptr_sized_integer(&self) -> Integer {
+ use Integer::*;
match self.pointer_size.bits() {
16 => I16,
32 => I32,
@@ -747,7 +748,6 @@ impl Align {
/// A pair of alignments, ABI-mandated and preferred.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
-
pub struct AbiAndPrefAlign {
pub abi: Align,
pub pref: Align,
@@ -773,7 +773,6 @@ impl AbiAndPrefAlign {
/// Integers, also used for enum discriminants.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(Encodable, Decodable, HashStable_Generic))]
-
pub enum Integer {
I8,
I16,
@@ -785,6 +784,7 @@ pub enum Integer {
impl Integer {
#[inline]
pub fn size(self) -> Size {
+ use Integer::*;
match self {
I8 => Size::from_bytes(1),
I16 => Size::from_bytes(2),
@@ -805,6 +805,7 @@ impl Integer {
}
pub fn align(self, cx: &C) -> AbiAndPrefAlign {
+ use Integer::*;
let dl = cx.data_layout();
match self {
@@ -819,6 +820,7 @@ impl Integer {
/// Returns the largest signed value that can be represented by this Integer.
#[inline]
pub fn signed_max(self) -> i128 {
+ use Integer::*;
match self {
I8 => i8::MAX as i128,
I16 => i16::MAX as i128,
@@ -831,6 +833,7 @@ impl Integer {
/// Finds the smallest Integer type which can represent the signed value.
#[inline]
pub fn fit_signed(x: i128) -> Integer {
+ use Integer::*;
match x {
-0x0000_0000_0000_0080..=0x0000_0000_0000_007f => I8,
-0x0000_0000_0000_8000..=0x0000_0000_0000_7fff => I16,
@@ -843,6 +846,7 @@ impl Integer {
/// Finds the smallest Integer type which can represent the unsigned value.
#[inline]
pub fn fit_unsigned(x: u128) -> Integer {
+ use Integer::*;
match x {
0..=0x0000_0000_0000_00ff => I8,
0..=0x0000_0000_0000_ffff => I16,
@@ -854,6 +858,7 @@ impl Integer {
/// Finds the smallest integer with the given alignment.
pub fn for_align(cx: &C, wanted: Align) -> Option {
+ use Integer::*;
let dl = cx.data_layout();
[I8, I16, I32, I64, I128].into_iter().find(|&candidate| {
@@ -863,6 +868,7 @@ impl Integer {
/// Find the largest integer with the given alignment or less.
pub fn approximate_align(cx: &C, wanted: Align) -> Integer {
+ use Integer::*;
let dl = cx.data_layout();
// FIXME(eddyb) maybe include I128 in the future, when it works everywhere.
@@ -908,6 +914,7 @@ pub enum Primitive {
impl Primitive {
pub fn size(self, cx: &C) -> Size {
+ use Primitive::*;
let dl = cx.data_layout();
match self {
@@ -922,6 +929,7 @@ impl Primitive {
}
pub fn align(self, cx: &C) -> AbiAndPrefAlign {
+ use Primitive::*;
let dl = cx.data_layout();
match self {
@@ -937,8 +945,7 @@ impl Primitive {
}
/// Inclusive wrap-around range of valid values, that is, if
-/// start > end, it represents `start..=MAX`,
-/// followed by `0..=end`.
+/// start > end, it represents `start..=MAX`, followed by `0..=end`.
///
/// That is, for an i8 primitive, a range of `254..=2` means following
/// sequence:
@@ -970,21 +977,21 @@ impl WrappingRange {
/// Returns `self` with replaced `start`
#[inline(always)]
- pub fn with_start(mut self, start: u128) -> Self {
+ fn with_start(mut self, start: u128) -> Self {
self.start = start;
self
}
/// Returns `self` with replaced `end`
#[inline(always)]
- pub fn with_end(mut self, end: u128) -> Self {
+ fn with_end(mut self, end: u128) -> Self {
self.end = end;
self
}
/// Returns `true` if `size` completely fills the range.
#[inline]
- pub fn is_full_for(&self, size: Size) -> bool {
+ fn is_full_for(&self, size: Size) -> bool {
let max_value = size.unsigned_int_max();
debug_assert!(self.start <= max_value && self.end <= max_value);
self.start == (self.end.wrapping_add(1) & max_value)
@@ -1027,10 +1034,11 @@ pub enum Scalar {
impl Scalar {
#[inline]
pub fn is_bool(&self) -> bool {
+ use Integer::*;
matches!(
self,
Scalar::Initialized {
- value: Int(I8, false),
+ value: Primitive::Int(I8, false),
valid_range: WrappingRange { start: 0, end: 1 }
}
)
@@ -1066,7 +1074,8 @@ impl Scalar {
}
#[inline]
- /// Allows the caller to mutate the valid range. This operation will panic if attempted on a union.
+ /// Allows the caller to mutate the valid range. This operation will panic if attempted on a
+ /// union.
pub fn valid_range_mut(&mut self) -> &mut WrappingRange {
match self {
Scalar::Initialized { valid_range, .. } => valid_range,
@@ -1074,7 +1083,8 @@ impl Scalar {
}
}
- /// Returns `true` if all possible numbers are valid, i.e `valid_range` covers the whole layout
+ /// Returns `true` if all possible numbers are valid, i.e `valid_range` covers the whole
+ /// layout.
#[inline]
pub fn is_always_valid(&self, cx: &C) -> bool {
match *self {
@@ -1093,36 +1103,11 @@ impl Scalar {
}
}
-rustc_index::newtype_index! {
- /// The *source-order* index of a field in a variant.
- ///
- /// This is how most code after type checking refers to fields, rather than
- /// using names (as names have hygiene complications and more complex lookup).
- ///
- /// Particularly for `repr(Rust)` types, this may not be the same as *layout* order.
- /// (It is for `repr(C)` `struct`s, however.)
- ///
- /// For example, in the following types,
- /// ```rust
- /// # enum Never {}
- /// # #[repr(u16)]
- /// enum Demo1 {
- /// Variant0 { a: Never, b: i32 } = 100,
- /// Variant1 { c: u8, d: u64 } = 10,
- /// }
- /// struct Demo2 { e: u8, f: u16, g: u8 }
- /// ```
- /// `b` is `FieldIdx(1)` in `VariantIdx(0)`,
- /// `d` is `FieldIdx(1)` in `VariantIdx(1)`, and
- /// `f` is `FieldIdx(1)` in `VariantIdx(0)`.
- #[derive(HashStable_Generic)]
- pub struct FieldIdx {}
-}
-
+// NOTE: This struct is generic over the FieldIdx for rust-analyzer usage.
/// Describes how the fields of a type are located in memory.
#[derive(PartialEq, Eq, Hash, Clone, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
-pub enum FieldsShape {
+pub enum FieldsShape<FieldIdx: Idx> {
/// Scalar primitives and `!`, which never have fields.
Primitive,
@@ -1162,7 +1147,7 @@ pub enum FieldsShape {
},
}
-impl FieldsShape {
+impl<FieldIdx: Idx> FieldsShape<FieldIdx> {
#[inline]
pub fn count(&self) -> usize {
match *self {
@@ -1188,7 +1173,7 @@ impl FieldsShape {
assert!(i < count, "tried to access field {i} of array with {count} fields");
stride * i
}
- FieldsShape::Arbitrary { ref offsets, .. } => offsets[FieldIdx::from_usize(i)],
+ FieldsShape::Arbitrary { ref offsets, .. } => offsets[FieldIdx::new(i)],
}
}
@@ -1200,7 +1185,7 @@ impl FieldsShape {
}
FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
FieldsShape::Arbitrary { ref memory_index, .. } => {
- memory_index[FieldIdx::from_usize(i)].try_into().unwrap()
+ memory_index[FieldIdx::new(i)].try_into().unwrap()
}
}
}
@@ -1216,7 +1201,7 @@ impl FieldsShape {
if let FieldsShape::Arbitrary { ref memory_index, .. } = *self {
if use_small {
for (field_idx, &mem_idx) in memory_index.iter_enumerated() {
- inverse_small[mem_idx as usize] = field_idx.as_u32() as u8;
+ inverse_small[mem_idx as usize] = field_idx.index() as u8;
}
} else {
inverse_big = memory_index.invert_bijective_mapping();
@@ -1229,7 +1214,7 @@ impl FieldsShape {
if use_small {
inverse_small[i] as usize
} else {
- inverse_big[i as u32].as_usize()
+ inverse_big[i as u32].index()
}
}
})
@@ -1252,7 +1237,6 @@ impl AddressSpace {
/// in terms of categories of C types there are ABI rules for.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
-
pub enum Abi {
Uninhabited,
Scalar(Scalar),
@@ -1373,9 +1357,10 @@ impl Abi {
}
}
+// NOTE: This struct is generic over the FieldIdx and VariantIdx for rust-analyzer usage.
#[derive(PartialEq, Eq, Hash, Clone, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
-pub enum Variants {
+pub enum Variants<FieldIdx: Idx, VariantIdx: Idx> {
/// Single enum variants, structs/tuples, unions, and all non-ADTs.
Single { index: VariantIdx },
@@ -1387,15 +1372,16 @@ pub enum Variants {
/// For enums, the tag is the sole field of the layout.
Multiple {
tag: Scalar,
- tag_encoding: TagEncoding,
+ tag_encoding: TagEncoding<VariantIdx>,
tag_field: usize,
- variants: IndexVec<VariantIdx, LayoutS>,
+ variants: IndexVec<VariantIdx, LayoutS<FieldIdx, VariantIdx>>,
},
}
+// NOTE: This struct is generic over the VariantIdx for rust-analyzer usage.
#[derive(PartialEq, Eq, Hash, Clone, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
-pub enum TagEncoding {
+pub enum TagEncoding<VariantIdx: Idx> {
/// The tag directly stores the discriminant, but possibly with a smaller layout
/// (so converting the tag to the discriminant can require sign extension).
Direct,
@@ -1457,17 +1443,19 @@ impl Niche {
return None;
}
- // Extend the range of valid values being reserved by moving either `v.start` or `v.end` bound.
- // Given an eventual `Option`, we try to maximize the chance for `None` to occupy the niche of zero.
- // This is accomplished by preferring enums with 2 variants(`count==1`) and always taking the shortest path to niche zero.
- // Having `None` in niche zero can enable some special optimizations.
+ // Extend the range of valid values being reserved by moving either `v.start` or `v.end`
+ // bound. Given an eventual `Option`, we try to maximize the chance for `None` to occupy
+ // the niche of zero. This is accomplished by preferring enums with 2 variants(`count==1`)
+ // and always taking the shortest path to niche zero. Having `None` in niche zero can
+ // enable some special optimizations.
//
// Bound selection criteria:
// 1. Select closest to zero given wrapping semantics.
// 2. Avoid moving past zero if possible.
//
- // In practice this means that enums with `count > 1` are unlikely to claim niche zero, since they have to fit perfectly.
- // If niche zero is already reserved, the selection of bounds are of little interest.
+ // In practice this means that enums with `count > 1` are unlikely to claim niche zero,
+ // since they have to fit perfectly. If niche zero is already reserved, the selection of
+ // bounds are of little interest.
let move_start = |v: WrappingRange| {
let start = v.start.wrapping_sub(count) & max_value;
Some((start, Scalar::Initialized { value, valid_range: v.with_start(start) }))
@@ -1501,29 +1489,12 @@ impl Niche {
}
}
-rustc_index::newtype_index! {
- /// The *source-order* index of a variant in a type.
- ///
- /// For enums, these are always `0..variant_count`, regardless of any
- /// custom discriminants that may have been defined, and including any
- /// variants that may end up uninhabited due to field types. (Some of the
- /// variants may not be present in a monomorphized ABI [`Variants`], but
- /// those skipped variants are always counted when determining the *index*.)
- ///
- /// `struct`s, `tuples`, and `unions`s are considered to have a single variant
- /// with variant index zero, aka [`FIRST_VARIANT`].
- #[derive(HashStable_Generic)]
- pub struct VariantIdx {
- /// Equivalent to `VariantIdx(0)`.
- const FIRST_VARIANT = 0;
- }
-}
-
+// NOTE: This struct is generic over the FieldIdx and VariantIdx for rust-analyzer usage.
#[derive(PartialEq, Eq, Hash, Clone)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
-pub struct LayoutS {
+pub struct LayoutS<FieldIdx: Idx, VariantIdx: Idx> {
/// Says where the fields are located within the layout.
- pub fields: FieldsShape,
+ pub fields: FieldsShape<FieldIdx>,
/// Encodes information about multi-variant layouts.
/// Even with `Multiple` variants, a layout still has its own fields! Those are then
@@ -1532,7 +1503,7 @@ pub struct LayoutS {
///
/// To access all fields of this layout, both `fields` and the fields of the active variant
/// must be taken into account.
- pub variants: Variants,
+ pub variants: Variants<FieldIdx, VariantIdx>,
/// The `abi` defines how this data is passed between functions, and it defines
/// value restrictions via `valid_range`.
@@ -1561,13 +1532,13 @@ pub struct LayoutS {
pub unadjusted_abi_align: Align,
}
-impl LayoutS {
+impl<FieldIdx: Idx, VariantIdx: Idx> LayoutS<FieldIdx, VariantIdx> {
pub fn scalar(cx: &C, scalar: Scalar) -> Self {
let largest_niche = Niche::from_scalar(cx, Size::ZERO, scalar);
let size = scalar.size(cx);
let align = scalar.align(cx);
LayoutS {
- variants: Variants::Single { index: FIRST_VARIANT },
+ variants: Variants::Single { index: VariantIdx::new(0) },
fields: FieldsShape::Primitive,
abi: Abi::Scalar(scalar),
largest_niche,
@@ -1579,7 +1550,11 @@ impl LayoutS {
}
}
-impl fmt::Debug for LayoutS {
+impl<FieldIdx: Idx, VariantIdx: Idx> fmt::Debug for LayoutS<FieldIdx, VariantIdx>
+where
+ FieldsShape<FieldIdx>: fmt::Debug,
+ Variants<FieldIdx, VariantIdx>: fmt::Debug,
+{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// This is how `Layout` used to print before it become
// `Interned`. We print it like this to avoid having to update
@@ -1607,61 +1582,6 @@ impl fmt::Debug for LayoutS {
}
}
-#[derive(Copy, Clone, PartialEq, Eq, Hash, HashStable_Generic)]
-#[rustc_pass_by_value]
-pub struct Layout<'a>(pub Interned<'a, LayoutS>);
-
-impl<'a> fmt::Debug for Layout<'a> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- // See comment on `::fmt` above.
- self.0.0.fmt(f)
- }
-}
-
-impl<'a> Layout<'a> {
- pub fn fields(self) -> &'a FieldsShape {
- &self.0.0.fields
- }
-
- pub fn variants(self) -> &'a Variants {
- &self.0.0.variants
- }
-
- pub fn abi(self) -> Abi {
- self.0.0.abi
- }
-
- pub fn largest_niche(self) -> Option {
- self.0.0.largest_niche
- }
-
- pub fn align(self) -> AbiAndPrefAlign {
- self.0.0.align
- }
-
- pub fn size(self) -> Size {
- self.0.0.size
- }
-
- pub fn max_repr_align(self) -> Option {
- self.0.0.max_repr_align
- }
-
- pub fn unadjusted_abi_align(self) -> Align {
- self.0.0.unadjusted_abi_align
- }
-
- /// Whether the layout is from a type that implements [`std::marker::PointerLike`].
- ///
- /// Currently, that means that the type is pointer-sized, pointer-aligned,
- /// and has a scalar ABI.
- pub fn is_pointer_like(self, data_layout: &TargetDataLayout) -> bool {
- self.size() == data_layout.pointer_size
- && self.align().abi == data_layout.pointer_align.abi
- && matches!(self.abi(), Abi::Scalar(..))
- }
-}
-
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum PointerKind {
/// Shared reference. `frozen` indicates the absence of any `UnsafeCell`.
@@ -1681,7 +1601,7 @@ pub struct PointeeInfo {
pub safe: Option,
}
-impl LayoutS {
+impl<FieldIdx: Idx, VariantIdx: Idx> LayoutS<FieldIdx, VariantIdx> {
/// Returns `true` if the layout corresponds to an unsized type.
#[inline]
pub fn is_unsized(&self) -> bool {
diff --git a/compiler/rustc_arena/src/lib.rs b/compiler/rustc_arena/src/lib.rs
index 23fdd272ffd33..bf8a7eb293e0d 100644
--- a/compiler/rustc_arena/src/lib.rs
+++ b/compiler/rustc_arena/src/lib.rs
@@ -15,7 +15,6 @@
#![feature(dropck_eyepatch)]
#![feature(new_uninit)]
#![feature(maybe_uninit_slice)]
-#![feature(min_specialization)]
#![feature(decl_macro)]
#![feature(pointer_byte_offsets)]
#![feature(rustc_attrs)]
@@ -44,23 +43,6 @@ fn outline R, R>(f: F) -> R {
f()
}
-/// An arena that can hold objects of only one type.
-pub struct TypedArena<T> {
- /// A pointer to the next object to be allocated.
- ptr: Cell<*mut T>,
-
- /// A pointer to the end of the allocated area. When this pointer is
- /// reached, a new chunk is allocated.
- end: Cell<*mut T>,
-
- /// A vector of arena chunks.
- chunks: RefCell<Vec<ArenaChunk<T>>>,
-
- /// Marker indicating that dropping the arena causes its owned
- /// instances of `T` to be dropped.
- _own: PhantomData<T>,
-}
-
struct ArenaChunk {
/// The raw storage for the arena chunk.
storage: NonNull<[MaybeUninit]>,
@@ -130,6 +112,23 @@ impl ArenaChunk {
const PAGE: usize = 4096;
const HUGE_PAGE: usize = 2 * 1024 * 1024;
+/// An arena that can hold objects of only one type.
+pub struct TypedArena<T> {
+ /// A pointer to the next object to be allocated.
+ ptr: Cell<*mut T>,
+
+ /// A pointer to the end of the allocated area. When this pointer is
+ /// reached, a new chunk is allocated.
+ end: Cell<*mut T>,
+
+ /// A vector of arena chunks.
+ chunks: RefCell<Vec<ArenaChunk<T>>>,
+
+ /// Marker indicating that dropping the arena causes its owned
+ /// instances of `T` to be dropped.
+ _own: PhantomData<T>,
+}
+
impl<T> Default for TypedArena<T> {
/// Creates a new `TypedArena`.
fn default() -> TypedArena<T> {
@@ -144,77 +143,6 @@ impl Default for TypedArena {
}
}
-trait IterExt {
- fn alloc_from_iter(self, arena: &TypedArena) -> &mut [T];
-}
-
-impl IterExt for I
-where
- I: IntoIterator
- ,
-{
- // This default collects into a `SmallVec` and then allocates by copying
- // from it. The specializations below for types like `Vec` are more
- // efficient, copying directly without the intermediate collecting step.
- // This default could be made more efficient, like
- // `DroplessArena::alloc_from_iter`, but it's not hot enough to bother.
- #[inline]
- default fn alloc_from_iter(self, arena: &TypedArena) -> &mut [T] {
- let vec: SmallVec<[_; 8]> = self.into_iter().collect();
- vec.alloc_from_iter(arena)
- }
-}
-
-impl IterExt for std::array::IntoIter {
- #[inline]
- fn alloc_from_iter(self, arena: &TypedArena) -> &mut [T] {
- let len = self.len();
- if len == 0 {
- return &mut [];
- }
- // Move the content to the arena by copying and then forgetting it.
- unsafe {
- let start_ptr = arena.alloc_raw_slice(len);
- self.as_slice().as_ptr().copy_to_nonoverlapping(start_ptr, len);
- mem::forget(self);
- slice::from_raw_parts_mut(start_ptr, len)
- }
- }
-}
-
-impl IterExt for Vec {
- #[inline]
- fn alloc_from_iter(mut self, arena: &TypedArena) -> &mut [T] {
- let len = self.len();
- if len == 0 {
- return &mut [];
- }
- // Move the content to the arena by copying and then forgetting it.
- unsafe {
- let start_ptr = arena.alloc_raw_slice(len);
- self.as_ptr().copy_to_nonoverlapping(start_ptr, len);
- self.set_len(0);
- slice::from_raw_parts_mut(start_ptr, len)
- }
- }
-}
-
-impl IterExt for SmallVec {
- #[inline]
- fn alloc_from_iter(mut self, arena: &TypedArena) -> &mut [A::Item] {
- let len = self.len();
- if len == 0 {
- return &mut [];
- }
- // Move the content to the arena by copying and then forgetting it.
- unsafe {
- let start_ptr = arena.alloc_raw_slice(len);
- self.as_ptr().copy_to_nonoverlapping(start_ptr, len);
- self.set_len(0);
- slice::from_raw_parts_mut(start_ptr, len)
- }
- }
-}
-
impl<T> TypedArena<T> {
/// Allocates an object in the `TypedArena`, returning a reference to it.
#[inline]
@@ -250,33 +178,55 @@ impl TypedArena {
available_bytes >= additional_bytes
}
- /// Ensures there's enough space in the current chunk to fit `len` objects.
#[inline]
- fn ensure_capacity(&self, additional: usize) {
- if !self.can_allocate(additional) {
- self.grow(additional);
- debug_assert!(self.can_allocate(additional));
- }
- }
-
- #[inline]
- unsafe fn alloc_raw_slice(&self, len: usize) -> *mut T {
+ fn alloc_raw_slice(&self, len: usize) -> *mut T {
assert!(mem::size_of::() != 0);
assert!(len != 0);
- self.ensure_capacity(len);
+ // Ensure the current chunk can fit `len` objects.
+ if !self.can_allocate(len) {
+ self.grow(len);
+ debug_assert!(self.can_allocate(len));
+ }
let start_ptr = self.ptr.get();
- // SAFETY: `self.ensure_capacity` makes sure that there is enough space
- // for `len` elements.
+ // SAFETY: `can_allocate`/`grow` ensures that there is enough space for
+ // `len` elements.
unsafe { self.ptr.set(start_ptr.add(len)) };
start_ptr
}
#[inline]
pub fn alloc_from_iter<I: IntoIterator<Item = T>>(&self, iter: I) -> &mut [T] {
+ // This implementation is entirely separate to
+ // `DroplessIterator::alloc_from_iter`, even though conceptually they
+ // are the same.
+ //
+ // `DroplessIterator` (in the fast case) writes elements from the
+ // iterator one at a time into the allocated memory. That's easy
+ // because the elements don't implement `Drop`. But for `TypedArena`
+ // they do implement `Drop`, which means that if the iterator panics we
+ // could end up with some allocated-but-uninitialized elements, which
+ // will then cause UB in `TypedArena::drop`.
+ //
+ // Instead we use an approach where any iterator panic will occur
+ // before the memory is allocated. This function is much less hot than
+ // `DroplessArena::alloc_from_iter`, so it doesn't need to be
+ // hyper-optimized.
assert!(mem::size_of::() != 0);
- iter.alloc_from_iter(self)
+
+ let mut vec: SmallVec<[_; 8]> = iter.into_iter().collect();
+ if vec.is_empty() {
+ return &mut [];
+ }
+ // Move the content to the arena by copying and then forgetting it.
+ let len = vec.len();
+ let start_ptr = self.alloc_raw_slice(len);
+ unsafe {
+ vec.as_ptr().copy_to_nonoverlapping(start_ptr, len);
+ vec.set_len(0);
+ slice::from_raw_parts_mut(start_ptr, len)
+ }
}
/// Grows the arena.
@@ -407,6 +357,8 @@ impl Default for DroplessArena {
#[inline]
fn default() -> DroplessArena {
DroplessArena {
+ // We set both `start` and `end` to 0 so that the first call to
+ // alloc() will trigger a grow().
start: Cell::new(ptr::null_mut()),
end: Cell::new(ptr::null_mut()),
chunks: Default::default(),
@@ -415,9 +367,11 @@ impl Default for DroplessArena {
}
impl DroplessArena {
+ #[inline(never)]
+ #[cold]
fn grow(&self, layout: Layout) {
// Add some padding so we can align `self.end` while
- // stilling fitting in a `layout` allocation.
+ // still fitting in a `layout` allocation.
let additional = layout.size() + cmp::max(DROPLESS_ALIGNMENT, layout.align()) - 1;
unsafe {
@@ -441,7 +395,7 @@ impl DroplessArena {
let mut chunk = ArenaChunk::new(align_up(new_cap, PAGE));
self.start.set(chunk.start());
- // Align the end to DROPLESS_ALIGNMENT
+ // Align the end to DROPLESS_ALIGNMENT.
let end = align_down(chunk.end().addr(), DROPLESS_ALIGNMENT);
// Make sure we don't go past `start`. This should not happen since the allocation
@@ -454,55 +408,40 @@ impl DroplessArena {
}
}
- #[inline(never)]
- #[cold]
- fn grow_and_alloc_raw(&self, layout: Layout) -> *mut u8 {
- self.grow(layout);
- self.alloc_raw_without_grow(layout).unwrap()
- }
-
- #[inline(never)]
- #[cold]
- fn grow_and_alloc(&self) -> *mut u8 {
- self.grow_and_alloc_raw(Layout::new::())
- }
-
- /// Allocates a byte slice with specified layout from the current memory
- /// chunk. Returns `None` if there is no free space left to satisfy the
- /// request.
- #[inline]
- fn alloc_raw_without_grow(&self, layout: Layout) -> Option<*mut u8> {
- let start = self.start.get().addr();
- let old_end = self.end.get();
- let end = old_end.addr();
-
- // Align allocated bytes so that `self.end` stays aligned to DROPLESS_ALIGNMENT
- let bytes = align_up(layout.size(), DROPLESS_ALIGNMENT);
-
- // Tell LLVM that `end` is aligned to DROPLESS_ALIGNMENT
- unsafe { intrinsics::assume(end == align_down(end, DROPLESS_ALIGNMENT)) };
-
- let new_end = align_down(end.checked_sub(bytes)?, layout.align());
- if start <= new_end {
- let new_end = old_end.with_addr(new_end);
- // `new_end` is aligned to DROPLESS_ALIGNMENT as `align_down` preserves alignment
- // as both `end` and `bytes` are already aligned to DROPLESS_ALIGNMENT.
- self.end.set(new_end);
- Some(new_end)
- } else {
- None
- }
- }
-
#[inline]
pub fn alloc_raw(&self, layout: Layout) -> *mut u8 {
assert!(layout.size() != 0);
- if let Some(a) = self.alloc_raw_without_grow(layout) {
- return a;
+
+ // This loop executes once or twice: if allocation fails the first
+ // time, the `grow` ensures it will succeed the second time.
+ loop {
+ let start = self.start.get().addr();
+ let old_end = self.end.get();
+ let end = old_end.addr();
+
+ // Align allocated bytes so that `self.end` stays aligned to
+ // DROPLESS_ALIGNMENT.
+ let bytes = align_up(layout.size(), DROPLESS_ALIGNMENT);
+
+ // Tell LLVM that `end` is aligned to DROPLESS_ALIGNMENT.
+ unsafe { intrinsics::assume(end == align_down(end, DROPLESS_ALIGNMENT)) };
+
+ if let Some(sub) = end.checked_sub(bytes) {
+ let new_end = align_down(sub, layout.align());
+ if start <= new_end {
+ let new_end = old_end.with_addr(new_end);
+ // `new_end` is aligned to DROPLESS_ALIGNMENT as `align_down`
+ // preserves alignment as both `end` and `bytes` are already
+ // aligned to DROPLESS_ALIGNMENT.
+ self.end.set(new_end);
+ return new_end;
+ }
+ }
+
+ // No free space left. Allocate a new chunk to satisfy the request.
+ // On failure the grow will panic or abort.
+ self.grow(layout);
}
- // No free space left. Allocate a new chunk to satisfy the request.
- // On failure the grow will panic or abort.
- self.grow_and_alloc_raw(layout)
}
#[inline]
@@ -510,13 +449,7 @@ impl DroplessArena {
assert!(!mem::needs_drop::());
assert!(mem::size_of::() != 0);
- let mem = if let Some(a) = self.alloc_raw_without_grow(Layout::for_value::(&object)) {
- a
- } else {
- // No free space left. Allocate a new chunk to satisfy the request.
- // On failure the grow will panic or abort.
- self.grow_and_alloc::()
- } as *mut T;
+ let mem = self.alloc_raw(Layout::new::()) as *mut T;
unsafe {
// Write into uninitialized memory.
@@ -713,10 +646,10 @@ pub macro declare_arena([$($a:tt $name:ident: $ty:ty,)*]) {
}
#[allow(clippy::mut_from_ref)]
- pub fn alloc_from_iter<'a, T: ArenaAllocatable<'tcx, C>, C>(
- &'a self,
+ pub fn alloc_from_iter, C>(
+ &self,
iter: impl ::std::iter::IntoIterator
- ,
- ) -> &'a mut [T] {
+ ) -> &mut [T] {
T::allocate_from_iter(self, iter)
}
}
diff --git a/compiler/rustc_ast/src/entry.rs b/compiler/rustc_ast/src/entry.rs
index 3370146193a52..2dd5e96e513a3 100644
--- a/compiler/rustc_ast/src/entry.rs
+++ b/compiler/rustc_ast/src/entry.rs
@@ -1,3 +1,7 @@
+use crate::{attr, Attribute};
+use rustc_span::symbol::sym;
+use rustc_span::Symbol;
+
#[derive(Debug)]
pub enum EntryPointType {
None,
@@ -6,3 +10,26 @@ pub enum EntryPointType {
Start,
OtherMain, // Not an entry point, but some other function named main
}
+
+pub fn entry_point_type(
+ attrs: &[Attribute],
+ at_root: bool,
+ name: Option,
+) -> EntryPointType {
+ if attr::contains_name(attrs, sym::start) {
+ EntryPointType::Start
+ } else if attr::contains_name(attrs, sym::rustc_main) {
+ EntryPointType::RustcMainAttr
+ } else {
+ if let Some(name) = name && name == sym::main {
+ if at_root {
+ // This is a top-level function so it can be `main`.
+ EntryPointType::MainNamed
+ } else {
+ EntryPointType::OtherMain
+ }
+ } else {
+ EntryPointType::None
+ }
+ }
+}
diff --git a/compiler/rustc_ast/src/token.rs b/compiler/rustc_ast/src/token.rs
index 300b1486f9ba0..09bfbd02198c0 100644
--- a/compiler/rustc_ast/src/token.rs
+++ b/compiler/rustc_ast/src/token.rs
@@ -446,7 +446,7 @@ impl Token {
}
}
- /// Returns `true` if the token can appear at the start of an pattern.
+ /// Returns `true` if the token can appear at the start of a pattern.
///
/// Shamelessly borrowed from `can_begin_expr`, only used for diagnostics right now.
pub fn can_begin_pattern(&self) -> bool {
diff --git a/compiler/rustc_ast_lowering/messages.ftl b/compiler/rustc_ast_lowering/messages.ftl
index 8115c4b55b0af..aaeef1ff77d3d 100644
--- a/compiler/rustc_ast_lowering/messages.ftl
+++ b/compiler/rustc_ast_lowering/messages.ftl
@@ -99,7 +99,7 @@ ast_lowering_misplaced_double_dot =
.note = only allowed in tuple, tuple struct, and slice patterns
ast_lowering_misplaced_impl_trait =
- `impl Trait` only allowed in function and inherent method return types, not in {$position}
+ `impl Trait` only allowed in function and inherent method argument and return types, not in {$position}
ast_lowering_misplaced_relax_trait_bound =
`?Trait` bounds are only permitted at the point where a type parameter is declared
diff --git a/compiler/rustc_ast_lowering/src/format.rs b/compiler/rustc_ast_lowering/src/format.rs
index afcf8b15cd800..45a9bebfcf627 100644
--- a/compiler/rustc_ast_lowering/src/format.rs
+++ b/compiler/rustc_ast_lowering/src/format.rs
@@ -410,15 +410,11 @@ fn expand_format_args<'hir>(
let format_options = use_format_options.then(|| {
// Generate:
// &[format_spec_0, format_spec_1, format_spec_2]
- let elements: Vec<_> = fmt
- .template
- .iter()
- .filter_map(|piece| {
- let FormatArgsPiece::Placeholder(placeholder) = piece else { return None };
- Some(make_format_spec(ctx, macsp, placeholder, &mut argmap))
- })
- .collect();
- ctx.expr_array_ref(macsp, ctx.arena.alloc_from_iter(elements))
+ let elements = ctx.arena.alloc_from_iter(fmt.template.iter().filter_map(|piece| {
+ let FormatArgsPiece::Placeholder(placeholder) = piece else { return None };
+ Some(make_format_spec(ctx, macsp, placeholder, &mut argmap))
+ }));
+ ctx.expr_array_ref(macsp, elements)
});
let arguments = fmt.arguments.all_args();
@@ -477,10 +473,8 @@ fn expand_format_args<'hir>(
// ::new_debug(&arg2),
// …
// ]
- let elements: Vec<_> = arguments
- .iter()
- .zip(argmap)
- .map(|(arg, ((_, ty), placeholder_span))| {
+ let elements = ctx.arena.alloc_from_iter(arguments.iter().zip(argmap).map(
+ |(arg, ((_, ty), placeholder_span))| {
let placeholder_span =
placeholder_span.unwrap_or(arg.expr.span).with_ctxt(macsp.ctxt());
let arg_span = match arg.kind {
@@ -493,9 +487,9 @@ fn expand_format_args<'hir>(
hir::ExprKind::AddrOf(hir::BorrowKind::Ref, hir::Mutability::Not, arg),
));
make_argument(ctx, placeholder_span, ref_arg, ty)
- })
- .collect();
- ctx.expr_array_ref(macsp, ctx.arena.alloc_from_iter(elements))
+ },
+ ));
+ ctx.expr_array_ref(macsp, elements)
} else {
// Generate:
// &match (&arg0, &arg1, &…) {
@@ -528,19 +522,14 @@ fn expand_format_args<'hir>(
make_argument(ctx, placeholder_span, arg, ty)
},
));
- let elements: Vec<_> = arguments
- .iter()
- .map(|arg| {
- let arg_expr = ctx.lower_expr(&arg.expr);
- ctx.expr(
- arg.expr.span.with_ctxt(macsp.ctxt()),
- hir::ExprKind::AddrOf(hir::BorrowKind::Ref, hir::Mutability::Not, arg_expr),
- )
- })
- .collect();
- let args_tuple = ctx
- .arena
- .alloc(ctx.expr(macsp, hir::ExprKind::Tup(ctx.arena.alloc_from_iter(elements))));
+ let elements = ctx.arena.alloc_from_iter(arguments.iter().map(|arg| {
+ let arg_expr = ctx.lower_expr(&arg.expr);
+ ctx.expr(
+ arg.expr.span.with_ctxt(macsp.ctxt()),
+ hir::ExprKind::AddrOf(hir::BorrowKind::Ref, hir::Mutability::Not, arg_expr),
+ )
+ }));
+ let args_tuple = ctx.arena.alloc(ctx.expr(macsp, hir::ExprKind::Tup(elements)));
let array = ctx.arena.alloc(ctx.expr(macsp, hir::ExprKind::Array(args)));
let match_arms = ctx.arena.alloc_from_iter([ctx.arm(args_pat, array)]);
let match_expr = ctx.arena.alloc(ctx.expr_match(
diff --git a/compiler/rustc_ast_lowering/src/lib.rs b/compiler/rustc_ast_lowering/src/lib.rs
index 85ab5e7223bf1..7e3ada9c1234b 100644
--- a/compiler/rustc_ast_lowering/src/lib.rs
+++ b/compiler/rustc_ast_lowering/src/lib.rs
@@ -1824,7 +1824,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
}
let fn_def_id = self.local_def_id(fn_node_id);
- self.lower_async_fn_ret_ty(&decl.output, fn_def_id, ret_id, kind)
+ self.lower_async_fn_ret_ty(&decl.output, fn_def_id, ret_id, kind, fn_span)
} else {
match &decl.output {
FnRetTy::Ty(ty) => {
@@ -1901,8 +1901,9 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
fn_def_id: LocalDefId,
opaque_ty_node_id: NodeId,
fn_kind: FnDeclKind,
+ fn_span: Span,
) -> hir::FnRetTy<'hir> {
- let span = self.lower_span(output.span());
+ let span = self.lower_span(fn_span);
let opaque_ty_span = self.mark_span_with_reason(DesugaringKind::Async, span, None);
let captured_lifetimes: Vec<_> = self
diff --git a/compiler/rustc_ast_passes/src/ast_validation.rs b/compiler/rustc_ast_passes/src/ast_validation.rs
index 7bc685a545014..743fad8e86502 100644
--- a/compiler/rustc_ast_passes/src/ast_validation.rs
+++ b/compiler/rustc_ast_passes/src/ast_validation.rs
@@ -52,7 +52,8 @@ struct AstValidator<'a> {
/// Are we inside a trait impl?
in_trait_impl: bool,
- in_const_trait_impl: bool,
+ /// Are we inside a const trait defn or impl?
+ in_const_trait_or_impl: bool,
has_proc_macro_decls: bool,
@@ -78,11 +79,19 @@ impl<'a> AstValidator<'a> {
f: impl FnOnce(&mut Self),
) {
let old = mem::replace(&mut self.in_trait_impl, is_in);
- let old_const =
- mem::replace(&mut self.in_const_trait_impl, matches!(constness, Some(Const::Yes(_))));
+ let old_const = mem::replace(
+ &mut self.in_const_trait_or_impl,
+ matches!(constness, Some(Const::Yes(_))),
+ );
f(self);
self.in_trait_impl = old;
- self.in_const_trait_impl = old_const;
+ self.in_const_trait_or_impl = old_const;
+ }
+
+ fn with_in_trait(&mut self, is_const: bool, f: impl FnOnce(&mut Self)) {
+ let old = mem::replace(&mut self.in_const_trait_or_impl, is_const);
+ f(self);
+ self.in_const_trait_or_impl = old;
}
fn with_banned_impl_trait(&mut self, f: impl FnOnce(&mut Self)) {
@@ -933,23 +942,26 @@ impl<'a> Visitor<'a> for AstValidator<'a> {
}
}
ItemKind::Trait(box Trait { is_auto, generics, bounds, items, .. }) => {
- if *is_auto == IsAuto::Yes {
- // Auto traits cannot have generics, super traits nor contain items.
- self.deny_generic_params(generics, item.ident.span);
- self.deny_super_traits(bounds, item.ident.span);
- self.deny_where_clause(&generics.where_clause, item.ident.span);
- self.deny_items(items, item.ident.span);
- }
+ let is_const_trait = attr::contains_name(&item.attrs, sym::const_trait);
+ self.with_in_trait(is_const_trait, |this| {
+ if *is_auto == IsAuto::Yes {
+ // Auto traits cannot have generics, super traits nor contain items.
+ this.deny_generic_params(generics, item.ident.span);
+ this.deny_super_traits(bounds, item.ident.span);
+ this.deny_where_clause(&generics.where_clause, item.ident.span);
+ this.deny_items(items, item.ident.span);
+ }
- // Equivalent of `visit::walk_item` for `ItemKind::Trait` that inserts a bound
- // context for the supertraits.
- self.visit_vis(&item.vis);
- self.visit_ident(item.ident);
- self.visit_generics(generics);
- self.with_tilde_const_allowed(|this| {
- walk_list!(this, visit_param_bound, bounds, BoundKind::SuperTraits)
+ // Equivalent of `visit::walk_item` for `ItemKind::Trait` that inserts a bound
+ // context for the supertraits.
+ this.visit_vis(&item.vis);
+ this.visit_ident(item.ident);
+ this.visit_generics(generics);
+ this.with_tilde_const_allowed(|this| {
+ walk_list!(this, visit_param_bound, bounds, BoundKind::SuperTraits)
+ });
+ walk_list!(this, visit_assoc_item, items, AssocCtxt::Trait);
});
- walk_list!(self, visit_assoc_item, items, AssocCtxt::Trait);
walk_list!(self, visit_attribute, &item.attrs);
return; // Avoid visiting again
}
@@ -1278,7 +1290,7 @@ impl<'a> Visitor<'a> for AstValidator<'a> {
let tilde_const_allowed =
matches!(fk.header(), Some(FnHeader { constness: ast::Const::Yes(_), .. }))
- || matches!(fk.ctxt(), Some(FnCtxt::Assoc(_)));
+ || matches!(fk.ctxt(), Some(FnCtxt::Assoc(_)) if self.in_const_trait_or_impl);
let disallowed = (!tilde_const_allowed).then(|| DisallowTildeConstContext::Fn(fk));
@@ -1363,7 +1375,7 @@ impl<'a> Visitor<'a> for AstValidator<'a> {
walk_list!(self, visit_ty, ty);
}
AssocItemKind::Fn(box Fn { sig, generics, body, .. })
- if self.in_const_trait_impl
+ if self.in_const_trait_or_impl
|| ctxt == AssocCtxt::Trait
|| matches!(sig.header.constness, Const::Yes(_)) =>
{
@@ -1510,7 +1522,7 @@ pub fn check_crate(
features,
extern_mod: None,
in_trait_impl: false,
- in_const_trait_impl: false,
+ in_const_trait_or_impl: false,
has_proc_macro_decls: false,
outer_impl_trait: None,
disallow_tilde_const: None,
diff --git a/compiler/rustc_ast_passes/src/feature_gate.rs b/compiler/rustc_ast_passes/src/feature_gate.rs
index 62dc7ae58a2f0..5d279943f1e83 100644
--- a/compiler/rustc_ast_passes/src/feature_gate.rs
+++ b/compiler/rustc_ast_passes/src/feature_gate.rs
@@ -603,6 +603,7 @@ pub fn check_crate(krate: &ast::Crate, sess: &Session, features: &Features) {
"exclusive range pattern syntax is experimental"
);
gate_all_legacy_dont_use!(try_blocks, "`try` blocks are unstable");
+ gate_all_legacy_dont_use!(auto_traits, "`auto` traits are unstable");
visit::walk_crate(&mut visitor, krate);
}
diff --git a/compiler/rustc_attr/Cargo.toml b/compiler/rustc_attr/Cargo.toml
index 2c4c3a0c2635b..a14d279681792 100644
--- a/compiler/rustc_attr/Cargo.toml
+++ b/compiler/rustc_attr/Cargo.toml
@@ -6,14 +6,14 @@ edition = "2021"
[lib]
[dependencies]
+rustc_ast = { path = "../rustc_ast" }
rustc_ast_pretty = { path = "../rustc_ast_pretty" }
-rustc_serialize = { path = "../rustc_serialize" }
-rustc_errors = { path = "../rustc_errors" }
-rustc_fluent_macro = { path = "../rustc_fluent_macro" }
-rustc_span = { path = "../rustc_span" }
rustc_data_structures = { path = "../rustc_data_structures" }
+rustc_errors = { path = "../rustc_errors" }
rustc_feature = { path = "../rustc_feature" }
+rustc_fluent_macro = { path = "../rustc_fluent_macro" }
rustc_lexer = { path = "../rustc_lexer" }
rustc_macros = { path = "../rustc_macros" }
+rustc_serialize = { path = "../rustc_serialize" }
rustc_session = { path = "../rustc_session" }
-rustc_ast = { path = "../rustc_ast" }
+rustc_span = { path = "../rustc_span" }
diff --git a/compiler/rustc_attr/src/builtin.rs b/compiler/rustc_attr/src/builtin.rs
index ca4b3662a089f..6f82d6f932330 100644
--- a/compiler/rustc_attr/src/builtin.rs
+++ b/compiler/rustc_attr/src/builtin.rs
@@ -162,7 +162,7 @@ pub enum StabilityLevel {
is_soft: bool,
/// If part of a feature is stabilized and a new feature is added for the remaining parts,
/// then the `implied_by` attribute is used to indicate which now-stable feature previously
- /// contained a item.
+ /// contained an item.
///
/// ```pseudo-Rust
/// #[unstable(feature = "foo", issue = "...")]
@@ -846,7 +846,7 @@ pub fn find_deprecation(
),
);
} else {
- sess.emit_err(session_diagnostics::IncorrectMetaItem2 {
+ sess.emit_err(session_diagnostics::IncorrectMetaItem {
span: meta.span,
});
}
diff --git a/compiler/rustc_attr/src/session_diagnostics.rs b/compiler/rustc_attr/src/session_diagnostics.rs
index ee79545e304ae..86f27254db25c 100644
--- a/compiler/rustc_attr/src/session_diagnostics.rs
+++ b/compiler/rustc_attr/src/session_diagnostics.rs
@@ -165,15 +165,6 @@ pub(crate) struct MissingIssue {
pub span: Span,
}
-// FIXME: This diagnostic is identical to `IncorrectMetaItem`, barring the error code. Consider
-// changing this to `IncorrectMetaItem`. See #51489.
-#[derive(Diagnostic)]
-#[diag(attr_incorrect_meta_item, code = "E0551")]
-pub(crate) struct IncorrectMetaItem2 {
- #[primary_span]
- pub span: Span,
-}
-
// FIXME: Why is this the same error code as `InvalidReprHintNoParen` and `InvalidReprHintNoValue`?
// It is more similar to `IncorrectReprFormatGeneric`.
#[derive(Diagnostic)]
diff --git a/compiler/rustc_borrowck/src/diagnostics/conflict_errors.rs b/compiler/rustc_borrowck/src/diagnostics/conflict_errors.rs
index 48d09f2c2b2a4..4488276e0e7a6 100644
--- a/compiler/rustc_borrowck/src/diagnostics/conflict_errors.rs
+++ b/compiler/rustc_borrowck/src/diagnostics/conflict_errors.rs
@@ -1364,7 +1364,7 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
err.note(format!(
"a for loop advances the iterator for you, the result is stored in `{loop_bind}`."
));
- err.help("if you want to call `next` on a iterator within the loop, consider using `while let`.");
+ err.help("if you want to call `next` on an iterator within the loop, consider using `while let`.");
}
}
@@ -2828,6 +2828,7 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
}
ProjectionElem::ConstantIndex { .. }
| ProjectionElem::Subslice { .. }
+ | ProjectionElem::Subtype(_)
| ProjectionElem::Index(_) => kind,
},
place_ty.projection_ty(tcx, elem),
diff --git a/compiler/rustc_borrowck/src/diagnostics/mod.rs b/compiler/rustc_borrowck/src/diagnostics/mod.rs
index f70263e9dcf01..c3cf7db32b149 100644
--- a/compiler/rustc_borrowck/src/diagnostics/mod.rs
+++ b/compiler/rustc_borrowck/src/diagnostics/mod.rs
@@ -242,6 +242,7 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
ProjectionElem::Downcast(..) if opt.including_downcast => return None,
ProjectionElem::Downcast(..) => (),
ProjectionElem::OpaqueCast(..) => (),
+ ProjectionElem::Subtype(..) => (),
ProjectionElem::Field(field, _ty) => {
// FIXME(project-rfc_2229#36): print capture precisely here.
if let Some(field) = self.is_upvar_field_projection(PlaceRef {
@@ -322,7 +323,9 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
PlaceRef { local, projection: proj_base }.ty(self.body, self.infcx.tcx)
}
ProjectionElem::Downcast(..) => place.ty(self.body, self.infcx.tcx),
- ProjectionElem::OpaqueCast(ty) => PlaceTy::from_ty(*ty),
+ ProjectionElem::Subtype(ty) | ProjectionElem::OpaqueCast(ty) => {
+ PlaceTy::from_ty(*ty)
+ }
ProjectionElem::Field(_, field_type) => PlaceTy::from_ty(*field_type),
},
};
diff --git a/compiler/rustc_borrowck/src/diagnostics/mutability_errors.rs b/compiler/rustc_borrowck/src/diagnostics/mutability_errors.rs
index a0edeec59d058..e6bde6a8c54dc 100644
--- a/compiler/rustc_borrowck/src/diagnostics/mutability_errors.rs
+++ b/compiler/rustc_borrowck/src/diagnostics/mutability_errors.rs
@@ -159,6 +159,7 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
[
..,
ProjectionElem::Index(_)
+ | ProjectionElem::Subtype(_)
| ProjectionElem::ConstantIndex { .. }
| ProjectionElem::OpaqueCast { .. }
| ProjectionElem::Subslice { .. }
@@ -1358,9 +1359,9 @@ fn suggest_ampmut<'tcx>(
None => (false, decl_span),
};
- // if the binding already exists and is a reference with a explicit
+ // if the binding already exists and is a reference with an explicit
// lifetime, then we can suggest adding ` mut`. this is special-cased from
- // the path without a explicit lifetime.
+ // the path without an explicit lifetime.
if let Ok(src) = tcx.sess.source_map().span_to_snippet(span)
&& src.starts_with("&'")
// note that `& 'a T` is invalid so this is correct.
diff --git a/compiler/rustc_borrowck/src/lib.rs b/compiler/rustc_borrowck/src/lib.rs
index 8115c61e89d30..9c77767e7a70e 100644
--- a/compiler/rustc_borrowck/src/lib.rs
+++ b/compiler/rustc_borrowck/src/lib.rs
@@ -1803,6 +1803,7 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
for (place_base, elem) in place.iter_projections().rev() {
match elem {
ProjectionElem::Index(_/*operand*/) |
+ ProjectionElem::Subtype(_) |
ProjectionElem::OpaqueCast(_) |
ProjectionElem::ConstantIndex { .. } |
// assigning to P[i] requires P to be valid.
@@ -1966,7 +1967,7 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
Reservation(WriteKind::MutableBorrow(BorrowKind::Mut { kind: mut_borrow_kind }))
| Write(WriteKind::MutableBorrow(BorrowKind::Mut { kind: mut_borrow_kind })) => {
let is_local_mutation_allowed = match mut_borrow_kind {
- // `ClosureCapture` is used for mutable variable with a immutable binding.
+ // `ClosureCapture` is used for mutable variable with an immutable binding.
// This is only behaviour difference between `ClosureCapture` and mutable borrows.
MutBorrowKind::ClosureCapture => LocalMutationIsAllowed::Yes,
MutBorrowKind::Default | MutBorrowKind::TwoPhaseBorrow => {
@@ -2191,6 +2192,7 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
| ProjectionElem::Index(..)
| ProjectionElem::ConstantIndex { .. }
| ProjectionElem::Subslice { .. }
+ | ProjectionElem::Subtype(..)
| ProjectionElem::OpaqueCast { .. }
| ProjectionElem::Downcast(..) => {
let upvar_field_projection = self.is_upvar_field_projection(place);
diff --git a/compiler/rustc_borrowck/src/places_conflict.rs b/compiler/rustc_borrowck/src/places_conflict.rs
index c02f6f3b68782..539d0837659c8 100644
--- a/compiler/rustc_borrowck/src/places_conflict.rs
+++ b/compiler/rustc_borrowck/src/places_conflict.rs
@@ -249,6 +249,7 @@ fn place_components_conflict<'tcx>(
| (ProjectionElem::ConstantIndex { .. }, _, _)
| (ProjectionElem::Subslice { .. }, _, _)
| (ProjectionElem::OpaqueCast { .. }, _, _)
+ | (ProjectionElem::Subtype(_), _, _)
| (ProjectionElem::Downcast { .. }, _, _) => {
// Recursive case. This can still be disjoint on a
// further iteration if this a shallow access and
@@ -508,6 +509,7 @@ fn place_projection_conflict<'tcx>(
| ProjectionElem::Field(..)
| ProjectionElem::Index(..)
| ProjectionElem::ConstantIndex { .. }
+ | ProjectionElem::Subtype(_)
| ProjectionElem::OpaqueCast { .. }
| ProjectionElem::Subslice { .. }
| ProjectionElem::Downcast(..),
diff --git a/compiler/rustc_borrowck/src/prefixes.rs b/compiler/rustc_borrowck/src/prefixes.rs
index 6f28134986376..e9c9709bd1f1a 100644
--- a/compiler/rustc_borrowck/src/prefixes.rs
+++ b/compiler/rustc_borrowck/src/prefixes.rs
@@ -89,6 +89,9 @@ impl<'cx, 'tcx> Iterator for Prefixes<'cx, 'tcx> {
cursor = cursor_base;
continue 'cursor;
}
+ ProjectionElem::Subtype(..) => {
+ panic!("Subtype projection is not allowed before borrow check")
+ }
ProjectionElem::Deref => {
// (handled below)
}
diff --git a/compiler/rustc_borrowck/src/type_check/mod.rs b/compiler/rustc_borrowck/src/type_check/mod.rs
index 9b952f3fe3625..e7b1a489f5d7c 100644
--- a/compiler/rustc_borrowck/src/type_check/mod.rs
+++ b/compiler/rustc_borrowck/src/type_check/mod.rs
@@ -716,6 +716,9 @@ impl<'a, 'b, 'tcx> TypeVerifier<'a, 'b, 'tcx> {
}
PlaceTy::from_ty(fty)
}
+ ProjectionElem::Subtype(_) => {
+ bug!("ProjectionElem::Subtype shouldn't exist in borrowck")
+ }
ProjectionElem::OpaqueCast(ty) => {
let ty = self.sanitize_type(place, ty);
let ty = self.cx.normalize(ty, location);
@@ -2563,6 +2566,9 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
| ProjectionElem::Subslice { .. } => {
// other field access
}
+ ProjectionElem::Subtype(_) => {
+ bug!("ProjectionElem::Subtype shouldn't exist in borrowck")
+ }
}
}
}
diff --git a/compiler/rustc_borrowck/src/universal_regions.rs b/compiler/rustc_borrowck/src/universal_regions.rs
index af437f36b9f84..c73192f440451 100644
--- a/compiler/rustc_borrowck/src/universal_regions.rs
+++ b/compiler/rustc_borrowck/src/universal_regions.rs
@@ -164,7 +164,7 @@ struct UniversalRegionIndices<'tcx> {
/// be able to map them to our internal `RegionVid`. This is
/// basically equivalent to an `GenericArgs`, except that it also
/// contains an entry for `ReStatic` -- it might be nice to just
- /// use a args, and then handle `ReStatic` another way.
+ /// use an args, and then handle `ReStatic` another way.
indices: FxHashMap, RegionVid>,
/// The vid assigned to `'static`. Used only for diagnostics.
@@ -290,7 +290,7 @@ impl<'tcx> UniversalRegions<'tcx> {
(FIRST_GLOBAL_INDEX..self.num_universals).map(RegionVid::from_usize)
}
- /// Returns `true` if `r` is classified as an local region.
+ /// Returns `true` if `r` is classified as a local region.
pub fn is_local_free_region(&self, r: RegionVid) -> bool {
self.region_classification(r) == Some(RegionClassification::Local)
}
diff --git a/compiler/rustc_builtin_macros/src/test_harness.rs b/compiler/rustc_builtin_macros/src/test_harness.rs
index 53ff089d7b4eb..9c57f68f0b81e 100644
--- a/compiler/rustc_builtin_macros/src/test_harness.rs
+++ b/compiler/rustc_builtin_macros/src/test_harness.rs
@@ -169,29 +169,15 @@ impl<'a> Visitor<'a> for InnerItemLinter<'_> {
}
}
-// Beware, this is duplicated in librustc_passes/entry.rs (with
-// `rustc_hir::Item`), so make sure to keep them in sync.
-fn entry_point_type(item: &ast::Item, depth: usize) -> EntryPointType {
+fn entry_point_type(item: &ast::Item, at_root: bool) -> EntryPointType {
match item.kind {
ast::ItemKind::Fn(..) => {
- if attr::contains_name(&item.attrs, sym::start) {
- EntryPointType::Start
- } else if attr::contains_name(&item.attrs, sym::rustc_main) {
- EntryPointType::RustcMainAttr
- } else if item.ident.name == sym::main {
- if depth == 0 {
- // This is a top-level function so can be 'main'
- EntryPointType::MainNamed
- } else {
- EntryPointType::OtherMain
- }
- } else {
- EntryPointType::None
- }
+ rustc_ast::entry::entry_point_type(&item.attrs, at_root, Some(item.ident.name))
}
_ => EntryPointType::None,
}
}
+
/// A folder used to remove any entry points (like fn main) because the harness
/// generator will provide its own
struct EntryPointCleaner<'a> {
@@ -210,7 +196,7 @@ impl<'a> MutVisitor for EntryPointCleaner<'a> {
// Remove any #[rustc_main] or #[start] from the AST so it doesn't
// clash with the one we're going to add, but mark it as
// #[allow(dead_code)] to avoid printing warnings.
- let item = match entry_point_type(&item, self.depth) {
+ let item = match entry_point_type(&item, self.depth == 0) {
EntryPointType::MainNamed | EntryPointType::RustcMainAttr | EntryPointType::Start => {
item.map(|ast::Item { id, ident, attrs, kind, vis, span, tokens }| {
let allow_dead_code = attr::mk_attr_nested_word(
diff --git a/compiler/rustc_codegen_cranelift/scripts/setup_rust_fork.sh b/compiler/rustc_codegen_cranelift/scripts/setup_rust_fork.sh
index e6bbac647e5a2..f09b9ef12deb8 100644
--- a/compiler/rustc_codegen_cranelift/scripts/setup_rust_fork.sh
+++ b/compiler/rustc_codegen_cranelift/scripts/setup_rust_fork.sh
@@ -31,7 +31,7 @@ index d95b5b7f17f..00b6f0e3635 100644
EOF
cat > config.toml <(
let inputs = fn_abi.args.iter().flat_map(|arg_abi| arg_abi.get_abi_param(tcx).into_iter());
let (return_ptr, returns) = fn_abi.ret.get_abi_return(tcx);
- // Sometimes the first param is an pointer to the place where the return value needs to be stored.
+ // Sometimes the first param is a pointer to the place where the return value needs to be stored.
let params: Vec<_> = return_ptr.into_iter().chain(inputs).collect();
Signature { params, returns, call_conv }
diff --git a/compiler/rustc_codegen_cranelift/src/base.rs b/compiler/rustc_codegen_cranelift/src/base.rs
index 6d55fdc307401..0a451dad9d232 100644
--- a/compiler/rustc_codegen_cranelift/src/base.rs
+++ b/compiler/rustc_codegen_cranelift/src/base.rs
@@ -876,6 +876,7 @@ pub(crate) fn codegen_place<'tcx>(
cplace = cplace.place_deref(fx);
}
PlaceElem::OpaqueCast(ty) => bug!("encountered OpaqueCast({ty}) in codegen"),
+ PlaceElem::Subtype(ty) => cplace = cplace.place_transmute_type(fx, fx.monomorphize(ty)),
PlaceElem::Field(field, _ty) => {
cplace = cplace.place_field(fx, field);
}
diff --git a/compiler/rustc_codegen_cranelift/src/value_and_place.rs b/compiler/rustc_codegen_cranelift/src/value_and_place.rs
index d4273c0b593ac..45893a4f3ac43 100644
--- a/compiler/rustc_codegen_cranelift/src/value_and_place.rs
+++ b/compiler/rustc_codegen_cranelift/src/value_and_place.rs
@@ -674,6 +674,16 @@ impl<'tcx> CPlace<'tcx> {
}
}
+ /// Used for `ProjectionElem::Subtype`, `ty` has to be monomorphized before
+ /// passed on.
+ pub(crate) fn place_transmute_type(
+ self,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ ty: Ty<'tcx>,
+ ) -> CPlace<'tcx> {
+ CPlace { inner: self.inner, layout: fx.layout_of(ty) }
+ }
+
pub(crate) fn place_field(
self,
fx: &mut FunctionCx<'_, '_, 'tcx>,
diff --git a/compiler/rustc_codegen_gcc/src/builder.rs b/compiler/rustc_codegen_gcc/src/builder.rs
index 308cb04cac3d5..ecc293aee237b 100644
--- a/compiler/rustc_codegen_gcc/src/builder.rs
+++ b/compiler/rustc_codegen_gcc/src/builder.rs
@@ -1420,7 +1420,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
self.cx
}
- fn do_not_inline(&mut self, _llret: RValue<'gcc>) {
+ fn apply_attrs_to_cleanup_callsite(&mut self, _llret: RValue<'gcc>) {
// FIXME(bjorn3): implement
}
diff --git a/compiler/rustc_codegen_gcc/test.sh b/compiler/rustc_codegen_gcc/test.sh
index 592997b8ab9da..b462e5d156b23 100755
--- a/compiler/rustc_codegen_gcc/test.sh
+++ b/compiler/rustc_codegen_gcc/test.sh
@@ -214,7 +214,7 @@ function setup_rustc() {
rm config.toml || true
cat > config.toml < BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
unsafe { llvm::LLVMBuildZExt(self.llbuilder, val, dest_ty, UNNAMED) }
}
- fn do_not_inline(&mut self, llret: &'ll Value) {
- let noinline = llvm::AttributeKind::NoInline.create_attr(self.llcx);
- attributes::apply_to_callsite(llret, llvm::AttributePlace::Function, &[noinline]);
+ fn apply_attrs_to_cleanup_callsite(&mut self, llret: &'ll Value) {
+ if llvm_util::get_version() < (17, 0, 2) {
+ // Work around https://github.com/llvm/llvm-project/issues/66984.
+ let noinline = llvm::AttributeKind::NoInline.create_attr(self.llcx);
+ attributes::apply_to_callsite(llret, llvm::AttributePlace::Function, &[noinline]);
+ } else {
+ // Cleanup is always the cold path.
+ let cold_inline = llvm::AttributeKind::Cold.create_attr(self.llcx);
+ attributes::apply_to_callsite(llret, llvm::AttributePlace::Function, &[cold_inline]);
+ }
}
}
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs
index e83110dcad460..55f43aa534173 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs
@@ -11,7 +11,7 @@ pub struct Expression {
lhs: Operand,
op: Op,
rhs: Operand,
- region: Option,
+ code_regions: Vec,
}
/// Collects all of the coverage regions associated with (a) injected counters, (b) counter
@@ -30,7 +30,7 @@ pub struct FunctionCoverage<'tcx> {
instance: Instance<'tcx>,
source_hash: u64,
is_used: bool,
- counters: IndexVec>,
+ counters: IndexVec>>,
expressions: IndexVec>,
unreachable_regions: Vec,
}
@@ -77,28 +77,40 @@ impl<'tcx> FunctionCoverage<'tcx> {
}
}
- /// Adds a code region to be counted by an injected counter intrinsic.
- pub fn add_counter(&mut self, id: CounterId, region: CodeRegion) {
- if let Some(previous_region) = self.counters[id].replace(region.clone()) {
- assert_eq!(previous_region, region, "add_counter: code region for id changed");
+ /// Adds code regions to be counted by an injected counter intrinsic.
+ #[instrument(level = "debug", skip(self))]
+ pub(crate) fn add_counter(&mut self, id: CounterId, code_regions: &[CodeRegion]) {
+ if code_regions.is_empty() {
+ return;
+ }
+
+ let slot = &mut self.counters[id];
+ match slot {
+ None => *slot = Some(code_regions.to_owned()),
+ // If this counter ID slot has already been filled, it should
+ // contain identical information.
+ Some(ref previous_regions) => assert_eq!(
+ previous_regions, code_regions,
+ "add_counter: code regions for id changed"
+ ),
}
}
+ /// Adds information about a coverage expression, along with zero or more
+ /// code regions mapped to that expression.
+ ///
/// Both counters and "counter expressions" (or simply, "expressions") can be operands in other
/// expressions. These are tracked as separate variants of `Operand`, so there is no ambiguity
/// between operands that are counter IDs and operands that are expression IDs.
- pub fn add_counter_expression(
+ #[instrument(level = "debug", skip(self))]
+ pub(crate) fn add_counter_expression(
&mut self,
expression_id: ExpressionId,
lhs: Operand,
op: Op,
rhs: Operand,
- region: Option,
+ code_regions: &[CodeRegion],
) {
- debug!(
- "add_counter_expression({:?}, lhs={:?}, op={:?}, rhs={:?} at {:?}",
- expression_id, lhs, op, rhs, region
- );
debug_assert!(
expression_id.as_usize() < self.expressions.len(),
"expression_id {} is out of range for expressions.len() = {}
@@ -107,23 +119,25 @@ impl<'tcx> FunctionCoverage<'tcx> {
self.expressions.len(),
self,
);
- if let Some(previous_expression) = self.expressions[expression_id].replace(Expression {
- lhs,
- op,
- rhs,
- region: region.clone(),
- }) {
- assert_eq!(
- previous_expression,
- Expression { lhs, op, rhs, region },
+
+ let expression = Expression { lhs, op, rhs, code_regions: code_regions.to_owned() };
+ let slot = &mut self.expressions[expression_id];
+ match slot {
+ None => *slot = Some(expression),
+ // If this expression ID slot has already been filled, it should
+ // contain identical information.
+ Some(ref previous_expression) => assert_eq!(
+ previous_expression, &expression,
"add_counter_expression: expression for id changed"
- );
+ ),
}
}
- /// Add a region that will be marked as "unreachable", with a constant "zero counter".
- pub fn add_unreachable_region(&mut self, region: CodeRegion) {
- self.unreachable_regions.push(region)
+ /// Adds regions that will be marked as "unreachable", with a constant "zero counter".
+ #[instrument(level = "debug", skip(self))]
+ pub(crate) fn add_unreachable_regions(&mut self, code_regions: &[CodeRegion]) {
+ assert!(!code_regions.is_empty(), "unreachable regions always have code regions");
+ self.unreachable_regions.extend_from_slice(code_regions);
}
/// Perform some simplifications to make the final coverage mappings
@@ -212,11 +226,16 @@ impl<'tcx> FunctionCoverage<'tcx> {
}
fn counter_regions(&self) -> impl Iterator
- {
- self.counters.iter_enumerated().filter_map(|(index, entry)| {
- // Option::map() will return None to filter out missing counters. This may happen
- // if, for example, a MIR-instrumented counter is removed during an optimization.
- entry.as_ref().map(|region| (Counter::counter_value_reference(index), region))
- })
+ self.counters
+ .iter_enumerated()
+ // Filter out counter IDs that we never saw during MIR traversal.
+ // This can happen if a counter was optimized out by MIR transforms
+ // (and replaced with `CoverageKind::Unreachable` instead).
+ .filter_map(|(id, maybe_code_regions)| Some((id, maybe_code_regions.as_ref()?)))
+ .flat_map(|(id, code_regions)| {
+ let counter = Counter::counter_value_reference(id);
+ code_regions.iter().map(move |region| (counter, region))
+ })
}
/// Convert this function's coverage expression data into a form that can be
@@ -254,13 +273,17 @@ impl<'tcx> FunctionCoverage<'tcx> {
fn expression_regions(&self) -> Vec<(Counter, &CodeRegion)> {
// Find all of the expression IDs that weren't optimized out AND have
- // an attached code region, and return the corresponding mapping as a
- // counter/region pair.
+ // one or more attached code regions, and return the corresponding
+ // mappings as counter/region pairs.
self.expressions
.iter_enumerated()
- .filter_map(|(id, expression)| {
- let code_region = expression.as_ref()?.region.as_ref()?;
- Some((Counter::expression(id), code_region))
+ .filter_map(|(id, maybe_expression)| {
+ let code_regions = &maybe_expression.as_ref()?.code_regions;
+ Some((id, code_regions))
+ })
+ .flat_map(|(id, code_regions)| {
+ let counter = Counter::expression(id);
+ code_regions.iter().map(move |code_region| (counter, code_region))
})
.collect::>()
}
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
index c70cb670e96fb..dd2ce9b525b77 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
@@ -89,9 +89,7 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
/// `function_coverage_map` (keyed by function `Instance`) during codegen.
/// But in this case, since the unused function was _not_ previously
/// codegenned, collect the coverage `CodeRegion`s from the MIR and add
- /// them. The first `CodeRegion` is used to add a single counter, with the
- /// same counter ID used in the injected `instrprof.increment` intrinsic
- /// call. Since the function is never called, all other `CodeRegion`s can be
+ /// them. Since the function is never called, all of its `CodeRegion`s can be
/// added as `unreachable_region`s.
fn define_unused_fn(&self, def_id: DefId) {
let instance = declare_unused_fn(self, def_id);
@@ -110,25 +108,15 @@ impl<'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> {
.entry(instance)
.or_insert_with(|| FunctionCoverage::new(bx.tcx(), instance));
- let Coverage { kind, code_region } = coverage.clone();
- match kind {
+ let Coverage { kind, code_regions } = coverage;
+ match *kind {
CoverageKind::Counter { function_source_hash, id } => {
debug!(
"ensuring function source hash is set for instance={:?}; function_source_hash={}",
instance, function_source_hash,
);
func_coverage.set_function_source_hash(function_source_hash);
-
- if let Some(code_region) = code_region {
- // Note: Some counters do not have code regions, but may still be referenced
- // from expressions. In that case, don't add the counter to the coverage map,
- // but do inject the counter intrinsic.
- debug!(
- "adding counter to coverage_map: instance={:?}, id={:?}, region={:?}",
- instance, id, code_region,
- );
- func_coverage.add_counter(id, code_region);
- }
+ func_coverage.add_counter(id, code_regions);
// We need to explicitly drop the `RefMut` before calling into `instrprof_increment`,
// as that needs an exclusive borrow.
drop(coverage_map);
@@ -146,20 +134,10 @@ impl<'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> {
bx.instrprof_increment(fn_name, hash, num_counters, index);
}
CoverageKind::Expression { id, lhs, op, rhs } => {
- debug!(
- "adding counter expression to coverage_map: instance={:?}, id={:?}, {:?} {:?} {:?}; region: {:?}",
- instance, id, lhs, op, rhs, code_region,
- );
- func_coverage.add_counter_expression(id, lhs, op, rhs, code_region);
+ func_coverage.add_counter_expression(id, lhs, op, rhs, code_regions);
}
CoverageKind::Unreachable => {
- let code_region =
- code_region.expect("unreachable regions always have code regions");
- debug!(
- "adding unreachable code to coverage_map: instance={:?}, at {:?}",
- instance, code_region,
- );
- func_coverage.add_unreachable_region(code_region);
+ func_coverage.add_unreachable_regions(code_regions);
}
}
}
@@ -227,14 +205,9 @@ fn add_unused_function_coverage<'tcx>(
let tcx = cx.tcx;
let mut function_coverage = FunctionCoverage::unused(tcx, instance);
- for (index, &code_region) in tcx.covered_code_regions(def_id).iter().enumerate() {
- if index == 0 {
- // Insert at least one real counter so the LLVM CoverageMappingReader will find expected
- // definitions.
- function_coverage.add_counter(UNUSED_FUNCTION_COUNTER_ID, code_region.clone());
- } else {
- function_coverage.add_unreachable_region(code_region.clone());
- }
+ for &code_region in tcx.covered_code_regions(def_id) {
+ let code_region = std::slice::from_ref(code_region);
+ function_coverage.add_unreachable_regions(code_region);
}
if let Some(coverage_context) = cx.coverage_context() {
diff --git a/compiler/rustc_codegen_llvm/src/type_.rs b/compiler/rustc_codegen_llvm/src/type_.rs
index 8db6195d931f0..06b7703672fe8 100644
--- a/compiler/rustc_codegen_llvm/src/type_.rs
+++ b/compiler/rustc_codegen_llvm/src/type_.rs
@@ -112,7 +112,7 @@ impl<'ll> CodegenCx<'ll, '_> {
}
}
- /// Return a LLVM type that has at most the required alignment,
+ /// Return an LLVM type that has at most the required alignment,
/// and exactly the required size, as a best-effort padding array.
pub(crate) fn type_padding_filler(&self, size: Size, align: Align) -> &'ll Type {
let unit = Integer::approximate_align(self, align);
diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs
index bd0707edfd99d..a0cb97d51a01f 100644
--- a/compiler/rustc_codegen_ssa/src/mir/block.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/block.rs
@@ -213,7 +213,7 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
self.funclet(fx),
);
if fx.mir[self.bb].is_cleanup {
- bx.do_not_inline(invokeret);
+ bx.apply_attrs_to_cleanup_callsite(invokeret);
}
if let Some((ret_dest, target)) = destination {
@@ -228,11 +228,7 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
} else {
let llret = bx.call(fn_ty, fn_attrs, Some(&fn_abi), fn_ptr, &llargs, self.funclet(fx));
if fx.mir[self.bb].is_cleanup {
- // Cleanup is always the cold path. Don't inline
- // drop glue. Also, when there is a deeply-nested
- // struct, there are "symmetry" issues that cause
- // exponential inlining - see issue #41696.
- bx.do_not_inline(llret);
+ bx.apply_attrs_to_cleanup_callsite(llret);
}
if let Some((ret_dest, target)) = destination {
@@ -1627,7 +1623,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let fn_ty = bx.fn_decl_backend_type(&fn_abi);
let llret = bx.call(fn_ty, None, Some(&fn_abi), fn_ptr, &[], funclet.as_ref());
- bx.do_not_inline(llret);
+ bx.apply_attrs_to_cleanup_callsite(llret);
bx.unreachable();
diff --git a/compiler/rustc_codegen_ssa/src/mir/place.rs b/compiler/rustc_codegen_ssa/src/mir/place.rs
index f775711f87020..eb590a45a63f2 100644
--- a/compiler/rustc_codegen_ssa/src/mir/place.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/place.rs
@@ -466,6 +466,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
mir::ProjectionElem::OpaqueCast(ty) => {
bug!("encountered OpaqueCast({ty}) in codegen")
}
+ mir::ProjectionElem::Subtype(ty) => cg_base.project_type(bx, self.monomorphize(ty)),
mir::ProjectionElem::Index(index) => {
let index = &mir::Operand::Copy(mir::Place::from(index));
let index = self.codegen_operand(bx, index);
diff --git a/compiler/rustc_codegen_ssa/src/traits/builder.rs b/compiler/rustc_codegen_ssa/src/traits/builder.rs
index 853c6934c2c24..aa411f002a0c6 100644
--- a/compiler/rustc_codegen_ssa/src/traits/builder.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/builder.rs
@@ -332,5 +332,5 @@ pub trait BuilderMethods<'a, 'tcx>:
) -> Self::Value;
fn zext(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
- fn do_not_inline(&mut self, llret: Self::Value);
+ fn apply_attrs_to_cleanup_callsite(&mut self, llret: Self::Value);
}
diff --git a/compiler/rustc_codegen_ssa/src/traits/type_.rs b/compiler/rustc_codegen_ssa/src/traits/type_.rs
index dc3dbd9d81949..b1fde8e4d8638 100644
--- a/compiler/rustc_codegen_ssa/src/traits/type_.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/type_.rs
@@ -30,7 +30,7 @@ pub trait BaseTypeMethods<'tcx>: Backend<'tcx> {
fn type_ptr_ext(&self, address_space: AddressSpace) -> Self::Type;
fn element_type(&self, ty: Self::Type) -> Self::Type;
- /// Returns the number of elements in `self` if it is a LLVM vector type.
+ /// Returns the number of elements in `self` if it is an LLVM vector type.
fn vector_length(&self, ty: Self::Type) -> usize;
fn float_width(&self, ty: Self::Type) -> usize;
diff --git a/compiler/rustc_const_eval/src/const_eval/machine.rs b/compiler/rustc_const_eval/src/const_eval/machine.rs
index 14b9894aad5c3..865e01d0aee0f 100644
--- a/compiler/rustc_const_eval/src/const_eval/machine.rs
+++ b/compiler/rustc_const_eval/src/const_eval/machine.rs
@@ -207,7 +207,7 @@ impl<'mir, 'tcx: 'mir> CompileTimeEvalContext<'mir, 'tcx> {
) -> InterpResult<'tcx, Option>> {
let def_id = instance.def_id();
- if Some(def_id) == self.tcx.lang_items().panic_display()
+ if self.tcx.has_attr(def_id, sym::rustc_const_panic_str)
|| Some(def_id) == self.tcx.lang_items().begin_panic_fn()
{
let args = self.copy_fn_args(args)?;
diff --git a/compiler/rustc_const_eval/src/interpret/eval_context.rs b/compiler/rustc_const_eval/src/interpret/eval_context.rs
index 94a5cc67d31c8..af7dfbef2ff81 100644
--- a/compiler/rustc_const_eval/src/interpret/eval_context.rs
+++ b/compiler/rustc_const_eval/src/interpret/eval_context.rs
@@ -13,7 +13,7 @@ use rustc_middle::ty::layout::{
self, FnAbiError, FnAbiOfHelpers, FnAbiRequest, LayoutError, LayoutOf, LayoutOfHelpers,
TyAndLayout,
};
-use rustc_middle::ty::{self, GenericArgsRef, ParamEnv, Ty, TyCtxt, TypeFoldable};
+use rustc_middle::ty::{self, GenericArgsRef, ParamEnv, Ty, TyCtxt, TypeFoldable, Variance};
use rustc_mir_dataflow::storage::always_storage_live_locals;
use rustc_session::Limit;
use rustc_span::Span;
@@ -384,7 +384,7 @@ pub(super) fn mir_assign_valid_types<'tcx>(
// all normal lifetimes are erased, higher-ranked types with their
// late-bound lifetimes are still around and can lead to type
// differences.
- if util::is_subtype(tcx, param_env, src.ty, dest.ty) {
+ if util::relate_types(tcx, param_env, Variance::Covariant, src.ty, dest.ty) {
// Make sure the layout is equal, too -- just to be safe. Miri really
// needs layout equality. For performance reason we skip this check when
// the types are equal. Equal types *can* have different layouts when
diff --git a/compiler/rustc_const_eval/src/interpret/operand.rs b/compiler/rustc_const_eval/src/interpret/operand.rs
index a32ea204f9847..99dba977a4395 100644
--- a/compiler/rustc_const_eval/src/interpret/operand.rs
+++ b/compiler/rustc_const_eval/src/interpret/operand.rs
@@ -670,19 +670,24 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
trace!("eval_place_to_op: got {:?}", op);
// Sanity-check the type we ended up with.
- debug_assert!(
- mir_assign_valid_types(
+ if cfg!(debug_assertions) {
+ let normalized_place_ty = self.subst_from_current_frame_and_normalize_erasing_regions(
+ mir_place.ty(&self.frame().body.local_decls, *self.tcx).ty,
+ )?;
+ if !mir_assign_valid_types(
*self.tcx,
self.param_env,
- self.layout_of(self.subst_from_current_frame_and_normalize_erasing_regions(
- mir_place.ty(&self.frame().body.local_decls, *self.tcx).ty
- )?)?,
+ self.layout_of(normalized_place_ty)?,
op.layout,
- ),
- "eval_place of a MIR place with type {:?} produced an interpreter operand with type {}",
- mir_place.ty(&self.frame().body.local_decls, *self.tcx).ty,
- op.layout.ty,
- );
+ ) {
+ span_bug!(
+ self.cur_span(),
+ "eval_place of a MIR place with type {} produced an interpreter operand with type {}",
+ normalized_place_ty,
+ op.layout.ty,
+ )
+ }
+ }
Ok(op)
}
diff --git a/compiler/rustc_const_eval/src/interpret/place.rs b/compiler/rustc_const_eval/src/interpret/place.rs
index 503004cbbe1e3..79448f07cae2e 100644
--- a/compiler/rustc_const_eval/src/interpret/place.rs
+++ b/compiler/rustc_const_eval/src/interpret/place.rs
@@ -573,19 +573,24 @@ where
trace!("{:?}", self.dump_place(&place));
// Sanity-check the type we ended up with.
- debug_assert!(
- mir_assign_valid_types(
+ if cfg!(debug_assertions) {
+ let normalized_place_ty = self.subst_from_current_frame_and_normalize_erasing_regions(
+ mir_place.ty(&self.frame().body.local_decls, *self.tcx).ty,
+ )?;
+ if !mir_assign_valid_types(
*self.tcx,
self.param_env,
- self.layout_of(self.subst_from_current_frame_and_normalize_erasing_regions(
- mir_place.ty(&self.frame().body.local_decls, *self.tcx).ty
- )?)?,
+ self.layout_of(normalized_place_ty)?,
place.layout,
- ),
- "eval_place of a MIR place with type {:?} produced an interpreter place with type {}",
- mir_place.ty(&self.frame().body.local_decls, *self.tcx).ty,
- place.layout.ty,
- );
+ ) {
+ span_bug!(
+ self.cur_span(),
+ "eval_place of a MIR place with type {} produced an interpreter place with type {}",
+ normalized_place_ty,
+ place.layout.ty,
+ )
+ }
+ }
Ok(place)
}
diff --git a/compiler/rustc_const_eval/src/interpret/projection.rs b/compiler/rustc_const_eval/src/interpret/projection.rs
index f462c13816ee8..70df3d8fd782c 100644
--- a/compiler/rustc_const_eval/src/interpret/projection.rs
+++ b/compiler/rustc_const_eval/src/interpret/projection.rs
@@ -319,6 +319,8 @@ where
OpaqueCast(ty) => {
span_bug!(self.cur_span(), "OpaqueCast({ty}) encountered after borrowck")
}
+ // We don't want anything happening here, this is here as a dummy.
+ Subtype(_) => base.transmute(base.layout(), self)?,
Field(field, _) => self.project_field(base, field.index())?,
Downcast(_, variant) => self.project_downcast(base, variant)?,
Deref => self.deref_pointer(&base.to_op(self)?)?.into(),
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/check.rs b/compiler/rustc_const_eval/src/transform/check_consts/check.rs
index 129e74425b6d7..c4d806c51619d 100644
--- a/compiler/rustc_const_eval/src/transform/check_consts/check.rs
+++ b/compiler/rustc_const_eval/src/transform/check_consts/check.rs
@@ -237,7 +237,7 @@ impl<'mir, 'tcx> Checker<'mir, 'tcx> {
if self.const_kind() == hir::ConstContext::ConstFn {
for (idx, local) in body.local_decls.iter_enumerated() {
// Handle the return place below.
- if idx == RETURN_PLACE || local.internal {
+ if idx == RETURN_PLACE {
continue;
}
@@ -664,6 +664,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
| ProjectionElem::Downcast(..)
| ProjectionElem::OpaqueCast(..)
| ProjectionElem::Subslice { .. }
+ | ProjectionElem::Subtype(..)
| ProjectionElem::Field(..)
| ProjectionElem::Index(_) => {}
}
@@ -886,7 +887,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
// At this point, we are calling a function, `callee`, whose `DefId` is known...
- // `begin_panic` and `panic_display` are generic functions that accept
+ // `begin_panic` and `#[rustc_const_panic_str]` functions accept generic
// types other than str. Check to enforce that only str can be used in
// const-eval.
@@ -898,8 +899,8 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
}
}
- // const-eval of the `panic_display` fn assumes the argument is `&&str`
- if Some(callee) == tcx.lang_items().panic_display() {
+ // const-eval of `#[rustc_const_panic_str]` functions assumes the argument is `&&str`
+ if tcx.has_attr(callee, sym::rustc_const_panic_str) {
match args[0].ty(&self.ccx.body.local_decls, tcx).kind() {
ty::Ref(_, ty, _) if matches!(ty.kind(), ty::Ref(_, ty, _) if ty.is_str()) =>
{
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs b/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs
index 34e9b76c4844e..de3186a53c165 100644
--- a/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs
+++ b/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs
@@ -306,6 +306,7 @@ where
ProjectionElem::Index(index) if in_local(index) => return true,
ProjectionElem::Deref
+ | ProjectionElem::Subtype(_)
| ProjectionElem::Field(_, _)
| ProjectionElem::OpaqueCast(_)
| ProjectionElem::ConstantIndex { .. }
diff --git a/compiler/rustc_const_eval/src/transform/promote_consts.rs b/compiler/rustc_const_eval/src/transform/promote_consts.rs
index 4a9977add78c0..8ede3bdd2b60a 100644
--- a/compiler/rustc_const_eval/src/transform/promote_consts.rs
+++ b/compiler/rustc_const_eval/src/transform/promote_consts.rs
@@ -357,7 +357,9 @@ impl<'tcx> Validator<'_, 'tcx> {
return Err(Unpromotable);
}
- ProjectionElem::ConstantIndex { .. } | ProjectionElem::Subslice { .. } => {}
+ ProjectionElem::ConstantIndex { .. }
+ | ProjectionElem::Subtype(_)
+ | ProjectionElem::Subslice { .. } => {}
ProjectionElem::Index(local) => {
let mut promotable = false;
diff --git a/compiler/rustc_const_eval/src/transform/validate.rs b/compiler/rustc_const_eval/src/transform/validate.rs
index 18b22882e7d08..4711f7b47cccc 100644
--- a/compiler/rustc_const_eval/src/transform/validate.rs
+++ b/compiler/rustc_const_eval/src/transform/validate.rs
@@ -7,7 +7,7 @@ use rustc_infer::traits::Reveal;
use rustc_middle::mir::interpret::Scalar;
use rustc_middle::mir::visit::{NonUseContext, PlaceContext, Visitor};
use rustc_middle::mir::*;
-use rustc_middle::ty::{self, InstanceDef, ParamEnv, Ty, TyCtxt, TypeVisitableExt};
+use rustc_middle::ty::{self, InstanceDef, ParamEnv, Ty, TyCtxt, TypeVisitableExt, Variance};
use rustc_mir_dataflow::impls::MaybeStorageLive;
use rustc_mir_dataflow::storage::always_storage_live_locals;
use rustc_mir_dataflow::{Analysis, ResultsCursor};
@@ -16,6 +16,8 @@ use rustc_target::spec::abi::Abi;
use crate::util::is_within_packed;
+use crate::util::relate_types;
+
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
enum EdgeKind {
Unwind,
@@ -602,7 +604,15 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
return true;
}
- crate::util::is_subtype(self.tcx, self.param_env, src, dest)
+ // After borrowck subtyping should be fully explicit via
+ // `Subtype` projections.
+ let variance = if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
+ Variance::Invariant
+ } else {
+ Variance::Covariant
+ };
+
+ crate::util::relate_types(self.tcx, self.param_env, variance, src, dest)
}
}
@@ -753,6 +763,23 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
}
}
}
+ ProjectionElem::Subtype(ty) => {
+ if !relate_types(
+ self.tcx,
+ self.param_env,
+ Variance::Covariant,
+ ty,
+ place_ref.ty(&self.body.local_decls, self.tcx).ty,
+ ) {
+ self.fail(
+ location,
+ format!(
+ "Failed subtyping {ty:#?} and {:#?}",
+ place_ref.ty(&self.body.local_decls, self.tcx).ty
+ ),
+ )
+ }
+ }
_ => {}
}
self.super_projection_elem(place_ref, elem, context, location);
@@ -1088,6 +1115,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
// LHS and RHS of the assignment must have the same type.
let left_ty = dest.ty(&self.body.local_decls, self.tcx).ty;
let right_ty = rvalue.ty(&self.body.local_decls, self.tcx);
+
if !self.mir_assign_valid_types(right_ty, left_ty) {
self.fail(
location,
diff --git a/compiler/rustc_const_eval/src/util/compare_types.rs b/compiler/rustc_const_eval/src/util/compare_types.rs
index 83376c8e99289..265ca0c7884ce 100644
--- a/compiler/rustc_const_eval/src/util/compare_types.rs
+++ b/compiler/rustc_const_eval/src/util/compare_types.rs
@@ -5,7 +5,7 @@
use rustc_infer::infer::TyCtxtInferExt;
use rustc_middle::traits::{DefiningAnchor, ObligationCause};
-use rustc_middle::ty::{ParamEnv, Ty, TyCtxt};
+use rustc_middle::ty::{ParamEnv, Ty, TyCtxt, Variance};
use rustc_trait_selection::traits::ObligationCtxt;
/// Returns whether the two types are equal up to subtyping.
@@ -24,16 +24,22 @@ pub fn is_equal_up_to_subtyping<'tcx>(
}
// Check for subtyping in either direction.
- is_subtype(tcx, param_env, src, dest) || is_subtype(tcx, param_env, dest, src)
+ relate_types(tcx, param_env, Variance::Covariant, src, dest)
+ || relate_types(tcx, param_env, Variance::Covariant, dest, src)
}
/// Returns whether `src` is a subtype of `dest`, i.e. `src <: dest`.
///
+/// When validating assignments, the variance should be `Covariant`. When checking
+/// during `MirPhase` >= `MirPhase::Runtime(RuntimePhase::Initial)` variance should be `Invariant`
+/// because we want to check for type equality.
+///
/// This mostly ignores opaque types as it can be used in constraining contexts
/// while still computing the final underlying type.
-pub fn is_subtype<'tcx>(
+pub fn relate_types<'tcx>(
tcx: TyCtxt<'tcx>,
param_env: ParamEnv<'tcx>,
+ variance: Variance,
src: Ty<'tcx>,
dest: Ty<'tcx>,
) -> bool {
@@ -48,7 +54,7 @@ pub fn is_subtype<'tcx>(
let cause = ObligationCause::dummy();
let src = ocx.normalize(&cause, param_env, src);
let dest = ocx.normalize(&cause, param_env, dest);
- match ocx.sub(&cause, param_env, src, dest) {
+ match ocx.relate(&cause, param_env, variance, src, dest) {
Ok(()) => {}
Err(_) => return false,
};
diff --git a/compiler/rustc_const_eval/src/util/mod.rs b/compiler/rustc_const_eval/src/util/mod.rs
index 0aef7fa469e4b..040b3071e6f87 100644
--- a/compiler/rustc_const_eval/src/util/mod.rs
+++ b/compiler/rustc_const_eval/src/util/mod.rs
@@ -7,7 +7,7 @@ mod type_name;
pub use self::alignment::{is_disaligned, is_within_packed};
pub use self::check_validity_requirement::check_validity_requirement;
-pub use self::compare_types::{is_equal_up_to_subtyping, is_subtype};
+pub use self::compare_types::{is_equal_up_to_subtyping, relate_types};
pub use self::type_name::type_name;
/// Classify whether an operator is "left-homogeneous", i.e., the LHS has the
diff --git a/compiler/rustc_error_codes/src/error_codes/E0551.md b/compiler/rustc_error_codes/src/error_codes/E0551.md
index 53db559a4fcbf..0e078fe71bfd1 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0551.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0551.md
@@ -1,8 +1,10 @@
+#### Note: this error code is no longer emitted by the compiler
+
An invalid meta-item was used inside an attribute.
Erroneous code example:
-```compile_fail,E0551
+```compile_fail,E0539
#[deprecated(note)] // error!
fn i_am_deprecated() {}
```
diff --git a/compiler/rustc_errors/src/lib.rs b/compiler/rustc_errors/src/lib.rs
index b747a62b864d8..665b5d6adec37 100644
--- a/compiler/rustc_errors/src/lib.rs
+++ b/compiler/rustc_errors/src/lib.rs
@@ -1376,16 +1376,16 @@ impl HandlerInner {
self.emitted_diagnostic_codes.insert(code.clone());
}
- let already_emitted = |this: &mut Self| {
+ let already_emitted = {
let mut hasher = StableHasher::new();
diagnostic.hash(&mut hasher);
let diagnostic_hash = hasher.finish();
- !this.emitted_diagnostics.insert(diagnostic_hash)
+ !self.emitted_diagnostics.insert(diagnostic_hash)
};
// Only emit the diagnostic if we've been asked to deduplicate or
// haven't already emitted an equivalent diagnostic.
- if !(self.flags.deduplicate_diagnostics && already_emitted(self)) {
+ if !(self.flags.deduplicate_diagnostics && already_emitted) {
debug!(?diagnostic);
debug!(?self.emitted_diagnostics);
let already_emitted_sub = |sub: &mut SubDiagnostic| {
@@ -1401,6 +1401,11 @@ impl HandlerInner {
};
diagnostic.children.extract_if(already_emitted_sub).for_each(|_| {});
+ if already_emitted {
+ diagnostic.note(
+ "duplicate diagnostic emitted due to `-Z deduplicate-diagnostics=no`",
+ );
+ }
self.emitter.emit_diagnostic(diagnostic);
if diagnostic.is_error() {
diff --git a/compiler/rustc_feature/src/builtin_attrs.rs b/compiler/rustc_feature/src/builtin_attrs.rs
index 18397af565fa0..65e697c8f3b80 100644
--- a/compiler/rustc_feature/src/builtin_attrs.rs
+++ b/compiler/rustc_feature/src/builtin_attrs.rs
@@ -666,6 +666,10 @@ pub const BUILTIN_ATTRIBUTES: &[BuiltinAttribute] = &[
rustc_attr!(
rustc_do_not_const_check, Normal, template!(Word), WarnFollowing, INTERNAL_UNSTABLE
),
+ // Ensure the argument to this function is &&str during const-check.
+ rustc_attr!(
+ rustc_const_panic_str, Normal, template!(Word), WarnFollowing, INTERNAL_UNSTABLE
+ ),
// ==========================================================================
// Internal attributes, Layout related:
diff --git a/compiler/rustc_fluent_macro/src/fluent.rs b/compiler/rustc_fluent_macro/src/fluent.rs
index 56e23ac277520..7479e4ef2b317 100644
--- a/compiler/rustc_fluent_macro/src/fluent.rs
+++ b/compiler/rustc_fluent_macro/src/fluent.rs
@@ -40,26 +40,35 @@ fn invocation_relative_path_to_absolute(span: Span, path: &str) -> PathBuf {
}
}
-/// Tokens to be returned when the macro cannot proceed.
-fn failed(crate_name: &Ident) -> proc_macro::TokenStream {
+/// Final tokens.
+fn finish(body: TokenStream, resource: TokenStream) -> proc_macro::TokenStream {
quote! {
- pub static DEFAULT_LOCALE_RESOURCE: &'static str = "";
+ /// Raw content of Fluent resource for this crate, generated by `fluent_messages` macro,
+ /// imported by `rustc_driver` to include all crates' resources in one bundle.
+ pub static DEFAULT_LOCALE_RESOURCE: &'static str = #resource;
#[allow(non_upper_case_globals)]
#[doc(hidden)]
+ /// Auto-generated constants for type-checked references to Fluent messages.
pub(crate) mod fluent_generated {
- pub mod #crate_name {
- }
+ #body
+ /// Constants expected to exist by the diagnostic derive macros to use as default Fluent
+ /// identifiers for different subdiagnostic kinds.
pub mod _subdiag {
+ /// Default for `#[help]`
pub const help: crate::SubdiagnosticMessage =
crate::SubdiagnosticMessage::FluentAttr(std::borrow::Cow::Borrowed("help"));
+ /// Default for `#[note]`
pub const note: crate::SubdiagnosticMessage =
crate::SubdiagnosticMessage::FluentAttr(std::borrow::Cow::Borrowed("note"));
+ /// Default for `#[warn]`
pub const warn: crate::SubdiagnosticMessage =
crate::SubdiagnosticMessage::FluentAttr(std::borrow::Cow::Borrowed("warn"));
+ /// Default for `#[label]`
pub const label: crate::SubdiagnosticMessage =
crate::SubdiagnosticMessage::FluentAttr(std::borrow::Cow::Borrowed("label"));
+ /// Default for `#[suggestion]`
pub const suggestion: crate::SubdiagnosticMessage =
crate::SubdiagnosticMessage::FluentAttr(std::borrow::Cow::Borrowed("suggestion"));
}
@@ -68,6 +77,11 @@ fn failed(crate_name: &Ident) -> proc_macro::TokenStream {
.into()
}
+/// Tokens to be returned when the macro cannot proceed.
+fn failed(crate_name: &Ident) -> proc_macro::TokenStream {
+ finish(quote! { pub mod #crate_name {} }, quote! { "" })
+}
+
/// See [rustc_fluent_macro::fluent_messages].
pub(crate) fn fluent_messages(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
let crate_name = std::env::var("CARGO_PKG_NAME")
@@ -311,39 +325,7 @@ pub(crate) fn fluent_messages(input: proc_macro::TokenStream) -> proc_macro::Tok
}
}
- quote! {
- /// Raw content of Fluent resource for this crate, generated by `fluent_messages` macro,
- /// imported by `rustc_driver` to include all crates' resources in one bundle.
- pub static DEFAULT_LOCALE_RESOURCE: &'static str = include_str!(#relative_ftl_path);
-
- #[allow(non_upper_case_globals)]
- #[doc(hidden)]
- /// Auto-generated constants for type-checked references to Fluent messages.
- pub(crate) mod fluent_generated {
- #constants
-
- /// Constants expected to exist by the diagnostic derive macros to use as default Fluent
- /// identifiers for different subdiagnostic kinds.
- pub mod _subdiag {
- /// Default for `#[help]`
- pub const help: crate::SubdiagnosticMessage =
- crate::SubdiagnosticMessage::FluentAttr(std::borrow::Cow::Borrowed("help"));
- /// Default for `#[note]`
- pub const note: crate::SubdiagnosticMessage =
- crate::SubdiagnosticMessage::FluentAttr(std::borrow::Cow::Borrowed("note"));
- /// Default for `#[warn]`
- pub const warn: crate::SubdiagnosticMessage =
- crate::SubdiagnosticMessage::FluentAttr(std::borrow::Cow::Borrowed("warn"));
- /// Default for `#[label]`
- pub const label: crate::SubdiagnosticMessage =
- crate::SubdiagnosticMessage::FluentAttr(std::borrow::Cow::Borrowed("label"));
- /// Default for `#[suggestion]`
- pub const suggestion: crate::SubdiagnosticMessage =
- crate::SubdiagnosticMessage::FluentAttr(std::borrow::Cow::Borrowed("suggestion"));
- }
- }
- }
- .into()
+ finish(constants, quote! { include_str!(#relative_ftl_path) })
}
fn variable_references<'a>(msg: &Message<&'a str>) -> Vec<&'a str> {
diff --git a/compiler/rustc_hir/src/lang_items.rs b/compiler/rustc_hir/src/lang_items.rs
index 23b20543d5355..4a89a6f7e3926 100644
--- a/compiler/rustc_hir/src/lang_items.rs
+++ b/compiler/rustc_hir/src/lang_items.rs
@@ -230,7 +230,6 @@ language_item_table! {
Panic, sym::panic, panic_fn, Target::Fn, GenericRequirement::Exact(0);
PanicNounwind, sym::panic_nounwind, panic_nounwind, Target::Fn, GenericRequirement::Exact(0);
PanicFmt, sym::panic_fmt, panic_fmt, Target::Fn, GenericRequirement::None;
- PanicDisplay, sym::panic_display, panic_display, Target::Fn, GenericRequirement::None;
ConstPanicFmt, sym::const_panic_fmt, const_panic_fmt, Target::Fn, GenericRequirement::None;
PanicBoundsCheck, sym::panic_bounds_check, panic_bounds_check_fn, Target::Fn, GenericRequirement::Exact(0);
PanicMisalignedPointerDereference, sym::panic_misaligned_pointer_dereference, panic_misaligned_pointer_dereference_fn, Target::Fn, GenericRequirement::Exact(0);
diff --git a/compiler/rustc_hir_analysis/messages.ftl b/compiler/rustc_hir_analysis/messages.ftl
index 1c926533a27fd..ee475e3de7e99 100644
--- a/compiler/rustc_hir_analysis/messages.ftl
+++ b/compiler/rustc_hir_analysis/messages.ftl
@@ -96,8 +96,6 @@ hir_analysis_enum_discriminant_overflowed = enum discriminant overflowed
.label = overflowed on value after {$discr}
.note = explicitly set `{$item_name} = {$wrapped_discr}` if that is desired outcome
-hir_analysis_expected_used_symbol = expected `used`, `used(compiler)` or `used(linker)`
-
hir_analysis_field_already_declared =
field `{$field_name}` is already declared
.label = field already declared
diff --git a/compiler/rustc_hir_analysis/src/check/compare_impl_item.rs b/compiler/rustc_hir_analysis/src/check/compare_impl_item.rs
index d081b0e35c666..6a1da615055b5 100644
--- a/compiler/rustc_hir_analysis/src/check/compare_impl_item.rs
+++ b/compiler/rustc_hir_analysis/src/check/compare_impl_item.rs
@@ -16,6 +16,7 @@ use rustc_infer::traits::util;
use rustc_middle::ty::error::{ExpectedFound, TypeError};
use rustc_middle::ty::fold::BottomUpFolder;
use rustc_middle::ty::util::ExplicitSelf;
+use rustc_middle::ty::ToPredicate;
use rustc_middle::ty::{
self, GenericArgs, Ty, TypeFoldable, TypeFolder, TypeSuperFoldable, TypeVisitableExt,
};
@@ -1188,7 +1189,7 @@ fn report_trait_method_mismatch<'tcx>(
let ap = Applicability::MachineApplicable;
match sig.decl.output {
hir::FnRetTy::DefaultReturn(sp) => {
- let sugg = format!("-> {} ", trait_sig.output());
+ let sugg = format!(" -> {}", trait_sig.output());
diag.span_suggestion_verbose(sp, msg, sugg, ap);
}
hir::FnRetTy::Return(hir_ty) => {
@@ -2279,16 +2280,16 @@ pub(super) fn check_type_bounds<'tcx>(
//
// impl X for T where T: X { type Y = ::Y; }
}
- _ => predicates.push(ty::Clause::from_projection_clause(
- tcx,
+ _ => predicates.push(
ty::Binder::bind_with_vars(
ty::ProjectionPredicate {
projection_ty: tcx.mk_alias_ty(trait_ty.def_id, rebased_args),
term: normalize_impl_ty.into(),
},
bound_vars,
- ),
- )),
+ )
+ .to_predicate(tcx),
+ ),
};
ty::ParamEnv::new(tcx.mk_clauses(&predicates), Reveal::UserFacing)
};
diff --git a/compiler/rustc_hir_analysis/src/check/compare_impl_item/refine.rs b/compiler/rustc_hir_analysis/src/check/compare_impl_item/refine.rs
index d9e0e87eb471e..bc5029a1d5e47 100644
--- a/compiler/rustc_hir_analysis/src/check/compare_impl_item/refine.rs
+++ b/compiler/rustc_hir_analysis/src/check/compare_impl_item/refine.rs
@@ -23,8 +23,12 @@ pub(super) fn check_refining_return_position_impl_trait_in_trait<'tcx>(
if !tcx.impl_method_has_trait_impl_trait_tys(impl_m.def_id) {
return;
}
- // crate-private traits don't have any library guarantees, there's no need to do this check.
- if !tcx.visibility(trait_m.container_id(tcx)).is_public() {
+ // unreachable traits don't have any library guarantees, there's no need to do this check.
+ if trait_m
+ .container_id(tcx)
+ .as_local()
+ .is_some_and(|trait_def_id| !tcx.effective_visibilities(()).is_reachable(trait_def_id))
+ {
return;
}
diff --git a/compiler/rustc_hir_analysis/src/check/wfcheck.rs b/compiler/rustc_hir_analysis/src/check/wfcheck.rs
index c4fdffb02618a..de6ca0d61dc97 100644
--- a/compiler/rustc_hir_analysis/src/check/wfcheck.rs
+++ b/compiler/rustc_hir_analysis/src/check/wfcheck.rs
@@ -1130,11 +1130,11 @@ fn check_associated_type_bounds(wfcx: &WfCheckingCtxt<'_, '_>, item: ty::AssocIt
let wf_obligations =
bounds.instantiate_identity_iter_copied().flat_map(|(bound, bound_span)| {
let normalized_bound = wfcx.normalize(span, None, bound);
- traits::wf::predicate_obligations(
+ traits::wf::clause_obligations(
wfcx.infcx,
wfcx.param_env,
wfcx.body_def_id,
- normalized_bound.as_predicate(),
+ normalized_bound,
bound_span,
)
});
@@ -1234,7 +1234,7 @@ fn check_impl<'tcx>(
wfcx.infcx,
wfcx.param_env,
wfcx.body_def_id,
- &trait_pred,
+ trait_pred,
ast_trait_ref.path.span,
item,
);
@@ -1443,13 +1443,7 @@ fn check_where_clauses<'tcx>(wfcx: &WfCheckingCtxt<'_, 'tcx>, span: Span, def_id
debug!(?predicates.predicates);
assert_eq!(predicates.predicates.len(), predicates.spans.len());
let wf_obligations = predicates.into_iter().flat_map(|(p, sp)| {
- traits::wf::predicate_obligations(
- infcx,
- wfcx.param_env,
- wfcx.body_def_id,
- p.as_predicate(),
- sp,
- )
+ traits::wf::clause_obligations(infcx, wfcx.param_env, wfcx.body_def_id, p, sp)
});
let obligations: Vec<_> = wf_obligations.chain(default_obligations).collect();
wfcx.register_obligations(obligations);
diff --git a/compiler/rustc_hir_analysis/src/impl_wf_check/min_specialization.rs b/compiler/rustc_hir_analysis/src/impl_wf_check/min_specialization.rs
index 3760195a5e8ec..dbd38e1b1fe72 100644
--- a/compiler/rustc_hir_analysis/src/impl_wf_check/min_specialization.rs
+++ b/compiler/rustc_hir_analysis/src/impl_wf_check/min_specialization.rs
@@ -376,9 +376,9 @@ fn check_predicates<'tcx>(
let always_applicable_traits = impl1_predicates
.iter()
.copied()
- .filter(|(clause, _span)| {
+ .filter(|&(clause, _span)| {
matches!(
- trait_predicate_kind(tcx, clause.as_predicate()),
+ trait_specialization_kind(tcx, clause),
Some(TraitSpecializationKind::AlwaysApplicable)
)
})
@@ -402,7 +402,7 @@ fn check_predicates<'tcx>(
.iter()
.any(|pred2| trait_predicates_eq(tcx, clause.as_predicate(), *pred2, span))
{
- check_specialization_on(tcx, clause.as_predicate(), span)
+ check_specialization_on(tcx, clause, span)
}
}
}
@@ -441,19 +441,16 @@ fn trait_predicates_eq<'tcx>(
}
#[instrument(level = "debug", skip(tcx))]
-fn check_specialization_on<'tcx>(tcx: TyCtxt<'tcx>, predicate: ty::Predicate<'tcx>, span: Span) {
- match predicate.kind().skip_binder() {
+fn check_specialization_on<'tcx>(tcx: TyCtxt<'tcx>, clause: ty::Clause<'tcx>, span: Span) {
+ match clause.kind().skip_binder() {
// Global predicates are either always true or always false, so we
// are fine to specialize on.
- _ if predicate.is_global() => (),
+ _ if clause.is_global() => (),
// We allow specializing on explicitly marked traits with no associated
// items.
- ty::PredicateKind::Clause(ty::ClauseKind::Trait(ty::TraitPredicate {
- trait_ref,
- polarity: _,
- })) => {
+ ty::ClauseKind::Trait(ty::TraitPredicate { trait_ref, polarity: _ }) => {
if !matches!(
- trait_predicate_kind(tcx, predicate),
+ trait_specialization_kind(tcx, clause),
Some(TraitSpecializationKind::Marker)
) {
tcx.sess
@@ -467,10 +464,7 @@ fn check_specialization_on<'tcx>(tcx: TyCtxt<'tcx>, predicate: ty::Predicate<'tc
.emit();
}
}
- ty::PredicateKind::Clause(ty::ClauseKind::Projection(ty::ProjectionPredicate {
- projection_ty,
- term,
- })) => {
+ ty::ClauseKind::Projection(ty::ProjectionPredicate { projection_ty, term }) => {
tcx.sess
.struct_span_err(
span,
@@ -478,7 +472,7 @@ fn check_specialization_on<'tcx>(tcx: TyCtxt<'tcx>, predicate: ty::Predicate<'tc
)
.emit();
}
- ty::PredicateKind::Clause(ty::ClauseKind::ConstArgHasType(..)) => {
+ ty::ClauseKind::ConstArgHasType(..) => {
// FIXME(min_specialization), FIXME(const_generics):
// It probably isn't right to allow _every_ `ConstArgHasType` but I am somewhat unsure
// about the actual rules that would be sound. Can't just always error here because otherwise
@@ -490,33 +484,25 @@ fn check_specialization_on<'tcx>(tcx: TyCtxt<'tcx>, predicate: ty::Predicate<'tc
}
_ => {
tcx.sess
- .struct_span_err(span, format!("cannot specialize on predicate `{predicate}`"))
+ .struct_span_err(span, format!("cannot specialize on predicate `{clause}`"))
.emit();
}
}
}
-fn trait_predicate_kind<'tcx>(
+fn trait_specialization_kind<'tcx>(
tcx: TyCtxt<'tcx>,
- predicate: ty::Predicate<'tcx>,
+ clause: ty::Clause<'tcx>,
) -> Option {
- match predicate.kind().skip_binder() {
- ty::PredicateKind::Clause(ty::ClauseKind::Trait(ty::TraitPredicate {
- trait_ref,
- polarity: _,
- })) => Some(tcx.trait_def(trait_ref.def_id).specialization_kind),
- ty::PredicateKind::Clause(ty::ClauseKind::RegionOutlives(_))
- | ty::PredicateKind::Clause(ty::ClauseKind::TypeOutlives(_))
- | ty::PredicateKind::Clause(ty::ClauseKind::Projection(_))
- | ty::PredicateKind::Clause(ty::ClauseKind::ConstArgHasType(..))
- | ty::PredicateKind::AliasRelate(..)
- | ty::PredicateKind::Clause(ty::ClauseKind::WellFormed(_))
- | ty::PredicateKind::Subtype(_)
- | ty::PredicateKind::Coerce(_)
- | ty::PredicateKind::ObjectSafe(_)
- | ty::PredicateKind::ClosureKind(..)
- | ty::PredicateKind::Clause(ty::ClauseKind::ConstEvaluatable(..))
- | ty::PredicateKind::ConstEquate(..)
- | ty::PredicateKind::Ambiguous => None,
+ match clause.kind().skip_binder() {
+ ty::ClauseKind::Trait(ty::TraitPredicate { trait_ref, polarity: _ }) => {
+ Some(tcx.trait_def(trait_ref.def_id).specialization_kind)
+ }
+ ty::ClauseKind::RegionOutlives(_)
+ | ty::ClauseKind::TypeOutlives(_)
+ | ty::ClauseKind::Projection(_)
+ | ty::ClauseKind::ConstArgHasType(..)
+ | ty::ClauseKind::WellFormed(_)
+ | ty::ClauseKind::ConstEvaluatable(..) => None,
}
}
diff --git a/compiler/rustc_hir_analysis/src/variance/mod.rs b/compiler/rustc_hir_analysis/src/variance/mod.rs
index 85e0000ab474e..9fb39a0e93b6c 100644
--- a/compiler/rustc_hir_analysis/src/variance/mod.rs
+++ b/compiler/rustc_hir_analysis/src/variance/mod.rs
@@ -192,5 +192,5 @@ fn variance_of_opaque(tcx: TyCtxt<'_>, item_def_id: LocalDefId) -> &[ty::Varianc
}
}
}
- tcx.arena.alloc_from_iter(collector.variances.into_iter())
+ tcx.arena.alloc_from_iter(collector.variances)
}
diff --git a/compiler/rustc_hir_typeck/src/check.rs b/compiler/rustc_hir_typeck/src/check.rs
index 1fa0ec173a7e4..c8ffd7d15067d 100644
--- a/compiler/rustc_hir_typeck/src/check.rs
+++ b/compiler/rustc_hir_typeck/src/check.rs
@@ -113,7 +113,11 @@ pub(super) fn check_fn<'a, 'tcx>(
fcx.typeck_results.borrow_mut().liberated_fn_sigs_mut().insert(fn_id, fn_sig);
- fcx.require_type_is_sized(declared_ret_ty, decl.output.span(), traits::SizedReturnType);
+ let return_or_body_span = match decl.output {
+ hir::FnRetTy::DefaultReturn(_) => body.value.span,
+ hir::FnRetTy::Return(ty) => ty.span,
+ };
+ fcx.require_type_is_sized(declared_ret_ty, return_or_body_span, traits::SizedReturnType);
fcx.check_return_expr(&body.value, false);
// We insert the deferred_generator_interiors entry after visiting the body.
diff --git a/compiler/rustc_hir_typeck/src/demand.rs b/compiler/rustc_hir_typeck/src/demand.rs
index d97691369c958..5c3f2b85966a8 100644
--- a/compiler/rustc_hir_typeck/src/demand.rs
+++ b/compiler/rustc_hir_typeck/src/demand.rs
@@ -14,7 +14,7 @@ use rustc_middle::ty::adjustment::AllowTwoPhase;
use rustc_middle::ty::error::{ExpectedFound, TypeError};
use rustc_middle::ty::fold::BottomUpFolder;
use rustc_middle::ty::print::with_no_trimmed_paths;
-use rustc_middle::ty::{self, Article, AssocItem, Ty, TypeAndMut, TypeFoldable};
+use rustc_middle::ty::{self, Article, AssocItem, Ty, TypeAndMut, TypeFoldable, TypeVisitableExt};
use rustc_span::symbol::sym;
use rustc_span::{BytePos, Span, DUMMY_SP};
use rustc_trait_selection::infer::InferCtxtExt as _;
@@ -504,12 +504,18 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// incompatible fix at the original mismatch site.
if matches!(source, TypeMismatchSource::Ty(_))
&& let Some(ideal_method) = ideal_method
+ && let ideal_arg_ty = self.resolve_vars_if_possible(ideal_method.sig.inputs()[idx + 1])
+ // HACK(compiler-errors): We don't actually consider the implications
+ // of our inference guesses in `emit_type_mismatch_suggestions`, so
+ // only suggest things when we know our type error is precisely due to
+ // a type mismatch, and not via some projection or something. See #116155.
+ && !ideal_arg_ty.has_non_region_infer()
{
self.emit_type_mismatch_suggestions(
err,
arg_expr,
arg_ty,
- self.resolve_vars_if_possible(ideal_method.sig.inputs()[idx + 1]),
+ ideal_arg_ty,
None,
None,
);
diff --git a/compiler/rustc_hir_typeck/src/errors.rs b/compiler/rustc_hir_typeck/src/errors.rs
index 1526988fbd926..8efccd5ba3eef 100644
--- a/compiler/rustc_hir_typeck/src/errors.rs
+++ b/compiler/rustc_hir_typeck/src/errors.rs
@@ -110,7 +110,7 @@ pub struct AddressOfTemporaryTaken {
pub enum AddReturnTypeSuggestion {
#[suggestion(
hir_typeck_add_return_type_add,
- code = "-> {found} ",
+ code = " -> {found}",
applicability = "machine-applicable"
)]
Add {
@@ -120,7 +120,7 @@ pub enum AddReturnTypeSuggestion {
},
#[suggestion(
hir_typeck_add_return_type_missing_here,
- code = "-> _ ",
+ code = " -> _",
applicability = "has-placeholders"
)]
MissingHere {
diff --git a/compiler/rustc_hir_typeck/src/expr.rs b/compiler/rustc_hir_typeck/src/expr.rs
index eead4da5e3ef8..d9d0dd93010c7 100644
--- a/compiler/rustc_hir_typeck/src/expr.rs
+++ b/compiler/rustc_hir_typeck/src/expr.rs
@@ -41,7 +41,6 @@ use rustc_infer::infer::DefineOpaqueTypes;
use rustc_infer::infer::InferOk;
use rustc_infer::traits::query::NoSolution;
use rustc_infer::traits::ObligationCause;
-use rustc_middle::middle::stability;
use rustc_middle::ty::adjustment::{Adjust, Adjustment, AllowTwoPhase};
use rustc_middle::ty::error::{
ExpectedFound,
@@ -1585,12 +1584,11 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
self.check_expr_struct_fields(
adt_ty,
expected,
- expr.hir_id,
+ expr,
qpath.span(),
variant,
fields,
base_expr,
- expr.span,
);
self.require_type_is_sized(adt_ty, expr.span, traits::StructInitializerSized);
@@ -1601,12 +1599,11 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
&self,
adt_ty: Ty<'tcx>,
expected: Expectation<'tcx>,
- expr_id: hir::HirId,
+ expr: &hir::Expr<'_>,
span: Span,
variant: &'tcx ty::VariantDef,
ast_fields: &'tcx [hir::ExprField<'tcx>],
base_expr: &'tcx Option<&'tcx hir::Expr<'tcx>>,
- expr_span: Span,
) {
let tcx = self.tcx;
@@ -1646,7 +1643,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// struct-like enums (yet...), but it's definitely not
// a bug to have constructed one.
if adt_kind != AdtKind::Enum {
- tcx.check_stability(v_field.did, Some(expr_id), field.span, None);
+ tcx.check_stability(v_field.did, Some(expr.hir_id), field.span, None);
}
self.field_ty(field.span, v_field, args)
@@ -1662,10 +1659,10 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
self.report_unknown_field(
adt_ty,
variant,
+ expr,
field,
ast_fields,
adt.variant_descr(),
- expr_span,
)
};
@@ -1731,7 +1728,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
.iter()
.map(|f| {
let fru_ty = self
- .normalize(expr_span, self.field_ty(base_expr.span, f, fresh_args));
+ .normalize(expr.span, self.field_ty(base_expr.span, f, fresh_args));
let ident = self.tcx.adjust_ident(f.ident(self.tcx), variant.def_id);
if let Some(_) = remaining_fields.remove(&ident) {
let target_ty = self.field_ty(base_expr.span, f, args);
@@ -1814,7 +1811,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
ty::Adt(adt, args) if adt.is_struct() => variant
.fields
.iter()
- .map(|f| self.normalize(expr_span, f.ty(self.tcx, args)))
+ .map(|f| self.normalize(expr.span, f.ty(self.tcx, args)))
.collect(),
_ => {
self.tcx
@@ -1824,13 +1821,13 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
}
}
};
- self.typeck_results.borrow_mut().fru_field_types_mut().insert(expr_id, fru_tys);
+ self.typeck_results.borrow_mut().fru_field_types_mut().insert(expr.hir_id, fru_tys);
} else if adt_kind != AdtKind::Union && !remaining_fields.is_empty() {
debug!(?remaining_fields);
let private_fields: Vec<&ty::FieldDef> = variant
.fields
.iter()
- .filter(|field| !field.vis.is_accessible_from(tcx.parent_module(expr_id), tcx))
+ .filter(|field| !field.vis.is_accessible_from(tcx.parent_module(expr.hir_id), tcx))
.collect();
if !private_fields.is_empty() {
@@ -2049,16 +2046,16 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
&self,
ty: Ty<'tcx>,
variant: &'tcx ty::VariantDef,
+ expr: &hir::Expr<'_>,
field: &hir::ExprField<'_>,
skip_fields: &[hir::ExprField<'_>],
kind_name: &str,
- expr_span: Span,
) -> ErrorGuaranteed {
if variant.is_recovered() {
let guar = self
.tcx
.sess
- .delay_span_bug(expr_span, "parser recovered but no error was emitted");
+ .delay_span_bug(expr.span, "parser recovered but no error was emitted");
self.set_tainted_by_errors(guar);
return guar;
}
@@ -2102,7 +2099,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
);
err.span_label(field.ident.span, "field does not exist");
err.span_suggestion_verbose(
- expr_span,
+ expr.span,
format!(
"`{adt}::{variant}` is a tuple {kind_name}, use the appropriate syntax",
adt = ty,
@@ -2120,7 +2117,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
err.span_label(variant_ident_span, format!("`{ty}` defined here"));
err.span_label(field.ident.span, "field does not exist");
err.span_suggestion_verbose(
- expr_span,
+ expr.span,
format!("`{ty}` is a tuple {kind_name}, use the appropriate syntax",),
format!("{ty}(/* fields */)"),
Applicability::HasPlaceholders,
@@ -2129,9 +2126,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
},
_ => {
// prevent all specified fields from being suggested
- let skip_fields: Vec<_> = skip_fields.iter().map(|x| x.ident.name).collect();
+ let available_field_names = self.available_field_names(variant, expr, skip_fields);
if let Some(field_name) =
- self.suggest_field_name(variant, field.ident.name, &skip_fields, expr_span)
+ find_best_match_for_name(&available_field_names, field.ident.name, None)
{
err.span_suggestion(
field.ident.span,
@@ -2153,10 +2150,6 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
format!("`{ty}` does not have this field"),
);
}
- let mut available_field_names =
- self.available_field_names(variant, expr_span);
- available_field_names
- .retain(|name| skip_fields.iter().all(|skip| name != skip));
if available_field_names.is_empty() {
err.note("all struct fields are already assigned");
} else {
@@ -2174,63 +2167,19 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
err.emit()
}
- // Return a hint about the closest match in field names
- fn suggest_field_name(
- &self,
- variant: &'tcx ty::VariantDef,
- field: Symbol,
- skip: &[Symbol],
- // The span where stability will be checked
- span: Span,
- ) -> Option {
- let names = variant
- .fields
- .iter()
- .filter_map(|field| {
- // ignore already set fields and private fields from non-local crates
- // and unstable fields.
- if skip.iter().any(|&x| x == field.name)
- || (!variant.def_id.is_local() && !field.vis.is_public())
- || matches!(
- self.tcx.eval_stability(field.did, None, span, None),
- stability::EvalResult::Deny { .. }
- )
- {
- None
- } else {
- Some(field.name)
- }
- })
- .collect::>();
-
- find_best_match_for_name(&names, field, None)
- }
-
fn available_field_names(
&self,
variant: &'tcx ty::VariantDef,
- access_span: Span,
+ expr: &hir::Expr<'_>,
+ skip_fields: &[hir::ExprField<'_>],
) -> Vec {
- let body_owner_hir_id = self.tcx.hir().local_def_id_to_hir_id(self.body_id);
variant
.fields
.iter()
.filter(|field| {
- let def_scope = self
- .tcx
- .adjust_ident_and_get_scope(
- field.ident(self.tcx),
- variant.def_id,
- body_owner_hir_id,
- )
- .1;
- field.vis.is_accessible_from(def_scope, self.tcx)
- && !matches!(
- self.tcx.eval_stability(field.did, None, access_span, None),
- stability::EvalResult::Deny { .. }
- )
+ skip_fields.iter().all(|&skip| skip.ident.name != field.name)
+ && self.is_field_suggestable(field, expr.hir_id, expr.span)
})
- .filter(|field| !self.tcx.is_doc_hidden(field.did))
.map(|field| field.name)
.collect()
}
@@ -2460,7 +2409,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
self.suggest_first_deref_field(&mut err, expr, base, ident);
}
ty::Adt(def, _) if !def.is_enum() => {
- self.suggest_fields_on_recordish(&mut err, def, ident, expr.span);
+ self.suggest_fields_on_recordish(&mut err, expr, def, ident);
}
ty::Param(param_ty) => {
self.point_at_param_definition(&mut err, param_ty);
@@ -2622,12 +2571,13 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
fn suggest_fields_on_recordish(
&self,
err: &mut Diagnostic,
+ expr: &hir::Expr<'_>,
def: ty::AdtDef<'tcx>,
field: Ident,
- access_span: Span,
) {
+ let available_field_names = self.available_field_names(def.non_enum_variant(), expr, &[]);
if let Some(suggested_field_name) =
- self.suggest_field_name(def.non_enum_variant(), field.name, &[], access_span)
+ find_best_match_for_name(&available_field_names, field.name, None)
{
err.span_suggestion(
field.span,
@@ -2637,12 +2587,10 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
);
} else {
err.span_label(field.span, "unknown field");
- let struct_variant_def = def.non_enum_variant();
- let field_names = self.available_field_names(struct_variant_def, access_span);
- if !field_names.is_empty() {
+ if !available_field_names.is_empty() {
err.note(format!(
"available fields are: {}",
- self.name_series_display(field_names),
+ self.name_series_display(available_field_names),
));
}
}
diff --git a/compiler/rustc_hir_typeck/src/fn_ctxt/suggestions.rs b/compiler/rustc_hir_typeck/src/fn_ctxt/suggestions.rs
index abb689892183c..9999fa2e59ccf 100644
--- a/compiler/rustc_hir_typeck/src/fn_ctxt/suggestions.rs
+++ b/compiler/rustc_hir_typeck/src/fn_ctxt/suggestions.rs
@@ -782,8 +782,6 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
}
}
hir::FnRetTy::Return(hir_ty) => {
- let span = hir_ty.span;
-
if let hir::TyKind::OpaqueDef(item_id, ..) = hir_ty.kind
&& let hir::Node::Item(hir::Item {
kind: hir::ItemKind::OpaqueTy(op_ty),
@@ -799,28 +797,28 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
debug!(?found);
if found.is_suggestable(self.tcx, false) {
if term.span.is_empty() {
- err.subdiagnostic(errors::AddReturnTypeSuggestion::Add { span, found: found.to_string() });
+ err.subdiagnostic(errors::AddReturnTypeSuggestion::Add { span: term.span, found: found.to_string() });
return true;
} else {
- err.subdiagnostic(errors::ExpectedReturnTypeLabel::Other { span, expected });
+ err.subdiagnostic(errors::ExpectedReturnTypeLabel::Other { span: term.span, expected });
}
}
- }
-
- // Only point to return type if the expected type is the return type, as if they
- // are not, the expectation must have been caused by something else.
- debug!("return type {:?}", hir_ty);
- let ty = self.astconv().ast_ty_to_ty(hir_ty);
- debug!("return type {:?}", ty);
- debug!("expected type {:?}", expected);
- let bound_vars = self.tcx.late_bound_vars(hir_ty.hir_id.owner.into());
- let ty = Binder::bind_with_vars(ty, bound_vars);
- let ty = self.normalize(span, ty);
- let ty = self.tcx.erase_late_bound_regions(ty);
- if self.can_coerce(expected, ty) {
- err.subdiagnostic(errors::ExpectedReturnTypeLabel::Other { span, expected });
- self.try_suggest_return_impl_trait(err, expected, ty, fn_id);
- return true;
+ } else {
+ // Only point to return type if the expected type is the return type, as if they
+ // are not, the expectation must have been caused by something else.
+ debug!("return type {:?}", hir_ty);
+ let ty = self.astconv().ast_ty_to_ty(hir_ty);
+ debug!("return type {:?}", ty);
+ debug!("expected type {:?}", expected);
+ let bound_vars = self.tcx.late_bound_vars(hir_ty.hir_id.owner.into());
+ let ty = Binder::bind_with_vars(ty, bound_vars);
+ let ty = self.normalize(hir_ty.span, ty);
+ let ty = self.tcx.erase_late_bound_regions(ty);
+ if self.can_coerce(expected, ty) {
+ err.subdiagnostic(errors::ExpectedReturnTypeLabel::Other { span: hir_ty.span, expected });
+ self.try_suggest_return_impl_trait(err, expected, ty, fn_id);
+ return true;
+ }
}
}
_ => {}
@@ -1687,4 +1685,23 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
false
}
}
+
+ pub(crate) fn is_field_suggestable(
+ &self,
+ field: &ty::FieldDef,
+ hir_id: HirId,
+ span: Span,
+ ) -> bool {
+ // The field must be visible in the containing module.
+ field.vis.is_accessible_from(self.tcx.parent_module(hir_id), self.tcx)
+ // The field must not be unstable.
+ && !matches!(
+ self.tcx.eval_stability(field.did, None, rustc_span::DUMMY_SP, None),
+ rustc_middle::middle::stability::EvalResult::Deny { .. }
+ )
+ // If the field is from an external crate it must not be `doc(hidden)`.
+ && (field.did.is_local() || !self.tcx.is_doc_hidden(field.did))
+ // If the field is hygienic it must come from the same syntax context.
+ && self.tcx.def_ident_span(field.did).unwrap().normalize_to_macros_2_0().eq_ctxt(span)
+ }
}
diff --git a/compiler/rustc_hir_typeck/src/lib.rs b/compiler/rustc_hir_typeck/src/lib.rs
index 6873382c4ac60..cd6adb345e7a3 100644
--- a/compiler/rustc_hir_typeck/src/lib.rs
+++ b/compiler/rustc_hir_typeck/src/lib.rs
@@ -5,7 +5,6 @@
#![feature(box_patterns)]
#![feature(min_specialization)]
#![feature(control_flow_enum)]
-#![feature(option_as_slice)]
#![recursion_limit = "256"]
#[macro_use]
diff --git a/compiler/rustc_hir_typeck/src/method/suggest.rs b/compiler/rustc_hir_typeck/src/method/suggest.rs
index 07c48ec6392a8..a4bbb16026a73 100644
--- a/compiler/rustc_hir_typeck/src/method/suggest.rs
+++ b/compiler/rustc_hir_typeck/src/method/suggest.rs
@@ -2494,10 +2494,18 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// Try alternative arbitrary self types that could fulfill this call.
// FIXME: probe for all types that *could* be arbitrary self-types, not
// just this list.
- for (rcvr_ty, post) in &[
- (rcvr_ty, ""),
- (Ty::new_mut_ref(self.tcx, self.tcx.lifetimes.re_erased, rcvr_ty), "&mut "),
- (Ty::new_imm_ref(self.tcx, self.tcx.lifetimes.re_erased, rcvr_ty), "&"),
+ for (rcvr_ty, post, pin_call) in &[
+ (rcvr_ty, "", None),
+ (
+ Ty::new_mut_ref(self.tcx, self.tcx.lifetimes.re_erased, rcvr_ty),
+ "&mut ",
+ Some("as_mut"),
+ ),
+ (
+ Ty::new_imm_ref(self.tcx, self.tcx.lifetimes.re_erased, rcvr_ty),
+ "&",
+ Some("as_ref"),
+ ),
] {
match self.lookup_probe_for_diagnostic(
item_name,
@@ -2531,6 +2539,17 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
Err(_) => (),
}
+ let pred = ty::TraitRef::new(
+ self.tcx,
+ self.tcx.lang_items().unpin_trait().unwrap(),
+ [*rcvr_ty],
+ );
+ let unpin = self.predicate_must_hold_considering_regions(&Obligation::new(
+ self.tcx,
+ ObligationCause::misc(rcvr.span, self.body_id),
+ self.param_env,
+ pred,
+ ));
for (rcvr_ty, pre) in &[
(Ty::new_lang_item(self.tcx, *rcvr_ty, LangItem::OwnedBox), "Box::new"),
(Ty::new_lang_item(self.tcx, *rcvr_ty, LangItem::Pin), "Pin::new"),
@@ -2554,7 +2573,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// Explicitly ignore the `Pin::as_ref()` method as `Pin` does not
// implement the `AsRef` trait.
let skip = skippable.contains(&did)
- || (("Pin::new" == *pre) && (sym::as_ref == item_name.name))
+ || (("Pin::new" == *pre) && ((sym::as_ref == item_name.name) || !unpin))
|| inputs_len.is_some_and(|inputs_len| pick.item.kind == ty::AssocKind::Fn && self.tcx.fn_sig(pick.item.def_id).skip_binder().skip_binder().inputs().len() != inputs_len);
// Make sure the method is defined for the *actual* receiver: we don't
// want to treat `Box` as a receiver if it only works because of
@@ -2566,7 +2585,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
);
err.multipart_suggestion(
"consider wrapping the receiver expression with the \
- appropriate type",
+ appropriate type",
vec![
(rcvr.span.shrink_to_lo(), format!("{pre}({post}")),
(rcvr.span.shrink_to_hi(), ")".to_string()),
@@ -2578,6 +2597,49 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
}
}
}
+ // We special case the situation where `Pin::new` wouldn't work, and instead
+ // suggest using the `pin!()` macro instead.
+ if let Some(new_rcvr_t) = Ty::new_lang_item(self.tcx, *rcvr_ty, LangItem::Pin)
+ // We didn't find an alternative receiver for the method.
+ && !alt_rcvr_sugg
+ // `T: !Unpin`
+ && !unpin
+ // The method isn't `as_ref`, as it would provide a wrong suggestion for `Pin`.
+ && sym::as_ref != item_name.name
+ // Either `Pin::as_ref` or `Pin::as_mut`.
+ && let Some(pin_call) = pin_call
+ // Search for `item_name` as a method accessible on `Pin`.
+ && let Ok(pick) = self.lookup_probe_for_diagnostic(
+ item_name,
+ new_rcvr_t,
+ rcvr,
+ ProbeScope::AllTraits,
+ return_type,
+ )
+ // We skip some common traits that we don't want to consider because autoderefs
+ // would take care of them.
+ && !skippable.contains(&Some(pick.item.container_id(self.tcx)))
+ // We don't want to go through derefs.
+ && pick.autoderefs == 0
+ // Check that the method of the same name that was found on the new `Pin`
+ // receiver has the same number of arguments that appear in the user's code.
+ && inputs_len.is_some_and(|inputs_len| pick.item.kind == ty::AssocKind::Fn && self.tcx.fn_sig(pick.item.def_id).skip_binder().skip_binder().inputs().len() == inputs_len)
+ {
+ let indent = self.tcx.sess
+ .source_map()
+ .indentation_before(rcvr.span)
+ .unwrap_or_else(|| " ".to_string());
+ err.multipart_suggestion(
+ "consider pinning the expression",
+ vec![
+ (rcvr.span.shrink_to_lo(), format!("let mut pinned = std::pin::pin!(")),
+ (rcvr.span.shrink_to_hi(), format!(");\n{indent}pinned.{pin_call}()")),
+ ],
+ Applicability::MaybeIncorrect,
+ );
+ // We don't care about the other suggestions.
+ alt_rcvr_sugg = true;
+ }
}
}
if self.suggest_valid_traits(err, valid_out_of_scope_traits) {
diff --git a/compiler/rustc_hir_typeck/src/pat.rs b/compiler/rustc_hir_typeck/src/pat.rs
index 8fc236f46b226..3f9c9b3381baa 100644
--- a/compiler/rustc_hir_typeck/src/pat.rs
+++ b/compiler/rustc_hir_typeck/src/pat.rs
@@ -12,7 +12,6 @@ use rustc_hir::pat_util::EnumerateAndAdjustIterator;
use rustc_hir::{HirId, Pat, PatKind};
use rustc_infer::infer;
use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
-use rustc_middle::middle::stability::EvalResult;
use rustc_middle::ty::{self, Adt, BindingMode, Ty, TypeVisitableExt};
use rustc_session::lint::builtin::NON_EXHAUSTIVE_OMITTED_PATTERNS;
use rustc_span::edit_distance::find_best_match_for_name;
@@ -1408,6 +1407,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
adt.variant_descr(),
&inexistent_fields,
&mut unmentioned_fields,
+ pat,
variant,
args,
))
@@ -1434,15 +1434,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let accessible_unmentioned_fields: Vec<_> = unmentioned_fields
.iter()
.copied()
- .filter(|(field, _)| {
- field.vis.is_accessible_from(tcx.parent_module(pat.hir_id), tcx)
- && !matches!(
- tcx.eval_stability(field.did, None, DUMMY_SP, None),
- EvalResult::Deny { .. }
- )
- // We only want to report the error if it is hidden and not local
- && !(tcx.is_doc_hidden(field.did) && !field.did.is_local())
- })
+ .filter(|(field, _)| self.is_field_suggestable(field, pat.hir_id, pat.span))
.collect();
if !has_rest_pat {
@@ -1578,12 +1570,13 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
kind_name: &str,
inexistent_fields: &[&hir::PatField<'tcx>],
unmentioned_fields: &mut Vec<(&'tcx ty::FieldDef, Ident)>,
+ pat: &'tcx Pat<'tcx>,
variant: &ty::VariantDef,
args: &'tcx ty::List>,
) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
let tcx = self.tcx;
- let (field_names, t, plural) = if inexistent_fields.len() == 1 {
- (format!("a field named `{}`", inexistent_fields[0].ident), "this", "")
+ let (field_names, t, plural) = if let [field] = inexistent_fields {
+ (format!("a field named `{}`", field.ident), "this", "")
} else {
(
format!(
@@ -1620,10 +1613,11 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
),
);
- if unmentioned_fields.len() == 1 {
- let input =
- unmentioned_fields.iter().map(|(_, field)| field.name).collect::>();
- let suggested_name = find_best_match_for_name(&input, pat_field.ident.name, None);
+ if let [(field_def, field)] = unmentioned_fields.as_slice()
+ && self.is_field_suggestable(field_def, pat.hir_id, pat.span)
+ {
+ let suggested_name =
+ find_best_match_for_name(&[field.name], pat_field.ident.name, None);
if let Some(suggested_name) = suggested_name {
err.span_suggestion(
pat_field.ident.span,
@@ -1646,22 +1640,17 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
PatKind::Lit(expr)
if !self.can_coerce(
self.typeck_results.borrow().expr_ty(expr),
- self.field_ty(
- unmentioned_fields[0].1.span,
- unmentioned_fields[0].0,
- args,
- ),
+ self.field_ty(field.span, field_def, args),
) => {}
_ => {
- let unmentioned_field = unmentioned_fields[0].1.name;
err.span_suggestion_short(
pat_field.ident.span,
format!(
"`{}` has a field named `{}`",
tcx.def_path_str(variant.def_id),
- unmentioned_field
+ field.name,
),
- unmentioned_field.to_string(),
+ field.name,
Applicability::MaybeIncorrect,
);
}
@@ -1871,8 +1860,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
fields: &'tcx [hir::PatField<'tcx>],
) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
let inaccessible = if have_inaccessible_fields { " and inaccessible fields" } else { "" };
- let field_names = if unmentioned_fields.len() == 1 {
- format!("field `{}`{}", unmentioned_fields[0].1, inaccessible)
+ let field_names = if let [(_, field)] = unmentioned_fields {
+ format!("field `{field}`{inaccessible}")
} else {
let fields = unmentioned_fields
.iter()
diff --git a/compiler/rustc_infer/messages.ftl b/compiler/rustc_infer/messages.ftl
index 46558997f7236..b36fb6a4dbac9 100644
--- a/compiler/rustc_infer/messages.ftl
+++ b/compiler/rustc_infer/messages.ftl
@@ -66,7 +66,6 @@ infer_await_both_futures = consider `await`ing on both `Future`s
infer_await_future = consider `await`ing on the `Future`
infer_await_note = calling an async function returns a future
-infer_borrowed_too_long = a value of type `{$ty}` is borrowed for too long
infer_but_calling_introduces = {$has_param_name ->
[true] `{$param_name}`
*[false] `fn` parameter
@@ -181,8 +180,6 @@ infer_more_targeted = {$has_param_name ->
} but calling `{$ident}` introduces an implicit `'static` lifetime requirement
infer_msl_introduces_static = introduces a `'static` lifetime requirement
-infer_msl_trait_note = this has an implicit `'static` lifetime requirement
-infer_msl_trait_sugg = consider relaxing the implicit `'static` requirement
infer_msl_unmet_req = because this has an unmet lifetime requirement
infer_need_type_info_in_generator =
type inside {$generator_kind ->
@@ -233,7 +230,6 @@ infer_prlf_known_limitation = this is a known limitation that will be removed in
infer_prlf_must_outlive_with_sup = ...must outlive the lifetime `{$sup_symbol}` defined here
infer_prlf_must_outlive_without_sup = ...must outlive the lifetime defined here
infer_reborrow = ...so that reference does not outlive borrowed content
-infer_reborrow_upvar = ...so that closure can access `{$name}`
infer_ref_longer_than_data = in type `{$ty}`, reference has a longer lifetime than the data it references
infer_reference_outlives_referent = ...so that the reference type `{$name}` does not outlive the data it points at
diff --git a/compiler/rustc_infer/src/errors/mod.rs b/compiler/rustc_infer/src/errors/mod.rs
index ad4525c922b43..3ff1a5c0c14b0 100644
--- a/compiler/rustc_infer/src/errors/mod.rs
+++ b/compiler/rustc_infer/src/errors/mod.rs
@@ -194,13 +194,13 @@ impl<'a> SourceKindMultiSuggestion<'a> {
data: &'a FnRetTy<'a>,
should_wrap_expr: Option,
) -> Self {
- let (arrow, post) = match data {
- FnRetTy::DefaultReturn(_) => ("-> ", " "),
- _ => ("", ""),
+ let arrow = match data {
+ FnRetTy::DefaultReturn(_) => " -> ",
+ _ => "",
};
let (start_span, start_span_code, end_span) = match should_wrap_expr {
- Some(end_span) => (data.span(), format!("{arrow}{ty_info}{post}{{ "), Some(end_span)),
- None => (data.span(), format!("{arrow}{ty_info}{post}"), None),
+ Some(end_span) => (data.span(), format!("{arrow}{ty_info} {{"), Some(end_span)),
+ None => (data.span(), format!("{arrow}{ty_info}"), None),
};
Self::ClosureReturn { start_span, start_span_code, end_span }
}
diff --git a/compiler/rustc_infer/src/infer/outlives/test_type_match.rs b/compiler/rustc_infer/src/infer/outlives/test_type_match.rs
index fefa8959545ab..2d6b88226adb2 100644
--- a/compiler/rustc_infer/src/infer/outlives/test_type_match.rs
+++ b/compiler/rustc_infer/src/infer/outlives/test_type_match.rs
@@ -44,7 +44,7 @@ pub fn extract_verify_if_eq<'tcx>(
test_ty: Ty<'tcx>,
) -> Option> {
assert!(!verify_if_eq_b.has_escaping_bound_vars());
- let mut m = Match::new(tcx, param_env);
+ let mut m = MatchAgainstHigherRankedOutlives::new(tcx, param_env);
let verify_if_eq = verify_if_eq_b.skip_binder();
m.relate(verify_if_eq.ty, test_ty).ok()?;
@@ -87,24 +87,32 @@ pub(super) fn can_match_erased_ty<'tcx>(
// pointless micro-optimization
true
} else {
- Match::new(tcx, param_env).relate(outlives_ty, erased_ty).is_ok()
+ MatchAgainstHigherRankedOutlives::new(tcx, param_env).relate(outlives_ty, erased_ty).is_ok()
}
}
-struct Match<'tcx> {
+struct MatchAgainstHigherRankedOutlives<'tcx> {
tcx: TyCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
pattern_depth: ty::DebruijnIndex,
map: FxHashMap>,
}
-impl<'tcx> Match<'tcx> {
- fn new(tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> Match<'tcx> {
- Match { tcx, param_env, pattern_depth: ty::INNERMOST, map: FxHashMap::default() }
+impl<'tcx> MatchAgainstHigherRankedOutlives<'tcx> {
+ fn new(
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ ) -> MatchAgainstHigherRankedOutlives<'tcx> {
+ MatchAgainstHigherRankedOutlives {
+ tcx,
+ param_env,
+ pattern_depth: ty::INNERMOST,
+ map: FxHashMap::default(),
+ }
}
}
-impl<'tcx> Match<'tcx> {
+impl<'tcx> MatchAgainstHigherRankedOutlives<'tcx> {
/// Creates the "Error" variant that signals "no match".
fn no_match(&self) -> RelateResult<'tcx, T> {
Err(TypeError::Mismatch)
@@ -134,7 +142,7 @@ impl<'tcx> Match<'tcx> {
}
}
-impl<'tcx> TypeRelation<'tcx> for Match<'tcx> {
+impl<'tcx> TypeRelation<'tcx> for MatchAgainstHigherRankedOutlives<'tcx> {
fn tag(&self) -> &'static str {
"Match"
}
diff --git a/compiler/rustc_lint/messages.ftl b/compiler/rustc_lint/messages.ftl
index 7377c6e2f35a2..fa4b8e4c36b84 100644
--- a/compiler/rustc_lint/messages.ftl
+++ b/compiler/rustc_lint/messages.ftl
@@ -5,6 +5,10 @@ lint_array_into_iter =
.use_explicit_into_iter_suggestion =
or use `IntoIterator::into_iter(..)` instead of `.into_iter()` to explicitly iterate by value
+lint_async_fn_in_trait = use of `async fn` in public traits is discouraged as auto trait bounds cannot be specified
+ .note = you can suppress this lint if you plan to use the trait only in your own code, or do not care about auto traits like `Send` on the `Future`
+ .suggestion = you can alternatively desugar to a normal `fn` that returns `impl Future` and add any desired bounds such as `Send`
+
lint_atomic_ordering_fence = memory fences cannot have `Relaxed` ordering
.help = consider using ordering modes `Acquire`, `Release`, `AcqRel` or `SeqCst`
@@ -319,6 +323,8 @@ lint_invalid_reference_casting_borrow_as_mut = casting `&T` to `&mut T` is undef
lint_invalid_reference_casting_note_book = for more information, visit
+lint_invalid_reference_casting_note_ty_has_interior_mutability = even for types with interior mutability, the only legal way to obtain a mutable pointer from a shared reference is through `UnsafeCell::get`
+
lint_lintpass_by_hand = implementing `LintPass` by hand
.help = try using `declare_lint_pass!` or `impl_lint_pass!` instead
diff --git a/compiler/rustc_lint/src/async_fn_in_trait.rs b/compiler/rustc_lint/src/async_fn_in_trait.rs
new file mode 100644
index 0000000000000..ff4c81e2fc9b1
--- /dev/null
+++ b/compiler/rustc_lint/src/async_fn_in_trait.rs
@@ -0,0 +1,128 @@
+use crate::lints::AsyncFnInTraitDiag;
+use crate::LateContext;
+use crate::LateLintPass;
+use rustc_hir as hir;
+use rustc_trait_selection::traits::error_reporting::suggestions::suggest_desugaring_async_fn_to_impl_future_in_trait;
+
+declare_lint! {
+ /// The `async_fn_in_trait` lint detects use of `async fn` in the
+ /// definition of a publicly-reachable trait.
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// # #![feature(async_fn_in_trait)]
+ /// pub trait Trait {
+ /// async fn method(&self);
+ /// }
+ /// # fn main() {}
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// When `async fn` is used in a trait definition, the trait does not
+ /// promise that the opaque [`Future`] returned by the associated function
+ /// or method will implement any [auto traits] such as [`Send`]. This may
+ /// be surprising and may make the associated functions or methods on the
+ /// trait less useful than intended. On traits exposed publicly from a
+ /// crate, this may affect downstream crates whose authors cannot alter
+ /// the trait definition.
+ ///
+ /// For example, this code is invalid:
+ ///
+ /// ```rust,compile_fail
+ /// # #![feature(async_fn_in_trait)]
+ /// pub trait Trait {
+ /// async fn method(&self) {}
+ /// }
+ ///
+ /// fn test(x: T) {
+ /// fn spawn(_: T) {}
+ /// spawn(x.method()); // Not OK.
+ /// }
+ /// ```
+ ///
+ /// This lint exists to warn authors of publicly-reachable traits that
+ /// they may want to consider desugaring the `async fn` to a normal `fn`
+ /// that returns an opaque `impl Future<..> + Send` type.
+ ///
+ /// For example, instead of:
+ ///
+ /// ```rust
+ /// # #![feature(async_fn_in_trait)]
+ /// pub trait Trait {
+ /// async fn method(&self) {}
+ /// }
+ /// ```
+ ///
+ /// The author of the trait may want to write:
+ ///
+ ///
+ /// ```rust
+ /// # #![feature(return_position_impl_trait_in_trait)]
+ /// use core::future::Future;
+ /// pub trait Trait {
+ /// fn method(&self) -> impl Future
| | | |