Don't require allocas for consuming simple enums #138582

Draft: wants to merge 3 commits into master.
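
As a rough illustration of the kind of code this PR targets (an example of mine, not taken from the PR itself): consuming a small enum by value previously forced the operand into a stack slot just so the payload could be loaded back out of it. With this change, reads like `(x as Some).0` can stay as SSA values.

    // Hypothetical example, assuming the usual `Option` pair layout (tag, payload):
    // before this PR, codegen would spill `x` to an alloca just to extract `v`.
    pub fn unwrap_or_zero(x: Option<u32>) -> u32 {
        match x {
            Some(v) => v,
            None => 0,
        }
    }
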
2 changes: 1 addition & 1 deletion compiler/rustc_abi/src/lib.rs
@@ -1821,7 +1821,7 @@ pub enum Variants<FieldIdx: Idx, VariantIdx: Idx> {
 
     /// Single enum variants, structs/tuples, unions, and all non-ADTs.
     Single {
-        /// Always `0` for types that cannot have multiple variants.
+        /// Always [`FIRST_VARIANT`] for types that cannot have multiple variants.
         index: VariantIdx,
     },

83 changes: 33 additions & 50 deletions compiler/rustc_codegen_ssa/src/mir/analyze.rs
@@ -6,7 +6,7 @@ use rustc_index::bit_set::DenseBitSet;
 use rustc_index::{IndexSlice, IndexVec};
 use rustc_middle::mir::visit::{MutatingUseContext, NonMutatingUseContext, PlaceContext, Visitor};
 use rustc_middle::mir::{self, DefLocation, Location, TerminatorKind, traversal};
-use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf};
+use rustc_middle::ty::layout::LayoutOf;
 use rustc_middle::{bug, span_bug};
 use tracing::debug;

@@ -99,63 +99,46 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'b, 'tcx>> LocalAnalyzer<'a, 'b, 'tcx, Bx>
         context: PlaceContext,
         location: Location,
     ) {
-        let cx = self.fx.cx;
-
-        if let Some((place_base, elem)) = place_ref.last_projection() {
-            let mut base_context = if context.is_mutating_use() {
-                PlaceContext::MutatingUse(MutatingUseContext::Projection)
-            } else {
-                PlaceContext::NonMutatingUse(NonMutatingUseContext::Projection)
-            };
-
-            // Allow uses of projections that are ZSTs or from scalar fields.
-            let is_consume = matches!(
-                context,
-                PlaceContext::NonMutatingUse(
-                    NonMutatingUseContext::Copy | NonMutatingUseContext::Move,
-                )
-            );
-            if is_consume {
-                let base_ty = place_base.ty(self.fx.mir, cx.tcx());
-                let base_ty = self.fx.monomorphize(base_ty);
-
-                // ZSTs don't require any actual memory access.
-                let elem_ty = base_ty.projection_ty(cx.tcx(), self.fx.monomorphize(elem)).ty;
-                let span = self.fx.mir.local_decls[place_ref.local].source_info.span;
-                if cx.spanned_layout_of(elem_ty, span).is_zst() {
-                    return;
-                }
-
-                if let mir::ProjectionElem::Field(..) = elem {
-                    let layout = cx.spanned_layout_of(base_ty.ty, span);
-                    if cx.is_backend_immediate(layout) || cx.is_backend_scalar_pair(layout) {
-                        // Recurse with the same context, instead of `Projection`,
-                        // potentially stopping at non-operand projections,
-                        // which would trigger `not_ssa` on locals.
-                        base_context = context;
-                    }
-                }
-
-                if let mir::ProjectionElem::Deref = elem {
-                    // Deref projections typically only read the pointer.
-                    base_context = PlaceContext::NonMutatingUse(NonMutatingUseContext::Copy);
-                }
-            }
-
-            self.process_place(&place_base, base_context, location);
-            // HACK(eddyb) this emulates the old `visit_projection_elem`, this
-            // entire `visit_place`-like `process_place` method should be rewritten,
-            // now that we have moved to the "slice of projections" representation.
-            if let mir::ProjectionElem::Index(local) = elem {
-                self.visit_local(
-                    local,
-                    PlaceContext::NonMutatingUse(NonMutatingUseContext::Copy),
-                    location,
-                );
-            }
-        } else {
-            self.visit_local(place_ref.local, context, location);
+        if !place_ref.projection.is_empty() {
+            const COPY_CONTEXT: PlaceContext =
+                PlaceContext::NonMutatingUse(NonMutatingUseContext::Copy);
+
+            // `PlaceElem::Index` is the only variant that can mention other `Local`s,
+            // so check for those up-front before any potential short-circuits.
+            for elem in place_ref.projection {
+                if let mir::PlaceElem::Index(index_local) = *elem {
+                    self.visit_local(index_local, COPY_CONTEXT, location);
+                }
+            }
+
+            // If our local is already memory, nothing can make it *more* memory
+            // so we don't need to bother checking the projections further.
+            if self.locals[place_ref.local] == LocalKind::Memory {
+                return;
+            }
+
+            if place_ref.is_indirect_first_projection() {
+                // If this starts with a `Deref`, we only need to record a read of the
+                // pointer being dereferenced, as all the subsequent projections are
+                // working on a place which is always supported. (And because we're
+                // looking at codegen MIR, it can only happen as the first projection.)
+                self.visit_local(place_ref.local, COPY_CONTEXT, location);
+                return;
+            }
+
+            if context.is_mutating_use() {
+                // If it's a mutating use it doesn't matter what the projections are,
+                // if there are *any* then we need a place to write. (For example,
+                // `_1 = Foo()` works in SSA but `_2.0 = Foo()` does not.)
+                let mut_projection = PlaceContext::MutatingUse(MutatingUseContext::Projection);
+                self.visit_local(place_ref.local, mut_projection, location);
+                return;
+            }
         }
+
+        // Even with supported projections, we still need to have `visit_local`
+        // check for things that can't be done in SSA (like `SharedBorrow`).
+        self.visit_local(place_ref.local, context, location);
     }

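The mutating-use rule above is easiest to see with a concrete pair of assignments; a minimal sketch of the `_1 = Foo()` versus `_2.0 = Foo()` distinction from the comment, where `Foo` is a made-up type for illustration:

    struct Foo(u32, u32);

    fn assignments() {
        let a = Foo(1, 2);   // writes the whole local: can stay in SSA form
        let mut b = Foo(1, 2);
        b.0 = 7;             // writes through a `Field` projection, so `b`
                             // needs a real place in memory to write into
        let _ = (a, b);
    }
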
6 changes: 3 additions & 3 deletions compiler/rustc_codegen_ssa/src/mir/block.rs
@@ -1,6 +1,6 @@
 use std::cmp;
 
-use rustc_abi::{Align, BackendRepr, ExternAbi, HasDataLayout, Reg, Size, WrappingRange};
+use rustc_abi::{Align, BackendRepr, ExternAbi, FieldIdx, HasDataLayout, Reg, Size, WrappingRange};
 use rustc_ast as ast;
 use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
 use rustc_data_structures::packed::Pu128;

@@ -1038,7 +1038,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 let (idx, _) = op.layout.non_1zst_field(bx).expect(
                     "not exactly one non-1-ZST field in a `DispatchFromDyn` type",
                 );
-                op = op.extract_field(self, bx, idx.as_usize());
+                op = op.extract_field(self, bx, None, idx);
             }
 
             // Now that we have `*dyn Trait` or `&dyn Trait`, split it up into its

@@ -1598,7 +1598,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             } else {
                 // If the tuple is immediate, the elements are as well.
                 for i in 0..tuple.layout.fields.count() {
-                    let op = tuple.extract_field(self, bx, i);
+                    let op = tuple.extract_field(self, bx, None, FieldIdx::from_usize(i));
                     self.codegen_argument(bx, op, llargs, &args[i], lifetime_ends_after_call);
                 }
             }

1 change: 1 addition & 0 deletions compiler/rustc_codegen_ssa/src/mir/locals.rs
@@ -12,6 +12,7 @@ use tracing::{debug, warn};
 use crate::mir::{FunctionCx, LocalRef};
 use crate::traits::BuilderMethods;
 
+#[derive(Debug)]
 pub(super) struct Locals<'tcx, V> {
     values: IndexVec<mir::Local, LocalRef<'tcx, V>>,
 }

1 change: 1 addition & 0 deletions compiler/rustc_codegen_ssa/src/mir/mod.rs
@@ -136,6 +136,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
     }
 }
 
+#[derive(Debug)]
 enum LocalRef<'tcx, V> {
     Place(PlaceRef<'tcx, V>),
     /// `UnsizedPlace(p)`: `p` itself is a thin pointer (indirect place).

110 changes: 62 additions & 48 deletions compiler/rustc_codegen_ssa/src/mir/operand.rs
@@ -126,7 +126,7 @@ pub struct OperandRef<'tcx, V> {
     pub layout: TyAndLayout<'tcx>,
 }
 
-impl<V: CodegenObject> fmt::Debug for OperandRef<'_, V> {
+impl<V: fmt::Debug> fmt::Debug for OperandRef<'_, V> {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         write!(f, "OperandRef({:?} @ {:?})", self.val, self.layout)
     }

@@ -319,16 +319,40 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
         OperandRef { val, layout }
     }
 
+    /// Extracts field `field_idx` from `self`, after downcasting to
+    /// `downcast_variant` if it's specified.
+    ///
+    /// Reading from things like tuples or structs, which are always single-variant,
+    /// don't need to pass a downcast variant since downcasting them to
+    /// `FIRST_VARIANT` doesn't actually change anything.
+    /// Things like enums and coroutines, though, must pass the variant from which
+    /// they want to read unless they're specifically reading the tag field
+    /// (which is the index at the type level, not inside one of the variants).
     pub(crate) fn extract_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
         &self,
         fx: &mut FunctionCx<'a, 'tcx, Bx>,
         bx: &mut Bx,
-        i: usize,
+        downcast_variant: Option<VariantIdx>,
+        field_idx: FieldIdx,
     ) -> Self {
-        let field = self.layout.field(bx.cx(), i);
-        let offset = self.layout.fields.offset(i);
+        let layout = if let Some(vidx) = downcast_variant {
+            self.layout.for_variant(bx.cx(), vidx)
+        } else {
+            debug_assert!(
+                match self.layout.variants {
+                    Variants::Empty => true,
+                    Variants::Single { index } => index == FIRST_VARIANT,
+                    Variants::Multiple { tag_field, .. } => tag_field == field_idx,
+                },
+                "Should have specified a variant to read field {field_idx:?} of {self:?}",
+            );
+            self.layout
+        };
+
+        let field = layout.field(bx.cx(), field_idx.as_usize());
+        let offset = layout.fields.offset(field_idx.as_usize());
 
-        if !bx.is_backend_ref(self.layout) && bx.is_backend_ref(field) {
+        if !bx.is_backend_ref(layout) && bx.is_backend_ref(field) {
            // Part of https://github.com/rust-lang/compiler-team/issues/838
             span_bug!(
                 fx.mir.span,

@@ -338,27 +362,35 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
 
         let val = if field.is_zst() {
             OperandValue::ZeroSized
-        } else if let BackendRepr::SimdVector { .. } = self.layout.backend_repr {
+        } else if let BackendRepr::SimdVector { .. } = layout.backend_repr {
             // codegen_transmute_operand doesn't support SIMD, but since the previous
             // check handled ZSTs, the only possible field access into something SIMD
             // is to the `non_1zst_field` that's the same SIMD. (Other things, even
             // just padding, would change the wrapper's representation type.)
-            assert_eq!(field.size, self.layout.size);
+            assert_eq!(field.size, layout.size);
             self.val
-        } else if field.size == self.layout.size {
-            assert_eq!(offset.bytes(), 0);
-            fx.codegen_transmute_operand(bx, *self, field)
+        } else if field.size == layout.size {
+            debug_assert_eq!(offset.bytes(), 0);
+            if downcast_variant.is_some() || self.layout.ty.is_union() {
+                fx.codegen_transmute_operand(bx, *self, field)
+            } else {
+                self.val
+            }
         } else {
-            let (in_scalar, imm) = match (self.val, self.layout.backend_repr) {
+            let (in_scalar, imm) = match (self.val, layout.backend_repr) {
                 // Extract a scalar component from a pair.
                 (OperandValue::Pair(a_llval, b_llval), BackendRepr::ScalarPair(a, b)) => {
-                    if offset.bytes() == 0 {
+                    // This needs to look at `offset`, rather than `i`, because
+                    // for a type like `Option<u32>`, the first thing in the pair
+                    // is the tag, so `(_2 as Some).0` needs to read the *second*
+                    // thing in the pair despite it being "field zero".
+                    if offset == Size::ZERO {
                         assert_eq!(field.size, a.size(bx.cx()));
-                        (Some(a), a_llval)
+                        (a, a_llval)
                     } else {
                         assert_eq!(offset, a.size(bx.cx()).align_to(b.align(bx.cx()).abi));
                         assert_eq!(field.size, b.size(bx.cx()));
-                        (Some(b), b_llval)
+                        (b, b_llval)
                     }
                 }
 
@@ -367,30 +399,18 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
                 }
             };
             OperandValue::Immediate(match field.backend_repr {
-                BackendRepr::SimdVector { .. } => imm,
-                BackendRepr::Scalar(out_scalar) => {
-                    let Some(in_scalar) = in_scalar else {
-                        span_bug!(
-                            fx.mir.span,
-                            "OperandRef::extract_field({:?}): missing input scalar for output scalar",
-                            self
-                        )
-                    };
-                    if in_scalar != out_scalar {
-                        // If the backend and backend_immediate types might differ,
-                        // flip back to the backend type then to the new immediate.
-                        // This avoids nop truncations, but still handles things like
-                        // Bools in union fields needs to be truncated.
-                        let backend = bx.from_immediate(imm);
-                        bx.to_immediate_scalar(backend, out_scalar)
-                    } else {
-                        imm
-                    }
+                BackendRepr::Scalar(out_scalar) if downcast_variant.is_some() => {
+                    // For a type like `Result<usize, &u32>` the layout is `Pair(i64, ptr)`.
+                    // But if we're reading the `Ok` payload, we need to turn that `ptr`
+                    // back into an integer. To avoid repeating logic we do that by
+                    // calling the transmute code, which is legal thanks to the size
+                    // assert we did when pulling it out of the pair.
+                    transmute_scalar(bx, imm, in_scalar, out_scalar)
                 }
+                BackendRepr::Scalar(_) | BackendRepr::SimdVector { .. } => imm,
                 BackendRepr::ScalarPair(_, _) | BackendRepr::Memory { .. } => bug!(),
             })
         };
 
         OperandRef { val, layout: field }
     }

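A hypothetical call-site sketch of the new signature (the `op`, `fx`, `bx`, and `tag_field` bindings are assumed context, not code from this PR): reading a variant's payload passes the variant explicitly, while reading the tag passes `None`.

    // Hypothetical sketch: read `(op as Ok).0` from an immediate
    // `Result<usize, &u32>` operand. `Ok` is variant 0, i.e. `FIRST_VARIANT`.
    let payload = op.extract_field(fx, bx, Some(FIRST_VARIANT), FieldIdx::from_u32(0));
    // The tag is indexed at the type level, not inside a variant, so no
    // downcast is passed when reading it.
    let tag = op.extract_field(fx, bx, None, tag_field);
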
@@ -438,7 +458,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
         let tag_op = match self.val {
             OperandValue::ZeroSized => bug!(),
             OperandValue::Immediate(_) | OperandValue::Pair(_, _) => {
-                self.extract_field(fx, bx, tag_field.as_usize())
+                self.extract_field(fx, bx, None, tag_field)
             }
             OperandValue::Ref(place) => {
                 let tag = place.with_type(self.layout).project_field(bx, tag_field.as_usize());

@@ -929,34 +949,28 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
     ) -> Option<OperandRef<'tcx, Bx::Value>> {
         debug!("maybe_codegen_consume_direct(place_ref={:?})", place_ref);
 
+        let mut downcast_variant = None;
         match self.locals[place_ref.local] {
             LocalRef::Operand(mut o) => {
                 // Moves out of scalar and scalar pair fields are trivial.
                 for elem in place_ref.projection.iter() {
-                    match elem {
+                    match *elem {
                         mir::ProjectionElem::Field(f, _) => {
                             assert!(
                                 !o.layout.ty.is_any_ptr(),
                                 "Bad PlaceRef: destructing pointers should use cast/PtrMetadata, \
                                  but tried to access field {f:?} of pointer {o:?}",
                             );
-                            o = o.extract_field(self, bx, f.index());
+                            o = o.extract_field(self, bx, downcast_variant, f);
+                            downcast_variant = None;
                         }
-                        mir::ProjectionElem::Index(_)
-                        | mir::ProjectionElem::ConstantIndex { .. } => {
-                            // ZSTs don't require any actual memory access.
-                            // FIXME(eddyb) deduplicate this with the identical
-                            // checks in `codegen_consume` and `extract_field`.
-                            let elem = o.layout.field(bx.cx(), 0);
-                            if elem.is_zst() {
-                                o = OperandRef::zero_sized(elem);
-                            } else {
-                                return None;
-                            }
+                        mir::ProjectionElem::Downcast(_sym, variant_idx) => {
+                            downcast_variant = Some(variant_idx);
                         }
                         _ => return None,
                     }
                 }
+                debug_assert_eq!(downcast_variant, None);
 
                 Some(o)
             }
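
Putting the operand changes together, this is the shape of consume the new path handles; a sketch of mine reusing the `Result<usize, &u32>` example from the comments above, not a test from the PR:

    // The operand arrives as the immediate pair `(i64 tag, ptr payload)`
    // described above; reading the `Ok` payload extracts the pointer half
    // of the pair and transmutes it back to an integer, with no alloca.
    pub fn ok_or_zero(x: Result<usize, &u32>) -> usize {
        match x {
            Ok(v) => v,
            Err(_) => 0,
        }
    }
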
10 changes: 9 additions & 1 deletion tests/codegen/array-cmp.rs
@@ -39,6 +39,10 @@ pub fn array_of_tuple_le(a: &[(i16, u16); 2], b: &[(i16, u16); 2]) -> bool {
     // CHECK: %[[EQ00:.+]] = icmp eq i16 %[[A00]], %[[B00]]
     // CHECK-NEXT: br i1 %[[EQ00]], label %[[L01:.+]], label %[[EXIT_S:.+]]
 
+    // CHECK: [[EXIT_S]]:
+    // CHECK: %[[RET_S:.+]] = icmp sle i16
+    // CHECK: br label
+
     // CHECK: [[L01]]:
     // CHECK: %[[PA01:.+]] = getelementptr{{.+}}i8, ptr %a, {{i32|i64}} 2
     // CHECK: %[[PB01:.+]] = getelementptr{{.+}}i8, ptr %b, {{i32|i64}} 2

@@ -66,8 +70,12 @@ pub fn array_of_tuple_le(a: &[(i16, u16); 2], b: &[(i16, u16); 2]) -> bool {
     // CHECK: %[[EQ11:.+]] = icmp eq i16 %[[A11]], %[[B11]]
     // CHECK-NEXT: br i1 %[[EQ11]], label %[[DONE:.+]], label %[[EXIT_U]]
 
+    // CHECK: [[EXIT_U]]:
+    // CHECK: %[[RET_U:.+]] = icmp ule i16
+    // CHECK: br label
+
     // CHECK: [[DONE]]:
-    // CHECK: %[[RET:.+]] = phi i1 [ %{{.+}}, %[[EXIT_S]] ], [ %{{.+}}, %[[EXIT_U]] ], [ true, %[[L11]] ]
+    // CHECK: %[[RET:.+]] = phi i1 [ true, %[[L11]] ], [ %[[RET_S]], %[[EXIT_S]] ], [ %[[RET_U]], %[[EXIT_U]] ]
     // CHECK: ret i1 %[[RET]]
 
     a <= b

6 changes: 5 additions & 1 deletion tests/codegen/common_prim_int_ptr.rs
@@ -40,9 +40,13 @@ pub unsafe fn extract_int(x: Result<usize, Box<()>>) -> usize {
 }
 
 // CHECK-LABEL: @extract_box
-// CHECK-SAME: (i{{[0-9]+}} {{[^%]+}} [[DISCRIMINANT:%[0-9]+]], ptr {{[^%]+}} [[PAYLOAD:%[0-9]+]])
+// CHECK-SAME: (i{{[0-9]+}} {{[^%]+}} [[DISCRIMINANT:%x.0]], ptr {{[^%]+}} [[PAYLOAD:%x.1]])
 #[no_mangle]
 pub unsafe fn extract_box(x: Result<usize, Box<i32>>) -> Box<i32> {
+    // CHECK: [[NOT_OK:%.+]] = icmp ne i{{[0-9]+}} [[DISCRIMINANT]], 0
+    // CHECK: call void @llvm.assume(i1 [[NOT_OK]])
+    // CHECK: [[NOT_NULL:%.+]] = icmp ne ptr [[PAYLOAD]], null
+    // CHECK: call void @llvm.assume(i1 [[NOT_NULL]])
     // CHECK: ret ptr [[PAYLOAD]]
     match x {
         Ok(_) => std::intrinsics::unreachable(),