diff --git a/src/librustc/ich/impls_ty.rs b/src/librustc/ich/impls_ty.rs index f4c46b6ce09e1..c598f99a2b54d 100644 --- a/src/librustc/ich/impls_ty.rs +++ b/src/librustc/ich/impls_ty.rs @@ -384,7 +384,8 @@ for ::mir::interpret::ConstValue<'gcx> { a.hash_stable(hcx, hasher); b.hash_stable(hcx, hasher); } - ByRef(alloc, offset) => { + ByRef(id, alloc, offset) => { + id.hash_stable(hcx, hasher); alloc.hash_stable(hcx, hasher); offset.hash_stable(hcx, hasher); } @@ -446,7 +447,7 @@ impl<'a> HashStable> for mir::interpret::Allocation { } self.undef_mask.hash_stable(hcx, hasher); self.align.hash_stable(hcx, hasher); - self.runtime_mutability.hash_stable(hcx, hasher); + self.mutability.hash_stable(hcx, hasher); } } @@ -516,7 +517,6 @@ for ::mir::interpret::EvalErrorKind<'gcx, O> { InvalidMemoryAccess | InvalidFunctionPointer | InvalidBool | - InvalidDiscriminant | InvalidNullPointerUsage | ReadPointerAsBytes | ReadBytesAsPointer | @@ -549,6 +549,7 @@ for ::mir::interpret::EvalErrorKind<'gcx, O> { GeneratorResumedAfterReturn | GeneratorResumedAfterPanic | InfiniteLoop => {} + InvalidDiscriminant(val) => val.hash_stable(hcx, hasher), Panic { ref msg, ref file, line, col } => { msg.hash_stable(hcx, hasher); file.hash_stable(hcx, hasher); diff --git a/src/librustc/mir/interpret/error.rs b/src/librustc/mir/interpret/error.rs index dc6d17d34535b..ab38f8fa72135 100644 --- a/src/librustc/mir/interpret/error.rs +++ b/src/librustc/mir/interpret/error.rs @@ -190,7 +190,7 @@ pub enum EvalErrorKind<'tcx, O> { InvalidMemoryAccess, InvalidFunctionPointer, InvalidBool, - InvalidDiscriminant, + InvalidDiscriminant(u128), PointerOutOfBounds { ptr: Pointer, access: bool, @@ -302,7 +302,7 @@ impl<'tcx, O> EvalErrorKind<'tcx, O> { "tried to use a function pointer after offsetting it", InvalidBool => "invalid boolean value read", - InvalidDiscriminant => + InvalidDiscriminant(..) => "invalid enum discriminant value read", PointerOutOfBounds { .. 
} => "pointer offset outside bounds of allocation", @@ -488,6 +488,8 @@ impl<'tcx, O: fmt::Debug> fmt::Debug for EvalErrorKind<'tcx, O> { align {}", size.bytes(), align.abi(), size2.bytes(), align2.abi()), Panic { ref msg, line, col, ref file } => write!(f, "the evaluated program panicked at '{}', {}:{}:{}", msg, file, line, col), + InvalidDiscriminant(val) => + write!(f, "encountered invalid enum discriminant {}", val), _ => write!(f, "{}", self.description()), } } diff --git a/src/librustc/mir/interpret/mod.rs b/src/librustc/mir/interpret/mod.rs index 6458c211ab537..147f9ccad7c38 100644 --- a/src/librustc/mir/interpret/mod.rs +++ b/src/librustc/mir/interpret/mod.rs @@ -393,7 +393,8 @@ impl fmt::Display for AllocId { pub enum AllocType<'tcx, M> { /// The alloc id is used as a function pointer Function(Instance<'tcx>), - /// The alloc id points to a static variable + /// The alloc id points to a "lazy" static variable that did not get computed (yet). + /// This is also used to break the cycle in recursive statics. Static(DefId), /// The alloc id points to memory Memory(M) @@ -496,13 +497,14 @@ pub struct Allocation { pub undef_mask: UndefMask, /// The alignment of the allocation to detect unaligned reads. pub align: Align, - /// Whether the allocation (of a static) should be put into mutable memory when codegenning - /// - /// Only happens for `static mut` or `static` with interior mutability - pub runtime_mutability: Mutability, + /// Whether the allocation is mutable. + /// Also used by codegen to determine if a static should be put into mutable memory, + /// which happens for `static mut` and `static` with interior mutability. 
+ pub mutability: Mutability, } impl Allocation { + /// Creates a read-only allocation initialized by the given bytes pub fn from_bytes(slice: &[u8], align: Align) -> Self { let mut undef_mask = UndefMask::new(Size::ZERO); undef_mask.grow(Size::from_bytes(slice.len() as u64), true); @@ -511,7 +513,7 @@ impl Allocation { relocations: Relocations::new(), undef_mask, align, - runtime_mutability: Mutability::Immutable, + mutability: Mutability::Immutable, } } @@ -526,7 +528,7 @@ impl Allocation { relocations: Relocations::new(), undef_mask: UndefMask::new(size), align, - runtime_mutability: Mutability::Immutable, + mutability: Mutability::Mutable, } } } diff --git a/src/librustc/mir/interpret/value.rs b/src/librustc/mir/interpret/value.rs index 6b34e3f47cc5d..d793bb1cc63ca 100644 --- a/src/librustc/mir/interpret/value.rs +++ b/src/librustc/mir/interpret/value.rs @@ -14,7 +14,7 @@ use ty::layout::{HasDataLayout, Size}; use ty::subst::Substs; use hir::def_id::DefId; -use super::{EvalResult, Pointer, PointerArithmetic, Allocation}; +use super::{EvalResult, Pointer, PointerArithmetic, Allocation, AllocId, sign_extend}; /// Represents a constant value in Rust. Scalar and ScalarPair are optimizations which /// matches the LocalValue optimizations for easy conversions between Value and ConstValue. @@ -32,8 +32,9 @@ pub enum ConstValue<'tcx> { /// /// The second field may be undef in case of `Option::None` ScalarPair(Scalar, ScalarMaybeUndef), - /// Used only for the remaining cases. An allocation + offset into the allocation - ByRef(&'tcx Allocation, Size), + /// Used only for the remaining cases. An allocation + offset into the allocation. + /// Invariant: The AllocId matches the allocation. 
+ ByRef(AllocId, &'tcx Allocation, Size), } impl<'tcx> ConstValue<'tcx> { @@ -185,6 +186,57 @@ impl<'tcx> Scalar { _ => err!(InvalidBool), } } + + pub fn to_char(self) -> EvalResult<'tcx, char> { + let val = self.to_u32()?; + match ::std::char::from_u32(val) { + Some(c) => Ok(c), + None => err!(InvalidChar(val as u128)), + } + } + + pub fn to_u8(self) -> EvalResult<'static, u8> { + let sz = Size::from_bits(8); + let b = self.to_bits(sz)?; + assert_eq!(b as u8 as u128, b); + Ok(b as u8) + } + + pub fn to_u32(self) -> EvalResult<'static, u32> { + let sz = Size::from_bits(32); + let b = self.to_bits(sz)?; + assert_eq!(b as u32 as u128, b); + Ok(b as u32) + } + + pub fn to_usize(self, cx: impl HasDataLayout) -> EvalResult<'static, u64> { + let b = self.to_bits(cx.data_layout().pointer_size)?; + assert_eq!(b as u64 as u128, b); + Ok(b as u64) + } + + pub fn to_i8(self) -> EvalResult<'static, i8> { + let sz = Size::from_bits(8); + let b = self.to_bits(sz)?; + let b = sign_extend(b, sz) as i128; + assert_eq!(b as i8 as i128, b); + Ok(b as i8) + } + + pub fn to_i32(self) -> EvalResult<'static, i32> { + let sz = Size::from_bits(32); + let b = self.to_bits(sz)?; + let b = sign_extend(b, sz) as i128; + assert_eq!(b as i32 as i128, b); + Ok(b as i32) + } + + pub fn to_isize(self, cx: impl HasDataLayout) -> EvalResult<'static, i64> { + let b = self.to_bits(cx.data_layout().pointer_size)?; + let b = sign_extend(b, cx.data_layout().pointer_size) as i128; + assert_eq!(b as i64 as i128, b); + Ok(b as i64) + } } impl From for Scalar { @@ -228,6 +280,7 @@ impl From for ScalarMaybeUndef { } impl<'tcx> ScalarMaybeUndef { + #[inline] pub fn not_undef(self) -> EvalResult<'static, Scalar> { match self { ScalarMaybeUndef::Scalar(scalar) => Ok(scalar), @@ -235,15 +288,53 @@ impl<'tcx> ScalarMaybeUndef { } } + #[inline(always)] pub fn to_ptr(self) -> EvalResult<'tcx, Pointer> { self.not_undef()?.to_ptr() } + #[inline(always)] pub fn to_bits(self, target_size: Size) -> EvalResult<'tcx, u128> 
{ self.not_undef()?.to_bits(target_size) } + #[inline(always)] pub fn to_bool(self) -> EvalResult<'tcx, bool> { self.not_undef()?.to_bool() } + + #[inline(always)] + pub fn to_char(self) -> EvalResult<'tcx, char> { + self.not_undef()?.to_char() + } + + #[inline(always)] + pub fn to_u8(self) -> EvalResult<'tcx, u8> { + self.not_undef()?.to_u8() + } + + #[inline(always)] + pub fn to_u32(self) -> EvalResult<'tcx, u32> { + self.not_undef()?.to_u32() + } + + #[inline(always)] + pub fn to_usize(self, cx: impl HasDataLayout) -> EvalResult<'tcx, u64> { + self.not_undef()?.to_usize(cx) + } + + #[inline(always)] + pub fn to_i8(self) -> EvalResult<'tcx, i8> { + self.not_undef()?.to_i8() + } + + #[inline(always)] + pub fn to_i32(self) -> EvalResult<'tcx, i32> { + self.not_undef()?.to_i32() + } + + #[inline(always)] + pub fn to_isize(self, cx: impl HasDataLayout) -> EvalResult<'tcx, i64> { + self.not_undef()?.to_isize(cx) + } } diff --git a/src/librustc/ty/context.rs b/src/librustc/ty/context.rs index 424139e752736..b10e9f1415869 100644 --- a/src/librustc/ty/context.rs +++ b/src/librustc/ty/context.rs @@ -1043,13 +1043,13 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { } let interned = self.global_arenas.const_allocs.alloc(alloc); - if let Some(prev) = allocs.replace(interned) { + if let Some(prev) = allocs.replace(interned) { // insert into interner bug!("Tried to overwrite interned Allocation: {:#?}", prev) } interned } - /// Allocates a byte or string literal for `mir::interpret` + /// Allocates a byte or string literal for `mir::interpret`, read-only pub fn allocate_bytes(self, bytes: &[u8]) -> interpret::AllocId { // create an allocation that just contains these bytes let alloc = interpret::Allocation::from_byte_aligned_bytes(bytes); diff --git a/src/librustc/ty/structural_impls.rs b/src/librustc/ty/structural_impls.rs index e6c10358279b3..1f3c314050435 100644 --- a/src/librustc/ty/structural_impls.rs +++ b/src/librustc/ty/structural_impls.rs @@ -498,7 +498,7 @@ impl<'a, 
'tcx, O: Lift<'tcx>> Lift<'tcx> for interpret::EvalErrorKind<'a, O> { InvalidMemoryAccess => InvalidMemoryAccess, InvalidFunctionPointer => InvalidFunctionPointer, InvalidBool => InvalidBool, - InvalidDiscriminant => InvalidDiscriminant, + InvalidDiscriminant(val) => InvalidDiscriminant(val), PointerOutOfBounds { ptr, access, @@ -1139,7 +1139,7 @@ impl<'tcx> TypeFoldable<'tcx> for ConstValue<'tcx> { match *self { ConstValue::Scalar(v) => ConstValue::Scalar(v), ConstValue::ScalarPair(a, b) => ConstValue::ScalarPair(a, b), - ConstValue::ByRef(alloc, offset) => ConstValue::ByRef(alloc, offset), + ConstValue::ByRef(id, alloc, offset) => ConstValue::ByRef(id, alloc, offset), ConstValue::Unevaluated(def_id, substs) => { ConstValue::Unevaluated(def_id, substs.fold_with(folder)) } @@ -1150,7 +1150,7 @@ impl<'tcx> TypeFoldable<'tcx> for ConstValue<'tcx> { match *self { ConstValue::Scalar(_) | ConstValue::ScalarPair(_, _) | - ConstValue::ByRef(_, _) => false, + ConstValue::ByRef(_, _, _) => false, ConstValue::Unevaluated(_, substs) => substs.visit_with(visitor), } } diff --git a/src/librustc_codegen_llvm/mir/constant.rs b/src/librustc_codegen_llvm/mir/constant.rs index 2657543b2d167..b6c9658dd6fc3 100644 --- a/src/librustc_codegen_llvm/mir/constant.rs +++ b/src/librustc_codegen_llvm/mir/constant.rs @@ -57,7 +57,7 @@ pub fn scalar_to_llvm( let base_addr = match alloc_type { Some(AllocType::Memory(alloc)) => { let init = const_alloc_to_llvm(cx, alloc); - if alloc.runtime_mutability == Mutability::Mutable { + if alloc.mutability == Mutability::Mutable { consts::addr_of_mut(cx, init, alloc.align, None) } else { consts::addr_of(cx, init, alloc.align, None) @@ -134,7 +134,7 @@ pub fn codegen_static_initializer( let static_ = cx.tcx.const_eval(param_env.and(cid))?; let alloc = match static_.val { - ConstValue::ByRef(alloc, n) if n.bytes() == 0 => alloc, + ConstValue::ByRef(_, alloc, n) if n.bytes() == 0 => alloc, _ => bug!("static const eval returned {:#?}", static_), }; 
Ok((const_alloc_to_llvm(cx, alloc), alloc)) diff --git a/src/librustc_codegen_llvm/mir/operand.rs b/src/librustc_codegen_llvm/mir/operand.rs index 9537379813d52..419e7298588c5 100644 --- a/src/librustc_codegen_llvm/mir/operand.rs +++ b/src/librustc_codegen_llvm/mir/operand.rs @@ -126,7 +126,7 @@ impl OperandRef<'ll, 'tcx> { }; OperandValue::Pair(a_llval, b_llval) }, - ConstValue::ByRef(alloc, offset) => { + ConstValue::ByRef(_, alloc, offset) => { return Ok(PlaceRef::from_const_alloc(bx, layout, alloc, offset).load(bx)); }, }; diff --git a/src/librustc_codegen_llvm/mir/place.rs b/src/librustc_codegen_llvm/mir/place.rs index ce3292eaa426d..833dca8c75fd5 100644 --- a/src/librustc_codegen_llvm/mir/place.rs +++ b/src/librustc_codegen_llvm/mir/place.rs @@ -458,7 +458,7 @@ impl FunctionCx<'a, 'll, 'tcx> { let layout = cx.layout_of(self.monomorphize(&ty)); match bx.tcx().const_eval(param_env.and(cid)) { Ok(val) => match val.val { - mir::interpret::ConstValue::ByRef(alloc, offset) => { + mir::interpret::ConstValue::ByRef(_, alloc, offset) => { PlaceRef::from_const_alloc(bx, layout, alloc, offset) } _ => bug!("promoteds should have an allocation: {:?}", val), diff --git a/src/librustc_lint/builtin.rs b/src/librustc_lint/builtin.rs index d84972b32c108..c497627415f1a 100644 --- a/src/librustc_lint/builtin.rs +++ b/src/librustc_lint/builtin.rs @@ -1612,21 +1612,15 @@ fn validate_const<'a, 'tcx>( gid: ::rustc::mir::interpret::GlobalId<'tcx>, what: &str, ) { - let mut ecx = ::rustc_mir::interpret::mk_eval_cx(tcx, gid.instance, param_env).unwrap(); + let ecx = ::rustc_mir::interpret::mk_eval_cx(tcx, gid.instance, param_env).unwrap(); let result = (|| { - use rustc_target::abi::LayoutOf; - use rustc_mir::interpret::OpTy; - - let op = ecx.const_value_to_op(constant.val)?; - let layout = ecx.layout_of(constant.ty)?; - let place = ecx.allocate_op(OpTy { op, layout })?.into(); - - let mut todo = vec![(place, Vec::new())]; + let op = ecx.const_to_op(constant)?; + let mut todo = 
vec![(op, Vec::new())]; let mut seen = FxHashSet(); - seen.insert(place); - while let Some((place, mut path)) = todo.pop() { - ecx.validate_mplace( - place, + seen.insert(op); + while let Some((op, mut path)) = todo.pop() { + ecx.validate_operand( + op, &mut path, &mut seen, &mut todo, diff --git a/src/librustc_mir/interpret/const_eval.rs b/src/librustc_mir/const_eval.rs similarity index 54% rename from src/librustc_mir/interpret/const_eval.rs rename to src/librustc_mir/const_eval.rs index 8e77af7526e35..295fca839c812 100644 --- a/src/librustc_mir/interpret/const_eval.rs +++ b/src/librustc_mir/const_eval.rs @@ -8,29 +8,29 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +// Not in interpret to make sure we do not use private implementation details + use std::fmt; use std::error::Error; -use rustc::hir; +use rustc::hir::{self, def_id::DefId}; use rustc::mir::interpret::ConstEvalErr; use rustc::mir; -use rustc::ty::{self, TyCtxt, Instance}; -use rustc::ty::layout::{LayoutOf, Primitive, TyLayout, Size}; +use rustc::ty::{self, TyCtxt, Instance, query::TyCtxtAt}; +use rustc::ty::layout::{LayoutOf, TyLayout}; use rustc::ty::subst::Subst; use rustc_data_structures::indexed_vec::{IndexVec, Idx}; use syntax::ast::Mutability; use syntax::source_map::Span; -use syntax::source_map::DUMMY_SP; -use syntax::symbol::Symbol; use rustc::mir::interpret::{ EvalResult, EvalError, EvalErrorKind, GlobalId, - Scalar, AllocId, Allocation, ConstValue, + Scalar, Allocation, ConstValue, }; -use super::{ - Place, PlaceExtra, PlaceTy, MemPlace, OpTy, Operand, Value, - EvalContext, StackPopCleanup, Memory, MemoryKind, MPlaceTy, +use interpret::{self, + Place, PlaceTy, MemPlace, OpTy, Operand, Value, + EvalContext, StackPopCleanup, MemoryKind, }; pub fn mk_borrowck_eval_cx<'a, 'mir, 'tcx>( @@ -43,14 +43,14 @@ pub fn mk_borrowck_eval_cx<'a, 'mir, 'tcx>( let param_env = tcx.param_env(instance.def_id()); let mut ecx = 
EvalContext::new(tcx.at(span), param_env, CompileTimeEvaluator, ()); // insert a stack frame so any queries have the correct substs - ecx.stack.push(super::eval_context::Frame { + ecx.stack.push(interpret::Frame { block: mir::START_BLOCK, locals: IndexVec::new(), instance, span, mir, return_place: Place::null(tcx), - return_to_block: StackPopCleanup::None, + return_to_block: StackPopCleanup::Goto(None), // never pop stmt: 0, }); Ok(ecx) } @@ -71,7 +71,7 @@ pub fn mk_eval_cx<'a, 'tcx>( mir.span, mir, Place::null(tcx), - StackPopCleanup::None, + StackPopCleanup::Goto(None), // never pop )?; Ok(ecx) } @@ -103,15 +103,17 @@ pub fn op_to_const<'tcx>( let val = match normalized_op { Err(MemPlace { ptr, align, extra }) => { // extract alloc-offset pair - assert_eq!(extra, PlaceExtra::None); + assert!(extra.is_none()); let ptr = ptr.to_ptr()?; let alloc = ecx.memory.get(ptr.alloc_id)?; assert!(alloc.align.abi() >= align.abi()); assert!(alloc.bytes.len() as u64 - ptr.offset.bytes() >= op.layout.size.bytes()); let mut alloc = alloc.clone(); alloc.align = align; + // FIXME shouldn't it be the case that `mark_static_initialized` has already + // interned this? I thought that is the entire point of that `FinishStatic` stuff? let alloc = ecx.tcx.intern_const_alloc(alloc); - ConstValue::ByRef(alloc, ptr.offset) + ConstValue::ByRef(ptr.alloc_id, alloc, ptr.offset) }, Ok(Value::Scalar(x)) => ConstValue::Scalar(x.not_undef()?), @@ -120,13 +122,6 @@ }; Ok(ty::Const::from_const_value(ecx.tcx.tcx, val, op.layout.ty)) } -pub fn const_to_op<'tcx>( - ecx: &mut EvalContext<'_, '_, 'tcx, CompileTimeEvaluator>, - cnst: &'tcx ty::Const<'tcx>, -) -> EvalResult<'tcx, OpTy<'tcx>> { - let op = ecx.const_value_to_op(cnst.val)?; - Ok(OpTy { op, layout: ecx.layout_of(cnst.ty)? 
}) -} fn eval_body_and_ecx<'a, 'mir, 'tcx>( tcx: TyCtxt<'a, 'tcx, 'tcx>, @@ -134,7 +129,6 @@ fn eval_body_and_ecx<'a, 'mir, 'tcx>( mir: Option<&'mir mir::Mir<'tcx>>, param_env: ty::ParamEnv<'tcx>, ) -> (EvalResult<'tcx, OpTy<'tcx>>, EvalContext<'a, 'mir, 'tcx, CompileTimeEvaluator>) { - debug!("eval_body_and_ecx: {:?}, {:?}", cid, param_env); // we start out with the best span we have // and try improving it down the road when more information is available let span = tcx.def_span(cid.instance.def_id()); @@ -151,7 +145,7 @@ fn eval_body_using_ecx<'a, 'mir, 'tcx>( mir: Option<&'mir mir::Mir<'tcx>>, param_env: ty::ParamEnv<'tcx>, ) -> EvalResult<'tcx, OpTy<'tcx>> { - debug!("eval_body: {:?}, {:?}", cid, param_env); + debug!("eval_body_using_ecx: {:?}, {:?}", cid, param_env); let tcx = ecx.tcx.tcx; let mut mir = match mir { Some(mir) => mir, @@ -163,29 +157,33 @@ fn eval_body_using_ecx<'a, 'mir, 'tcx>( let layout = ecx.layout_of(mir.return_ty().subst(tcx, cid.instance.substs))?; assert!(!layout.is_unsized()); let ret = ecx.allocate(layout, MemoryKind::Stack)?; - let internally_mutable = !layout.ty.is_freeze(tcx, param_env, mir.span); - let is_static = tcx.is_static(cid.instance.def_id()); - let mutability = if is_static == Some(hir::Mutability::MutMutable) || internally_mutable { - Mutability::Mutable - } else { - Mutability::Immutable - }; - let cleanup = StackPopCleanup::MarkStatic(mutability); + let name = ty::tls::with(|tcx| tcx.item_path_str(cid.instance.def_id())); let prom = cid.promoted.map_or(String::new(), |p| format!("::promoted[{:?}]", p)); - trace!("const_eval: pushing stack frame for global: {}{}", name, prom); + trace!("eval_body_using_ecx: pushing stack frame for global: {}{}", name, prom); assert!(mir.arg_count == 0); ecx.push_stack_frame( cid.instance, mir.span, mir, Place::Ptr(*ret), - cleanup, + StackPopCleanup::None { cleanup: false }, )?; // The main interpreter loop. - while ecx.step()? 
{} + ecx.run()?; + // Intern the result + let internally_mutable = !layout.ty.is_freeze(tcx, param_env, mir.span); + let is_static = tcx.is_static(cid.instance.def_id()); + let mutability = if is_static == Some(hir::Mutability::MutMutable) || internally_mutable { + Mutability::Mutable + } else { + Mutability::Immutable + }; + ecx.memory.intern_static(ret.ptr.to_ptr()?.alloc_id, mutability)?; + + debug!("eval_body_using_ecx done: {:?}", *ret); Ok(ret.into()) } @@ -234,72 +232,33 @@ impl Error for ConstEvalError { } } -impl<'mir, 'tcx> super::Machine<'mir, 'tcx> for CompileTimeEvaluator { +impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeEvaluator { type MemoryData = (); type MemoryKinds = !; - fn eval_fn_call<'a>( + + const MUT_STATIC_KIND: Option = None; // no mutating of statics allowed + + fn find_fn<'a>( ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>, instance: ty::Instance<'tcx>, - destination: Option<(PlaceTy<'tcx>, mir::BasicBlock)>, args: &[OpTy<'tcx>], - span: Span, - ) -> EvalResult<'tcx, bool> { + dest: Option>, + ret: Option, + ) -> EvalResult<'tcx, Option<&'mir mir::Mir<'tcx>>> { debug!("eval_fn_call: {:?}", instance); if !ecx.tcx.is_const_fn(instance.def_id()) { - let def_id = instance.def_id(); - // Some fn calls are actually BinOp intrinsics - let _: ! 
= if let Some((op, oflo)) = ecx.tcx.is_binop_lang_item(def_id) { - let (dest, bb) = destination.expect("128 lowerings can't diverge"); - let l = ecx.read_value(args[0])?; - let r = ecx.read_value(args[1])?; - if oflo { - ecx.binop_with_overflow(op, l, r, dest)?; - } else { - ecx.binop_ignore_overflow(op, l, r, dest)?; - } - ecx.goto_block(bb); - return Ok(true); - } else if Some(def_id) == ecx.tcx.lang_items().panic_fn() { - assert!(args.len() == 1); - // &(&'static str, &'static str, u32, u32) - let ptr = ecx.read_value(args[0])?; - let place = ecx.ref_to_mplace(ptr)?; - let (msg, file, line, col) = ( - place_field(ecx, 0, place)?, - place_field(ecx, 1, place)?, - place_field(ecx, 2, place)?, - place_field(ecx, 3, place)?, - ); - - let msg = to_str(ecx, msg)?; - let file = to_str(ecx, file)?; - let line = to_u32(line)?; - let col = to_u32(col)?; - return Err(EvalErrorKind::Panic { msg, file, line, col }.into()); - } else if Some(def_id) == ecx.tcx.lang_items().begin_panic_fn() { - assert!(args.len() == 2); - // &'static str, &(&'static str, u32, u32) - let msg = ecx.read_value(args[0])?; - let ptr = ecx.read_value(args[1])?; - let place = ecx.ref_to_mplace(ptr)?; - let (file, line, col) = ( - place_field(ecx, 0, place)?, - place_field(ecx, 1, place)?, - place_field(ecx, 2, place)?, - ); - - let msg = to_str(ecx, msg.value)?; - let file = to_str(ecx, file)?; - let line = to_u32(line)?; - let col = to_u32(col)?; - return Err(EvalErrorKind::Panic { msg, file, line, col }.into()); - } else { - return Err( - ConstEvalError::NotConst(format!("calling non-const fn `{}`", instance)).into(), - ); - }; + // Some functions we support even if they are non-const -- but avoid testing + // that for const fn! + if ecx.hook_fn(instance, args, dest)? 
{ + ecx.goto_block(ret)?; // fully evaluated and done + return Ok(None); + } + return Err( + ConstEvalError::NotConst(format!("calling non-const fn `{}`", instance)).into(), + ); } - let mir = match ecx.load_mir(instance.def) { + // This is a const fn. Call it. + Ok(Some(match ecx.load_mir(instance.def) { Ok(mir) => mir, Err(err) => { if let EvalErrorKind::NoMirFor(ref path) = err.kind { @@ -310,94 +269,23 @@ impl<'mir, 'tcx> super::Machine<'mir, 'tcx> for CompileTimeEvaluator { } return Err(err); } - }; - let (return_place, return_to_block) = match destination { - Some((place, block)) => (*place, StackPopCleanup::Goto(block)), - None => (Place::null(&ecx), StackPopCleanup::None), - }; - - ecx.push_stack_frame( - instance, - span, - mir, - return_place, - return_to_block, - )?; - - Ok(false) + })) } - fn call_intrinsic<'a>( ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>, instance: ty::Instance<'tcx>, args: &[OpTy<'tcx>], dest: PlaceTy<'tcx>, - target: mir::BasicBlock, ) -> EvalResult<'tcx> { - let substs = instance.substs; - - let intrinsic_name = &ecx.tcx.item_name(instance.def_id()).as_str()[..]; - match intrinsic_name { - "min_align_of" => { - let elem_ty = substs.type_at(0); - let elem_align = ecx.layout_of(elem_ty)?.align.abi(); - let align_val = Scalar::Bits { - bits: elem_align as u128, - size: dest.layout.size.bytes() as u8, - }; - ecx.write_scalar(align_val, dest)?; - } - - "size_of" => { - let ty = substs.type_at(0); - let size = ecx.layout_of(ty)?.size.bytes() as u128; - let size_val = Scalar::Bits { - bits: size, - size: dest.layout.size.bytes() as u8, - }; - ecx.write_scalar(size_val, dest)?; - } - - "type_id" => { - let ty = substs.type_at(0); - let type_id = ecx.tcx.type_id_hash(ty) as u128; - let id_val = Scalar::Bits { - bits: type_id, - size: dest.layout.size.bytes() as u8, - }; - ecx.write_scalar(id_val, dest)?; - } - "ctpop" | "cttz" | "cttz_nonzero" | "ctlz" | "ctlz_nonzero" | "bswap" => { - let ty = substs.type_at(0); - let layout_of = 
ecx.layout_of(ty)?; - let bits = ecx.read_scalar(args[0])?.to_bits(layout_of.size)?; - let kind = match layout_of.abi { - ty::layout::Abi::Scalar(ref scalar) => scalar.value, - _ => Err(::rustc::mir::interpret::EvalErrorKind::TypeNotPrimitive(ty))?, - }; - let out_val = if intrinsic_name.ends_with("_nonzero") { - if bits == 0 { - return err!(Intrinsic(format!("{} called on 0", intrinsic_name))); - } - numeric_intrinsic(intrinsic_name.trim_right_matches("_nonzero"), bits, kind)? - } else { - numeric_intrinsic(intrinsic_name, bits, kind)? - }; - ecx.write_scalar(out_val, dest)?; - } - - name => return Err( - ConstEvalError::NeedsRfc(format!("calling intrinsic `{}`", name)).into() - ), + if ecx.emulate_intrinsic(instance, args, dest)? { + return Ok(()); } - - ecx.goto_block(target); - - // Since we pushed no stack frame, the main loop will act - // as if the call just completed and it's returning to the - // current frame. - Ok(()) + // An intrinsic that we do not support + let intrinsic_name = &ecx.tcx.item_name(instance.def_id()).as_str()[..]; + Err( + ConstEvalError::NeedsRfc(format!("calling intrinsic `{}`", intrinsic_name)).into() + ) } fn try_ptr_op<'a>( @@ -417,23 +305,11 @@ impl<'mir, 'tcx> super::Machine<'mir, 'tcx> for CompileTimeEvaluator { } } - fn mark_static_initialized<'a>( - _mem: &mut Memory<'a, 'mir, 'tcx, Self>, - _id: AllocId, - _mutability: Mutability, - ) -> EvalResult<'tcx, bool> { - Ok(false) - } - - fn init_static<'a>( - ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>, - cid: GlobalId<'tcx>, - ) -> EvalResult<'tcx, AllocId> { - Ok(ecx - .tcx - .alloc_map - .lock() - .intern_static(cid.instance.def_id())) + fn find_foreign_static<'a>( + _tcx: TyCtxtAt<'a, 'tcx, 'tcx>, + _def_id: DefId, + ) -> EvalResult<'tcx, &'tcx Allocation> { + err!(ReadForeignStatic) } fn box_alloc<'a>( @@ -444,50 +320,6 @@ impl<'mir, 'tcx> super::Machine<'mir, 'tcx> for CompileTimeEvaluator { ConstEvalError::NeedsRfc("heap allocations via `box` keyword".to_string()).into(), ) 
} - - fn global_item_with_linkage<'a>( - _ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>, - _instance: ty::Instance<'tcx>, - _mutability: Mutability, - ) -> EvalResult<'tcx> { - Err( - ConstEvalError::NotConst("statics with `linkage` attribute".to_string()).into(), - ) - } -} - -fn place_field<'a, 'tcx, 'mir>( - ecx: &mut EvalContext<'a, 'mir, 'tcx, CompileTimeEvaluator>, - i: u64, - place: MPlaceTy<'tcx>, -) -> EvalResult<'tcx, Value> { - let place = ecx.mplace_field(place, i)?; - Ok(ecx.try_read_value_from_mplace(place)?.expect("bad panic arg layout")) -} - -fn to_str<'a, 'tcx, 'mir>( - ecx: &mut EvalContext<'a, 'mir, 'tcx, CompileTimeEvaluator>, - val: Value, -) -> EvalResult<'tcx, Symbol> { - if let Value::ScalarPair(ptr, len) = val { - let len = len.not_undef()?.to_bits(ecx.memory.pointer_size())?; - let bytes = ecx.memory.read_bytes(ptr.not_undef()?, Size::from_bytes(len as u64))?; - let str = ::std::str::from_utf8(bytes) - .map_err(|err| EvalErrorKind::ValidationFailure(err.to_string()))?; - Ok(Symbol::intern(str)) - } else { - bug!("panic arg is not a str") - } -} - -fn to_u32<'a, 'tcx, 'mir>( - val: Value, -) -> EvalResult<'tcx, u32> { - if let Value::Scalar(n) = val { - Ok(n.not_undef()?.to_bits(Size::from_bits(32))? 
as u32) - } else { - bug!("panic arg is not a str") - } } /// Project to a field of a (variant of a) const @@ -500,10 +332,10 @@ pub fn const_field<'a, 'tcx>( value: &'tcx ty::Const<'tcx>, ) -> ::rustc::mir::interpret::ConstEvalResult<'tcx> { trace!("const_field: {:?}, {:?}, {:?}", instance, field, value); - let mut ecx = mk_eval_cx(tcx, instance, param_env).unwrap(); + let ecx = mk_eval_cx(tcx, instance, param_env).unwrap(); let result = (|| { // get the operand again - let op = const_to_op(&mut ecx, value)?; + let op = ecx.const_to_op(value)?; // downcast let down = match variant { None => op, @@ -532,36 +364,25 @@ pub fn const_variant_index<'a, 'tcx>( val: &'tcx ty::Const<'tcx>, ) -> EvalResult<'tcx, usize> { trace!("const_variant_index: {:?}, {:?}", instance, val); - let mut ecx = mk_eval_cx(tcx, instance, param_env).unwrap(); - let op = const_to_op(&mut ecx, val)?; - ecx.read_discriminant_as_variant_index(op) + let ecx = mk_eval_cx(tcx, instance, param_env).unwrap(); + let op = ecx.const_to_op(val)?; + Ok(ecx.read_discriminant(op)?.1) } pub fn const_to_allocation_provider<'a, 'tcx>( - tcx: TyCtxt<'a, 'tcx, 'tcx>, + _tcx: TyCtxt<'a, 'tcx, 'tcx>, val: &'tcx ty::Const<'tcx>, ) -> &'tcx Allocation { + // FIXME: This really does not need to be a query. Instead, we should have a query for statics + // that returns an allocation directly (or an `AllocId`?), after doing a sanity check of the + // value and centralizing error reporting. 
match val.val { - ConstValue::ByRef(alloc, offset) => { + ConstValue::ByRef(_, alloc, offset) => { assert_eq!(offset.bytes(), 0); return alloc; }, - _ => () + _ => bug!("const_to_allocation called on non-static"), } - let result = || -> EvalResult<'tcx, &'tcx Allocation> { - let mut ecx = EvalContext::new( - tcx.at(DUMMY_SP), - ty::ParamEnv::reveal_all(), - CompileTimeEvaluator, - ()); - let op = const_to_op(&mut ecx, val)?; - // Make a new allocation, copy things there - let ptr = ecx.allocate(op.layout, MemoryKind::Stack)?; - ecx.copy_op(op, ptr.into())?; - let alloc = ecx.memory.get(ptr.to_ptr()?.alloc_id)?; - Ok(tcx.intern_const_alloc(alloc.clone())) - }; - result().expect("unable to convert ConstValue to Allocation") } pub fn const_eval_provider<'a, 'tcx>( @@ -626,23 +447,3 @@ pub fn const_eval_provider<'a, 'tcx>( err.into() }) } - -fn numeric_intrinsic<'tcx>( - name: &str, - bits: u128, - kind: Primitive, -) -> EvalResult<'tcx, Scalar> { - let size = match kind { - Primitive::Int(integer, _) => integer.size(), - _ => bug!("invalid `{}` argument: {:?}", name, bits), - }; - let extra = 128 - size.bits() as u128; - let bits_out = match name { - "ctpop" => bits.count_ones() as u128, - "ctlz" => bits.leading_zeros() as u128 - extra, - "cttz" => (bits << extra).trailing_zeros() as u128 - extra, - "bswap" => (bits << extra).swap_bytes(), - _ => bug!("not a numeric intrinsic: {}", name), - }; - Ok(Scalar::Bits { bits: bits_out, size: size.bytes() as u8 }) -} diff --git a/src/librustc_mir/interpret/cast.rs b/src/librustc_mir/interpret/cast.rs index 4522f477959b3..d0d1c5d6610d0 100644 --- a/src/librustc_mir/interpret/cast.rs +++ b/src/librustc_mir/interpret/cast.rs @@ -32,7 +32,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { } } - crate fn cast( + pub fn cast( &mut self, src: OpTy<'tcx>, kind: CastKind, diff --git a/src/librustc_mir/interpret/eval_context.rs b/src/librustc_mir/interpret/eval_context.rs index 
fa70a1500d060..455c3fc281a6b 100644 --- a/src/librustc_mir/interpret/eval_context.rs +++ b/src/librustc_mir/interpret/eval_context.rs @@ -32,10 +32,9 @@ use rustc::mir::interpret::{ }; use syntax::source_map::{self, Span}; -use syntax::ast::Mutability; use super::{ - Value, Operand, MemPlace, MPlaceTy, Place, PlaceExtra, + Value, Operand, MemPlace, MPlaceTy, Place, Memory, Machine }; @@ -56,15 +55,15 @@ pub struct EvalContext<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'mir, 'tcx>> { pub(crate) stack: Vec>, /// The maximum number of stack frames allowed - pub(crate) stack_limit: usize, + pub(super) stack_limit: usize, /// When this value is negative, it indicates the number of interpreter /// steps *until* the loop detector is enabled. When it is positive, it is /// the number of steps after the detector has been enabled modulo the loop /// detector period. - pub(crate) steps_since_detector_enabled: isize, + pub(super) steps_since_detector_enabled: isize, - pub(crate) loop_detector: InfiniteLoopDetector<'a, 'mir, 'tcx, M>, + pub(super) loop_detector: InfiniteLoopDetector<'a, 'mir, 'tcx, M>, } /// A stack frame. @@ -85,7 +84,7 @@ pub struct Frame<'mir, 'tcx: 'mir> { //////////////////////////////////////////////////////////////////////////////// // Return place and locals //////////////////////////////////////////////////////////////////////////////// - /// The block to return to when returning from the current stack frame + /// Work to perform when returning from this function pub return_to_block: StackPopCleanup, /// The location where the result of the current stack frame should be written to. @@ -157,6 +156,18 @@ impl<'mir, 'tcx: 'mir> Hash for Frame<'mir, 'tcx> { } } +#[derive(Clone, Debug, Eq, PartialEq, Hash)] +pub enum StackPopCleanup { + /// Jump to the next block in the caller, or cause UB if None (that's a function + /// that may never return). + Goto(Option), + /// Just do nothing: Used by Main and for the box_alloc hook in miri. 
+ /// `cleanup` says whether locals are deallocated. Static computation + /// wants them leaked to intern what they need (and just throw away + /// the entire `ecx` when it is done). + None { cleanup: bool }, +} + // State of a local variable #[derive(Copy, Clone, PartialEq, Eq, Hash)] pub enum LocalValue { @@ -188,7 +199,7 @@ impl<'tcx> LocalValue { type EvalSnapshot<'a, 'mir, 'tcx, M> = (M, Vec>, Memory<'a, 'mir, 'tcx, M>); -pub(crate) struct InfiniteLoopDetector<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'mir, 'tcx>> { +pub(super) struct InfiniteLoopDetector<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'mir, 'tcx>> { /// The set of all `EvalSnapshot` *hashes* observed by this detector. /// /// When a collision occurs in this table, we store the full snapshot in @@ -251,20 +262,6 @@ impl<'a, 'mir, 'tcx, M> InfiniteLoopDetector<'a, 'mir, 'tcx, M> } } -#[derive(Clone, Debug, Eq, PartialEq, Hash)] -pub enum StackPopCleanup { - /// The stackframe existed to compute the initial value of a static/constant, make sure it - /// isn't modifyable afterwards in case of constants. 
- /// In case of `static mut`, mark the memory to ensure it's never marked as immutable through - /// references or deallocated - MarkStatic(Mutability), - /// A regular stackframe added due to a function call will need to get forwarded to the next - /// block - Goto(mir::BasicBlock), - /// The main function and diverging functions have nowhere to return to - None, -} - impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> HasDataLayout for &'a EvalContext<'a, 'mir, 'tcx, M> { #[inline] fn data_layout(&self) -> &layout::TargetDataLayout { @@ -388,7 +385,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M } pub fn str_to_value(&mut self, s: &str) -> EvalResult<'tcx, Value> { - let ptr = self.memory.allocate_bytes(s.as_bytes()); + let ptr = self.memory.allocate_static_bytes(s.as_bytes()); Ok(Value::new_slice(Scalar::Ptr(ptr), s.len() as u64, self.tcx.tcx)) } @@ -465,90 +462,94 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M } /// Return the actual dynamic size and alignment of the place at the given type. - /// Note that the value does not matter if the type is sized. For unsized types, - /// the value has to be a fat pointer, and we only care about the "extra" data in it. - pub fn size_and_align_of_mplace( + /// Only the "extra" (metadata) part of the place matters. + pub(super) fn size_and_align_of( &self, - mplace: MPlaceTy<'tcx>, + metadata: Option, + layout: TyLayout<'tcx>, ) -> EvalResult<'tcx, (Size, Align)> { - if let PlaceExtra::None = mplace.extra { - assert!(!mplace.layout.is_unsized()); - Ok(mplace.layout.size_and_align()) - } else { - let layout = mplace.layout; - assert!(layout.is_unsized()); - match layout.ty.sty { - ty::Adt(..) | ty::Tuple(..) => { - // First get the size of all statically known fields. 
- // Don't use type_of::sizing_type_of because that expects t to be sized, - // and it also rounds up to alignment, which we want to avoid, - // as the unsized field's alignment could be smaller. - assert!(!layout.ty.is_simd()); - debug!("DST layout: {:?}", layout); - - let sized_size = layout.fields.offset(layout.fields.count() - 1); - let sized_align = layout.align; - debug!( - "DST {} statically sized prefix size: {:?} align: {:?}", - layout.ty, - sized_size, - sized_align - ); - - // Recurse to get the size of the dynamically sized field (must be - // the last field). - let field = self.mplace_field(mplace, layout.fields.count() as u64 - 1)?; - let (unsized_size, unsized_align) = self.size_and_align_of_mplace(field)?; - - // FIXME (#26403, #27023): We should be adding padding - // to `sized_size` (to accommodate the `unsized_align` - // required of the unsized field that follows) before - // summing it with `sized_size`. (Note that since #26403 - // is unfixed, we do not yet add the necessary padding - // here. But this is where the add would go.) - - // Return the sum of sizes and max of aligns. - let size = sized_size + unsized_size; - - // Choose max of two known alignments (combined value must - // be aligned according to more restrictive of the two). - let align = sized_align.max(unsized_align); - - // Issue #27023: must add any necessary padding to `size` - // (to make it a multiple of `align`) before returning it. - // - // Namely, the returned size should be, in C notation: - // - // `size + ((size & (align-1)) ? align : 0)` - // - // emulated via the semi-standard fast bit trick: - // - // `(size + (align-1)) & -align` - - Ok((size.abi_align(align), align)) - } - ty::Dynamic(..) => { - let vtable = match mplace.extra { - PlaceExtra::Vtable(vtable) => vtable, - _ => bug!("Expected vtable"), - }; - // the second entry in the vtable is the dynamic size of the object. 
- self.read_size_and_align_from_vtable(vtable) - } - - ty::Slice(_) | ty::Str => { - let len = match mplace.extra { - PlaceExtra::Length(len) => len, - _ => bug!("Expected length"), - }; - let (elem_size, align) = layout.field(self, 0)?.size_and_align(); - Ok((elem_size * len, align)) - } + let metadata = match metadata { + None => { + assert!(!layout.is_unsized()); + return Ok(layout.size_and_align()) + } + Some(metadata) => { + assert!(layout.is_unsized()); + metadata + } + }; + match layout.ty.sty { + ty::Adt(..) | ty::Tuple(..) => { + // First get the size of all statically known fields. + // Don't use type_of::sizing_type_of because that expects t to be sized, + // and it also rounds up to alignment, which we want to avoid, + // as the unsized field's alignment could be smaller. + assert!(!layout.ty.is_simd()); + debug!("DST layout: {:?}", layout); + + let sized_size = layout.fields.offset(layout.fields.count() - 1); + let sized_align = layout.align; + debug!( + "DST {} statically sized prefix size: {:?} align: {:?}", + layout.ty, + sized_size, + sized_align + ); + + // Recurse to get the size of the dynamically sized field (must be + // the last field). + let field = layout.field(self, layout.fields.count() - 1)?; + let (unsized_size, unsized_align) = self.size_and_align_of(Some(metadata), field)?; + + // FIXME (#26403, #27023): We should be adding padding + // to `sized_size` (to accommodate the `unsized_align` + // required of the unsized field that follows) before + // summing it with `sized_size`. (Note that since #26403 + // is unfixed, we do not yet add the necessary padding + // here. But this is where the add would go.) + + // Return the sum of sizes and max of aligns. + let size = sized_size + unsized_size; + + // Choose max of two known alignments (combined value must + // be aligned according to more restrictive of the two). 
+ let align = sized_align.max(unsized_align); + + // Issue #27023: must add any necessary padding to `size` + // (to make it a multiple of `align`) before returning it. + // + // Namely, the returned size should be, in C notation: + // + // `size + ((size & (align-1)) ? align : 0)` + // + // emulated via the semi-standard fast bit trick: + // + // `(size + (align-1)) & -align` + + Ok((size.abi_align(align), align)) + } + ty::Dynamic(..) => { + let vtable = metadata.to_ptr()?; + // the second entry in the vtable is the dynamic size of the object. + self.read_size_and_align_from_vtable(vtable) + } - _ => bug!("size_of_val::<{:?}> not supported", layout.ty), + ty::Slice(_) | ty::Str => { + let len = metadata.to_usize(self)?; + let (elem_size, align) = layout.field(self, 0)?.size_and_align(); + Ok((elem_size * len, align)) } + + _ => bug!("size_and_align_of::<{:?}> not supported", layout.ty), } } + #[inline] + pub fn size_and_align_of_mplace( + &self, + mplace: MPlaceTy<'tcx> + ) -> EvalResult<'tcx, (Size, Align)> { + self.size_and_align_of(mplace.extra, mplace.layout) + } pub fn push_stack_frame( &mut self, @@ -628,25 +629,19 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M pub(super) fn pop_stack_frame(&mut self) -> EvalResult<'tcx> { ::log_settings::settings().indentation -= 1; - M::end_region(self, None)?; let frame = self.stack.pop().expect( "tried to pop a stack frame, but there were none", ); match frame.return_to_block { - StackPopCleanup::MarkStatic(mutable) => { - if let Place::Ptr(MemPlace { ptr, .. }) = frame.return_place { - // FIXME: to_ptr()? might be too extreme here, - // static zsts might reach this under certain conditions - self.memory.mark_static_initialized( - ptr.to_ptr()?.alloc_id, - mutable, - )? 
- } else { - bug!("StackPopCleanup::MarkStatic on: {:?}", frame.return_place); + StackPopCleanup::Goto(block) => { + self.goto_block(block)?; + } + StackPopCleanup::None { cleanup } => { + if !cleanup { + // Leak the locals + return Ok(()); } } - StackPopCleanup::Goto(target) => self.goto_block(target), - StackPopCleanup::None => {} } // deallocate all locals that are backed by an allocation for local in frame.locals { @@ -656,7 +651,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M Ok(()) } - crate fn deallocate_local(&mut self, local: LocalValue) -> EvalResult<'tcx> { + pub(super) fn deallocate_local(&mut self, local: LocalValue) -> EvalResult<'tcx> { // FIXME: should we tell the user that there was a local which was never written to? if let LocalValue::Live(Operand::Indirect(MemPlace { ptr, .. })) = local { trace!("deallocating local"); diff --git a/src/librustc_mir/interpret/intrinsics.rs b/src/librustc_mir/interpret/intrinsics.rs new file mode 100644 index 0000000000000..35e3253ca7f8e --- /dev/null +++ b/src/librustc_mir/interpret/intrinsics.rs @@ -0,0 +1,175 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Intrinsics and other functions that the miri engine executes without +//! looking at their MIR. Intrinsics/functions supported here are shared by CTFE +//! and miri. 
+ +use syntax::symbol::Symbol; +use rustc::ty; +use rustc::ty::layout::{LayoutOf, Primitive}; +use rustc::mir::interpret::{ + EvalResult, EvalErrorKind, Scalar, +}; + +use super::{ + Machine, PlaceTy, OpTy, EvalContext, +}; + + +fn numeric_intrinsic<'tcx>( + name: &str, + bits: u128, + kind: Primitive, +) -> EvalResult<'tcx, Scalar> { + let size = match kind { + Primitive::Int(integer, _) => integer.size(), + _ => bug!("invalid `{}` argument: {:?}", name, bits), + }; + let extra = 128 - size.bits() as u128; + let bits_out = match name { + "ctpop" => bits.count_ones() as u128, + "ctlz" => bits.leading_zeros() as u128 - extra, + "cttz" => (bits << extra).trailing_zeros() as u128 - extra, + "bswap" => (bits << extra).swap_bytes(), + _ => bug!("not a numeric intrinsic: {}", name), + }; + Ok(Scalar::Bits { bits: bits_out, size: size.bytes() as u8 }) +} + +impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { + /// Returns whether emulation happened. + pub fn emulate_intrinsic( + &mut self, + instance: ty::Instance<'tcx>, + args: &[OpTy<'tcx>], + dest: PlaceTy<'tcx>, + ) -> EvalResult<'tcx, bool> { + let substs = instance.substs; + + let intrinsic_name = &self.tcx.item_name(instance.def_id()).as_str()[..]; + match intrinsic_name { + "min_align_of" => { + let elem_ty = substs.type_at(0); + let elem_align = self.layout_of(elem_ty)?.align.abi(); + let align_val = Scalar::Bits { + bits: elem_align as u128, + size: dest.layout.size.bytes() as u8, + }; + self.write_scalar(align_val, dest)?; + } + + "size_of" => { + let ty = substs.type_at(0); + let size = self.layout_of(ty)?.size.bytes() as u128; + let size_val = Scalar::Bits { + bits: size, + size: dest.layout.size.bytes() as u8, + }; + self.write_scalar(size_val, dest)?; + } + + "type_id" => { + let ty = substs.type_at(0); + let type_id = self.tcx.type_id_hash(ty) as u128; + let id_val = Scalar::Bits { + bits: type_id, + size: dest.layout.size.bytes() as u8, + }; + self.write_scalar(id_val, dest)?; + 
} + "ctpop" | "cttz" | "cttz_nonzero" | "ctlz" | "ctlz_nonzero" | "bswap" => { + let ty = substs.type_at(0); + let layout_of = self.layout_of(ty)?; + let bits = self.read_scalar(args[0])?.to_bits(layout_of.size)?; + let kind = match layout_of.abi { + ty::layout::Abi::Scalar(ref scalar) => scalar.value, + _ => Err(::rustc::mir::interpret::EvalErrorKind::TypeNotPrimitive(ty))?, + }; + let out_val = if intrinsic_name.ends_with("_nonzero") { + if bits == 0 { + return err!(Intrinsic(format!("{} called on 0", intrinsic_name))); + } + numeric_intrinsic(intrinsic_name.trim_right_matches("_nonzero"), bits, kind)? + } else { + numeric_intrinsic(intrinsic_name, bits, kind)? + }; + self.write_scalar(out_val, dest)?; + } + + _ => return Ok(false), + } + + Ok(true) + } + + /// "Intercept" a function call because we have something special to do for it. + /// Returns whether an intercept happened. + pub fn hook_fn( + &mut self, + instance: ty::Instance<'tcx>, + args: &[OpTy<'tcx>], + dest: Option>, + ) -> EvalResult<'tcx, bool> { + let def_id = instance.def_id(); + // Some fn calls are actually BinOp intrinsics + if let Some((op, oflo)) = self.tcx.is_binop_lang_item(def_id) { + let dest = dest.expect("128 lowerings can't diverge"); + let l = self.read_value(args[0])?; + let r = self.read_value(args[1])?; + if oflo { + self.binop_with_overflow(op, l, r, dest)?; + } else { + self.binop_ignore_overflow(op, l, r, dest)?; + } + return Ok(true); + } else if Some(def_id) == self.tcx.lang_items().panic_fn() { + assert!(args.len() == 1); + // &(&'static str, &'static str, u32, u32) + let ptr = self.read_value(args[0])?; + let place = self.ref_to_mplace(ptr)?; + let (msg, file, line, col) = ( + self.mplace_field(place, 0)?, + self.mplace_field(place, 1)?, + self.mplace_field(place, 2)?, + self.mplace_field(place, 3)?, + ); + + let msg_place = self.ref_to_mplace(self.read_value(msg.into())?)?; + let msg = Symbol::intern(self.read_str(msg_place)?); + let file_place = 
self.ref_to_mplace(self.read_value(file.into())?)?; + let file = Symbol::intern(self.read_str(file_place)?); + let line = self.read_scalar(line.into())?.to_u32()?; + let col = self.read_scalar(col.into())?.to_u32()?; + return Err(EvalErrorKind::Panic { msg, file, line, col }.into()); + } else if Some(def_id) == self.tcx.lang_items().begin_panic_fn() { + assert!(args.len() == 2); + // &'static str, &(&'static str, u32, u32) + let msg = args[0]; + let ptr = self.read_value(args[1])?; + let place = self.ref_to_mplace(ptr)?; + let (file, line, col) = ( + self.mplace_field(place, 0)?, + self.mplace_field(place, 1)?, + self.mplace_field(place, 2)?, + ); + + let msg_place = self.ref_to_mplace(self.read_value(msg.into())?)?; + let msg = Symbol::intern(self.read_str(msg_place)?); + let file_place = self.ref_to_mplace(self.read_value(file.into())?)?; + let file = Symbol::intern(self.read_str(file_place)?); + let line = self.read_scalar(line.into())?.to_u32()?; + let col = self.read_scalar(col.into())?.to_u32()?; + return Err(EvalErrorKind::Panic { msg, file, line, col }.into()); + } else { + return Ok(false); + } + } +} diff --git a/src/librustc_mir/interpret/machine.rs b/src/librustc_mir/interpret/machine.rs index 1fcdab2be62b8..a8fae2b4871ed 100644 --- a/src/librustc_mir/interpret/machine.rs +++ b/src/librustc_mir/interpret/machine.rs @@ -14,14 +14,12 @@ use std::hash::Hash; -use rustc::mir::interpret::{AllocId, EvalResult, Scalar, Pointer, AccessKind, GlobalId}; -use super::{EvalContext, PlaceTy, OpTy, Memory}; - +use rustc::hir::def_id::DefId; +use rustc::mir::interpret::{Allocation, EvalResult, Scalar}; use rustc::mir; -use rustc::ty::{self, layout::TyLayout}; -use rustc::ty::layout::Size; -use syntax::source_map::Span; -use syntax::ast::Mutability; +use rustc::ty::{self, layout::TyLayout, query::TyCtxtAt}; + +use super::{EvalContext, PlaceTy, OpTy}; /// Methods of this trait signifies a point where CTFE evaluation would fail /// and some use case dependent behaviour 
can instead be applied @@ -30,31 +28,47 @@ pub trait Machine<'mir, 'tcx>: Clone + Eq + Hash { type MemoryData: Clone + Eq + Hash; /// Additional memory kinds a machine wishes to distinguish from the builtin ones - type MemoryKinds: ::std::fmt::Debug + PartialEq + Copy + Clone; + type MemoryKinds: ::std::fmt::Debug + Copy + Clone + Eq + Hash; + + /// The memory kind to use for mutated statics -- or None if those are not supported. + const MUT_STATIC_KIND: Option; /// Entry point to all function calls. /// - /// Returns Ok(true) when the function was handled completely - /// e.g. due to missing mir - /// - /// Returns Ok(false) if a new stack frame was pushed - fn eval_fn_call<'a>( + /// Returns either the mir to use for the call, or `None` if execution should + /// just proceed (which usually means this hook did all the work that the + /// called function should usually have done). In the latter case, it is + /// this hook's responsibility to call `goto_block(ret)` to advance the instruction pointer! + /// (This is to support functions like `__rust_maybe_catch_panic` that neither find a MIR + /// nor just jump to `ret`, but instead push their own stack frame.) + /// Passing `dest`and `ret` in the same `Option` proved very annoying when only one of them + /// was used. + fn find_fn<'a>( ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>, instance: ty::Instance<'tcx>, - destination: Option<(PlaceTy<'tcx>, mir::BasicBlock)>, args: &[OpTy<'tcx>], - span: Span, - ) -> EvalResult<'tcx, bool>; + dest: Option>, + ret: Option, + ) -> EvalResult<'tcx, Option<&'mir mir::Mir<'tcx>>>; - /// directly process an intrinsic without pushing a stack frame. + /// Directly process an intrinsic without pushing a stack frame. + /// If this returns successfully, the engine will take care of jumping to the next block. 
fn call_intrinsic<'a>( ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>, instance: ty::Instance<'tcx>, args: &[OpTy<'tcx>], dest: PlaceTy<'tcx>, - target: mir::BasicBlock, ) -> EvalResult<'tcx>; + /// Called for read access to a foreign static item. + /// This can be called multiple times for the same static item and should return consistent + /// results. Once the item is *written* the first time, as usual for statics a copy is + /// made and this function is not called again. + fn find_foreign_static<'a>( + tcx: TyCtxtAt<'a, 'tcx, 'tcx>, + def_id: DefId, + ) -> EvalResult<'tcx, &'tcx Allocation>; + /// Called for all binary operations except on float types. /// /// Returns `None` if the operation should be handled by the integer @@ -70,20 +84,6 @@ pub trait Machine<'mir, 'tcx>: Clone + Eq + Hash { right_layout: TyLayout<'tcx>, ) -> EvalResult<'tcx, Option<(Scalar, bool)>>; - /// Called when trying to mark machine defined `MemoryKinds` as static - fn mark_static_initialized<'a>( - _mem: &mut Memory<'a, 'mir, 'tcx, Self>, - _id: AllocId, - _mutability: Mutability, - ) -> EvalResult<'tcx, bool>; - - /// Called when requiring a pointer to a static. 
Non const eval can - /// create a mutable memory location for `static mut` - fn init_static<'a>( - ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>, - cid: GlobalId<'tcx>, - ) -> EvalResult<'tcx, AllocId>; - /// Heap allocations via the `box` keyword /// /// Returns a pointer to the allocated memory @@ -92,42 +92,7 @@ pub trait Machine<'mir, 'tcx>: Clone + Eq + Hash { dest: PlaceTy<'tcx>, ) -> EvalResult<'tcx>; - /// Called when trying to access a global declared with a `linkage` attribute - fn global_item_with_linkage<'a>( - ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>, - instance: ty::Instance<'tcx>, - mutability: Mutability, - ) -> EvalResult<'tcx>; - - fn check_locks<'a>( - _mem: &Memory<'a, 'mir, 'tcx, Self>, - _ptr: Pointer, - _size: Size, - _access: AccessKind, - ) -> EvalResult<'tcx> { - Ok(()) - } - - fn add_lock<'a>( - _mem: &mut Memory<'a, 'mir, 'tcx, Self>, - _id: AllocId, - ) {} - - fn free_lock<'a>( - _mem: &mut Memory<'a, 'mir, 'tcx, Self>, - _id: AllocId, - _len: u64, - ) -> EvalResult<'tcx> { - Ok(()) - } - - fn end_region<'a>( - _ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>, - _reg: Option<::rustc::middle::region::Scope>, - ) -> EvalResult<'tcx> { - Ok(()) - } - + /// Execute a validation operation fn validation_op<'a>( _ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>, _op: ::rustc::mir::ValidationOp, diff --git a/src/librustc_mir/interpret/memory.rs b/src/librustc_mir/interpret/memory.rs index 6f1a126534ce8..240f977a5a0e8 100644 --- a/src/librustc_mir/interpret/memory.rs +++ b/src/librustc_mir/interpret/memory.rs @@ -20,26 +20,18 @@ use std::collections::VecDeque; use std::hash::{Hash, Hasher}; use std::ptr; -use rustc::hir::def_id::DefId; -use rustc::ty::Instance; -use rustc::ty::ParamEnv; -use rustc::ty::query::TyCtxtAt; -use rustc::ty::layout::{self, Align, TargetDataLayout, Size}; -use rustc::mir::interpret::{Pointer, AllocId, Allocation, AccessKind, ScalarMaybeUndef, - EvalResult, Scalar, EvalErrorKind, GlobalId, AllocType, truncate}; +use 
rustc::ty::{self, Instance, query::TyCtxtAt}; +use rustc::ty::layout::{self, Align, TargetDataLayout, Size, HasDataLayout}; +use rustc::mir::interpret::{Pointer, AllocId, Allocation, ScalarMaybeUndef, GlobalId, + EvalResult, Scalar, EvalErrorKind, AllocType, truncate}; pub use rustc::mir::interpret::{write_target_uint, read_target_uint}; use rustc_data_structures::fx::{FxHashSet, FxHashMap, FxHasher}; use syntax::ast::Mutability; -use super::{EvalContext, Machine}; +use super::Machine; - -//////////////////////////////////////////////////////////////////////////////// -// Allocations and pointers -//////////////////////////////////////////////////////////////////////////////// - -#[derive(Debug, PartialEq, Eq, Copy, Clone)] +#[derive(Debug, PartialEq, Eq, Copy, Clone, Hash)] pub enum MemoryKind { /// Error if deallocated except during a stack pop Stack, @@ -47,24 +39,28 @@ pub enum MemoryKind { Machine(T), } -//////////////////////////////////////////////////////////////////////////////// -// Top-level interpreter memory -//////////////////////////////////////////////////////////////////////////////// - #[derive(Clone)] pub struct Memory<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'mir, 'tcx>> { /// Additional data required by the Machine pub data: M::MemoryData, - /// Helps guarantee that stack allocations aren't deallocated via `rust_deallocate` - alloc_kind: FxHashMap>, - - /// Actual memory allocations (arbitrary bytes, may contain pointers into other allocations). - alloc_map: FxHashMap, + /// Allocations local to this instance of the miri engine. The kind + /// helps ensure that the same mechanism is used for allocation and + /// deallocation. When an allocation is not found here, it is a + /// static and looked up in the `tcx` for read access. Writing to + /// a static creates a copy here, in the machine. 
+ alloc_map: FxHashMap, Allocation)>, pub tcx: TyCtxtAt<'a, 'tcx, 'tcx>, } +impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> HasDataLayout for &'a Memory<'a, 'mir, 'tcx, M> { + #[inline] + fn data_layout(&self) -> &TargetDataLayout { + &self.tcx.data_layout + } +} + impl<'a, 'mir, 'tcx, M> Eq for Memory<'a, 'mir, 'tcx, M> where M: Machine<'mir, 'tcx>, 'tcx: 'a + 'mir, @@ -77,13 +73,11 @@ impl<'a, 'mir, 'tcx, M> PartialEq for Memory<'a, 'mir, 'tcx, M> fn eq(&self, other: &Self) -> bool { let Memory { data, - alloc_kind, alloc_map, tcx: _, } = self; *data == other.data - && *alloc_kind == other.alloc_kind && *alloc_map == other.alloc_map } } @@ -95,7 +89,6 @@ impl<'a, 'mir, 'tcx, M> Hash for Memory<'a, 'mir, 'tcx, M> fn hash(&self, state: &mut H) { let Memory { data, - alloc_kind: _, alloc_map: _, tcx: _, } = self; @@ -108,10 +101,11 @@ impl<'a, 'mir, 'tcx, M> Hash for Memory<'a, 'mir, 'tcx, M> // iteration orders, we use a commutative operation (in this case // addition, but XOR would also work), to combine the hash of each // `Allocation`. 
- self.allocations() - .map(|allocs| { + self.alloc_map.iter() + .map(|(&id, alloc)| { let mut h = FxHasher::default(); - allocs.hash(&mut h); + id.hash(&mut h); + alloc.hash(&mut h); h.finish() }) .fold(0u64, |hash, x| hash.wrapping_add(x)) @@ -119,51 +113,79 @@ impl<'a, 'mir, 'tcx, M> Hash for Memory<'a, 'mir, 'tcx, M> } } +/// Helper function to obtain the global (tcx) allocation for a static +fn const_eval_static<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>>( + tcx: TyCtxtAt<'a, 'tcx, 'tcx>, + id: AllocId +) -> EvalResult<'tcx, &'tcx Allocation> { + let alloc = tcx.alloc_map.lock().get(id); + let def_id = match alloc { + Some(AllocType::Memory(mem)) => { + return Ok(mem) + } + Some(AllocType::Function(..)) => { + return err!(DerefFunctionPointer) + } + Some(AllocType::Static(did)) => { + did + } + None => + return err!(DanglingPointerDeref), + }; + // We got a "lazy" static that has not been computed yet, do some work + trace!("static_alloc: Need to compute {:?}", def_id); + if tcx.is_foreign_item(def_id) { + return M::find_foreign_static(tcx, def_id); + } + let instance = Instance::mono(tcx.tcx, def_id); + let gid = GlobalId { + instance, + promoted: None, + }; + tcx.const_eval(ty::ParamEnv::reveal_all().and(gid)).map_err(|err| { + // no need to report anything, the const_eval call takes care of that for statics + assert!(tcx.is_static(def_id).is_some()); + EvalErrorKind::ReferencedConstant(err).into() + }).map(|val| { + // FIXME We got our static (will be a ByRef), now we make a *copy*?!? 
+ tcx.const_to_allocation(val) + }) +} + impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { pub fn new(tcx: TyCtxtAt<'a, 'tcx, 'tcx>, data: M::MemoryData) -> Self { Memory { data, - alloc_kind: FxHashMap::default(), alloc_map: FxHashMap::default(), tcx, } } - pub fn allocations<'x>( - &'x self, - ) -> impl Iterator { - self.alloc_map.iter().map(|(&id, alloc)| (id, alloc)) - } - pub fn create_fn_alloc(&mut self, instance: Instance<'tcx>) -> Pointer { self.tcx.alloc_map.lock().create_fn_alloc(instance).into() } - pub fn allocate_bytes(&mut self, bytes: &[u8]) -> Pointer { + pub fn allocate_static_bytes(&mut self, bytes: &[u8]) -> Pointer { self.tcx.allocate_bytes(bytes).into() } - /// kind is `None` for statics - pub fn allocate_value( + pub fn allocate_with( &mut self, alloc: Allocation, kind: MemoryKind, ) -> EvalResult<'tcx, AllocId> { let id = self.tcx.alloc_map.lock().reserve(); - M::add_lock(self, id); - self.alloc_map.insert(id, alloc); - self.alloc_kind.insert(id, kind); + self.alloc_map.insert(id, (kind, alloc)); Ok(id) } - /// kind is `None` for statics pub fn allocate( &mut self, size: Size, align: Align, kind: MemoryKind, ) -> EvalResult<'tcx, Pointer> { - self.allocate_value(Allocation::undef(size, align), kind).map(Pointer::from) + self.allocate_with(Allocation::undef(size, align), kind).map(Pointer::from) } pub fn reallocate( @@ -178,15 +200,6 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { if ptr.offset.bytes() != 0 { return err!(ReallocateNonBasePtr); } - if self.alloc_map.contains_key(&ptr.alloc_id) { - let alloc_kind = self.alloc_kind[&ptr.alloc_id]; - if alloc_kind != kind { - return err!(ReallocatedWrongMemoryKind( - format!("{:?}", alloc_kind), - format!("{:?}", kind), - )); - } - } // For simplicities' sake, we implement reallocate as "alloc, copy, dealloc" let new_ptr = self.allocate(new_size, new_align, kind)?; @@ -196,20 +209,21 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 
'tcx, M> { new_ptr.into(), new_align, old_size.min(new_size), - /*nonoverlapping*/ - true, + /*nonoverlapping*/ true, )?; self.deallocate(ptr, Some((old_size, old_align)), kind)?; Ok(new_ptr) } + /// Deallocate a local, or do nothing if that local has been made into a static pub fn deallocate_local(&mut self, ptr: Pointer) -> EvalResult<'tcx> { - match self.alloc_kind.get(&ptr.alloc_id).cloned() { - Some(MemoryKind::Stack) => self.deallocate(ptr, None, MemoryKind::Stack), - // Happens if the memory was interned into immutable memory - None => Ok(()), - other => bug!("local contained non-stack memory: {:?}", other), + // The allocation might be already removed by static interning. + // This can only really happen in the CTFE instance, not in miri. + if self.alloc_map.contains_key(&ptr.alloc_id) { + self.deallocate(ptr, None, MemoryKind::Stack) + } else { + Ok(()) } } @@ -223,9 +237,10 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { return err!(DeallocateNonBasePtr); } - let alloc = match self.alloc_map.remove(&ptr.alloc_id) { + let (alloc_kind, alloc) = match self.alloc_map.remove(&ptr.alloc_id) { Some(alloc) => alloc, None => { + // Deallocating static memory -- always an error return match self.tcx.alloc_map.lock().get(ptr.alloc_id) { Some(AllocType::Function(..)) => err!(DeallocatedWrongMemoryKind( "function".to_string(), @@ -241,18 +256,6 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { } }; - let alloc_kind = self.alloc_kind - .remove(&ptr.alloc_id) - .expect("alloc_map out of sync with alloc_kind"); - - // It is okay for us to still holds locks on deallocation -- for example, we could store - // data we own in a local, and the local could be deallocated (from StorageDead) before the - // function returns. However, we should check *something*. For now, we make sure that there - // is no conflicting write lock by another frame. We *have* to permit deallocation if we - // hold a read lock. 
- // FIXME: Figure out the exact rules here. - M::free_lock(self, ptr.alloc_id, alloc.bytes.len() as u64)?; - if alloc_kind != kind { return err!(DeallocatedWrongMemoryKind( format!("{:?}", alloc_kind), @@ -339,63 +342,37 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { /// Allocation accessors impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { - fn const_eval_static(&self, def_id: DefId) -> EvalResult<'tcx, &'tcx Allocation> { - if self.tcx.is_foreign_item(def_id) { - return err!(ReadForeignStatic); - } - let instance = Instance::mono(self.tcx.tcx, def_id); - let gid = GlobalId { - instance, - promoted: None, - }; - self.tcx.const_eval(ParamEnv::reveal_all().and(gid)).map_err(|err| { - // no need to report anything, the const_eval call takes care of that for statics - assert!(self.tcx.is_static(def_id).is_some()); - EvalErrorKind::ReferencedConstant(err).into() - }).map(|val| { - self.tcx.const_to_allocation(val) - }) - } - pub fn get(&self, id: AllocId) -> EvalResult<'tcx, &Allocation> { - // normal alloc? match self.alloc_map.get(&id) { - Some(alloc) => Ok(alloc), - // uninitialized static alloc? - None => { - // static alloc? - let alloc = self.tcx.alloc_map.lock().get(id); - match alloc { - Some(AllocType::Memory(mem)) => Ok(mem), - Some(AllocType::Function(..)) => { - Err(EvalErrorKind::DerefFunctionPointer.into()) - } - Some(AllocType::Static(did)) => { - self.const_eval_static(did) - } - None => Err(EvalErrorKind::DanglingPointerDeref.into()), - } - }, + // Normal alloc? + Some(alloc) => Ok(&alloc.1), + // Static. No need to make any copies, just provide read access to the global static + // memory in tcx. + None => const_eval_static::(self.tcx, id), } } - fn get_mut( + pub fn get_mut( &mut self, id: AllocId, ) -> EvalResult<'tcx, &mut Allocation> { - // normal alloc? - match self.alloc_map.get_mut(&id) { - Some(alloc) => Ok(alloc), - // uninitialized static alloc? 
- None => { - // no alloc or immutable alloc? produce an error - match self.tcx.alloc_map.lock().get(id) { - Some(AllocType::Memory(..)) | - Some(AllocType::Static(..)) => err!(ModifiedConstantMemory), - Some(AllocType::Function(..)) => err!(DerefFunctionPointer), - None => err!(DanglingPointerDeref), - } - }, + // Static? + if !self.alloc_map.contains_key(&id) { + // Ask the machine for what to do + if let Some(kind) = M::MUT_STATIC_KIND { + // The machine supports mutating statics. Make a copy, use that. + self.deep_copy_static(id, MemoryKind::Machine(kind))?; + } else { + return err!(ModifiedConstantMemory) + } + } + // If we come here, we know the allocation is in our map + let alloc = &mut self.alloc_map.get_mut(&id).unwrap().1; + // See if we are allowed to mutate this + if alloc.mutability == Mutability::Immutable { + err!(ModifiedConstantMemory) + } else { + Ok(alloc) } } @@ -410,10 +387,6 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { } } - pub fn get_alloc_kind(&self, id: AllocId) -> Option> { - self.alloc_kind.get(&id).cloned() - } - /// For debugging, print an allocation and all allocations it points to, recursively. pub fn dump_alloc(&self, id: AllocId) { if !log_enabled!(::log::Level::Trace) { @@ -441,14 +414,14 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { let (alloc, immutable) = // normal alloc? match self.alloc_map.get(&id) { - Some(a) => (a, match self.alloc_kind[&id] { + Some((kind, alloc)) => (alloc, match kind { MemoryKind::Stack => " (stack)".to_owned(), MemoryKind::Machine(m) => format!(" ({:?})", m), }), None => { // static alloc? 
match self.tcx.alloc_map.lock().get(id) { - Some(AllocType::Memory(a)) => (a, "(immutable)".to_owned()), + Some(AllocType::Memory(a)) => (a, " (immutable)".to_owned()), Some(AllocType::Function(func)) => { trace!("{} {}", msg, func); continue; @@ -509,9 +482,12 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { pub fn leak_report(&self) -> usize { trace!("### LEAK REPORT ###"); + let mut_static_kind = M::MUT_STATIC_KIND.map(|k| MemoryKind::Machine(k)); let leaks: Vec<_> = self.alloc_map - .keys() - .cloned() + .iter() + .filter_map(|(&id, &(kind, _))| + // exclude mutable statics + if Some(kind) == mut_static_kind { None } else { Some(id) } ) .collect(); let n = leaks.len(); self.dump_allocs(leaks); @@ -534,7 +510,6 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { if size.bytes() == 0 { return Ok(&[]); } - M::check_locks(self, ptr, size, AccessKind::Read)?; // if ptr.offset is in bounds, then so is ptr (because offset checks for overflow) self.check_bounds(ptr.offset(size, self)?, true)?; let alloc = self.get(ptr.alloc_id)?; @@ -557,7 +532,6 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { if size.bytes() == 0 { return Ok(&mut []); } - M::check_locks(self, ptr, size, AccessKind::Write)?; // if ptr.offset is in bounds, then so is ptr (because offset checks for overflow) self.check_bounds(ptr.offset(size, &*self)?, true)?; let alloc = self.get_mut(ptr.alloc_id)?; @@ -591,22 +565,8 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { /// Reading and writing impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { - /// mark an allocation pointed to by a static as static and initialized - fn mark_inner_allocation_initialized( - &mut self, - alloc: AllocId, - mutability: Mutability, - ) -> EvalResult<'tcx> { - match self.alloc_kind.get(&alloc) { - // do not go into statics - None => Ok(()), - // just locals and machine allocs - Some(_) => 
self.mark_static_initialized(alloc, mutability), - } - } - /// mark an allocation as static and initialized, either mutable or not - pub fn mark_static_initialized( + pub fn intern_static( &mut self, alloc_id: AllocId, mutability: Mutability, @@ -616,31 +576,48 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { alloc_id, mutability ); - // The machine handled it - if M::mark_static_initialized(self, alloc_id, mutability)? { - return Ok(()) - } - let alloc = self.alloc_map.remove(&alloc_id); - match self.alloc_kind.remove(&alloc_id) { - None => {}, - Some(MemoryKind::Machine(_)) => bug!("machine didn't handle machine alloc"), - Some(MemoryKind::Stack) => {}, - } - if let Some(mut alloc) = alloc { - // ensure llvm knows not to put this into immutable memory - alloc.runtime_mutability = mutability; - let alloc = self.tcx.intern_const_alloc(alloc); - self.tcx.alloc_map.lock().set_id_memory(alloc_id, alloc); - // recurse into inner allocations - for &alloc in alloc.relocations.values() { - self.mark_inner_allocation_initialized(alloc, mutability)?; + // remove allocation + let (kind, mut alloc) = self.alloc_map.remove(&alloc_id).unwrap(); + match kind { + MemoryKind::Machine(_) => bug!("Static cannot refer to machine memory"), + MemoryKind::Stack => {}, + } + // ensure llvm knows not to put this into immutable memory + alloc.mutability = mutability; + let alloc = self.tcx.intern_const_alloc(alloc); + self.tcx.alloc_map.lock().set_id_memory(alloc_id, alloc); + // recurse into inner allocations + for &alloc in alloc.relocations.values() { + // FIXME: Reusing the mutability here is likely incorrect. It is originally + // determined via `is_freeze`, and data is considered frozen if there is no + // `UnsafeCell` *immediately* in that data -- however, this search stops + // at references. 
So whenever we follow a reference, we should likely + // assume immutability -- and we should make sure that the compiler + // does not permit code that would break this! + if self.alloc_map.contains_key(&alloc) { + // Not yet interned, so proceed recursively + self.intern_static(alloc, mutability)?; } - } else { - bug!("no allocation found for {:?}", alloc_id); } Ok(()) } + /// The alloc_id must refer to a (mutable) static; a deep copy of that + /// static is made into this memory. + fn deep_copy_static( + &mut self, + id: AllocId, + kind: MemoryKind, + ) -> EvalResult<'tcx> { + let alloc = const_eval_static::(self.tcx, id)?; + if alloc.mutability == Mutability::Immutable { + return err!(ModifiedConstantMemory); + } + let old = self.alloc_map.insert(id, (kind, alloc.clone())); + assert!(old.is_none(), "deep_copy_static: must not overwrite existing memory"); + Ok(()) + } + pub fn copy( &mut self, src: Scalar, @@ -745,7 +722,6 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { return err!(ReadPointerAsBytes); } self.check_defined(ptr, p1)?; - M::check_locks(self, ptr, p1, AccessKind::Read)?; Ok(&alloc.bytes[offset..offset + size]) } None => err!(UnterminatedCString(ptr)), @@ -802,9 +778,9 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { let bytes = self.get_bytes_unchecked(ptr, size, ptr_align.min(self.int_align(size)))?; // Undef check happens *after* we established that the alignment is correct. // We must not return Ok() for unaligned pointers! - if self.check_defined(ptr, size).is_err() { - // this inflates undefined bytes to the entire scalar, - // even if only a few bytes are undefined + if !self.is_defined(ptr, size)? 
{ + // this inflates undefined bytes to the entire scalar, even if only a few + // bytes are undefined return Ok(ScalarMaybeUndef::Undef); } // Now we do the actual reading @@ -990,16 +966,21 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { Ok(()) } - fn check_defined(&self, ptr: Pointer, size: Size) -> EvalResult<'tcx> { + fn is_defined(&self, ptr: Pointer, size: Size) -> EvalResult<'tcx, bool> { let alloc = self.get(ptr.alloc_id)?; - if !alloc.undef_mask.is_range_defined( + Ok(alloc.undef_mask.is_range_defined( ptr.offset, ptr.offset + size, - ) - { - return err!(ReadUndefBytes); + )) + } + + #[inline] + fn check_defined(&self, ptr: Pointer, size: Size) -> EvalResult<'tcx> { + if self.is_defined(ptr, size)? { + Ok(()) + } else { + err!(ReadUndefBytes) } - Ok(()) } pub fn mark_definedness( @@ -1020,49 +1001,3 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { Ok(()) } } - -//////////////////////////////////////////////////////////////////////////////// -// Unaligned accesses -//////////////////////////////////////////////////////////////////////////////// - -pub trait HasMemory<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'mir, 'tcx>> { - fn memory_mut(&mut self) -> &mut Memory<'a, 'mir, 'tcx, M>; - fn memory(&self) -> &Memory<'a, 'mir, 'tcx, M>; -} - -impl<'a, 'mir, 'tcx, M> HasMemory<'a, 'mir, 'tcx, M> for Memory<'a, 'mir, 'tcx, M> - where M: Machine<'mir, 'tcx> -{ - #[inline] - fn memory_mut(&mut self) -> &mut Memory<'a, 'mir, 'tcx, M> { - self - } - - #[inline] - fn memory(&self) -> &Memory<'a, 'mir, 'tcx, M> { - self - } -} - -impl<'a, 'mir, 'tcx, M> HasMemory<'a, 'mir, 'tcx, M> for EvalContext<'a, 'mir, 'tcx, M> - where M: Machine<'mir, 'tcx> -{ - #[inline] - fn memory_mut(&mut self) -> &mut Memory<'a, 'mir, 'tcx, M> { - &mut self.memory - } - - #[inline] - fn memory(&self) -> &Memory<'a, 'mir, 'tcx, M> { - &self.memory - } -} - -impl<'a, 'mir, 'tcx, M> layout::HasDataLayout for &'a Memory<'a, 'mir, 'tcx, M> - 
where M: Machine<'mir, 'tcx> -{ - #[inline] - fn data_layout(&self) -> &TargetDataLayout { - &self.tcx.data_layout - } -} diff --git a/src/librustc_mir/interpret/mod.rs b/src/librustc_mir/interpret/mod.rs index 8192ef3d0f31d..462c4b8889dd1 100644 --- a/src/librustc_mir/interpret/mod.rs +++ b/src/librustc_mir/interpret/mod.rs @@ -20,18 +20,23 @@ mod operator; mod step; mod terminator; mod traits; -mod const_eval; mod validity; +mod intrinsics; pub use self::eval_context::{ EvalContext, Frame, StackPopCleanup, LocalValue, }; -pub use self::place::{Place, PlaceExtra, PlaceTy, MemPlace, MPlaceTy}; +pub use self::place::{Place, PlaceTy, MemPlace, MPlaceTy}; -pub use self::memory::{Memory, MemoryKind, HasMemory}; +pub use self::memory::{Memory, MemoryKind}; -pub use self::const_eval::{ +pub use self::machine::Machine; + +pub use self::operand::{Value, ValTy, Operand, OpTy}; + +// reexports for compatibility +pub use const_eval::{ eval_promoted, mk_borrowck_eval_cx, mk_eval_cx, @@ -42,7 +47,3 @@ pub use self::const_eval::{ const_variant_index, op_to_const, }; - -pub use self::machine::Machine; - -pub use self::operand::{Value, ValTy, Operand, OpTy}; diff --git a/src/librustc_mir/interpret/operand.rs b/src/librustc_mir/interpret/operand.rs index aa4585fb89242..9681b705d7eba 100644 --- a/src/librustc_mir/interpret/operand.rs +++ b/src/librustc_mir/interpret/operand.rs @@ -11,16 +11,17 @@ //! Functions concerning immediate values and operands, and reading from operands. //! All high-level functions to read from memory work on operands as sources. 
+use std::hash::{Hash, Hasher}; use std::convert::TryInto; -use rustc::mir; -use rustc::ty::layout::{self, Align, LayoutOf, TyLayout, HasDataLayout, IntegerExt}; +use rustc::{mir, ty}; +use rustc::ty::layout::{self, Size, Align, LayoutOf, TyLayout, HasDataLayout, IntegerExt}; use rustc_data_structures::indexed_vec::Idx; use rustc::mir::interpret::{ GlobalId, ConstValue, Scalar, EvalResult, Pointer, ScalarMaybeUndef, EvalErrorKind }; -use super::{EvalContext, Machine, MemPlace, MPlaceTy, PlaceExtra, MemoryKind}; +use super::{EvalContext, Machine, MemPlace, MPlaceTy, MemoryKind}; /// A `Value` represents a single immediate self-contained Rust value. /// @@ -64,6 +65,14 @@ impl<'tcx> Value { self.to_scalar_or_undef().not_undef() } + #[inline] + pub fn to_scalar_pair(self) -> EvalResult<'tcx, (Scalar, Scalar)> { + match self { + Value::Scalar(..) => bug!("Got a thin pointer where a scalar pair was expected"), + Value::ScalarPair(a, b) => Ok((a.not_undef()?, b.not_undef()?)) + } + } + /// Convert the value into a pointer (or a pointer-sized integer). /// Throws away the second half of a ScalarPair! 
#[inline] @@ -73,24 +82,6 @@ impl<'tcx> Value { Value::ScalarPair(ptr, _) => ptr.not_undef(), } } - - pub fn to_scalar_dyn_trait(self) -> EvalResult<'tcx, (Scalar, Pointer)> { - match self { - Value::ScalarPair(ptr, vtable) => - Ok((ptr.not_undef()?, vtable.to_ptr()?)), - _ => bug!("expected ptr and vtable, got {:?}", self), - } - } - - pub fn to_scalar_slice(self, cx: impl HasDataLayout) -> EvalResult<'tcx, (Scalar, u64)> { - match self { - Value::ScalarPair(ptr, val) => { - let len = val.to_bits(cx.data_layout().pointer_size)?; - Ok((ptr.not_undef()?, len as u64)) - } - _ => bug!("expected ptr and length, got {:?}", self), - } - } } // ScalarPair needs a type to interpret, so we often have a value and a type together @@ -150,7 +141,7 @@ impl Operand { #[derive(Copy, Clone, Debug)] pub struct OpTy<'tcx> { - pub op: Operand, + crate op: Operand, // ideally we'd make this private, but we are not there yet pub layout: TyLayout<'tcx>, } @@ -182,6 +173,20 @@ impl<'tcx> From> for OpTy<'tcx> { } } +// Validation needs to hash OpTy, but we cannot hash Layout -- so we just hash the type +impl<'tcx> Hash for OpTy<'tcx> { + fn hash(&self, state: &mut H) { + self.op.hash(state); + self.layout.ty.hash(state); + } +} +impl<'tcx> PartialEq for OpTy<'tcx> { + fn eq(&self, other: &Self) -> bool { + self.op == other.op && self.layout.ty == other.layout.ty + } +} +impl<'tcx> Eq for OpTy<'tcx> {} + impl<'tcx> OpTy<'tcx> { #[inline] pub fn from_ptr(ptr: Pointer, align: Align, layout: TyLayout<'tcx>) -> Self { @@ -227,7 +232,8 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { &self, mplace: MPlaceTy<'tcx>, ) -> EvalResult<'tcx, Option> { - if mplace.extra != PlaceExtra::None { + if mplace.layout.is_unsized() { + // Don't touch unsized return Ok(None); } let (ptr, ptr_align) = mplace.to_scalar_ptr_align(); @@ -266,7 +272,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { /// Note that for a given layout, this operation will
either always fail or always /// succeed! Whether it succeeds depends on whether the layout can be represented /// in a `Value`, not on which data is stored there currently. - pub(super) fn try_read_value( + pub(crate) fn try_read_value( &self, src: OpTy<'tcx>, ) -> EvalResult<'tcx, Result> { @@ -300,6 +306,18 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { } } + // Turn the MPlace into a string (must already be dereferenced!) + pub fn read_str( + &self, + mplace: MPlaceTy<'tcx>, + ) -> EvalResult<'tcx, &str> { + let len = mplace.len(self)?; + let bytes = self.memory.read_bytes(mplace.ptr, Size::from_bytes(len as u64))?; + let str = ::std::str::from_utf8(bytes) + .map_err(|err| EvalErrorKind::ValidationFailure(err.to_string()))?; + Ok(str) + } + pub fn uninit_operand(&mut self, layout: TyLayout<'tcx>) -> EvalResult<'tcx, Operand> { // This decides which types we will use the Immediate optimization for, and hence should // match what `try_read_value` and `eval_place_to_op` support. @@ -360,7 +378,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { Ok(OpTy { op: Operand::Immediate(value), layout: field_layout }) } - pub(super) fn operand_downcast( + pub fn operand_downcast( &self, op: OpTy<'tcx>, variant: usize, @@ -411,12 +429,12 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { // avoid allocations. If you already know the layout, you can pass it in // to avoid looking it up again. fn eval_place_to_op( - &mut self, + &self, mir_place: &mir::Place<'tcx>, layout: Option>, ) -> EvalResult<'tcx, OpTy<'tcx>> { use rustc::mir::Place::*; - Ok(match *mir_place { + let op = match *mir_place { Local(mir::RETURN_PLACE) => return err!(ReadFromReturnPointer), Local(local) => { let op = *self.frame().locals[local].access()?; @@ -430,21 +448,18 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { self.operand_projection(op, &proj.elem)? 
} - // Everything else is an mplace, so we just call `eval_place`. - // Note that getting an mplace for a static aways requires `&mut`, - // so this does not "cost" us anything in terms if mutability. - Promoted(_) | Static(_) => { - let place = self.eval_place(mir_place)?; - place.to_mem_place().into() - } - }) + _ => self.eval_place_to_mplace(mir_place)?.into(), + }; + + trace!("eval_place_to_op: got {:?}", *op); + Ok(op) } /// Evaluate the operand, returning a place where you can then find the data. /// if you already know the layout, you can save two some table lookups /// by passing it in here. pub fn eval_operand( - &mut self, + &self, mir_op: &mir::Operand<'tcx>, layout: Option>, ) -> EvalResult<'tcx, OpTy<'tcx>> { @@ -469,8 +484,8 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { } /// Evaluate a bunch of operands at once - pub(crate) fn eval_operands( - &mut self, + pub(super) fn eval_operands( + &self, ops: &[mir::Operand<'tcx>], ) -> EvalResult<'tcx, Vec>> { ops.into_iter() @@ -479,12 +494,11 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { } // Also used e.g. when miri runs into a constant. - // Unfortunately, this needs an `&mut` to be able to allocate a copy of a `ByRef` - // constant. This bleeds up to `eval_operand` needing `&mut`. 
- pub fn const_value_to_op( - &mut self, + pub(super) fn const_value_to_op( + &self, val: ConstValue<'tcx>, ) -> EvalResult<'tcx, Operand> { + trace!("const_value_to_op: {:?}", val); match val { ConstValue::Unevaluated(def_id, substs) => { let instance = self.resolve(def_id, substs)?; @@ -493,9 +507,9 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { promoted: None, }) } - ConstValue::ByRef(alloc, offset) => { - // FIXME: Allocate new AllocId for all constants inside - let id = self.memory.allocate_value(alloc.clone(), MemoryKind::Stack)?; + ConstValue::ByRef(id, alloc, offset) => { + // We rely on mutability being set correctly in that allocation to prevent writes + // where none should happen -- and for `static mut`, we copy on demand anyway. Ok(Operand::from_ptr(Pointer::new(id, offset), alloc.align)) }, ConstValue::ScalarPair(a, b) => @@ -504,52 +518,24 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { Ok(Operand::Immediate(Value::Scalar(x.into()))), } } + pub fn const_to_op( + &self, + cnst: &ty::Const<'tcx>, + ) -> EvalResult<'tcx, OpTy<'tcx>> { + let op = self.const_value_to_op(cnst.val)?; + Ok(OpTy { op, layout: self.layout_of(cnst.ty)? }) + } - pub(super) fn global_to_op(&mut self, gid: GlobalId<'tcx>) -> EvalResult<'tcx, Operand> { + pub(super) fn global_to_op(&self, gid: GlobalId<'tcx>) -> EvalResult<'tcx, Operand> { let cv = self.const_eval(gid)?; self.const_value_to_op(cv.val) } - /// We cannot do self.read_value(self.eval_operand) due to eval_operand taking &mut self, - /// so this helps avoid unnecessary let. 
- #[inline] - pub fn eval_operand_and_read_value( - &mut self, - op: &mir::Operand<'tcx>, - layout: Option>, - ) -> EvalResult<'tcx, ValTy<'tcx>> { - let op = self.eval_operand(op, layout)?; - self.read_value(op) - } - - /// reads a tag and produces the corresponding variant index - pub fn read_discriminant_as_variant_index( + /// Read discriminant, return the runtime value as well as the variant index. + pub fn read_discriminant( &self, rval: OpTy<'tcx>, - ) -> EvalResult<'tcx, usize> { - match rval.layout.variants { - layout::Variants::Single { index } => Ok(index), - layout::Variants::Tagged { .. } => { - let discr_val = self.read_discriminant_value(rval)?; - rval.layout.ty - .ty_adt_def() - .expect("tagged layout for non adt") - .discriminants(self.tcx.tcx) - .position(|var| var.val == discr_val) - .ok_or_else(|| EvalErrorKind::InvalidDiscriminant.into()) - } - layout::Variants::NicheFilling { .. } => { - let discr_val = self.read_discriminant_value(rval)?; - assert_eq!(discr_val as usize as u128, discr_val); - Ok(discr_val as usize) - }, - } - } - - pub fn read_discriminant_value( - &self, - rval: OpTy<'tcx>, - ) -> EvalResult<'tcx, u128> { + ) -> EvalResult<'tcx, (u128, usize)> { trace!("read_discriminant_value {:#?}", rval.layout); if rval.layout.abi == layout::Abi::Uninhabited { return err!(Unreachable); @@ -560,20 +546,21 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { let discr_val = rval.layout.ty.ty_adt_def().map_or( index as u128, |def| def.discriminant_for_variant(*self.tcx, index).val); - return Ok(discr_val); + return Ok((discr_val, index)); } layout::Variants::Tagged { .. } | layout::Variants::NicheFilling { .. 
} => {}, } + // read raw discriminant value let discr_op = self.operand_field(rval, 0)?; let discr_val = self.read_value(discr_op)?; - trace!("discr value: {:?}", discr_val); let raw_discr = discr_val.to_scalar()?; + trace!("discr value: {:?}", raw_discr); + // post-process Ok(match rval.layout.variants { layout::Variants::Single { .. } => bug!(), - // FIXME: We should catch invalid discriminants here! layout::Variants::Tagged { .. } => { - if discr_val.layout.ty.is_signed() { + let real_discr = if discr_val.layout.ty.is_signed() { let i = raw_discr.to_bits(discr_val.layout.size)? as i128; // going from layout tag type to typeck discriminant type // requires first sign extending with the layout discriminant @@ -590,7 +577,15 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { (truncatee << shift) >> shift } else { raw_discr.to_bits(discr_val.layout.size)? - } + }; + // Make sure we catch invalid discriminants + let index = rval.layout.ty + .ty_adt_def() + .expect("tagged layout for non adt") + .discriminants(self.tcx.tcx) + .position(|var| var.val == real_discr) + .ok_or_else(|| EvalErrorKind::InvalidDiscriminant(real_discr))?; + (real_discr, index) }, layout::Variants::NicheFilling { dataful_variant, @@ -600,8 +595,9 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { } => { let variants_start = *niche_variants.start() as u128; let variants_end = *niche_variants.end() as u128; - match raw_discr { + let real_discr = match raw_discr { Scalar::Ptr(_) => { + // The niche must be just 0 (which a pointer value never is) assert!(niche_start == 0); assert!(variants_start == variants_end); dataful_variant as u128 @@ -616,7 +612,14 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { dataful_variant as u128 } }, - } + }; + let index = real_discr as usize; + assert_eq!(index as u128, real_discr); + assert!(index < rval.layout.ty + .ty_adt_def() + .expect("tagged layout for non adt") + 
.variants.len()); + (real_discr, index) } }) } diff --git a/src/librustc_mir/interpret/operator.rs b/src/librustc_mir/interpret/operator.rs index 89293dc101232..13ed527e3496b 100644 --- a/src/librustc_mir/interpret/operator.rs +++ b/src/librustc_mir/interpret/operator.rs @@ -9,7 +9,7 @@ // except according to those terms. use rustc::mir; -use rustc::ty::{self, layout::{self, TyLayout}}; +use rustc::ty::{self, layout::TyLayout}; use syntax::ast::FloatTy; use rustc_apfloat::ieee::{Double, Single}; use rustc_apfloat::Float; @@ -48,44 +48,103 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { } impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { - /// Returns the result of the specified operation and whether it overflowed. - pub fn binary_op( + fn binary_char_op( &self, bin_op: mir::BinOp, - ValTy { value: left, layout: left_layout }: ValTy<'tcx>, - ValTy { value: right, layout: right_layout }: ValTy<'tcx>, + l: char, + r: char, ) -> EvalResult<'tcx, (Scalar, bool)> { use rustc::mir::BinOp::*; - let left = left.to_scalar()?; - let right = right.to_scalar()?; - - let left_kind = match left_layout.abi { - layout::Abi::Scalar(ref scalar) => scalar.value, - _ => return err!(TypeNotPrimitive(left_layout.ty)), + let res = match bin_op { + Eq => l == r, + Ne => l != r, + Lt => l < r, + Le => l <= r, + Gt => l > r, + Ge => l >= r, + _ => bug!("Invalid operation on char: {:?}", bin_op), }; - let right_kind = match right_layout.abi { - layout::Abi::Scalar(ref scalar) => scalar.value, - _ => return err!(TypeNotPrimitive(right_layout.ty)), + return Ok((Scalar::from_bool(res), false)); + } + + fn binary_bool_op( + &self, + bin_op: mir::BinOp, + l: bool, + r: bool, + ) -> EvalResult<'tcx, (Scalar, bool)> { + use rustc::mir::BinOp::*; + + let res = match bin_op { + Eq => l == r, + Ne => l != r, + Lt => l < r, + Le => l <= r, + Gt => l > r, + Ge => l >= r, + BitAnd => l & r, + BitOr => l | r, + BitXor => l ^ r, + _ => 
bug!("Invalid operation on bool: {:?}", bin_op), }; - trace!("Running binary op {:?}: {:?} ({:?}), {:?} ({:?})", - bin_op, left, left_kind, right, right_kind); + return Ok((Scalar::from_bool(res), false)); + } - // I: Handle operations that support pointers - if !left_kind.is_float() && !right_kind.is_float() { - if let Some(handled) = - M::try_ptr_op(self, bin_op, left, left_layout, right, right_layout)? - { - return Ok(handled); - } + fn binary_float_op( + &self, + bin_op: mir::BinOp, + fty: FloatTy, + // passing in raw bits + l: u128, + r: u128, + ) -> EvalResult<'tcx, (Scalar, bool)> { + use rustc::mir::BinOp::*; + + macro_rules! float_math { + ($ty:path, $size:expr) => {{ + let l = <$ty>::from_bits(l); + let r = <$ty>::from_bits(r); + let bitify = |res: ::rustc_apfloat::StatusAnd<$ty>| Scalar::Bits { + bits: res.value.to_bits(), + size: $size, + }; + let val = match bin_op { + Eq => Scalar::from_bool(l == r), + Ne => Scalar::from_bool(l != r), + Lt => Scalar::from_bool(l < r), + Le => Scalar::from_bool(l <= r), + Gt => Scalar::from_bool(l > r), + Ge => Scalar::from_bool(l >= r), + Add => bitify(l + r), + Sub => bitify(l - r), + Mul => bitify(l * r), + Div => bitify(l / r), + Rem => bitify(l % r), + _ => bug!("invalid float op: `{:?}`", bin_op), + }; + return Ok((val, false)); + }}; + } + match fty { + FloatTy::F32 => float_math!(Single, 4), + FloatTy::F64 => float_math!(Double, 8), } + } - // II: From now on, everything must be bytes, no pointers - let l = left.to_bits(left_layout.size)?; - let r = right.to_bits(right_layout.size)?; + fn binary_int_op( + &self, + bin_op: mir::BinOp, + // passing in raw bits + l: u128, + left_layout: TyLayout<'tcx>, + r: u128, + right_layout: TyLayout<'tcx>, + ) -> EvalResult<'tcx, (Scalar, bool)> { + use rustc::mir::BinOp::*; - // These ops can have an RHS with a different numeric type. - if right_kind.is_int() && (bin_op == Shl || bin_op == Shr) { + // Shift ops can have an RHS with a different numeric type. 
+ if bin_op == Shl || bin_op == Shr { let signed = left_layout.abi.is_signed(); let mut oflo = (r as u32 as u128) != r; let mut r = r as u32; @@ -116,18 +175,20 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { }, oflo)); } - if left_kind != right_kind { + // For the remaining ops, the types must be the same on both sides + if left_layout.ty != right_layout.ty { let msg = format!( - "unimplemented binary op {:?}: {:?} ({:?}), {:?} ({:?})", + "unimplemented asymmetric binary op {:?}: {:?} ({:?}), {:?} ({:?})", bin_op, - left, - left_kind, - right, - right_kind + l, + left_layout.ty, + r, + right_layout.ty ); return err!(Unimplemented(msg)); } + // Operations that need special treatment for signed integers if left_layout.abi.is_signed() { let op: Option bool> = match bin_op { Lt => Some(i128::lt), @@ -180,38 +241,6 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { } } - if let ty::Float(fty) = left_layout.ty.sty { - macro_rules! 
float_math { - ($ty:path, $size:expr) => {{ - let l = <$ty>::from_bits(l); - let r = <$ty>::from_bits(r); - let bitify = |res: ::rustc_apfloat::StatusAnd<$ty>| Scalar::Bits { - bits: res.value.to_bits(), - size: $size, - }; - let val = match bin_op { - Eq => Scalar::from_bool(l == r), - Ne => Scalar::from_bool(l != r), - Lt => Scalar::from_bool(l < r), - Le => Scalar::from_bool(l <= r), - Gt => Scalar::from_bool(l > r), - Ge => Scalar::from_bool(l >= r), - Add => bitify(l + r), - Sub => bitify(l - r), - Mul => bitify(l * r), - Div => bitify(l / r), - Rem => bitify(l % r), - _ => bug!("invalid float op: `{:?}`", bin_op), - }; - return Ok((val, false)); - }}; - } - match fty { - FloatTy::F32 => float_math!(Single, 4), - FloatTy::F64 => float_math!(Double, 8), - } - } - let size = left_layout.size.bytes() as u8; // only ints left @@ -249,11 +278,10 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { _ => { let msg = format!( - "unimplemented binary op {:?}: {:?} ({:?}), {:?} ({:?})", + "unimplemented binary op {:?}: {:?}, {:?} (both {:?})", bin_op, - left, - left_layout.ty, - right, + l, + r, right_layout.ty, ); return err!(Unimplemented(msg)); @@ -263,6 +291,60 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { Ok((val, false)) } + /// Returns the result of the specified operation and whether it overflowed. 
+ pub fn binary_op( + &self, + bin_op: mir::BinOp, + ValTy { value: left, layout: left_layout }: ValTy<'tcx>, + ValTy { value: right, layout: right_layout }: ValTy<'tcx>, + ) -> EvalResult<'tcx, (Scalar, bool)> { + let left = left.to_scalar()?; + let right = right.to_scalar()?; + + trace!("Running binary op {:?}: {:?} ({:?}), {:?} ({:?})", + bin_op, left, left_layout.ty, right, right_layout.ty); + + match left_layout.ty.sty { + ty::Char => { + assert_eq!(left_layout.ty, right_layout.ty); + let left = left.to_char()?; + let right = right.to_char()?; + self.binary_char_op(bin_op, left, right) + } + ty::Bool => { + assert_eq!(left_layout.ty, right_layout.ty); + let left = left.to_bool()?; + let right = right.to_bool()?; + self.binary_bool_op(bin_op, left, right) + } + ty::Float(fty) => { + assert_eq!(left_layout.ty, right_layout.ty); + let left = left.to_bits(left_layout.size)?; + let right = right.to_bits(right_layout.size)?; + self.binary_float_op(bin_op, fty, left, right) + } + _ => { + // Must be integer(-like) types. Don't forget about == on fn pointers. + assert!(left_layout.ty.is_integral() || left_layout.ty.is_unsafe_ptr() || + left_layout.ty.is_fn()); + assert!(right_layout.ty.is_integral() || right_layout.ty.is_unsafe_ptr() || + right_layout.ty.is_fn()); + + // Handle operations that support pointer values + if let Some(handled) = + M::try_ptr_op(self, bin_op, left, left_layout, right, right_layout)? 
+ { + return Ok(handled); + } + + // Everything else only works with "proper" bits + let left = left.to_bits(left_layout.size)?; + let right = right.to_bits(right_layout.size)?; + self.binary_int_op(bin_op, left, left_layout, right, right_layout) + } + } + } + pub fn unary_op( &self, un_op: mir::UnOp, @@ -273,25 +355,42 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { use rustc_apfloat::ieee::{Single, Double}; use rustc_apfloat::Float; - let size = layout.size; - let bytes = val.to_bits(size)?; - - let result_bytes = match (un_op, &layout.ty.sty) { - - (Not, ty::Bool) => !val.to_bool()? as u128, - - (Not, _) => !bytes, - - (Neg, ty::Float(FloatTy::F32)) => Single::to_bits(-Single::from_bits(bytes)), - (Neg, ty::Float(FloatTy::F64)) => Double::to_bits(-Double::from_bits(bytes)), - - (Neg, _) if bytes == (1 << (size.bits() - 1)) => return err!(OverflowNeg), - (Neg, _) => (-(bytes as i128)) as u128, - }; + trace!("Running unary op {:?}: {:?} ({:?})", un_op, val, layout.ty.sty); - Ok(Scalar::Bits { - bits: self.truncate(result_bytes, layout), - size: size.bytes() as u8, - }) + match layout.ty.sty { + ty::Bool => { + let val = val.to_bool()?; + let res = match un_op { + Not => !val, + _ => bug!("Invalid bool op {:?}", un_op) + }; + Ok(Scalar::from_bool(res)) + } + ty::Float(fty) => { + let val = val.to_bits(layout.size)?; + let res = match (un_op, fty) { + (Neg, FloatTy::F32) => Single::to_bits(-Single::from_bits(val)), + (Neg, FloatTy::F64) => Double::to_bits(-Double::from_bits(val)), + _ => bug!("Invalid float op {:?}", un_op) + }; + Ok(Scalar::Bits { bits: res, size: layout.size.bytes() as u8 }) + } + _ => { + assert!(layout.ty.is_integral()); + let val = val.to_bits(layout.size)?; + let res = match un_op { + Not => !val, + Neg => { + assert!(layout.abi.is_signed()); + (-(val as i128)) as u128 + } + }; + // res needs truncating + Ok(Scalar::Bits { + bits: self.truncate(res, layout), + size: layout.size.bytes() as u8, + }) + } + } } }
diff --git a/src/librustc_mir/interpret/place.rs b/src/librustc_mir/interpret/place.rs index f1c2b6b34fb15..0a6fef3008433 100644 --- a/src/librustc_mir/interpret/place.rs +++ b/src/librustc_mir/interpret/place.rs @@ -12,7 +12,6 @@ //! into a place. //! All high-level functions to write to memory work on places as destinations. -use std::hash::{Hash, Hasher}; use std::convert::TryFrom; use rustc::mir; @@ -32,7 +31,10 @@ pub struct MemPlace { /// However, it may never be undef. pub ptr: Scalar, pub align: Align, - pub extra: PlaceExtra, + /// Metadata for unsized places. Interpretation is up to the type. + /// Must not be present for sized types, but can be missing for unsized types + /// (e.g. `extern type`). + pub extra: Option, } #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)] @@ -48,14 +50,6 @@ pub enum Place { }, } -// Extra information for fat pointers / places -#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)] -pub enum PlaceExtra { - None, - Length(u64), - Vtable(Pointer), -} - #[derive(Copy, Clone, Debug)] pub struct PlaceTy<'tcx> { place: Place, @@ -101,7 +95,7 @@ impl MemPlace { MemPlace { ptr, align, - extra: PlaceExtra::None, + extra: None, } } @@ -112,7 +106,7 @@ impl MemPlace { #[inline(always)] pub fn to_scalar_ptr_align(self) -> (Scalar, Align) { - assert_eq!(self.extra, PlaceExtra::None); + assert_eq!(self.extra, None); (self.ptr, self.align) } @@ -127,13 +121,12 @@ impl MemPlace { /// Turn a mplace into a (thin or fat) pointer, as a reference, pointing to the same space. /// This is the inverse of `ref_to_mplace`. - pub fn to_ref(self, cx: impl HasDataLayout) -> Value { + pub fn to_ref(self) -> Value { // We ignore the alignment of the place here -- special handling for packed structs ends // at the `&` operator. 
match self.extra { - PlaceExtra::None => Value::Scalar(self.ptr.into()), - PlaceExtra::Length(len) => Value::new_slice(self.ptr.into(), len, cx), - PlaceExtra::Vtable(vtable) => Value::new_dyn_trait(self.ptr.into(), vtable), + None => Value::Scalar(self.ptr.into()), + Some(extra) => Value::ScalarPair(self.ptr.into(), extra.into()), } } } @@ -145,33 +138,32 @@ impl<'tcx> MPlaceTy<'tcx> { } #[inline] - pub(super) fn len(self) -> u64 { - // Sanity check - let ty_len = match self.layout.fields { - layout::FieldPlacement::Array { count, .. } => count, - _ => bug!("Length for non-array layout {:?} requested", self.layout), - }; - if let PlaceExtra::Length(len) = self.extra { - len + pub(super) fn len(self, cx: impl HasDataLayout) -> EvalResult<'tcx, u64> { + if self.layout.is_unsized() { + // We need to consult `extra` metadata + match self.layout.ty.sty { + ty::Slice(..) | ty::Str => + return self.extra.unwrap().to_usize(cx), + _ => bug!("len not supported on unsized type {:?}", self.layout.ty), + } } else { - ty_len + // Go through the layout. There are lots of types that support a length, + // e.g. SIMD types. + match self.layout.fields { + layout::FieldPlacement::Array { count, .. } => Ok(count), + _ => bug!("len not supported on sized type {:?}", self.layout.ty), + } } } -} -// Validation needs to hash MPlaceTy, but we cannot hash Layout -- so we just hash the type -impl<'tcx> Hash for MPlaceTy<'tcx> { - fn hash(&self, state: &mut H) { - self.mplace.hash(state); - self.layout.ty.hash(state); - } -} -impl<'tcx> PartialEq for MPlaceTy<'tcx> { - fn eq(&self, other: &Self) -> bool { - self.mplace == other.mplace && self.layout.ty == other.layout.ty + #[inline] + pub(super) fn vtable(self) -> EvalResult<'tcx, Pointer> { + match self.layout.ty.sty { + ty::Dynamic(..) 
=> self.extra.unwrap().to_ptr(), + _ => bug!("vtable not supported on type {:?}", self.layout.ty), + } } } -impl<'tcx> Eq for MPlaceTy<'tcx> {} impl<'tcx> OpTy<'tcx> { #[inline(always)] @@ -246,28 +238,12 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { ) -> EvalResult<'tcx, MPlaceTy<'tcx>> { let pointee_type = val.layout.ty.builtin_deref(true).unwrap().ty; let layout = self.layout_of(pointee_type)?; - let mplace = match self.tcx.struct_tail(pointee_type).sty { - ty::Dynamic(..) => { - let (ptr, vtable) = val.to_scalar_dyn_trait()?; - MemPlace { - ptr, - align: layout.align, - extra: PlaceExtra::Vtable(vtable), - } - } - ty::Str | ty::Slice(_) => { - let (ptr, len) = val.to_scalar_slice(self)?; - MemPlace { - ptr, - align: layout.align, - extra: PlaceExtra::Length(len), - } - } - _ => MemPlace { - ptr: val.to_scalar()?, - align: layout.align, - extra: PlaceExtra::None, - }, + let align = layout.align; + let mplace = match *val { + Value::Scalar(ptr) => + MemPlace { ptr: ptr.not_undef()?, align, extra: None }, + Value::ScalarPair(ptr, extra) => + MemPlace { ptr: ptr.not_undef()?, align, extra: Some(extra.not_undef()?) }, }; Ok(MPlaceTy { mplace, layout }) } @@ -286,9 +262,9 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { layout::FieldPlacement::Arbitrary { ref offsets, .. } => offsets[usize::try_from(field).unwrap()], layout::FieldPlacement::Array { stride, .. } => { - let len = base.len(); - assert!(field < len, - "Tried to access element {} of array/slice with length {}", field, len); + let len = base.len(self)?; + assert!(field < len, "Tried to access element {} of array/slice with length {}", + field, len); stride * field } layout::FieldPlacement::Union(count) => { @@ -302,29 +278,21 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { // above). In that case, all fields are equal. 
let field_layout = base.layout.field(self, usize::try_from(field).unwrap_or(0))?; - // Adjust offset - let offset = match base.extra { - PlaceExtra::Vtable(vtable) => { - let (_, align) = self.read_size_and_align_from_vtable(vtable)?; - // FIXME: Is this right? Should we always do this, or only when actually - // accessing the field to which the vtable applies? - offset.abi_align(align) - } - _ => { - // No adjustment needed - offset - } - }; + // Offset may need adjustment for unsized fields + let (extra, offset) = if field_layout.is_unsized() { + // re-use parent metadata to determine dynamic field layout + let (_, align) = self.size_and_align_of(base.extra, field_layout)?; + (base.extra, offset.abi_align(align)) - let ptr = base.ptr.ptr_offset(offset, self)?; - let align = base.align.min(field_layout.align); - let extra = if !field_layout.is_unsized() { - PlaceExtra::None } else { - assert!(base.extra != PlaceExtra::None, "Expected fat ptr"); - base.extra + // base.extra could be present; we might be accessing a sized field of an unsized + // struct. + (None, offset) }; + let ptr = base.ptr.ptr_offset(offset, self)?; + let align = base.align.min(field_layout.align); // only use static information + Ok(MPlaceTy { mplace: MemPlace { ptr, align, extra }, layout: field_layout }) } @@ -334,7 +302,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { &self, base: MPlaceTy<'tcx>, ) -> EvalResult<'tcx, impl Iterator>> + 'a> { - let len = base.len(); + let len = base.len(self)?; // also asserts that we have a type where this makes sense let stride = match base.layout.fields { layout::FieldPlacement::Array { stride, .. 
} => stride, _ => bug!("mplace_array_fields: expected an array layout"), @@ -344,7 +312,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { Ok((0..len).map(move |i| { let ptr = base.ptr.ptr_offset(i * stride, dl)?; Ok(MPlaceTy { - mplace: MemPlace { ptr, align: base.align, extra: PlaceExtra::None }, + mplace: MemPlace { ptr, align: base.align, extra: None }, layout }) })) @@ -356,7 +324,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { from: u64, to: u64, ) -> EvalResult<'tcx, MPlaceTy<'tcx>> { - let len = base.len(); + let len = base.len(self)?; // also asserts that we have a type where this makes sense assert!(from <= len - to); // Not using layout method because that works with usize, and does not work with slices @@ -371,10 +339,17 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { // Compute extra and new layout let inner_len = len - to - from; let (extra, ty) = match base.layout.ty.sty { + // It is not nice to match on the type, but that seems to be the only way to + // implement this. ty::Array(inner, _) => - (PlaceExtra::None, self.tcx.mk_array(inner, inner_len)), - ty::Slice(..) => - (PlaceExtra::Length(inner_len), base.layout.ty), + (None, self.tcx.mk_array(inner, inner_len)), + ty::Slice(..) 
=> { + let len = Scalar::Bits { + bits: inner_len.into(), + size: self.memory.pointer_size().bytes() as u8 + }; + (Some(len), base.layout.ty) + } _ => bug!("cannot subslice non-array type: `{:?}`", base.layout.ty), }; @@ -392,7 +367,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { variant: usize, ) -> EvalResult<'tcx, MPlaceTy<'tcx>> { // Downcasts only change the layout - assert_eq!(base.extra, PlaceExtra::None); + assert_eq!(base.extra, None); Ok(MPlaceTy { layout: base.layout.for_variant(self, variant), ..base }) } @@ -421,7 +396,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { min_length, from_end, } => { - let n = base.len(); + let n = base.len(self)?; assert!(n >= min_length as u64); let index = if from_end { @@ -487,33 +462,24 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { }) } - /// Compute a place. You should only use this if you intend to write into this - /// place; for reading, a more efficient alternative is `eval_place_for_read`. - pub fn eval_place(&mut self, mir_place: &mir::Place<'tcx>) -> EvalResult<'tcx, PlaceTy<'tcx>> { + /// Evaluate statics and promoteds to an `MPlace`. Used to share some code between + /// `eval_place` and `eval_place_to_op`. 
+ pub(super) fn eval_place_to_mplace( + &self, + mir_place: &mir::Place<'tcx> + ) -> EvalResult<'tcx, MPlaceTy<'tcx>> { use rustc::mir::Place::*; - let place = match *mir_place { - Local(mir::RETURN_PLACE) => PlaceTy { - place: self.frame().return_place, - layout: self.layout_of_local(self.cur_frame(), mir::RETURN_PLACE)?, - }, - Local(local) => PlaceTy { - place: Place::Local { - frame: self.cur_frame(), - local, - }, - layout: self.layout_of_local(self.cur_frame(), local)?, - }, - + Ok(match *mir_place { Promoted(ref promoted) => { let instance = self.frame().instance; let op = self.global_to_op(GlobalId { instance, promoted: Some(promoted.0), })?; - let mplace = op.to_mem_place(); + let mplace = op.to_mem_place(); // these are always in memory let ty = self.monomorphize(promoted.1, self.substs()); - PlaceTy { - place: Place::Ptr(mplace), + MPlaceTy { + mplace, layout: self.layout_of(ty)?, } } @@ -526,18 +492,51 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { instance, promoted: None }; - let alloc = Machine::init_static(self, cid)?; - MPlaceTy::from_aligned_ptr(alloc.into(), layout).into() + // Just create a lazy reference, so we can support recursive statics. + // tcx takes care of assigning every static one and only one unique AllocId. + // When the data here is ever actually used, memory will notice, + // and it knows how to deal with alloc_id that are present in the + // global table but not in its local memory: It calls back into tcx through + // a query, triggering the CTFE machinery to actually turn this lazy reference + // into a bunch of bytes. IOW, statics are evaluated with CTFE even when + // this EvalContext uses another Machine (e.g., in miri). This is what we + // want! This way, computing statics works consistently between codegen + // and miri: They use the same query to eventually obtain a `ty::Const` + // and use that for further computation. 
+ let alloc = self.tcx.alloc_map.lock().intern_static(cid.instance.def_id()); + MPlaceTy::from_aligned_ptr(alloc.into(), layout) } + _ => bug!("eval_place_to_mplace called on {:?}", mir_place), + }) + } + + /// Compute a place. You should only use this if you intend to write into this + /// place; for reading, a more efficient alternative is `eval_place_for_read`. + pub fn eval_place(&mut self, mir_place: &mir::Place<'tcx>) -> EvalResult<'tcx, PlaceTy<'tcx>> { + use rustc::mir::Place::*; + let place = match *mir_place { + Local(mir::RETURN_PLACE) => PlaceTy { + place: self.frame().return_place, + layout: self.layout_of_local(self.cur_frame(), mir::RETURN_PLACE)?, + }, + Local(local) => PlaceTy { + place: Place::Local { + frame: self.cur_frame(), + local, + }, + layout: self.layout_of_local(self.cur_frame(), local)?, + }, + Projection(ref proj) => { let place = self.eval_place(&proj.base)?; self.place_projection(place, &proj.elem)? } + + _ => self.eval_place_to_mplace(mir_place)?.into(), }; self.dump_place(place.place); - Ok(place) } @@ -656,20 +655,26 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { ) -> EvalResult<'tcx, MPlaceTy<'tcx>> { let mplace = match place.place { Place::Local { frame, local } => { - // FIXME: Consider not doing anything for a ZST, and just returning - // a fake pointer? - - // We need the layout of the local. We can NOT use the layout we got, - // that might e.g. be a downcast variant! - let local_layout = self.layout_of_local(frame, local)?; - // Make sure it has a place - let rval = *self.stack[frame].locals[local].access()?; - let mplace = self.allocate_op(OpTy { op: rval, layout: local_layout })?.mplace; - // This might have allocated the flag - *self.stack[frame].locals[local].access_mut()? = - Operand::Indirect(mplace); - // done - mplace + match *self.stack[frame].locals[local].access()? { + Operand::Indirect(mplace) => mplace, + Operand::Immediate(value) => { + // We need to make an allocation. 
+ // FIXME: Consider not doing anything for a ZST, and just returning + // a fake pointer? Are we even called for ZST? + + // We need the layout of the local. We can NOT use the layout we got, + // that might e.g. be an inner field of a struct with `Scalar` layout, + // that has different alignment than the outer field. + let local_layout = self.layout_of_local(frame, local)?; + let ptr = self.allocate(local_layout, MemoryKind::Stack)?; + self.write_value_to_mplace(value, ptr)?; + let mplace = ptr.mplace; + // Update the local + *self.stack[frame].locals[local].access_mut()? = + Operand::Indirect(mplace); + mplace + } + } } Place::Ptr(mplace) => mplace }; @@ -687,38 +692,19 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { Ok(MPlaceTy::from_aligned_ptr(ptr, layout)) } - /// Make a place for an operand, allocating if needed - pub fn allocate_op( - &mut self, - OpTy { op, layout }: OpTy<'tcx>, - ) -> EvalResult<'tcx, MPlaceTy<'tcx>> { - Ok(match op { - Operand::Indirect(mplace) => MPlaceTy { mplace, layout }, - Operand::Immediate(value) => { - // FIXME: Is stack always right here? - let ptr = self.allocate(layout, MemoryKind::Stack)?; - self.write_value_to_mplace(value, ptr)?; - ptr - }, - }) - } - - pub fn write_discriminant_value( + pub fn write_discriminant_index( &mut self, variant_index: usize, dest: PlaceTy<'tcx>, ) -> EvalResult<'tcx> { match dest.layout.variants { layout::Variants::Single { index } => { - if index != variant_index { - // If the layout of an enum is `Single`, all - // other variants are necessarily uninhabited. - assert_eq!(dest.layout.for_variant(&self, variant_index).abi, - layout::Abi::Uninhabited); - } + assert_eq!(index, variant_index); } layout::Variants::Tagged { ref tag, .. 
} => { - let discr_val = dest.layout.ty.ty_adt_def().unwrap() + let adt_def = dest.layout.ty.ty_adt_def().unwrap(); + assert!(variant_index < adt_def.variants.len()); + let discr_val = adt_def .discriminant_for_variant(*self.tcx, variant_index) .val; @@ -741,6 +727,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { niche_start, .. } => { + assert!(variant_index < dest.layout.ty.ty_adt_def().unwrap().variants.len()); if variant_index != dataful_variant { let niche_dest = self.place_field(dest, 0)?; @@ -770,43 +757,25 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { Ok(OpTy { op, layout: place.layout }) } - /// Turn a place that is a dyn trait (i.e., PlaceExtra::Vtable and the appropriate layout) - /// or a slice into the specific fixed-size place and layout that is given by the vtable/len. - /// This "unpacks" the existential quantifier, so to speak. - pub fn unpack_unsized_mplace( - &self, - mplace: MPlaceTy<'tcx> - ) -> EvalResult<'tcx, MPlaceTy<'tcx>> { - trace!("Unpacking {:?} ({:?})", *mplace, mplace.layout.ty); - let layout = match mplace.extra { - PlaceExtra::Vtable(vtable) => { - // the drop function signature - let drop_instance = self.read_drop_type_from_vtable(vtable)?; - trace!("Found drop fn: {:?}", drop_instance); - let fn_sig = drop_instance.ty(*self.tcx).fn_sig(*self.tcx); - let fn_sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, &fn_sig); - // the drop function takes *mut T where T is the type being dropped, so get that - let ty = fn_sig.inputs()[0].builtin_deref(true).unwrap().ty; - let layout = self.layout_of(ty)?; - // Sanity checks - let (size, align) = self.read_size_and_align_from_vtable(vtable)?; - assert_eq!(size, layout.size); - assert_eq!(align.abi(), layout.align.abi()); // only ABI alignment is preserved - // FIXME: More checks for the vtable? We could make sure it is exactly - // the one one would expect for this type. - // Done! 
- layout - }, - PlaceExtra::Length(len) => { - let ty = self.tcx.mk_array(mplace.layout.field(self, 0)?.ty, len); - self.layout_of(ty)? - } - PlaceExtra::None => bug!("Expected a fat pointer"), - }; - trace!("Unpacked type: {:?}", layout.ty); - Ok(MPlaceTy { - mplace: MemPlace { extra: PlaceExtra::None, ..*mplace }, + /// Turn a place with a `dyn Trait` type into a place with the actual dynamic type. + /// Also return some more information so drop doesn't have to run the same code twice. + pub(super) fn unpack_dyn_trait(&self, mplace: MPlaceTy<'tcx>) + -> EvalResult<'tcx, (ty::Instance<'tcx>, MPlaceTy<'tcx>)> { + let vtable = mplace.vtable()?; // also sanity checks the type + let (instance, ty) = self.read_drop_type_from_vtable(vtable)?; + let layout = self.layout_of(ty)?; + + // More sanity checks + let (size, align) = self.read_size_and_align_from_vtable(vtable)?; + assert_eq!(size, layout.size); + assert_eq!(align.abi(), layout.align.abi()); // only ABI alignment is preserved + // FIXME: More checks for the vtable? We could make sure it is exactly + // the one one would expect for this type. + + let mplace = MPlaceTy { + mplace: MemPlace { extra: None, ..*mplace }, layout - }) + }; + Ok((instance, mplace)) } } diff --git a/src/librustc_mir/interpret/step.rs b/src/librustc_mir/interpret/step.rs index 0271a09482cd2..933f06d3d10a0 100644 --- a/src/librustc_mir/interpret/step.rs +++ b/src/librustc_mir/interpret/step.rs @@ -76,8 +76,13 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { self.loop_detector.observe_and_analyze(&self.machine, &self.stack, &self.memory) } + pub fn run(&mut self) -> EvalResult<'tcx> { + while self.step()? {} + Ok(()) + } + /// Returns true as long as there are more things to do. 
- pub fn step(&mut self) -> EvalResult<'tcx, bool> { + fn step(&mut self) -> EvalResult<'tcx, bool> { if self.stack.is_empty() { return Ok(false); } @@ -122,7 +127,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { variant_index, } => { let dest = self.eval_place(place)?; - self.write_discriminant_value(variant_index, dest)?; + self.write_discriminant_index(variant_index, dest)?; } // Mark locals as alive @@ -147,10 +152,8 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { M::validation_op(self, op, operand)?; } } - EndRegion(ce) => { - M::end_region(self, Some(ce))?; - } + EndRegion(..) => {} UserAssertTy(..) => {} // Defined to do nothing. These are added by optimization passes, to avoid changing the @@ -185,9 +188,9 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { BinaryOp(bin_op, ref left, ref right) => { let layout = if binop_left_homogeneous(bin_op) { Some(dest.layout) } else { None }; - let left = self.eval_operand_and_read_value(left, layout)?; + let left = self.read_value(self.eval_operand(left, layout)?)?; let layout = if binop_right_homogeneous(bin_op) { Some(left.layout) } else { None }; - let right = self.eval_operand_and_read_value(right, layout)?; + let right = self.read_value(self.eval_operand(right, layout)?)?; self.binop_ignore_overflow( bin_op, left, @@ -198,9 +201,9 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { CheckedBinaryOp(bin_op, ref left, ref right) => { // Due to the extra boolean in the result, we can never reuse the `dest.layout`. 
- let left = self.eval_operand_and_read_value(left, None)?; + let left = self.read_value(self.eval_operand(left, None)?)?; let layout = if binop_right_homogeneous(bin_op) { Some(left.layout) } else { None }; - let right = self.eval_operand_and_read_value(right, layout)?; + let right = self.read_value(self.eval_operand(right, layout)?)?; self.binop_with_overflow( bin_op, left, @@ -211,7 +214,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { UnaryOp(un_op, ref operand) => { // The operand always has the same type as the result. - let val = self.eval_operand_and_read_value(operand, Some(dest.layout))?; + let val = self.read_value(self.eval_operand(operand, Some(dest.layout))?)?; let val = self.unary_op(un_op, val.to_scalar()?, dest.layout)?; self.write_scalar(val, dest)?; } @@ -219,7 +222,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { Aggregate(ref kind, ref operands) => { let (dest, active_field_index) = match **kind { mir::AggregateKind::Adt(adt_def, variant_index, _, _, active_field_index) => { - self.write_discriminant_value(variant_index, dest)?; + self.write_discriminant_index(variant_index, dest)?; if adt_def.is_enum() { (self.place_downcast(dest, variant_index)?, active_field_index) } else { @@ -243,7 +246,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { Repeat(ref operand, _) => { let op = self.eval_operand(operand, None)?; let dest = self.force_allocation(dest)?; - let length = dest.len(); + let length = dest.len(&self)?; if length > 0 { // write the first @@ -265,7 +268,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { // FIXME(CTFE): don't allow computing the length of arrays in const eval let src = self.eval_place(place)?; let mplace = self.force_allocation(src)?; - let len = mplace.len(); + let len = mplace.len(&self)?; let size = self.memory.pointer_size().bytes() as u8; self.write_scalar( Scalar::Bits { @@ -278,7 
+281,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { Ref(_, _, ref place) => { let src = self.eval_place(place)?; - let val = self.force_allocation(src)?.to_ref(&self); + let val = self.force_allocation(src)?.to_ref(); self.write_value(val, dest)?; } @@ -309,7 +312,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { Discriminant(ref place) => { let place = self.eval_place(place)?; - let discr_val = self.read_discriminant_value(self.place_to_op(place)?)?; + let discr_val = self.read_discriminant(self.place_to_op(place)?)?.0; let size = dest.layout.size.bytes() as u8; self.write_scalar(Scalar::Bits { bits: discr_val, @@ -327,8 +330,13 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { debug!("{:?}", terminator.kind); self.tcx.span = terminator.source_info.span; self.memory.tcx.span = terminator.source_info.span; + + let old_stack = self.cur_frame(); + let old_bb = self.frame().block; self.eval_terminator(terminator)?; if !self.stack.is_empty() { + // This should change *something* + debug_assert!(self.cur_frame() != old_stack || self.frame().block != old_bb); debug!("// {:?}", self.frame().block); } Ok(()) diff --git a/src/librustc_mir/interpret/terminator/mod.rs b/src/librustc_mir/interpret/terminator.rs similarity index 56% rename from src/librustc_mir/interpret/terminator/mod.rs rename to src/librustc_mir/interpret/terminator.rs index aec7bb0c0d606..11826e0ce0c25 100644 --- a/src/librustc_mir/interpret/terminator/mod.rs +++ b/src/librustc_mir/interpret/terminator.rs @@ -8,6 +8,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
+use std::borrow::Cow; + use rustc::mir; use rustc::ty::{self, Ty}; use rustc::ty::layout::LayoutOf; @@ -15,16 +17,20 @@ use syntax::source_map::Span; use rustc_target::spec::abi::Abi; use rustc::mir::interpret::{EvalResult, Scalar}; -use super::{EvalContext, Machine, Value, OpTy, PlaceTy, ValTy, Operand}; - -use rustc_data_structures::indexed_vec::Idx; - -mod drop; +use super::{ + EvalContext, Machine, Value, OpTy, Place, PlaceTy, ValTy, Operand, StackPopCleanup +}; impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { - pub fn goto_block(&mut self, target: mir::BasicBlock) { - self.frame_mut().block = target; - self.frame_mut().stmt = 0; + #[inline] + pub fn goto_block(&mut self, target: Option) -> EvalResult<'tcx> { + if let Some(target) = target { + self.frame_mut().block = target; + self.frame_mut().stmt = 0; + Ok(()) + } else { + err!(Unreachable) + } } pub(super) fn eval_terminator( @@ -38,7 +44,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { self.pop_stack_frame()? } - Goto { target } => self.goto_block(target), + Goto { target } => self.goto_block(Some(target))?, SwitchInt { ref discr, @@ -46,8 +52,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { ref targets, .. } => { - let discr_val = self.eval_operand(discr, None)?; - let discr = self.read_value(discr_val)?; + let discr = self.read_value(self.eval_operand(discr, None)?)?; trace!("SwitchInt({:?})", *discr); // Branch to the `otherwise` case by default, if no match is found. @@ -69,7 +74,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { } } - self.goto_block(target_block); + self.goto_block(Some(target_block))?; } Call { @@ -78,9 +83,9 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { ref destination, .. 
} => { - let destination = match *destination { - Some((ref lv, target)) => Some((self.eval_place(lv)?, target)), - None => None, + let (dest, ret) = match *destination { + Some((ref lv, target)) => (Some(self.eval_place(lv)?), Some(target)), + None => (None, None), }; let func = self.eval_operand(func, None)?; @@ -91,43 +96,44 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { let instance_ty = instance.ty(*self.tcx); match instance_ty.sty { ty::FnDef(..) => { - let real_sig = instance_ty.fn_sig(*self.tcx); let sig = self.tcx.normalize_erasing_late_bound_regions( - ty::ParamEnv::reveal_all(), + self.param_env, &sig, ); + let real_sig = instance_ty.fn_sig(*self.tcx); let real_sig = self.tcx.normalize_erasing_late_bound_regions( - ty::ParamEnv::reveal_all(), + self.param_env, &real_sig, ); if !self.check_sig_compat(sig, real_sig)? { return err!(FunctionPointerTyMismatch(real_sig, sig)); } + (instance, sig) } - ref other => bug!("instance def ty: {:?}", other), + _ => bug!("unexpected fn ptr to ty: {:?}", instance_ty), } - (instance, sig) } - ty::FnDef(def_id, substs) => ( - self.resolve(def_id, substs)?, - func.layout.ty.fn_sig(*self.tcx), - ), + ty::FnDef(def_id, substs) => { + let sig = func.layout.ty.fn_sig(*self.tcx); + let sig = self.tcx.normalize_erasing_late_bound_regions( + self.param_env, + &sig, + ); + (self.resolve(def_id, substs)?, sig) + }, _ => { let msg = format!("can't handle callee of type {:?}", func.layout.ty); return err!(Unimplemented(msg)); } }; let args = self.eval_operands(args)?; - let sig = self.tcx.normalize_erasing_late_bound_regions( - ty::ParamEnv::reveal_all(), - &sig, - ); self.eval_fn_call( fn_def, - destination, &args[..], + dest, + ret, terminator.source_info.span, - sig, + Some(sig), )?; } @@ -157,19 +163,18 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { target, .. } => { - let cond_val = self.eval_operand_and_read_value(cond, None)? - .to_scalar()? 
- .to_bool()?; + let cond_val = self.read_value(self.eval_operand(cond, None)?)? + .to_scalar()?.to_bool()?; if expected == cond_val { - self.goto_block(target); + self.goto_block(Some(target))?; } else { use rustc::mir::interpret::EvalErrorKind::*; return match *msg { BoundsCheck { ref len, ref index } => { - let len = self.eval_operand_and_read_value(len, None) + let len = self.read_value(self.eval_operand(len, None)?) .expect("can't eval len").to_scalar()? .to_bits(self.memory().pointer_size())? as u64; - let index = self.eval_operand_and_read_value(index, None) + let index = self.read_value(self.eval_operand(index, None)?) .expect("can't eval index").to_scalar()? .to_bits(self.memory().pointer_size())? as u64; err!(BoundsCheck { len, index }) @@ -270,138 +275,146 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { return Ok(false); } + /// Call this function -- pushing the stack frame and initializing the arguments. + /// `sig` is optional in case of FnPtr/FnDef -- but mandatory for closures! fn eval_fn_call( &mut self, instance: ty::Instance<'tcx>, - destination: Option<(PlaceTy<'tcx>, mir::BasicBlock)>, args: &[OpTy<'tcx>], + dest: Option>, + ret: Option, span: Span, - sig: ty::FnSig<'tcx>, + sig: Option>, ) -> EvalResult<'tcx> { trace!("eval_fn_call: {:#?}", instance); - if let Some((place, _)) = destination { - assert_eq!(place.layout.ty, sig.output()); - } + match instance.def { ty::InstanceDef::Intrinsic(..) => { - let (ret, target) = match destination { + // The intrinsic itself cannot diverge, so if we got here without a return + // place... (can happen e.g. for transmute returning `!`) + let dest = match dest { Some(dest) => dest, - _ => return err!(Unreachable), + None => return err!(Unreachable) }; - M::call_intrinsic(self, instance, args, ret, target)?; - self.dump_place(*ret); - Ok(()) - } - // FIXME: figure out why we can't just go through the shim - ty::InstanceDef::ClosureOnceShim { .. 
} => { - if M::eval_fn_call(self, instance, destination, args, span)? { - return Ok(()); - } - let mut arg_locals = self.frame().mir.args_iter(); - match sig.abi { - // closure as closure once - Abi::RustCall => { - for (arg_local, &op) in arg_locals.zip(args) { - let dest = self.eval_place(&mir::Place::Local(arg_local))?; - self.copy_op(op, dest)?; - } - } - // non capture closure as fn ptr - // need to inject zst ptr for closure object (aka do nothing) - // and need to pack arguments - Abi::Rust => { - trace!( - "args: {:#?}", - self.frame().mir.args_iter().zip(args.iter()) - .map(|(local, arg)| (local, **arg, arg.layout.ty)) - .collect::>() - ); - let local = arg_locals.nth(1).unwrap(); - for (i, &op) in args.into_iter().enumerate() { - let dest = self.eval_place(&mir::Place::Local(local).field( - mir::Field::new(i), - op.layout.ty, - ))?; - self.copy_op(op, dest)?; - } - } - _ => bug!("bad ABI for ClosureOnceShim: {:?}", sig.abi), - } + M::call_intrinsic(self, instance, args, dest)?; + // No stack frame gets pushed, the main loop will just act as if the + // call completed. + self.goto_block(ret)?; + self.dump_place(*dest); Ok(()) } + ty::InstanceDef::ClosureOnceShim { .. } | ty::InstanceDef::FnPtrShim(..) | ty::InstanceDef::DropGlue(..) | ty::InstanceDef::CloneShim(..) | ty::InstanceDef::Item(_) => { - // Push the stack frame, and potentially be entirely done if the call got hooked - if M::eval_fn_call(self, instance, destination, args, span)? { - // FIXME: Can we make it return the frame to push, instead - // of the hook doing half of the work and us doing the argument - // initialization? - return Ok(()); - } + let mir = match M::find_fn(self, instance, args, dest, ret)? 
{ + Some(mir) => mir, + None => return Ok(()), + }; + + let return_place = match dest { + Some(place) => *place, + None => Place::null(&self), + }; + self.push_stack_frame( + instance, + span, + mir, + return_place, + StackPopCleanup::Goto(ret), + )?; - // Pass the arguments - let mut arg_locals = self.frame().mir.args_iter(); - trace!("ABI: {:?}", sig.abi); + // If we didn't get a signature, ask `fn_sig` + let sig = sig.unwrap_or_else(|| { + let fn_sig = instance.ty(*self.tcx).fn_sig(*self.tcx); + self.tcx.normalize_erasing_late_bound_regions(self.param_env, &fn_sig) + }); + assert_eq!(sig.inputs().len(), args.len()); + // We can't test the types, as it is fine if the types are ABI-compatible but + // not equal. + + // Figure out how to pass which arguments. + // FIXME: Somehow this is horribly full of special cases here, and codegen has + // none of that. What is going on? trace!( - "args: {:#?}", - self.frame().mir.args_iter().zip(args.iter()) - .map(|(local, arg)| (local, **arg, arg.layout.ty)).collect::>() + "ABI: {:?}, args: {:#?}", + sig.abi, + args.iter() + .map(|arg| (arg.layout.ty, format!("{:?}", **arg))) + .collect::>() + ); + trace!( + "spread_arg: {:?}, locals: {:#?}", + mir.spread_arg, + mir.args_iter() + .map(|local| + (local, self.layout_of_local(self.cur_frame(), local).unwrap().ty) + ) + .collect::>() ); - match sig.abi { - Abi::RustCall => { - assert_eq!(args.len(), 2); - - { - // write first argument - let first_local = arg_locals.next().unwrap(); - let dest = self.eval_place(&mir::Place::Local(first_local))?; - self.copy_op(args[0], dest)?; - } - // unpack and write all other args - let layout = args[1].layout; - if let ty::Tuple(_) = layout.ty.sty { - if layout.is_zst() { - // Nothing to do, no need to unpack zsts - return Ok(()); - } - if self.frame().mir.args_iter().count() == layout.fields.count() + 1 { - for (i, arg_local) in arg_locals.enumerate() { - let arg = self.operand_field(args[1], i as u64)?; - let dest = 
self.eval_place(&mir::Place::Local(arg_local))?; - self.copy_op(arg, dest)?; - } - } else { - trace!("manual impl of rust-call ABI"); - // called a manual impl of a rust-call function - let dest = self.eval_place( - &mir::Place::Local(arg_locals.next().unwrap()), - )?; - self.copy_op(args[1], dest)?; - } - } else { - bug!( - "rust-call ABI tuple argument was {:#?}", - layout - ); - } + // We have two iterators: Where the arguments come from, + // and where they go to. + + // For where they come from: If the ABI is RustCall, we untuple the + // last incoming argument. These do not have the same type, + // so to keep the code paths uniform we accept an allocation + // (for RustCall ABI only). + let args_effective : Cow<[OpTy<'tcx>]> = + if sig.abi == Abi::RustCall && !args.is_empty() { + // Untuple + let (&untuple_arg, args) = args.split_last().unwrap(); + trace!("eval_fn_call: Will pass last argument by untupling"); + Cow::from(args.iter().map(|&a| Ok(a)) + .chain((0..untuple_arg.layout.fields.count()).into_iter() + .map(|i| self.operand_field(untuple_arg, i as u64)) + ) + .collect::>>>()?) + } else { + // Plain arg passing + Cow::from(args) + }; + + // Now we have to spread them out across the callee's locals, + // taking into account the `spread_arg`. + let mut args_iter = args_effective.iter(); + let mut local_iter = mir.args_iter(); + // HACK: ClosureOnceShim calls something that expects a ZST as + // first argument, but the callers do not actually pass that ZST. + // Codegen doesn't care because ZST arguments do not even exist there. + match instance.def { + ty::InstanceDef::ClosureOnceShim { .. } if sig.abi == Abi::Rust => { + let local = local_iter.next().unwrap(); + let dest = self.eval_place(&mir::Place::Local(local))?; + assert!(dest.layout.is_zst()); } - _ => { - for (arg_local, &op) in arg_locals.zip(args) { - let dest = self.eval_place(&mir::Place::Local(arg_local))?; - self.copy_op(op, dest)?; + _ => {} + } + // Now back to normal argument passing. 
+ while let Some(local) = local_iter.next() { + let dest = self.eval_place(&mir::Place::Local(local))?; + if Some(local) == mir.spread_arg { + // Must be a tuple + for i in 0..dest.layout.fields.count() { + let dest = self.place_field(dest, i as u64)?; + self.copy_op(*args_iter.next().unwrap(), dest)?; } + } else { + // Normal argument + self.copy_op(*args_iter.next().unwrap(), dest)?; } } + // Now we should be done + assert!(args_iter.next().is_none()); Ok(()) } // cannot use the shim here, because that will only result in infinite recursion ty::InstanceDef::Virtual(_, idx) => { let ptr_size = self.memory.pointer_size(); let ptr_align = self.tcx.data_layout.pointer_align; - let (ptr, vtable) = self.read_value(args[0])?.to_scalar_dyn_trait()?; + let ptr = self.ref_to_mplace(self.read_value(args[0])?)?; + let vtable = ptr.vtable()?; let fn_ptr = self.memory.read_ptr_sized( vtable.offset(ptr_size * (idx as u64 + 3), &self)?, ptr_align @@ -415,11 +428,50 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { let pointee = args[0].layout.ty.builtin_deref(true).unwrap().ty; let fake_fat_ptr_ty = self.tcx.mk_mut_ptr(pointee); args[0].layout = self.layout_of(fake_fat_ptr_ty)?.field(&self, 0)?; - args[0].op = Operand::Immediate(Value::Scalar(ptr.into())); // strip vtable + args[0].op = Operand::Immediate(Value::Scalar(ptr.ptr.into())); // strip vtable trace!("Patched self operand to {:#?}", args[0]); // recurse with concrete function - self.eval_fn_call(instance, destination, &args, span, sig) + self.eval_fn_call(instance, &args, dest, ret, span, sig) } } } + + fn drop_in_place( + &mut self, + place: PlaceTy<'tcx>, + instance: ty::Instance<'tcx>, + span: Span, + target: mir::BasicBlock, + ) -> EvalResult<'tcx> { + trace!("drop_in_place: {:?},\n {:?}, {:?}", *place, place.layout.ty, instance); + // We take the address of the object. This may well be unaligned, which is fine + // for us here. 
However, unaligned accesses will probably make the actual drop + // implementation fail -- a problem shared by rustc. + let place = self.force_allocation(place)?; + + let (instance, place) = match place.layout.ty.sty { + ty::Dynamic(..) => { + // Dropping a trait object. + self.unpack_dyn_trait(place)? + } + _ => (instance, place), + }; + + let arg = OpTy { + op: Operand::Immediate(place.to_ref()), + layout: self.layout_of(self.tcx.mk_mut_ptr(place.layout.ty))?, + }; + + let ty = self.tcx.mk_nil(); // return type is () + let dest = PlaceTy::null(&self, self.layout_of(ty)?); + + self.eval_fn_call( + instance, + &[arg], + Some(dest), + Some(target), + span, + None, + ) + } } diff --git a/src/librustc_mir/interpret/terminator/drop.rs b/src/librustc_mir/interpret/terminator/drop.rs deleted file mode 100644 index 8e413aa8284fb..0000000000000 --- a/src/librustc_mir/interpret/terminator/drop.rs +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use rustc::mir::BasicBlock; -use rustc::ty::{self, layout::LayoutOf}; -use syntax::source_map::Span; - -use rustc::mir::interpret::EvalResult; -use interpret::{Machine, EvalContext, PlaceTy, PlaceExtra, OpTy, Operand}; - -impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { - pub(crate) fn drop_in_place( - &mut self, - place: PlaceTy<'tcx>, - instance: ty::Instance<'tcx>, - span: Span, - target: BasicBlock, - ) -> EvalResult<'tcx> { - trace!("drop_in_place: {:?},\n {:?}, {:?}", *place, place.layout.ty, instance); - // We take the address of the object. This may well be unaligned, which is fine for us - // here. 
However, unaligned accesses will probably make the actual drop implementation fail - // -- a problem shared by rustc. - let place = self.force_allocation(place)?; - - let (instance, place) = match place.layout.ty.sty { - ty::Dynamic(..) => { - // Dropping a trait object. - let vtable = match place.extra { - PlaceExtra::Vtable(vtable) => vtable, - _ => bug!("Expected vtable when dropping {:#?}", place), - }; - let place = self.unpack_unsized_mplace(place)?; - let instance = self.read_drop_type_from_vtable(vtable)?; - (instance, place) - } - _ => (instance, place), - }; - - let fn_sig = instance.ty(*self.tcx).fn_sig(*self.tcx); - let fn_sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, &fn_sig); - - let arg = OpTy { - op: Operand::Immediate(place.to_ref(&self)), - layout: self.layout_of(self.tcx.mk_mut_ptr(place.layout.ty))?, - }; - - // This should always be (), but getting it from the sig seems - // easier than creating a layout of (). - let dest = PlaceTy::null(&self, self.layout_of(fn_sig.output())?); - - self.eval_fn_call( - instance, - Some((dest, target)), - &[arg], - span, - fn_sig, - ) - } -} diff --git a/src/librustc_mir/interpret/traits.rs b/src/librustc_mir/interpret/traits.rs index 4ce0563749ac6..74567b4297491 100644 --- a/src/librustc_mir/interpret/traits.rs +++ b/src/librustc_mir/interpret/traits.rs @@ -68,7 +68,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { } } - self.memory.mark_static_initialized( + self.memory.intern_static( vtable.alloc_id, Mutability::Immutable, )?; @@ -76,14 +76,21 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { Ok(vtable) } + /// Return the drop fn instance as well as the actual dynamic type pub fn read_drop_type_from_vtable( &self, vtable: Pointer, - ) -> EvalResult<'tcx, ty::Instance<'tcx>> { + ) -> EvalResult<'tcx, (ty::Instance<'tcx>, ty::Ty<'tcx>)> { // we don't care about the pointee type, we just want a pointer let pointer_align = 
self.tcx.data_layout.pointer_align; let drop_fn = self.memory.read_ptr_sized(vtable, pointer_align)?.to_ptr()?; - self.memory.get_fn(drop_fn) + let drop_instance = self.memory.get_fn(drop_fn)?; + trace!("Found drop fn: {:?}", drop_instance); + let fn_sig = drop_instance.ty(*self.tcx).fn_sig(*self.tcx); + let fn_sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, &fn_sig); + // the drop function takes *mut T where T is the type being dropped, so get that + let ty = fn_sig.inputs()[0].builtin_deref(true).unwrap().ty; + Ok((drop_instance, ty)) } pub fn read_size_and_align_from_vtable( diff --git a/src/librustc_mir/interpret/validity.rs b/src/librustc_mir/interpret/validity.rs index b0dfceb259746..d50fd6e13c106 100644 --- a/src/librustc_mir/interpret/validity.rs +++ b/src/librustc_mir/interpret/validity.rs @@ -19,7 +19,7 @@ use rustc::mir::interpret::{ }; use super::{ - MPlaceTy, Machine, EvalContext + OpTy, Machine, EvalContext }; macro_rules! validation_failure{ @@ -187,39 +187,39 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { } } - /// This function checks the memory where `dest` points to. The place must be sized - /// (i.e., dest.extra == PlaceExtra::None). + /// This function checks the data at `op`. /// It will error if the bits at the destination do not match the ones described by the layout. /// The `path` may be pushed to, but the part that is present when the function /// starts must not be changed! - pub fn validate_mplace( + pub fn validate_operand( &self, - dest: MPlaceTy<'tcx>, + dest: OpTy<'tcx>, path: &mut Vec, - seen: &mut FxHashSet<(MPlaceTy<'tcx>)>, - todo: &mut Vec<(MPlaceTy<'tcx>, Vec)>, + seen: &mut FxHashSet<(OpTy<'tcx>)>, + todo: &mut Vec<(OpTy<'tcx>, Vec)>, ) -> EvalResult<'tcx> { - self.memory.dump_alloc(dest.to_ptr()?.alloc_id); - trace!("validate_mplace: {:?}, {:#?}", *dest, dest.layout); + trace!("validate_operand: {:?}, {:#?}", *dest, dest.layout); // Find the right variant. 
We have to handle this as a prelude, not via // proper recursion with the new inner layout, to be able to later nicely // print the field names of the enum field that is being accessed. let (variant, dest) = match dest.layout.variants { - layout::Variants::NicheFilling { niche: ref tag, .. } | - layout::Variants::Tagged { ref tag, .. } => { - let size = tag.value.size(self); - // we first read the tag value as scalar, to be able to validate it - let tag_mplace = self.mplace_field(dest, 0)?; - let tag_value = self.read_scalar(tag_mplace.into())?; - path.push(PathElem::Tag); - self.validate_scalar( - tag_value, size, tag, &path, tag_mplace.layout.ty - )?; - path.pop(); // remove the element again - // then we read it again to get the index, to continue - let variant = self.read_discriminant_as_variant_index(dest.into())?; - let inner_dest = self.mplace_downcast(dest, variant)?; + layout::Variants::NicheFilling { .. } | + layout::Variants::Tagged { .. } => { + let variant = match self.read_discriminant(dest) { + Ok(res) => res.1, + Err(err) => match err.kind { + EvalErrorKind::InvalidDiscriminant(val) => + return validation_failure!( + format!("invalid enum discriminant {}", val), path + ), + _ => + return validation_failure!( + format!("non-integer enum discriminant"), path + ), + } + }; + let inner_dest = self.operand_downcast(dest, variant)?; // Put the variant projection onto the path, as a field path.push(PathElem::Field(dest.layout.ty .ty_adt_def() @@ -229,6 +229,23 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { (variant, inner_dest) }, layout::Variants::Single { index } => { + // Pre-processing for trait objects: Treat them at their real type. + // (We do not do this for slices and strings: For slices it is not needed, + // `mplace_array_fields` does the right thing, and for strings there is no + // real type that would show the actual length.) + let dest = match dest.layout.ty.sty { + ty::Dynamic(..) 
=> { + let dest = dest.to_mem_place(); // immediate trait objects are not a thing + match self.unpack_dyn_trait(dest) { + Ok(res) => res.1.into(), + Err(_) => + return validation_failure!( + "invalid vtable in fat pointer", path + ), + } + } + _ => dest + }; (index, dest) } }; @@ -251,7 +268,20 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { // expectation. layout::Abi::Scalar(ref scalar_layout) => { let size = scalar_layout.value.size(self); - let value = self.read_value(dest.into())?; + let value = match self.read_value(dest) { + Ok(val) => val, + Err(err) => match err.kind { + EvalErrorKind::PointerOutOfBounds { .. } | + EvalErrorKind::ReadUndefBytes => + return validation_failure!( + "uninitialized or out-of-bounds memory", path + ), + _ => + return validation_failure!( + "unrepresentable data", path + ), + } + }; let scalar = value.to_scalar_or_undef(); self.validate_scalar(scalar, size, scalar_layout, &path, dest.layout.ty)?; if scalar_layout.value == Primitive::Pointer { @@ -260,18 +290,18 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { let alloc_kind = self.tcx.alloc_map.lock().get(ptr.alloc_id); if let Some(AllocType::Static(did)) = alloc_kind { // statics from other crates are already checked. - // extern statics should not be validated as they have no body. + // extern statics cannot be validated as they have no body. if !did.is_local() || self.tcx.is_foreign_item(did) { return Ok(()); } } if value.layout.ty.builtin_deref(false).is_some() { - trace!("Recursing below ptr {:#?}", value); - let ptr_place = self.ref_to_mplace(value)?; - // we have not encountered this pointer+layout - // combination before - if seen.insert(ptr_place) { - todo.push((ptr_place, path_clone_and_deref(path))); + let ptr_op = self.ref_to_mplace(value)?.into(); + // we have not encountered this pointer+layout combination + // before. 
+ if seen.insert(ptr_op) { + trace!("Recursing below ptr {:#?}", *value); + todo.push((ptr_op, path_clone_and_deref(path))); } } } @@ -285,49 +315,98 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { // The fields don't need to correspond to any bit pattern of the union's fields. // See https://github.com/rust-lang/rust/issues/32836#issuecomment-406875389 }, - layout::FieldPlacement::Array { .. } => { - for (i, field) in self.mplace_array_fields(dest)?.enumerate() { - let field = field?; - path.push(PathElem::ArrayElem(i)); - self.validate_mplace(field, path, seen, todo)?; - path.truncate(path_len); + layout::FieldPlacement::Array { .. } if !dest.layout.is_zst() => { + let dest = dest.to_mem_place(); // non-ZST array/slice/str cannot be immediate + // Special handling for strings to verify UTF-8 + match dest.layout.ty.sty { + ty::Str => { + match self.read_str(dest) { + Ok(_) => {}, + Err(err) => match err.kind { + EvalErrorKind::PointerOutOfBounds { .. } | + EvalErrorKind::ReadUndefBytes => + // The error here looks slightly different than it does + // for slices, because we do not report the index into the + // str at which we are OOB. + return validation_failure!( + "uninitialized or out-of-bounds memory", path + ), + _ => + return validation_failure!( + "non-UTF-8 data in str", path + ), + } + } + } + _ => { + // This handles the unsized case correctly as well, as well as + // SIMD and all sorts of other array-like types. + for (i, field) in self.mplace_array_fields(dest)?.enumerate() { + let field = field?; + path.push(PathElem::ArrayElem(i)); + self.validate_operand(field.into(), path, seen, todo)?; + path.truncate(path_len); + } + } } }, + layout::FieldPlacement::Array { .. } => { + // An empty array. Nothing to do. + } layout::FieldPlacement::Arbitrary { ref offsets, .. } => { - // Fat pointers need special treatment. + // Fat pointers are treated like pointers, not aggregates. 
if dest.layout.ty.builtin_deref(true).is_some() { // This is a fat pointer. - let ptr = match self.ref_to_mplace(self.read_value(dest.into())?) { + let ptr = match self.read_value(dest.into()) + .and_then(|val| self.ref_to_mplace(val)) + { Ok(ptr) => ptr, - Err(err) => match err.kind { - EvalErrorKind::ReadPointerAsBytes => - return validation_failure!( - "fat pointer length is not a valid integer", path - ), - EvalErrorKind::ReadBytesAsPointer => - return validation_failure!( - "fat pointer vtable is not a valid pointer", path - ), - _ => return Err(err), - } + Err(_) => + return validation_failure!( + "undefined location or metadata in fat pointer", path + ), }; - let unpacked_ptr = self.unpack_unsized_mplace(ptr)?; + // check metadata early, for better diagnostics + match self.tcx.struct_tail(ptr.layout.ty).sty { + ty::Dynamic(..) => { + match ptr.extra.unwrap().to_ptr() { + Ok(_) => {}, + Err(_) => + return validation_failure!( + "non-pointer vtable in fat pointer", path + ), + } + } + ty::Slice(..) 
| ty::Str => { + match ptr.extra.unwrap().to_usize(self) { + Ok(_) => {}, + Err(_) => + return validation_failure!( + "non-integer slice length in fat pointer", path + ), + } + } + _ => + bug!("Unexpected unsized type tail: {:?}", + self.tcx.struct_tail(ptr.layout.ty) + ), + } // for safe ptrs, recursively check it if !dest.layout.ty.is_unsafe_ptr() { - trace!("Recursing below fat ptr {:?} (unpacked: {:?})", ptr, unpacked_ptr); - if seen.insert(unpacked_ptr) { - todo.push((unpacked_ptr, path_clone_and_deref(path))); + let ptr = ptr.into(); + if seen.insert(ptr) { + trace!("Recursing below fat ptr {:?}", ptr); + todo.push((ptr, path_clone_and_deref(path))); } } } else { // Not a pointer, perform regular aggregate handling below for i in 0..offsets.len() { - let field = self.mplace_field(dest, i as u64)?; + let field = self.operand_field(dest, i as u64)?; path.push(self.aggregate_field_path_elem(dest.layout.ty, variant, i)); - self.validate_mplace(field, path, seen, todo)?; + self.validate_operand(field, path, seen, todo)?; path.truncate(path_len); } - // FIXME: For a TyStr, check that this is valid UTF-8. 
} } } diff --git a/src/librustc_mir/lib.rs b/src/librustc_mir/lib.rs index cfa84264b1f5a..f91ff3642cd10 100644 --- a/src/librustc_mir/lib.rs +++ b/src/librustc_mir/lib.rs @@ -83,6 +83,7 @@ pub mod transform; pub mod util; pub mod interpret; pub mod monomorphize; +pub mod const_eval; pub use hair::pattern::check_crate as matchck_crate; use rustc::ty::query::Providers; diff --git a/src/librustc_mir/monomorphize/collector.rs b/src/librustc_mir/monomorphize/collector.rs index f19c4532b47d8..52bbffa7519df 100644 --- a/src/librustc_mir/monomorphize/collector.rs +++ b/src/librustc_mir/monomorphize/collector.rs @@ -1271,7 +1271,7 @@ fn collect_const<'a, 'tcx>( ConstValue::ScalarPair(Scalar::Ptr(ptr), _) | ConstValue::Scalar(Scalar::Ptr(ptr)) => collect_miri(tcx, ptr.alloc_id, output), - ConstValue::ByRef(alloc, _offset) => { + ConstValue::ByRef(_id, alloc, _offset) => { for &id in alloc.relocations.values() { collect_miri(tcx, id, output); } diff --git a/src/librustc_mir/transform/const_prop.rs b/src/librustc_mir/transform/const_prop.rs index fa11e6f0719b1..1715930dbb61c 100644 --- a/src/librustc_mir/transform/const_prop.rs +++ b/src/librustc_mir/transform/const_prop.rs @@ -14,7 +14,7 @@ use rustc::hir::def::Def; use rustc::mir::{Constant, Location, Place, Mir, Operand, Rvalue, Local}; -use rustc::mir::{NullOp, StatementKind, Statement, BasicBlock, LocalKind}; +use rustc::mir::{NullOp, UnOp, StatementKind, Statement, BasicBlock, LocalKind}; use rustc::mir::{TerminatorKind, ClearCrossCrate, SourceInfo, BinOp, ProjectionElem}; use rustc::mir::visit::{Visitor, PlaceContext}; use rustc::mir::interpret::{ @@ -170,7 +170,7 @@ impl<'b, 'a, 'tcx:'b> ConstPropagator<'b, 'a, 'tcx> { | DoubleFree | InvalidFunctionPointer | InvalidBool - | InvalidDiscriminant + | InvalidDiscriminant(..) | PointerOutOfBounds { .. } | InvalidNullPointerUsage | MemoryLockViolation { .. 
} @@ -257,10 +257,9 @@ impl<'b, 'a, 'tcx:'b> ConstPropagator<'b, 'a, 'tcx> { source_info: SourceInfo, ) -> Option> { self.ecx.tcx.span = source_info.span; - match self.ecx.const_value_to_op(c.literal.val) { + match self.ecx.const_to_op(c.literal) { Ok(op) => { - let layout = self.tcx.layout_of(self.param_env.and(c.literal.ty)).ok()?; - Some((OpTy { op, layout }, c.span)) + Some((op, c.span)) }, Err(error) => { let (stacktrace, span) = self.ecx.generate_stacktrace(None); @@ -382,6 +381,20 @@ impl<'b, 'a, 'tcx:'b> ConstPropagator<'b, 'a, 'tcx> { let (arg, _) = self.eval_operand(arg, source_info)?; let val = self.use_ecx(source_info, |this| { let prim = this.ecx.read_scalar(arg)?.not_undef()?; + match op { + UnOp::Neg => { + // Need to do overflow check here: For actual CTFE, MIR + // generation emits code that does this before calling the op. + let size = arg.layout.size; + if prim.to_bits(size)? == (1 << (size.bits() - 1)) { + return err!(OverflowNeg); + } + } + UnOp::Not => { + // Cannot overflow + } + } + // Now run the actual operation. this.ecx.unary_op(op, prim, arg.layout) })?; Some((OpTy::from_scalar_value(val, place_layout), span)) diff --git a/src/test/run-pass/ctfe/promotion.rs b/src/test/run-pass/ctfe/promotion.rs index 2d228408254aa..28b876c308b64 100644 --- a/src/test/run-pass/ctfe/promotion.rs +++ b/src/test/run-pass/ctfe/promotion.rs @@ -8,10 +8,18 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
+// compile-flags: -O + fn foo(_: &'static [&'static str]) {} fn bar(_: &'static [&'static str; 3]) {} +fn baz_i32(_: &'static i32) {} +fn baz_u32(_: &'static u32) {} fn main() { foo(&["a", "b", "c"]); bar(&["d", "e", "f"]); + + // make sure that these do not cause trouble despite overflowing + baz_u32(&(0-1)); + baz_i32(&-std::i32::MIN); } diff --git a/src/test/ui/consts/const-eval/double_check2.stderr b/src/test/ui/consts/const-eval/double_check2.stderr index 739af12d09c69..21025877340ea 100644 --- a/src/test/ui/consts/const-eval/double_check2.stderr +++ b/src/test/ui/consts/const-eval/double_check2.stderr @@ -5,7 +5,7 @@ LL | / static FOO: (&Foo, &Bar) = unsafe {( //~ undefined behavior LL | | Union { usize: &BAR }.foo, LL | | Union { usize: &BAR }.bar, LL | | )}; - | |___^ type validation failed: encountered 5 at .1.., but expected something in the range 42..=99 + | |___^ type validation failed: encountered invalid enum discriminant 5 at .1. | = note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior diff --git a/src/test/ui/consts/const-eval/ub-enum.stderr b/src/test/ui/consts/const-eval/ub-enum.stderr index 98e9b598b543f..572d08ddfeebd 100644 --- a/src/test/ui/consts/const-eval/ub-enum.stderr +++ b/src/test/ui/consts/const-eval/ub-enum.stderr @@ -2,7 +2,7 @@ error[E0080]: this constant likely exhibits undefined behavior --> $DIR/ub-enum.rs:22:1 | LL | const BAD_ENUM: Enum = unsafe { TransmuteEnum { a: &1 }.b }; - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered pointer at ., but expected something in the range 0..=0 + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered non-integer enum discriminant | = note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. 
Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior @@ -10,7 +10,7 @@ error[E0080]: this constant likely exhibits undefined behavior --> $DIR/ub-enum.rs:35:1 | LL | const BAD_ENUM2 : Enum2 = unsafe { TransmuteEnum2 { a: 0 }.b }; - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered 0 at ., but expected something in the range 2..=2 + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered invalid enum discriminant 0 | = note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior diff --git a/src/test/ui/union-ub-fat-ptr.rs b/src/test/ui/union-ub-fat-ptr.rs index ffa824fc6af47..0c42d28eb00f4 100644 --- a/src/test/ui/union-ub-fat-ptr.rs +++ b/src/test/ui/union-ub-fat-ptr.rs @@ -8,6 +8,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
+#![allow(unused)] + // normalize-stderr-test "alignment \d+" -> "alignment N" // normalize-stderr-test "offset \d+" -> "offset N" // normalize-stderr-test "allocation \d+" -> "allocation N" @@ -37,7 +39,8 @@ union SliceTransmute { bad: BadSliceRepr, slice: &'static [u8], str: &'static str, - my_str: &'static Str, + my_str: &'static MyStr, + my_slice: &'static MySliceBool, } #[repr(C)] @@ -71,7 +74,12 @@ union DynTransmute { trait Trait {} impl Trait for bool {} -struct Str(str); +// custom unsized type +struct MyStr(str); + +// custom unsized type with sized fields +struct MySlice(bool, T); +type MySliceBool = MySlice<[bool]>; // OK const A: &str = unsafe { SliceTransmute { repr: SliceRepr { ptr: &42, len: 1 } }.str}; @@ -81,8 +89,8 @@ const B: &str = unsafe { SliceTransmute { repr: SliceRepr { ptr: &42, len: 999 } // bad str const C: &str = unsafe { SliceTransmute { bad: BadSliceRepr { ptr: &42, len: &3 } }.str}; //~^ ERROR this constant likely exhibits undefined behavior -// bad str in Str -const C2: &Str = unsafe { SliceTransmute { bad: BadSliceRepr { ptr: &42, len: &3 } }.my_str}; +// bad str in user-defined unsized type +const C2: &MyStr = unsafe { SliceTransmute { bad: BadSliceRepr { ptr: &42, len: &3 } }.my_str}; //~^ ERROR this constant likely exhibits undefined behavior // OK @@ -107,10 +115,25 @@ const F: &Trait = unsafe { DynTransmute { bad: BadDynRepr { ptr: &92, vtable: 3 // bad data *inside* the trait object const G: &Trait = &unsafe { BoolTransmute { val: 3 }.bl }; //~^ ERROR this constant likely exhibits undefined behavior - // bad data *inside* the slice const H: &[bool] = &[unsafe { BoolTransmute { val: 3 }.bl }]; //~^ ERROR this constant likely exhibits undefined behavior +// good MySliceBool +const I1: &MySliceBool = &MySlice(true, [false]); +// bad: sized field is not okay +const I2: &MySliceBool = &MySlice(unsafe { BoolTransmute { val: 3 }.bl }, [false]); +//~^ ERROR this constant likely exhibits undefined behavior +// bad: unsized part is 
not okay +const I3: &MySliceBool = &MySlice(true, [unsafe { BoolTransmute { val: 3 }.bl }]); +//~^ ERROR this constant likely exhibits undefined behavior + +// invalid UTF-8 +const J1: &str = unsafe { SliceTransmute { slice: &[0xFF] }.str }; +//~^ ERROR this constant likely exhibits undefined behavior +// invalid UTF-8 in user-defined str-like +const J2: &MyStr = unsafe { SliceTransmute { slice: &[0xFF] }.my_str }; +//~^ ERROR this constant likely exhibits undefined behavior + fn main() { } diff --git a/src/test/ui/union-ub-fat-ptr.stderr b/src/test/ui/union-ub-fat-ptr.stderr index cc22422304d68..5d817dce205b1 100644 --- a/src/test/ui/union-ub-fat-ptr.stderr +++ b/src/test/ui/union-ub-fat-ptr.stderr @@ -1,69 +1,69 @@ error[E0080]: this constant likely exhibits undefined behavior - --> $DIR/union-ub-fat-ptr.rs:79:1 + --> $DIR/union-ub-fat-ptr.rs:87:1 | LL | const B: &str = unsafe { SliceTransmute { repr: SliceRepr { ptr: &42, len: 999 } }.str}; - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ memory access at offset N, outside bounds of allocation N which has size N + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered uninitialized or out-of-bounds memory at . | = note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. 
Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior error[E0080]: this constant likely exhibits undefined behavior - --> $DIR/union-ub-fat-ptr.rs:82:1 + --> $DIR/union-ub-fat-ptr.rs:90:1 | LL | const C: &str = unsafe { SliceTransmute { bad: BadSliceRepr { ptr: &42, len: &3 } }.str}; - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered fat pointer length is not a valid integer + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered non-integer slice length in fat pointer | = note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior error[E0080]: this constant likely exhibits undefined behavior - --> $DIR/union-ub-fat-ptr.rs:85:1 + --> $DIR/union-ub-fat-ptr.rs:93:1 | -LL | const C2: &Str = unsafe { SliceTransmute { bad: BadSliceRepr { ptr: &42, len: &3 } }.my_str}; - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered fat pointer length is not a valid integer +LL | const C2: &MyStr = unsafe { SliceTransmute { bad: BadSliceRepr { ptr: &42, len: &3 } }.my_str}; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered non-integer slice length in fat pointer | = note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. 
Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior error[E0080]: this constant likely exhibits undefined behavior - --> $DIR/union-ub-fat-ptr.rs:91:1 + --> $DIR/union-ub-fat-ptr.rs:99:1 | LL | const B2: &[u8] = unsafe { SliceTransmute { repr: SliceRepr { ptr: &42, len: 999 } }.slice}; - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ memory access at offset N, outside bounds of allocation N which has size N + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered uninitialized or out-of-bounds memory at .[1] | = note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior error[E0080]: this constant likely exhibits undefined behavior - --> $DIR/union-ub-fat-ptr.rs:94:1 + --> $DIR/union-ub-fat-ptr.rs:102:1 | LL | const C3: &[u8] = unsafe { SliceTransmute { bad: BadSliceRepr { ptr: &42, len: &3 } }.slice}; - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered fat pointer length is not a valid integer + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered non-integer slice length in fat pointer | = note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. 
Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior error[E0080]: this constant likely exhibits undefined behavior - --> $DIR/union-ub-fat-ptr.rs:98:1 + --> $DIR/union-ub-fat-ptr.rs:106:1 | LL | const D: &Trait = unsafe { DynTransmute { repr: DynRepr { ptr: &92, vtable: &3 } }.rust}; - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ tried to access memory with alignment N, but alignment N is required + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered invalid vtable in fat pointer at . | = note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior error[E0080]: this constant likely exhibits undefined behavior - --> $DIR/union-ub-fat-ptr.rs:101:1 + --> $DIR/union-ub-fat-ptr.rs:109:1 | LL | const E: &Trait = unsafe { DynTransmute { repr2: DynRepr2 { ptr: &92, vtable: &3 } }.rust}; - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ a memory access tried to interpret some bytes as a pointer + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered invalid vtable in fat pointer at . | = note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. 
Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior error[E0080]: this constant likely exhibits undefined behavior - --> $DIR/union-ub-fat-ptr.rs:104:1 + --> $DIR/union-ub-fat-ptr.rs:112:1 | LL | const F: &Trait = unsafe { DynTransmute { bad: BadDynRepr { ptr: &92, vtable: 3 } }.rust}; - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered fat pointer vtable is not a valid pointer + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered non-pointer vtable in fat pointer | = note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior error[E0080]: this constant likely exhibits undefined behavior - --> $DIR/union-ub-fat-ptr.rs:108:1 + --> $DIR/union-ub-fat-ptr.rs:116:1 | LL | const G: &Trait = &unsafe { BoolTransmute { val: 3 }.bl }; | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered 3 at ., but expected something in the range 0..=1 @@ -71,13 +71,45 @@ LL | const G: &Trait = &unsafe { BoolTransmute { val: 3 }.bl }; = note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. 
Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior error[E0080]: this constant likely exhibits undefined behavior - --> $DIR/union-ub-fat-ptr.rs:112:1 + --> $DIR/union-ub-fat-ptr.rs:119:1 | LL | const H: &[bool] = &[unsafe { BoolTransmute { val: 3 }.bl }]; | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered 3 at .[0], but expected something in the range 0..=1 | = note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior -error: aborting due to 10 previous errors +error[E0080]: this constant likely exhibits undefined behavior + --> $DIR/union-ub-fat-ptr.rs:125:1 + | +LL | const I2: &MySliceBool = &MySlice(unsafe { BoolTransmute { val: 3 }.bl }, [false]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered 3 at ..0, but expected something in the range 0..=1 + | + = note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior + +error[E0080]: this constant likely exhibits undefined behavior + --> $DIR/union-ub-fat-ptr.rs:128:1 + | +LL | const I3: &MySliceBool = &MySlice(true, [unsafe { BoolTransmute { val: 3 }.bl }]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered 3 at ..1[0], but expected something in the range 0..=1 + | + = note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. 
Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior + +error[E0080]: this constant likely exhibits undefined behavior + --> $DIR/union-ub-fat-ptr.rs:132:1 + | +LL | const J1: &str = unsafe { SliceTransmute { slice: &[0xFF] }.str }; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered non-UTF-8 data in str at . + | + = note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior + +error[E0080]: this constant likely exhibits undefined behavior + --> $DIR/union-ub-fat-ptr.rs:135:1 + | +LL | const J2: &MyStr = unsafe { SliceTransmute { slice: &[0xFF] }.my_str }; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered non-UTF-8 data in str at ..0 + | + = note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior + +error: aborting due to 14 previous errors For more information about this error, try `rustc --explain E0080`.