From da85ffb62540899a268b2d7040995770e9714a5f Mon Sep 17 00:00:00 2001 From: Eduard Burtescu Date: Wed, 8 Jun 2016 00:35:01 +0300 Subject: [PATCH] trans: always use a memcpy for ABI argument/return casts. --- src/librustc_trans/abi.rs | 68 +++++++++++++++++++------ src/librustc_trans/base.rs | 94 ++++++++++++++++------------------- src/librustc_trans/callee.rs | 56 ++++----------------- src/librustc_trans/mir/mod.rs | 17 +++---- src/test/codegen/stores.rs | 16 ++++-- 5 files changed, 123 insertions(+), 128 deletions(-) diff --git a/src/librustc_trans/abi.rs b/src/librustc_trans/abi.rs index 9bbe0cb5f6957..df3d2d149b99c 100644 --- a/src/librustc_trans/abi.rs +++ b/src/librustc_trans/abi.rs @@ -10,8 +10,8 @@ use llvm::{self, ValueRef}; use base; -use builder::Builder; -use common::{type_is_fat_ptr, BlockAndBuilder}; +use build::AllocaFcx; +use common::{type_is_fat_ptr, BlockAndBuilder, C_uint}; use context::CrateContext; use cabi_x86; use cabi_x86_64; @@ -22,7 +22,7 @@ use cabi_powerpc; use cabi_powerpc64; use cabi_mips; use cabi_asmjs; -use machine::{llalign_of_min, llsize_of, llsize_of_real}; +use machine::{llalign_of_min, llsize_of, llsize_of_real, llsize_of_store}; use type_::Type; use type_of; @@ -30,6 +30,7 @@ use rustc::hir; use rustc::ty::{self, Ty}; use libc::c_uint; +use std::cmp; pub use syntax::abi::Abi; pub use rustc::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA}; @@ -150,26 +151,63 @@ impl ArgType { /// lvalue for the original Rust type of this argument/return. /// Can be used for both storing formal arguments into Rust variables /// or results of call/invoke instructions into their destinations. - pub fn store(&self, b: &Builder, mut val: ValueRef, dst: ValueRef) { + pub fn store(&self, bcx: &BlockAndBuilder, mut val: ValueRef, dst: ValueRef) { if self.is_ignore() { return; } + let ccx = bcx.ccx(); if self.is_indirect() { - let llsz = llsize_of(b.ccx, self.ty); - let llalign = llalign_of_min(b.ccx, self.ty); - base::call_memcpy(b, dst, val, llsz, llalign as u32); + let llsz = llsize_of(ccx, self.ty); + let llalign = llalign_of_min(ccx, self.ty); + base::call_memcpy(bcx, dst, val, llsz, llalign as u32); } else if let Some(ty) = self.cast { - let cast_dst = b.pointercast(dst, ty.ptr_to()); - let store = b.store(val, cast_dst); - let llalign = llalign_of_min(b.ccx, self.ty); - unsafe { - llvm::LLVMSetAlignment(store, llalign); + // FIXME(eddyb): Figure out when the simpler Store is safe, clang + // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}. + let can_store_through_cast_ptr = false; + if can_store_through_cast_ptr { + let cast_dst = bcx.pointercast(dst, ty.ptr_to()); + let store = bcx.store(val, cast_dst); + let llalign = llalign_of_min(ccx, self.ty); + unsafe { + llvm::LLVMSetAlignment(store, llalign); + } + } else { + // The actual return type is a struct, but the ABI + // adaptation code has cast it into some scalar type. The + // code that follows is the only reliable way I have + // found to do a transform like i64 -> {i32,i32}. + // Basically we dump the data onto the stack then memcpy it. + // + // Other approaches I tried: + // - Casting rust ret pointer to the foreign type and using Store + // is (a) unsafe if size of foreign type > size of rust type and + // (b) runs afoul of strict aliasing rules, yielding invalid + // assembly under -O (specifically, the store gets removed). + // - Truncating foreign type to correct integral type and then + // bitcasting to the struct type yields invalid cast errors. + + // We instead thus allocate some scratch space... 
+ let llscratch = AllocaFcx(bcx.fcx(), ty, "abi_cast"); + base::Lifetime::Start.call(bcx, llscratch); + + // ...where we first store the value... + bcx.store(val, llscratch); + + // ...and then memcpy it to the intended destination. + base::call_memcpy(bcx, + bcx.pointercast(dst, Type::i8p(ccx)), + bcx.pointercast(llscratch, Type::i8p(ccx)), + C_uint(ccx, llsize_of_store(ccx, self.ty)), + cmp::min(llalign_of_min(ccx, self.ty), + llalign_of_min(ccx, ty)) as u32); + + base::Lifetime::End.call(bcx, llscratch); } } else { - if self.original_ty == Type::i1(b.ccx) { - val = b.zext(val, Type::i8(b.ccx)); + if self.original_ty == Type::i1(ccx) { + val = bcx.zext(val, Type::i8(ccx)); } - b.store(val, dst); + bcx.store(val, dst); } } diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index c6d072c5399a4..d4f0786e9efd6 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -1042,7 +1042,7 @@ pub fn with_cond<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, val: ValueRef, f: F) -> next_cx } -enum Lifetime { Start, End } +pub enum Lifetime { Start, End } // If LLVM lifetime intrinsic support is enabled (i.e. optimizations // on), and `ptr` is nonzero-sized, then extracts the size of `ptr` @@ -1079,24 +1079,25 @@ fn core_lifetime_emit<'blk, 'tcx, F>(ccx: &'blk CrateContext<'blk, 'tcx>, emit(ccx, size, lifetime_intrinsic) } -pub fn call_lifetime_start(cx: Block, ptr: ValueRef) { - core_lifetime_emit(cx.ccx(), ptr, Lifetime::Start, |ccx, size, lifetime_start| { - let ptr = PointerCast(cx, ptr, Type::i8p(ccx)); - Call(cx, - lifetime_start, - &[C_u64(ccx, size), ptr], - DebugLoc::None); - }) +impl Lifetime { + pub fn call(self, b: &Builder, ptr: ValueRef) { + core_lifetime_emit(b.ccx, ptr, self, |ccx, size, lifetime_intrinsic| { + let ptr = b.pointercast(ptr, Type::i8p(ccx)); + b.call(lifetime_intrinsic, &[C_u64(ccx, size), ptr], None); + }); + } } -pub fn call_lifetime_end(cx: Block, ptr: ValueRef) { - core_lifetime_emit(cx.ccx(), ptr, Lifetime::End, |ccx, size, lifetime_end| { - let ptr = PointerCast(cx, ptr, Type::i8p(ccx)); - Call(cx, - lifetime_end, - &[C_u64(ccx, size), ptr], - DebugLoc::None); - }) +pub fn call_lifetime_start(bcx: Block, ptr: ValueRef) { + if !bcx.unreachable.get() { + Lifetime::Start.call(&bcx.build(), ptr); + } +} + +pub fn call_lifetime_end(bcx: Block, ptr: ValueRef) { + if !bcx.unreachable.get() { + Lifetime::End.call(&bcx.build(), ptr); + } } // Generates code for resumption of unwind at the end of a landing pad. @@ -1663,29 +1664,21 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { arg_ty, datum::Lvalue::new("FunctionContext::bind_args")) } else { - let lltmp = if common::type_is_fat_ptr(bcx.tcx(), arg_ty) { - let lltemp = alloc_ty(bcx, arg_ty, ""); - let b = &bcx.build(); - // we pass fat pointers as two words, but we want to - // represent them internally as a pointer to two words, - // so make an alloca to store them in. - let meta = &self.fn_ty.args[idx]; - idx += 1; - arg.store_fn_arg(b, &mut llarg_idx, expr::get_dataptr(bcx, lltemp)); - meta.store_fn_arg(b, &mut llarg_idx, expr::get_meta(bcx, lltemp)); - lltemp - } else { - // otherwise, arg is passed by value, so store it into a temporary. 
- let llarg_ty = arg.cast.unwrap_or(arg.memory_ty(bcx.ccx())); - let lltemp = alloca(bcx, llarg_ty, ""); + unpack_datum!(bcx, datum::lvalue_scratch_datum(bcx, arg_ty, "", + uninit_reason, + arg_scope_id, |bcx, dst| { + debug!("FunctionContext::bind_args: {:?}: {:?}", hir_arg, arg_ty); let b = &bcx.build(); - arg.store_fn_arg(b, &mut llarg_idx, lltemp); - // And coerce the temporary into the type we expect. - b.pointercast(lltemp, arg.memory_ty(bcx.ccx()).ptr_to()) - }; - bcx.fcx.schedule_drop_mem(arg_scope_id, lltmp, arg_ty, None); - datum::Datum::new(lltmp, arg_ty, - datum::Lvalue::new("bind_args")) + if common::type_is_fat_ptr(bcx.tcx(), arg_ty) { + let meta = &self.fn_ty.args[idx]; + idx += 1; + arg.store_fn_arg(b, &mut llarg_idx, expr::get_dataptr(bcx, dst)); + meta.store_fn_arg(b, &mut llarg_idx, expr::get_meta(bcx, dst)); + } else { + arg.store_fn_arg(b, &mut llarg_idx, dst); + } + bcx + })) } } else { // FIXME(pcwalton): Reduce the amount of code bloat this is responsible for. @@ -1720,19 +1713,16 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { }; let pat = &hir_arg.pat; - bcx = match simple_name(pat) { - // The check for alloca is necessary because above for the immediate argument case - // we had to cast. At this point arg_datum is not an alloca anymore and thus - // breaks debuginfo if we allow this optimisation. - Some(name) - if unsafe { llvm::LLVMIsAAllocaInst(arg_datum.val) != ::std::ptr::null_mut() } => { - // Generate nicer LLVM for the common case of fn a pattern - // like `x: T` - set_value_name(arg_datum.val, &bcx.name(name)); - self.lllocals.borrow_mut().insert(pat.id, arg_datum); - bcx - }, - _ => _match::bind_irrefutable_pat(bcx, pat, arg_datum.match_input(), arg_scope_id) + bcx = if let Some(name) = simple_name(pat) { + // Generate nicer LLVM for the common case of fn a pattern + // like `x: T` + set_value_name(arg_datum.val, &bcx.name(name)); + self.lllocals.borrow_mut().insert(pat.id, arg_datum); + bcx + } else { + // General path. Copy out the values that are used in the + // pattern. + _match::bind_irrefutable_pat(bcx, pat, arg_datum.match_input(), arg_scope_id) }; debuginfo::create_argument_metadata(bcx, hir_arg); } diff --git a/src/librustc_trans/callee.rs b/src/librustc_trans/callee.rs index c0c5ea818b2d7..01e0d45348256 100644 --- a/src/librustc_trans/callee.rs +++ b/src/librustc_trans/callee.rs @@ -34,8 +34,7 @@ use build::*; use cleanup; use cleanup::CleanupMethods; use closure; -use common::{self, Block, Result, CrateContext, FunctionContext}; -use common::{C_uint, C_undef}; +use common::{self, Block, Result, CrateContext, FunctionContext, C_undef}; use consts; use datum::*; use debuginfo::DebugLoc; @@ -44,7 +43,7 @@ use expr; use glue; use inline; use intrinsic; -use machine::{llalign_of_min, llsize_of_store}; +use machine::llalign_of_min; use meth; use monomorphize::{self, Instance}; use type_::Type; @@ -58,8 +57,6 @@ use syntax::codemap::DUMMY_SP; use syntax::errors; use syntax::ptr::P; -use std::cmp; - #[derive(Debug)] pub enum CalleeData { /// Constructor for enum variant/tuple-like-struct. @@ -708,49 +705,16 @@ fn trans_call_inner<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, let (llret, mut bcx) = base::invoke(bcx, llfn, &llargs, debug_loc); if !bcx.unreachable.get() { fn_ty.apply_attrs_callsite(llret); - } - // If the function we just called does not use an outpointer, - // store the result into the rust outpointer. Cast the outpointer - // type to match because some ABIs will use a different type than - // the Rust type. 
e.g., a {u32,u32} struct could be returned as - // u64. - if !fn_ty.ret.is_ignore() && !fn_ty.ret.is_indirect() { - if let Some(llforeign_ret_ty) = fn_ty.ret.cast { - let llrust_ret_ty = fn_ty.ret.original_ty; - let llretslot = opt_llretslot.unwrap(); - - // The actual return type is a struct, but the ABI - // adaptation code has cast it into some scalar type. The - // code that follows is the only reliable way I have - // found to do a transform like i64 -> {i32,i32}. - // Basically we dump the data onto the stack then memcpy it. - // - // Other approaches I tried: - // - Casting rust ret pointer to the foreign type and using Store - // is (a) unsafe if size of foreign type > size of rust type and - // (b) runs afoul of strict aliasing rules, yielding invalid - // assembly under -O (specifically, the store gets removed). - // - Truncating foreign type to correct integral type and then - // bitcasting to the struct type yields invalid cast errors. - let llscratch = base::alloca(bcx, llforeign_ret_ty, "__cast"); - base::call_lifetime_start(bcx, llscratch); - Store(bcx, llret, llscratch); - let llscratch_i8 = PointerCast(bcx, llscratch, Type::i8(ccx).ptr_to()); - let llretptr_i8 = PointerCast(bcx, llretslot, Type::i8(ccx).ptr_to()); - let llrust_size = llsize_of_store(ccx, llrust_ret_ty); - let llforeign_align = llalign_of_min(ccx, llforeign_ret_ty); - let llrust_align = llalign_of_min(ccx, llrust_ret_ty); - let llalign = cmp::min(llforeign_align, llrust_align); - debug!("llrust_size={}", llrust_size); - - if !bcx.unreachable.get() { - base::call_memcpy(&B(bcx), llretptr_i8, llscratch_i8, - C_uint(ccx, llrust_size), llalign as u32); + // If the function we just called does not use an outpointer, + // store the result into the rust outpointer. Cast the outpointer + // type to match because some ABIs will use a different type than + // the Rust type. e.g., a {u32,u32} struct could be returned as + // u64. + if !fn_ty.ret.is_indirect() { + if let Some(llretslot) = opt_llretslot { + fn_ty.ret.store(&bcx.build(), llret, llretslot); } - base::call_lifetime_end(bcx, llscratch); - } else if let Some(llretslot) = opt_llretslot { - base::store_ty(bcx, llret, llretslot, output.unwrap()); } } diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs index ffc14b4468b5b..b98e04e51c007 100644 --- a/src/librustc_trans/mir/mod.rs +++ b/src/librustc_trans/mir/mod.rs @@ -327,10 +327,10 @@ fn arg_value_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>, llarg_idx += 1; llarg } else { + let lltemp = bcx.with_block(|bcx| { + base::alloc_ty(bcx, arg_ty, &format!("arg{}", arg_index)) + }); if common::type_is_fat_ptr(tcx, arg_ty) { - let lltemp = bcx.with_block(|bcx| { - base::alloc_ty(bcx, arg_ty, &format!("arg{}", arg_index)) - }); // we pass fat pointers as two words, but we want to // represent them internally as a pointer to two words, // so make an alloca to store them in. @@ -338,17 +338,12 @@ fn arg_value_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>, idx += 1; arg.store_fn_arg(bcx, &mut llarg_idx, get_dataptr(bcx, lltemp)); meta.store_fn_arg(bcx, &mut llarg_idx, get_meta(bcx, lltemp)); - lltemp } else { - // otherwise, arg is passed by value, so store it into a temporary. 
- let llarg_ty = arg.cast.unwrap_or(arg.memory_ty(bcx.ccx())); - let lltemp = bcx.with_block(|bcx| { - base::alloca(bcx, llarg_ty, &format!("arg{}", arg_index)) - }); + // otherwise, arg is passed by value, so make a + // temporary and store it there arg.store_fn_arg(bcx, &mut llarg_idx, lltemp); - // And coerce the temporary into the type we expect. - bcx.pointercast(lltemp, arg.memory_ty(bcx.ccx()).ptr_to()) } + lltemp }; bcx.with_block(|bcx| arg_scope.map(|scope| { // Is this a regular argument? diff --git a/src/test/codegen/stores.rs b/src/test/codegen/stores.rs index 8c425507975ca..89bb5d93c74fa 100644 --- a/src/test/codegen/stores.rs +++ b/src/test/codegen/stores.rs @@ -26,8 +26,12 @@ pub struct Bytes { #[no_mangle] #[rustc_no_mir] // FIXME #27840 MIR has different codegen. pub fn small_array_alignment(x: &mut [i8; 4], y: [i8; 4]) { -// CHECK: store i32 %{{.*}}, i32* %{{.*}}, align 1 -// CHECK: [[VAR:%[0-9]+]] = bitcast i32* %{{.*}} to [4 x i8]* +// CHECK: %y = alloca [4 x i8] +// CHECK: [[TMP:%.+]] = alloca i32 +// CHECK: store i32 %1, i32* [[TMP]] +// CHECK: [[Y8:%[0-9]+]] = bitcast [4 x i8]* %y to i8* +// CHECK: [[TMP8:%[0-9]+]] = bitcast i32* [[TMP]] to i8* +// CHECK: call void @llvm.memcpy.{{.*}}(i8* [[Y8]], i8* [[TMP8]], i{{[0-9]+}} 4, i32 1, i1 false) *x = y; } @@ -37,7 +41,11 @@ pub fn small_array_alignment(x: &mut [i8; 4], y: [i8; 4]) { #[no_mangle] #[rustc_no_mir] // FIXME #27840 MIR has different codegen. pub fn small_struct_alignment(x: &mut Bytes, y: Bytes) { -// CHECK: store i32 %{{.*}}, i32* %{{.*}}, align 1 -// CHECK: [[VAR:%[0-9]+]] = bitcast i32* %{{.*}} to %Bytes* +// CHECK: %y = alloca %Bytes +// CHECK: [[TMP:%.+]] = alloca i32 +// CHECK: store i32 %1, i32* [[TMP]] +// CHECK: [[Y8:%[0-9]+]] = bitcast %Bytes* %y to i8* +// CHECK: [[TMP8:%[0-9]+]] = bitcast i32* [[TMP]] to i8* +// CHECK: call void @llvm.memcpy.{{.*}}(i8* [[Y8]], i8* [[TMP8]], i{{[0-9]+}} 4, i32 1, i1 false) *x = y; }
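
For illustration only (not part of the patch): below is a minimal standalone sketch of the idea behind the new memcpy path in ArgType::store, written in plain Rust with std::ptr rather than the compiler's LLVM builder API. The names Bytes3, store_cast_arg, and the sentinel buffer are hypothetical and exist only to make the size mismatch visible; the patch itself operates on LLVM values, allocas, and llvm.memcpy.

    use std::mem::size_of;
    use std::ptr;

    // Hypothetical 3-byte struct whose ABI "cast" type would be a 4-byte
    // scalar (think i24 -> {i8, i8, i8}, widened to i32 by the backend).
    #[repr(C)]
    #[derive(Debug, Clone, Copy)]
    struct Bytes3 {
        a: u8,
        b: u8,
        c: u8,
    }

    // Sketch of the memcpy path: put the scalar into a scratch slot of the
    // *cast* type first, then copy only size_of::<Bytes3>() bytes into the
    // destination, so the extra byte of the cast type never reaches `dst`.
    unsafe fn store_cast_arg(cast_val: i32, dst: *mut Bytes3) {
        let scratch: i32 = cast_val; // plays the role of the "abi_cast" alloca
        ptr::copy_nonoverlapping(
            &scratch as *const i32 as *const u8,
            dst as *mut u8,
            size_of::<Bytes3>(), // 3 bytes, not 4
        );
        // Storing through `dst as *mut i32` instead would write 4 bytes and
        // could clobber whatever lives directly after the 3-byte destination.
    }

    fn main() {
        // 4-byte buffer: the first 3 bytes are the destination, the last byte
        // is a sentinel that must survive the store.
        let mut buf = [0xAAu8; 4];
        unsafe { store_cast_arg(0x0030_2010, buf.as_mut_ptr() as *mut Bytes3) };
        let head = unsafe { ptr::read(buf.as_ptr() as *const Bytes3) };
        // On a little-endian target: Bytes3 { a: 0x10, b: 0x20, c: 0x30 },
        // and the sentinel byte is still 0xAA because only 3 bytes were copied.
        println!("{:?} sentinel = {:#x}", head, buf[3]);
    }

This is the same trade-off the abi.rs comment describes: copying llsize_of_store bytes through a scratch slot costs a temporary alloca (bracketed by lifetime intrinsics so LLVM can elide it), but a direct store through a pointercast of the destination is only valid when the cast type is no larger than the Rust type, and the comment notes it also runs afoul of strict aliasing under -O.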