allocators: don’t assume MIN_ALIGN for small sizes #46117

Merged (3 commits, Nov 25, 2017)
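The change is the same in both allocator shims touched below (liballoc_jemalloc and liballoc_system): the align <= MIN_ALIGN fast path now also requires align <= size. On some platforms an allocation smaller than its requested alignment may come back aligned only to its (smaller) size, so a request such as 8 bytes at 16-byte alignment (issue #45955, exercised by the new test below) could previously be under-aligned. A minimal sketch of the revised predicate; the helper name and the x86_64 MIN_ALIGN value are illustrative, not taken from the diff:

// Illustrative x86_64 value; other targets in the diffs below use a
// smaller per-architecture constant.
const MIN_ALIGN: usize = 16;

// Hypothetical helper naming the predicate this PR adds in both shims:
// the plain malloc/mallocx fast path is only taken when the requested
// alignment is covered both by MIN_ALIGN and by the size itself.
fn fits_unaligned_fast_path(size: usize, align: usize) -> bool {
    align <= MIN_ALIGN && align <= size
}

fn main() {
    assert!(fits_unaligned_fast_path(32, 16));  // size >= align: fast path is fine
    assert!(!fits_unaligned_fast_path(8, 16));  // size < align: must allocate aligned
}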
45 changes: 45 additions & 0 deletions src/liballoc/tests/heap.rs
@@ -0,0 +1,45 @@
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use alloc_system::System;
use std::heap::{Heap, Alloc, Layout};

/// https://github.com/rust-lang/rust/issues/45955
///
/// Note that `#[global_allocator]` is not used,
/// so `liballoc_jemalloc` is linked (on some platforms).
#[test]
fn alloc_system_overaligned_request() {
    check_overalign_requests(System)
}

#[test]
fn std_heap_overaligned_request() {
    check_overalign_requests(Heap)
}

fn check_overalign_requests<T: Alloc>(mut allocator: T) {
    let size = 8;
    let align = 16; // greater than size
    let iterations = 100;
    unsafe {
        let pointers: Vec<_> = (0..iterations).map(|_| {
            allocator.alloc(Layout::from_size_align(size, align).unwrap()).unwrap()
        }).collect();
        for &ptr in &pointers {
            assert_eq!((ptr as usize) % align, 0, "Got a pointer less aligned than requested")
        }

        // Clean up
        for &ptr in &pointers {
            allocator.dealloc(ptr, Layout::from_size_align(size, align).unwrap())
        }
    }
}
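A single request already shows the case the old check mishandled; the 100 iterations above presumably just make a misaligned result far more likely to be observed, since any one allocation can be sufficiently aligned by chance. A hedged sketch of that single case, mirroring the test above (same 2017-era std::heap API; the test name is made up and this is not part of the PR):

use alloc_system::System;
use std::heap::{Alloc, Layout};

#[test]
fn single_overaligned_request() {
    // 8 bytes at 16-byte alignment: align <= MIN_ALIGN on x86_64, but
    // align > size, so the fixed allocators take the aligned path
    // instead of plain malloc.
    let mut allocator = System;
    unsafe {
        let ptr = allocator.alloc(Layout::from_size_align(8, 16).unwrap()).unwrap();
        assert_eq!((ptr as usize) % 16, 0);
        allocator.dealloc(ptr, Layout::from_size_align(8, 16).unwrap());
    }
}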
4 changes: 4 additions & 0 deletions src/liballoc/tests/lib.rs
@@ -10,6 +10,8 @@

#![deny(warnings)]

+#![feature(allocator_api)]
+#![feature(alloc_system)]
#![feature(attr_literals)]
#![feature(box_syntax)]
#![feature(inclusive_range_syntax)]
@@ -29,6 +31,7 @@
#![feature(unboxed_closures)]
#![feature(unicode)]

+extern crate alloc_system;
extern crate std_unicode;
extern crate rand;

@@ -39,6 +42,7 @@ mod binary_heap;
mod btree;
mod cow_str;
mod fmt;
+mod heap;
mod linked_list;
mod slice;
mod str;
25 changes: 12 additions & 13 deletions src/liballoc_jemalloc/lib.rs
@@ -72,8 +72,7 @@ mod contents {
const MALLOCX_ZERO: c_int = 0x40;

// The minimum alignment guaranteed by the architecture. This value is used to
-// add fast paths for low alignment values. In practice, the alignment is a
-// constant at the call site and the branch will be optimized out.
+// add fast paths for low alignment values.
#[cfg(all(any(target_arch = "arm",
target_arch = "mips",
target_arch = "powerpc")))]
@@ -92,8 +91,8 @@ mod contents {
a.trailing_zeros() as c_int
}

-fn align_to_flags(align: usize) -> c_int {
-if align <= MIN_ALIGN {
+fn align_to_flags(align: usize, size: usize) -> c_int {
+if align <= MIN_ALIGN && align <= size {
0
} else {
mallocx_align(align)
@@ -111,7 +110,7 @@ mod contents {
pub unsafe extern fn __rde_alloc(size: usize,
align: usize,
err: *mut u8) -> *mut u8 {
-let flags = align_to_flags(align);
+let flags = align_to_flags(align, size);
let ptr = mallocx(size as size_t, flags) as *mut u8;
if ptr.is_null() {
let layout = Layout::from_size_align_unchecked(size, align);
@@ -132,7 +131,7 @@ mod contents {
pub unsafe extern fn __rde_dealloc(ptr: *mut u8,
size: usize,
align: usize) {
-let flags = align_to_flags(align);
+let flags = align_to_flags(align, size);
sdallocx(ptr as *mut c_void, size, flags);
}

@@ -142,7 +141,7 @@ mod contents {
min: *mut usize,
max: *mut usize) {
let layout = &*(layout as *const Layout);
-let flags = align_to_flags(layout.align());
+let flags = align_to_flags(layout.align(), layout.size());
let size = nallocx(layout.size(), flags) as usize;
*min = layout.size();
if size > 0 {
@@ -166,7 +165,7 @@
return 0 as *mut u8
}

-let flags = align_to_flags(new_align);
+let flags = align_to_flags(new_align, new_size);
let ptr = rallocx(ptr as *mut c_void, new_size, flags) as *mut u8;
if ptr.is_null() {
let layout = Layout::from_size_align_unchecked(new_size, new_align);
@@ -181,10 +180,10 @@
pub unsafe extern fn __rde_alloc_zeroed(size: usize,
align: usize,
err: *mut u8) -> *mut u8 {
-let ptr = if align <= MIN_ALIGN {
+let ptr = if align <= MIN_ALIGN && align <= size {
calloc(size as size_t, 1) as *mut u8
} else {
-let flags = align_to_flags(align) | MALLOCX_ZERO;
+let flags = align_to_flags(align, size) | MALLOCX_ZERO;
mallocx(size as size_t, flags) as *mut u8
};
if ptr.is_null() {
@@ -203,7 +202,7 @@
err: *mut u8) -> *mut u8 {
let p = __rde_alloc(size, align, err);
if !p.is_null() {
-let flags = align_to_flags(align);
+let flags = align_to_flags(align, size);
*excess = nallocx(size, flags) as usize;
}
return p
@@ -220,7 +219,7 @@
err: *mut u8) -> *mut u8 {
let p = __rde_realloc(ptr, old_size, old_align, new_size, new_align, err);
if !p.is_null() {
-let flags = align_to_flags(new_align);
+let flags = align_to_flags(new_align, new_size);
*excess = nallocx(new_size, flags) as usize;
}
p
@@ -244,7 +243,7 @@
new_size: usize,
new_align: usize) -> u8 {
if old_align == new_align {
-let flags = align_to_flags(new_align);
+let flags = align_to_flags(new_align, new_size);
(xallocx(ptr as *mut c_void, new_size, 0, flags) == new_size) as u8
} else {
0
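Read together, the jemalloc hunks amount to the helper below. This is a restatement of the diff with the reasoning spelled out as comments plus a self-contained main; the MIN_ALIGN value is the assumed x86_64 one (the visible cfg block only lists the lower-alignment targets), and mallocx_align is reproduced from its one-line body shown above.

use std::os::raw::c_int;

// Assumed x86_64 value; the real per-target constants are defined above.
const MIN_ALIGN: usize = 16;

// MALLOCX_ALIGN(a): jemalloc encodes a power-of-two alignment request as
// its log2 in the low bits of the flags word.
fn mallocx_align(a: usize) -> c_int {
    a.trailing_zeros() as c_int
}

fn align_to_flags(align: usize, size: usize) -> c_int {
    if align <= MIN_ALIGN && align <= size {
        // Default alignment already satisfies the request: no flags.
        0
    } else {
        // Over-aligned request, or one smaller than its alignment:
        // ask jemalloc for the alignment explicitly.
        mallocx_align(align)
    }
}

fn main() {
    assert_eq!(align_to_flags(16, 32), 0); // fast path: no MALLOCX_ALIGN flag
    assert_eq!(align_to_flags(16, 8), 4);  // size < align: MALLOCX_ALIGN(16) == lg(16)
}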
9 changes: 4 additions & 5 deletions src/liballoc_system/lib.rs
@@ -25,8 +25,7 @@
#![rustc_alloc_kind = "lib"]

// The minimum alignment guaranteed by the architecture. This value is used to
-// add fast paths for low alignment values. In practice, the alignment is a
-// constant at the call site and the branch will be optimized out.
+// add fast paths for low alignment values.
#[cfg(all(any(target_arch = "x86",
target_arch = "arm",
target_arch = "mips",
@@ -132,7 +131,7 @@ mod platform {
unsafe impl<'a> Alloc for &'a System {
#[inline]
unsafe fn alloc(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> {
-let ptr = if layout.align() <= MIN_ALIGN {
+let ptr = if layout.align() <= MIN_ALIGN && layout.align() <= layout.size() {
libc::malloc(layout.size()) as *mut u8
} else {
aligned_malloc(&layout)
@@ -148,7 +147,7 @@ mod platform {
unsafe fn alloc_zeroed(&mut self, layout: Layout)
-> Result<*mut u8, AllocErr>
{
-if layout.align() <= MIN_ALIGN {
+if layout.align() <= MIN_ALIGN && layout.align() <= layout.size() {
let ptr = libc::calloc(layout.size(), 1) as *mut u8;
if !ptr.is_null() {
Ok(ptr)
@@ -180,7 +179,7 @@
})
}

-if new_layout.align() <= MIN_ALIGN {
+if new_layout.align() <= MIN_ALIGN && new_layout.align() <= new_layout.size() {
let ptr = libc::realloc(ptr as *mut libc::c_void, new_layout.size());
if !ptr.is_null() {
Ok(ptr as *mut u8)
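Unlike the jemalloc shim, liballoc_system repeats the guard inline in each of the three hunks above (alloc, alloc_zeroed, realloc) rather than factoring it out. A small sketch naming that repeated condition over a Layout; the helper and the x86_64 MIN_ALIGN value are assumptions for illustration, written against the same 2017-era allocator API as the test file above:

#![feature(allocator_api)]

use std::heap::Layout;

// Assumed x86_64 value; the real per-target constant is defined earlier in
// the file, outside the visible hunks.
const MIN_ALIGN: usize = 16;

// Hypothetical helper for the condition repeated above: plain libc
// malloc/calloc/realloc is used only when the layout's alignment is within
// MIN_ALIGN and no larger than its size; otherwise the allocation goes
// through aligned_malloc.
fn can_use_plain_libc(layout: &Layout) -> bool {
    layout.align() <= MIN_ALIGN && layout.align() <= layout.size()
}

fn main() {
    let small_overaligned = Layout::from_size_align(8, 16).unwrap();
    assert!(!can_use_plain_libc(&small_overaligned)); // falls back to aligned_malloc

    let ordinary = Layout::from_size_align(64, 8).unwrap();
    assert!(can_use_plain_libc(&ordinary)); // plain libc::malloc suffices
}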