Fix Bytes when Vec pointer's LSB is set #346

Merged · 1 commit · Dec 17, 2019
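This PR changes how `Bytes::from(Vec<u8>)` stores its buffer pointer. `Bytes` packs a kind flag into the low bit of the stored pointer, and the old code simply asserted that a `Vec`'s buffer pointer never has its LSB set. An allocator may legally return an odd address for an align-1 allocation, so that assertion could fire; the fix instead picks a vtable based on the pointer's parity. The snippet below is a minimal standalone sketch of the low-bit tagging idea, with illustrative helper names (`tag_if_even`, `untag`) that are not part of the crate.

```rust
// Minimal sketch of low-bit pointer tagging (illustrative, not the crate's code).
// A 1-byte-aligned allocation guarantees no spare low bits, so an odd pointer
// cannot carry the flag and has to be handled by a different code path.
const KIND_VEC: usize = 0b1;
const KIND_MASK: usize = 0b1;

fn tag_if_even(ptr: *const u8) -> Option<usize> {
    if ptr as usize & 0x1 == 0 {
        // The low bit is free: pack the kind flag into the stored word.
        Some(ptr as usize | KIND_VEC)
    } else {
        // Odd pointer: packing the flag would corrupt the address.
        None
    }
}

fn untag(data: usize) -> *const u8 {
    // Mask the flag back off to recover the buffer address.
    (data & !KIND_MASK) as *const u8
}

fn main() {
    let v = vec![1u8, 2, 3];
    let p = v.as_ptr();
    if let Some(tagged) = tag_if_even(p) {
        assert_eq!(untag(tagged), p);
    }
}
```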
127 changes: 82 additions & 45 deletions src/bytes.rs
@@ -731,20 +731,23 @@ impl From<Vec<u8>> for Bytes {
let slice = vec.into_boxed_slice();
Review comment (Contributor): The `is_empty` special case above can be removed now that the low bit is being tracked properly.

let len = slice.len();
let ptr = slice.as_ptr();

assert!(
ptr as usize & KIND_VEC == 0,
"Vec pointer should not have LSB set: {:p}",
ptr,
);
drop(Box::into_raw(slice));

let data = ptr as usize | KIND_VEC;
Bytes {
ptr,
len,
data: AtomicPtr::new(data as *mut _),
vtable: &SHARED_VTABLE,
if ptr as usize & 0x1 == 0 {
let data = ptr as usize | KIND_VEC;
Bytes {
ptr,
len,
data: AtomicPtr::new(data as *mut _),
vtable: &PROMOTABLE_EVEN_VTABLE,
}
} else {
Bytes {
ptr,
len,
data: AtomicPtr::new(ptr as *mut _),
vtable: &PROMOTABLE_ODD_VTABLE,
}
}
}
}
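The hunk above replaces the single `SHARED_VTABLE` path for `Vec`-backed buffers with two "promotable" vtables chosen at construction time, so `clone` and `drop` never have to re-derive whether the stored word carries the kind flag. A minimal standalone sketch of that construction-time selection follows; `Vtable` here is a simplified stand-in, and the names (`EVEN_VTABLE`, `ODD_VTABLE`, `pick_vtable`) are illustrative, not the crate's definitions.

```rust
// Standalone sketch of choosing a static vtable from pointer parity.
struct Vtable {
    describe: fn() -> &'static str,
}

fn describe_even() -> &'static str {
    "even pointer: kind flag packed into the low bit of the stored word"
}

fn describe_odd() -> &'static str {
    "odd pointer: stored as-is; the vtable itself records that it is vec-backed"
}

static EVEN_VTABLE: Vtable = Vtable { describe: describe_even };
static ODD_VTABLE: Vtable = Vtable { describe: describe_odd };

fn pick_vtable(ptr: *const u8) -> &'static Vtable {
    // Same parity test as the From<Vec<u8>> impl above.
    if ptr as usize & 0x1 == 0 {
        &EVEN_VTABLE
    } else {
        &ODD_VTABLE
    }
}

fn main() {
    let v = vec![0u8; 8];
    println!("{}", (pick_vtable(v.as_ptr()).describe)());
}
```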
@@ -782,65 +785,101 @@ unsafe fn static_drop(_: &mut AtomicPtr<()>, _: *const u8, _: usize) {
// nothing to drop for &'static [u8]
}

// ===== impl SharedVtable =====
// ===== impl PromotableVtable =====

struct Shared {
// holds vec for drop, but otherwise doesnt access it
_vec: Vec<u8>,
ref_cnt: AtomicUsize,
}

static SHARED_VTABLE: Vtable = Vtable {
clone: shared_clone,
drop: shared_drop,
static PROMOTABLE_EVEN_VTABLE: Vtable = Vtable {
clone: promotable_even_clone,
drop: promotable_even_drop,
};

const KIND_ARC: usize = 0b0;
const KIND_VEC: usize = 0b1;
const KIND_MASK: usize = 0b1;
static PROMOTABLE_ODD_VTABLE: Vtable = Vtable {
clone: promotable_odd_clone,
drop: promotable_odd_drop,
};

unsafe fn shared_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
unsafe fn promotable_even_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
let shared = data.load(Ordering::Acquire);
let kind = shared as usize & KIND_MASK;

if kind == KIND_ARC {
shallow_clone_arc(shared as _, ptr, len)
} else {
debug_assert_eq!(kind, KIND_VEC);
shallow_clone_vec(data, shared, ptr, len)
let buf = (shared as usize & !KIND_MASK) as *mut u8;
shallow_clone_vec(data, shared, buf, ptr, len)
}
}

unsafe fn shared_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) {
unsafe fn promotable_even_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) {
let shared = *data.get_mut();
let kind = shared as usize & KIND_MASK;


if kind == KIND_ARC {
release_shared(shared as *mut Shared);
} else {
debug_assert_eq!(kind, KIND_VEC);
let buf = (shared as usize & !KIND_MASK) as *mut u8;
drop(rebuild_vec(buf, ptr, len));
}
}

drop(rebuild_vec(shared, ptr, len));
unsafe fn promotable_odd_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
let shared = data.load(Ordering::Acquire);
let kind = shared as usize & KIND_MASK;

if kind == KIND_ARC {
shallow_clone_arc(shared as _, ptr, len)
} else {
debug_assert_eq!(kind, KIND_VEC);
shallow_clone_vec(data, shared, shared as *mut u8, ptr, len)
}
}

unsafe fn rebuild_vec(shared: *const (), offset: *const u8, len: usize) -> Vec<u8> {
debug_assert!(
shared as usize & KIND_MASK == KIND_VEC,
"rebuild_vec should have beeen called with KIND_VEC",
);
debug_assert!(
shared as usize & !KIND_MASK != 0,
"rebuild_vec should be called with non-null pointer: {:p}",
shared,
);
unsafe fn promotable_odd_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) {
let shared = *data.get_mut();
let kind = shared as usize & KIND_MASK;

if kind == KIND_ARC {
release_shared(shared as *mut Shared);
} else {
debug_assert_eq!(kind, KIND_VEC);

drop(rebuild_vec(shared as *mut u8, ptr, len));
}
}

let buf = (shared as usize & !KIND_MASK) as *mut u8;
unsafe fn rebuild_vec(buf: *mut u8, offset: *const u8, len: usize) -> Vec<u8> {
let cap = (offset as usize - buf as usize) + len;
Vec::from_raw_parts(buf, cap, cap)
}

// ===== impl SharedVtable =====

struct Shared {
// holds vec for drop, but otherwise doesnt access it
_vec: Vec<u8>,
ref_cnt: AtomicUsize,
}

static SHARED_VTABLE: Vtable = Vtable {
clone: shared_clone,
drop: shared_drop,
};

const KIND_ARC: usize = 0b0;
const KIND_VEC: usize = 0b1;
const KIND_MASK: usize = 0b1;

unsafe fn shared_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
let shared = data.load(Ordering::Acquire);
shallow_clone_arc(shared as _, ptr, len)
}

unsafe fn shared_drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) {
let shared = *data.get_mut();
release_shared(shared as *mut Shared);
}

unsafe fn shallow_clone_arc(shared: *mut Shared, ptr: *const u8, len: usize) -> Bytes {
let old_size = (*shared).ref_cnt.fetch_add(1, Ordering::Relaxed);

@@ -857,21 +896,19 @@ unsafe fn shallow_clone_arc(shared: *mut Shared, ptr: *const u8, len: usize) ->
}

#[cold]
unsafe fn shallow_clone_vec(atom: &AtomicPtr<()>, ptr: *const (), offset: *const u8, len: usize) -> Bytes {
unsafe fn shallow_clone_vec(atom: &AtomicPtr<()>, ptr: *const (), buf: *mut u8, offset: *const u8, len: usize) -> Bytes {
// If the buffer is still tracked in a `Vec<u8>`. It is time to
// promote the vec to an `Arc`. This could potentially be called
// concurrently, so some care must be taken.

debug_assert_eq!(ptr as usize & KIND_MASK, KIND_VEC);

// First, allocate a new `Shared` instance containing the
// `Vec` fields. It's important to note that `ptr`, `len`,
// and `cap` cannot be mutated without having `&mut self`.
// This means that these fields will not be concurrently
// updated and since the buffer hasn't been promoted to an
// `Arc`, those three fields still are the components of the
// vector.
let vec = rebuild_vec(ptr as *const (), offset, len);
let vec = rebuild_vec(buf, offset, len);
let shared = Box::new(Shared {
_vec: vec,
// Initialize refcount to 2. One for this reference, and one
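One detail from the `src/bytes.rs` hunk above worth spelling out: the data pointer of a vec-backed `Bytes` can be advanced past the start of its allocation, so `rebuild_vec` recovers the original capacity as the distance between the two pointers plus the remaining length. The sketch below restates that arithmetic with plain integers as a sanity check; it is a worked example, not the crate's code.

```rust
// Sketch of the capacity math in rebuild_vec: if the current data pointer sits
// n bytes past the start of the allocation, the full capacity is n + len.
fn full_capacity(buf_start: usize, data_ptr: usize, remaining_len: usize) -> usize {
    (data_ptr - buf_start) + remaining_len
}

fn main() {
    // A 1024-byte buffer whose Bytes view has been advanced by 16 bytes.
    let buf_start = 0x1000;
    let data_ptr = buf_start + 16;
    assert_eq!(full_capacity(buf_start, data_ptr, 1024 - 16), 1024);
}
```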
67 changes: 67 additions & 0 deletions tests/test_bytes_odd_alloc.rs
@@ -0,0 +1,67 @@
//! Test using `Bytes` with an allocator that hands out "odd" pointers for
//! vectors (pointers where the LSB is set).

use std::alloc::{GlobalAlloc, Layout, System};
use std::ptr;

use bytes::Bytes;

#[global_allocator]
static ODD: Odd = Odd;

struct Odd;

unsafe impl GlobalAlloc for Odd {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
if layout.align() == 1 && layout.size() > 0 {
// Allocate slightly bigger so that we can offset the pointer by 1
let size = layout.size() + 1;
let new_layout = match Layout::from_size_align(size, 1) {
Ok(layout) => layout,
Err(_err) => return ptr::null_mut(),
};
let ptr = System.alloc(new_layout);
if !ptr.is_null() {
let ptr = ptr.offset(1);
ptr
} else {
ptr
}
} else {
System.alloc(layout)
}
}

unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
if layout.align() == 1 && layout.size() > 0 {
let size = layout.size() + 1;
let new_layout = match Layout::from_size_align(size, 1) {
Ok(layout) => layout,
Err(_err) => std::process::abort(),
};
System.dealloc(ptr.offset(-1), new_layout);
} else {
System.dealloc(ptr, layout);
}
}
}

#[test]
fn sanity_check_odd_allocator() {
let vec = vec![33u8; 1024];
let p = vec.as_ptr() as usize;
assert!(p & 0x1 == 0x1, "{:#b}", p);
}

#[test]
fn test_bytes_from_vec_drop() {
let vec = vec![33u8; 1024];
let _b = Bytes::from(vec);
}

#[test]
fn test_bytes_clone_drop() {
let vec = vec![33u8; 1024];
let b1 = Bytes::from(vec);
let _b2 = b1.clone();
}
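Since `#[global_allocator]` can only be registered once per binary, these checks live in a dedicated integration-test file and run as their own test target. As a further, purely hypothetical exercise of the same path (not part of this PR), slicing and cloning an odd-pointer-backed `Bytes` also forces promotion to the shared representation and has to drop cleanly:

```rust
#[test]
fn test_bytes_slice_clone_drop() {
    // Hypothetical extra test, not part of the PR: slice() clones internally,
    // which promotes the vec-backed Bytes to the shared representation.
    let vec = vec![33u8; 1024];
    let b1 = Bytes::from(vec);
    let b2 = b1.slice(16..64);
    let b3 = b2.clone();
    drop(b1);
    drop(b2);
    drop(b3);
}
```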