android: binder: Move buffer out of area shared with user space
The binder driver allocates buffer metadata in a region that is mapped
into user space. This metadata contains kernel pointers.

This patch allocates buffer metadata on the kernel heap, outside the
user-space mapping, and uses a pointer in the metadata to refer to the
mapped data.

Signed-off-by: Sherry Yang <sherryy@android.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sherry Yang authored and gregkh committed Aug 28, 2017
1 parent 4175e2b commit 74310e0
Showing 3 changed files with 90 additions and 67 deletions.
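
At its core, the patch replaces the flexible array member at the end of struct binder_buffer with a plain pointer, so the metadata can be kzalloc'd on the kernel heap while only the payload stays in the shared mapping. A minimal before/after sketch of that change (the _old/_new suffixes and the reduced field set are illustrative, not names used by the driver):

#include <stddef.h>
#include <stdint.h>

/* Before: the metadata header sits at the start of each allocation inside
 * the user-mapped region, and the payload follows it through a flexible
 * array, so the kernel pointers stored in the header are visible to
 * user space.
 */
struct binder_buffer_old {
	size_t data_size;
	size_t offsets_size;
	size_t extra_buffers_size;
	uint8_t data[0];	/* payload immediately after the metadata */
};

/* After: the header lives on the kernel heap and is never mapped; only the
 * payload remains in the shared region, reached through a pointer.
 */
struct binder_buffer_new {
	size_t data_size;
	size_t offsets_size;
	size_t extra_buffers_size;
	void *data;		/* points into the user-mapped buffer area */
};

The rest of the diff is bookkeeping that follows from this: size and address calculations switch from the struct address to buffer->data, comparisons in the allocated-buffers rb-tree use the data pointers, and the header structs are allocated with kzalloc() and released with kfree().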
144 changes: 84 additions & 60 deletions drivers/android/binder_alloc.c
@@ -62,9 +62,9 @@ static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
struct binder_buffer *buffer)
{
if (list_is_last(&buffer->entry, &alloc->buffers))
return alloc->buffer +
alloc->buffer_size - (void *)buffer->data;
return (size_t)binder_buffer_next(buffer) - (size_t)buffer->data;
return (u8 *)alloc->buffer +
alloc->buffer_size - (u8 *)buffer->data;
return (u8 *)binder_buffer_next(buffer)->data - (u8 *)buffer->data;
}

static void binder_insert_free_buffer(struct binder_alloc *alloc,
@@ -114,9 +114,9 @@ static void binder_insert_allocated_buffer_locked(
buffer = rb_entry(parent, struct binder_buffer, rb_node);
BUG_ON(buffer->free);

if (new_buffer < buffer)
if (new_buffer->data < buffer->data)
p = &parent->rb_left;
else if (new_buffer > buffer)
else if (new_buffer->data > buffer->data)
p = &parent->rb_right;
else
BUG();
@@ -131,18 +131,17 @@ static struct binder_buffer *binder_alloc_prepare_to_free_locked(
{
struct rb_node *n = alloc->allocated_buffers.rb_node;
struct binder_buffer *buffer;
struct binder_buffer *kern_ptr;
void *kern_ptr;

kern_ptr = (struct binder_buffer *)(user_ptr - alloc->user_buffer_offset
- offsetof(struct binder_buffer, data));
kern_ptr = (void *)(user_ptr - alloc->user_buffer_offset);

while (n) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
BUG_ON(buffer->free);

if (kern_ptr < buffer)
if (kern_ptr < buffer->data)
n = n->rb_left;
else if (kern_ptr > buffer)
else if (kern_ptr > buffer->data)
n = n->rb_right;
else {
/*
@@ -330,6 +329,9 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
return ERR_PTR(-ENOSPC);
}

/* Pad 0-size buffers so they get assigned unique addresses */
size = max(size, sizeof(void *));

while (n) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
BUG_ON(!buffer->free);
@@ -389,32 +391,35 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,

has_page_addr =
(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
if (n == NULL) {
if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
buffer_size = size; /* no room for other buffers */
else
buffer_size = size + sizeof(struct binder_buffer);
}
WARN_ON(n && buffer_size != size);
end_page_addr =
(void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
(void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
if (end_page_addr > has_page_addr)
end_page_addr = has_page_addr;
ret = binder_update_page_range(alloc, 1,
(void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL);
if (ret)
return ERR_PTR(ret);

rb_erase(best_fit, &alloc->free_buffers);
buffer->free = 0;
buffer->free_in_progress = 0;
binder_insert_allocated_buffer_locked(alloc, buffer);
if (buffer_size != size) {
struct binder_buffer *new_buffer = (void *)buffer->data + size;
struct binder_buffer *new_buffer;

new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
if (!new_buffer) {
pr_err("%s: %d failed to alloc new buffer struct\n",
__func__, alloc->pid);
goto err_alloc_buf_struct_failed;
}
new_buffer->data = (u8 *)buffer->data + size;
list_add(&new_buffer->entry, &buffer->entry);
new_buffer->free = 1;
binder_insert_free_buffer(alloc, new_buffer);
}

rb_erase(best_fit, &alloc->free_buffers);
buffer->free = 0;
buffer->free_in_progress = 0;
binder_insert_allocated_buffer_locked(alloc, buffer);
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: binder_alloc_buf size %zd got %pK\n",
alloc->pid, size, buffer);
@@ -429,6 +434,12 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
alloc->pid, size, alloc->free_async_space);
}
return buffer;

err_alloc_buf_struct_failed:
binder_update_page_range(alloc, 0,
(void *)PAGE_ALIGN((uintptr_t)buffer->data),
end_page_addr, NULL);
return ERR_PTR(-ENOMEM);
}

/**
@@ -463,56 +474,59 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,

static void *buffer_start_page(struct binder_buffer *buffer)
{
return (void *)((uintptr_t)buffer & PAGE_MASK);
return (void *)((uintptr_t)buffer->data & PAGE_MASK);
}

static void *buffer_end_page(struct binder_buffer *buffer)
static void *prev_buffer_end_page(struct binder_buffer *buffer)
{
return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
return (void *)(((uintptr_t)(buffer->data) - 1) & PAGE_MASK);
}

static void binder_delete_free_buffer(struct binder_alloc *alloc,
struct binder_buffer *buffer)
{
struct binder_buffer *prev, *next = NULL;
int free_page_end = 1;
int free_page_start = 1;

bool to_free = true;
BUG_ON(alloc->buffers.next == &buffer->entry);
prev = binder_buffer_prev(buffer);
BUG_ON(!prev->free);
if (buffer_end_page(prev) == buffer_start_page(buffer)) {
free_page_start = 0;
if (buffer_end_page(prev) == buffer_end_page(buffer))
free_page_end = 0;
if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) {
to_free = false;
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: merge free, buffer %pK share page with %pK\n",
alloc->pid, buffer, prev);
"%d: merge free, buffer %pK share page with %pK\n",
alloc->pid, buffer->data, prev->data);
}

if (!list_is_last(&buffer->entry, &alloc->buffers)) {
next = binder_buffer_next(buffer);
if (buffer_start_page(next) == buffer_end_page(buffer)) {
free_page_end = 0;
if (buffer_start_page(next) ==
buffer_start_page(buffer))
free_page_start = 0;
if (buffer_start_page(next) == buffer_start_page(buffer)) {
to_free = false;
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: merge free, buffer %pK share page with %pK\n",
alloc->pid, buffer, prev);
"%d: merge free, buffer %pK share page with %pK\n",
alloc->pid,
buffer->data,
next->data);
}
}
list_del(&buffer->entry);
if (free_page_start || free_page_end) {

if (PAGE_ALIGNED(buffer->data)) {
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: merge free, buffer start %pK is page aligned\n",
alloc->pid, buffer->data);
to_free = false;
}

if (to_free) {
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: merge free, buffer %pK do not share page%s%s with %pK or %pK\n",
alloc->pid, buffer, free_page_start ? "" : " end",
free_page_end ? "" : " start", prev, next);
binder_update_page_range(alloc, 0, free_page_start ?
buffer_start_page(buffer) : buffer_end_page(buffer),
(free_page_end ? buffer_end_page(buffer) :
buffer_start_page(buffer)) + PAGE_SIZE, NULL);
"%d: merge free, buffer %pK do not share page with %pK or %pK\n",
alloc->pid, buffer->data,
prev->data, next->data);
binder_update_page_range(alloc, 0, buffer_start_page(buffer),
buffer_start_page(buffer) + PAGE_SIZE,
NULL);
}
list_del(&buffer->entry);
kfree(buffer);
}

static void binder_free_buf_locked(struct binder_alloc *alloc,
@@ -533,8 +547,8 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
BUG_ON(buffer->free);
BUG_ON(size > buffer_size);
BUG_ON(buffer->transaction != NULL);
BUG_ON((void *)buffer < alloc->buffer);
BUG_ON((void *)buffer > alloc->buffer + alloc->buffer_size);
BUG_ON(buffer->data < alloc->buffer);
BUG_ON(buffer->data > alloc->buffer + alloc->buffer_size);

if (buffer->async_transaction) {
alloc->free_async_space += size + sizeof(struct binder_buffer);
@@ -646,13 +660,14 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
}
alloc->buffer_size = vma->vm_end - vma->vm_start;

if (binder_update_page_range(alloc, 1, alloc->buffer,
alloc->buffer + PAGE_SIZE, vma)) {
buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
if (!buffer) {
ret = -ENOMEM;
failure_string = "alloc small buf";
goto err_alloc_small_buf_failed;
failure_string = "alloc buffer struct";
goto err_alloc_buf_struct_failed;
}
buffer = alloc->buffer;

buffer->data = alloc->buffer;
INIT_LIST_HEAD(&alloc->buffers);
list_add(&buffer->entry, &alloc->buffers);
buffer->free = 1;
@@ -664,7 +679,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,

return 0;

err_alloc_small_buf_failed:
err_alloc_buf_struct_failed:
kfree(alloc->pages);
alloc->pages = NULL;
err_alloc_pages_failed:
@@ -684,14 +699,13 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
{
struct rb_node *n;
int buffers, page_count;
struct binder_buffer *buffer;

BUG_ON(alloc->vma);

buffers = 0;
mutex_lock(&alloc->mutex);
while ((n = rb_first(&alloc->allocated_buffers))) {
struct binder_buffer *buffer;

buffer = rb_entry(n, struct binder_buffer, rb_node);

/* Transaction should already have been freed */
@@ -701,6 +715,16 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
buffers++;
}

while (!list_empty(&alloc->buffers)) {
buffer = list_first_entry(&alloc->buffers,
struct binder_buffer, entry);
WARN_ON(!buffer->free);

list_del(&buffer->entry);
WARN_ON_ONCE(!list_empty(&alloc->buffers));
kfree(buffer);
}

page_count = 0;
if (alloc->pages) {
int i;
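One consequence visible in the binder_alloc_prepare_to_free_locked() hunk above: a buffer address coming back from user space now translates directly to buffer->data instead of to a struct binder_buffer header inside the mapping, so the offsetof() adjustment disappears. A small sketch of the new translation, written as a hypothetical helper (the driver open-codes this; the helper name is not part of the driver, and struct binder_alloc is assumed from binder_alloc.h):

/* Translate a user-space buffer address into the kernel-side data pointer
 * that is compared against buffer->data in the allocated-buffers rb-tree.
 */
static void *binder_user_ptr_to_data(struct binder_alloc *alloc,
				     uintptr_t user_ptr)
{
	/*
	 * Before this patch, offsetof(struct binder_buffer, data) was also
	 * subtracted here, because the metadata header preceded the payload
	 * in the shared mapping and the rb-tree was keyed on the headers.
	 */
	return (void *)(user_ptr - alloc->user_buffer_offset);
}
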
2 changes: 1 addition & 1 deletion drivers/android/binder_alloc.h
@@ -57,7 +57,7 @@ struct binder_buffer {
size_t data_size;
size_t offsets_size;
size_t extra_buffers_size;
uint8_t data[0];
void *data;
};

/**
11 changes: 5 additions & 6 deletions drivers/android/binder_alloc_selftest.c
@@ -105,8 +105,9 @@ static bool check_buffer_pages_allocated(struct binder_alloc *alloc,
void *page_addr, *end;
int page_index;

end = (void *)PAGE_ALIGN((uintptr_t)buffer + size);
for (page_addr = buffer; page_addr < end; page_addr += PAGE_SIZE) {
end = (void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
page_addr = buffer->data;
for (; page_addr < end; page_addr += PAGE_SIZE) {
page_index = (page_addr - alloc->buffer) / PAGE_SIZE;
if (!alloc->pages[page_index]) {
pr_err("incorrect alloc state at page index %d\n",
@@ -209,8 +210,7 @@ static void binder_selftest_alloc_size(struct binder_alloc *alloc,
* Only BUFFER_NUM - 1 buffer sizes are adjustable since
* we need one giant buffer before getting to the last page.
*/
back_sizes[0] += alloc->buffer_size - end_offset[BUFFER_NUM - 1]
- sizeof(struct binder_buffer) * BUFFER_NUM;
back_sizes[0] += alloc->buffer_size - end_offset[BUFFER_NUM - 1];
binder_selftest_free_seq(alloc, front_sizes, seq, 0);
binder_selftest_free_seq(alloc, back_sizes, seq, 0);
}
@@ -228,8 +228,7 @@ static void binder_selftest_alloc_offset(struct binder_alloc *alloc,
prev = index == 0 ? 0 : end_offset[index - 1];
end = prev;

BUILD_BUG_ON((BUFFER_MIN_SIZE + sizeof(struct binder_buffer))
* BUFFER_NUM >= PAGE_SIZE);
BUILD_BUG_ON(BUFFER_MIN_SIZE * BUFFER_NUM >= PAGE_SIZE);

for (align = SAME_PAGE_UNALIGNED; align < LOOP_END; align++) {
if (align % 2)
