android: Checkout binder to 4.14.190

* To get rid of Samsung's modifications

Change-Id: I2d39974fee21b5178c17e161e6d9a49dd75ded33
Signed-off-by: Ruchit <risen@pixelexperience.org>
Authored by Tim Zimmermann, 4 months ago; committed by Jenna-they-them
parent 27d17652f1
commit 8e9d7f1e19
5 changed files:

  drivers/android/binder.c              | 412
  drivers/android/binder_alloc.c        |  41
  drivers/android/binderfs.c            |   3
  include/uapi/linux/android/binder.h   |   9
  include/uapi/linux/android/binderfs.h |   5

diff --git a/drivers/android/binder.c b/drivers/android/binder.c
@@ -63,7 +63,6 @@
 #include <linux/nsproxy.h>
 #include <linux/poll.h>
 #include <linux/debugfs.h>
-#include <linux/delay.h>
 #include <linux/rbtree.h>
 #include <linux/sched/signal.h>
 #include <linux/sched/mm.h>
@@ -78,11 +77,6 @@
 #include "binder_alloc.h"
 #include "binder_internal.h"
 #include "binder_trace.h"
-#ifdef CONFIG_SAMSUNG_FREECESS
-#include <linux/freecess.h>
-#endif
-
-int system_server_pid = 0;
 
 static HLIST_HEAD(binder_deferred_list);
 static DEFINE_MUTEX(binder_deferred_lock);
@@ -251,7 +245,7 @@ static struct binder_transaction_log_entry *binder_transaction_log_add(
 struct binder_work {
         struct list_head entry;
 
-        enum binder_work_type {
+        enum {
                 BINDER_WORK_TRANSACTION = 1,
                 BINDER_WORK_TRANSACTION_COMPLETE,
                 BINDER_WORK_RETURN_ERROR,
@@ -323,7 +317,6 @@ struct binder_error {
  * @min_priority:         minimum scheduling priority
  *                        (invariant after initialized)
  * @inherit_rt:           inherit RT scheduling policy from caller
- *                        (invariant after initialized)
  * @txn_security_ctx:     require sender's security context
  *                        (invariant after initialized)
  * @async_todo:           list of async work items
@@ -474,9 +467,6 @@ struct binder_priority {
  * @files                 files_struct for process
  *                        (protected by @files_lock)
  * @files_lock            mutex to protect @files
- * @cred                  struct cred associated with the `struct file`
- *                        in binder_open()
- *                        (invariant after initialized)
 * @deferred_work_node:   element for binder_deferred_list
 *                        (protected by binder_deferred_lock)
 * @deferred_work:        bitmap of deferred work to perform
@@ -499,8 +489,7 @@ struct binder_priority {
 * @requested_threads_started: number binder threads started
 *                        (protected by @inner_lock)
 * @tmp_ref:              temporary reference to indicate proc is in use
- *                        (atomic since @proc->inner_lock cannot
- *                        always be acquired)
+ *                        (protected by @inner_lock)
 * @default_priority:     default scheduler priority
 *                        (invariant after initialized)
 * @debugfs_entry:        debugfs node
@@ -525,7 +514,6 @@ struct binder_proc {
         struct task_struct *tsk;
         struct files_struct *files;
         struct mutex files_lock;
-        const struct cred *cred;
         struct hlist_node deferred_work_node;
         int deferred_work;
         bool is_dead;
@@ -536,7 +524,7 @@ struct binder_proc {
         int max_threads;
         int requested_threads;
         int requested_threads_started;
-        atomic_t tmp_ref;
+        int tmp_ref;
         struct binder_priority default_priority;
         struct dentry *debugfs_entry;
         struct binder_alloc alloc;
@@ -915,6 +903,27 @@ static struct binder_work *binder_dequeue_work_head_ilocked(
         return w;
 }
 
+/**
+ * binder_dequeue_work_head() - Dequeues the item at head of list
+ * @proc:         binder_proc associated with list
+ * @list:         list to dequeue head
+ *
+ * Removes the head of the list if there are items on the list
+ *
+ * Return: pointer dequeued binder_work, NULL if list was empty
+ */
+static struct binder_work *binder_dequeue_work_head(
+                                        struct binder_proc *proc,
+                                        struct list_head *list)
+{
+        struct binder_work *w;
+
+        binder_inner_proc_lock(proc);
+        w = binder_dequeue_work_head_ilocked(list);
+        binder_inner_proc_unlock(proc);
+        return w;
+}
+
 static void
 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
 static void binder_free_thread(struct binder_thread *thread);
@@ -1337,7 +1346,6 @@ static struct binder_node *binder_init_node_ilocked(
                 FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT;
         node->min_priority = to_kernel_prio(node->sched_policy, priority);
         node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
-        node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
         node->inherit_rt = !!(flags & FLAT_BINDER_FLAG_INHERIT_RT);
         node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
         spin_lock_init(&node->lock);
@@ -1962,18 +1970,6 @@ static int binder_inc_ref_for_node(struct binder_proc *proc,
         }
         ret = binder_inc_ref_olocked(ref, strong, target_list);
         *rdata = ref->data;
-        if (ret && ref == new_ref) {
-                /*
-                 * Cleanup the failed reference here as the target
-                 * could now be dead and have already released its
-                 * references by now. Calling on the new reference
-                 * with strong=0 and a tmp_refs will not decrement
-                 * the node. The new_ref gets kfree'd below.
-                 */
-                binder_cleanup_ref_olocked(new_ref);
-                ref = NULL;
-        }
-
         binder_proc_unlock(proc);
         if (new_ref && ref != new_ref)
                 /*
@@ -2039,9 +2035,9 @@ static void binder_thread_dec_tmpref(struct binder_thread *thread)
 static void binder_proc_dec_tmpref(struct binder_proc *proc)
 {
         binder_inner_proc_lock(proc);
-        atomic_dec(&proc->tmp_ref);
+        proc->tmp_ref--;
         if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
-                        !atomic_read(&proc->tmp_ref)) {
+                        !proc->tmp_ref) {
                 binder_inner_proc_unlock(proc);
                 binder_free_proc(proc);
                 return;
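
Editor's note: the tmp_ref churn in this and the following hunks reverts a later stable change that made the temporary proc reference atomic; in 4.14.190 it is a plain int that may only be touched under the proc's inner lock. A minimal userspace sketch of the same temporary-reference pattern, assuming a pthread mutex in place of binder's inner lock (names are illustrative, not driver code):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdlib.h>

    /* Hypothetical stand-in for binder_proc: a temporary refcount that
     * is only read or written with the lock held, as in the reverted code. */
    struct proc {
            pthread_mutex_t inner_lock;
            int tmp_ref;            /* plain int: every access is locked */
            bool is_dead;
    };

    static void proc_get_tmpref(struct proc *p)
    {
            pthread_mutex_lock(&p->inner_lock);
            p->tmp_ref++;
            pthread_mutex_unlock(&p->inner_lock);
    }

    static void proc_put_tmpref(struct proc *p)
    {
            pthread_mutex_lock(&p->inner_lock);
            p->tmp_ref--;
            /* Free only once the proc is dead and the last user is gone. */
            if (p->is_dead && !p->tmp_ref) {
                    pthread_mutex_unlock(&p->inner_lock);
                    free(p);
                    return;
            }
            pthread_mutex_unlock(&p->inner_lock);
    }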
@@ -2103,26 +2099,18 @@ static struct binder_thread *binder_get_txn_from_and_acq_inner(
 static void binder_free_transaction(struct binder_transaction *t)
 {
-        struct binder_proc *target_proc;
+        struct binder_proc *target_proc = t->to_proc;
 
-        spin_lock(&t->lock);
-        target_proc = t->to_proc;
         if (target_proc) {
-                atomic_inc(&target_proc->tmp_ref);
-                spin_unlock(&t->lock);
                 binder_inner_proc_lock(target_proc);
                 if (t->buffer)
                         t->buffer->transaction = NULL;
                 binder_inner_proc_unlock(target_proc);
-                binder_proc_dec_tmpref(target_proc);
-        } else {
-                /*
-                 * If the transaction has no target_proc, then
-                 * t->buffer->transaction * has already been cleared.
-                 */
-                spin_unlock(&t->lock);
         }
+        /*
+         * If the transaction has no target_proc, then
+         * t->buffer->transaction has already been cleared.
+         */
         kfree(t);
         binder_stats_deleted(BINDER_STAT_TRANSACTION);
 }
@@ -2570,7 +2558,7 @@ static int binder_translate_binder(struct flat_binder_object *fp,
                 ret = -EINVAL;
                 goto done;
         }
-        if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
+        if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
                 ret = -EPERM;
                 goto done;
         }
@@ -2616,7 +2604,7 @@ static int binder_translate_handle(struct flat_binder_object *fp,
                                   proc->pid, thread->pid, fp->handle);
                 return -EINVAL;
         }
-        if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
+        if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
                 ret = -EPERM;
                 goto done;
         }
@@ -2700,7 +2688,7 @@ static int binder_translate_fd(int fd,
                 ret = -EBADF;
                 goto err_fget;
         }
-        ret = security_binder_transfer_file(proc->cred, target_proc->cred, file);
+        ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
         if (ret < 0) {
                 ret = -EPERM;
                 goto err_security;
@@ -2799,48 +2787,6 @@ err_translate_fd_failed:
         return target_fd;
 }
 
-//[SAnP
-static void print_binder_proc_inner(struct binder_proc *proc)
-{
-        struct rb_node *pn;
-        struct binder_thread *p_thread;
-        struct binder_transaction *t;
-        struct binder_buffer *buffer;
-        uint32_t cnt = 1;
-
-        binder_inner_proc_lock(proc);
-        for (pn = rb_first(&proc->threads); pn != NULL; pn = rb_next(pn)) {
-                p_thread = rb_entry(pn, struct binder_thread, rb_node);
-                t = p_thread->transaction_stack;
-                if (t) {
-                        spin_lock(&t->lock);
-                        if (t->from != p_thread && t->to_thread == p_thread) { //incoming transaction
-                                buffer = t->buffer;
-                                if (buffer != NULL) {
-                                        pr_info("[%d] from %d:%d to %d:%d size %zd:%zd\n",
-                                                cnt, t->from ? t->from->proc->pid : 0,
-                                                t->from ? t->from->pid : 0,
-                                                t->to_proc ? t->to_proc->pid : 0,
-                                                t->to_thread ? t->to_thread->pid : 0,
-                                                buffer->data_size, buffer->offsets_size);
-                                } else {
-                                        pr_info("[%d] from %d:%d to %d:%d\n",
-                                                cnt, t->from ? t->from->proc->pid : 0,
-                                                t->from ? t->from->pid : 0,
-                                                t->to_proc ? t->to_proc->pid : 0,
-                                                t->to_thread ? t->to_thread->pid : 0);
-                                }
-                                cnt++;
-                        }
-                        spin_unlock(&t->lock);
-                }
-        }
-        binder_inner_proc_unlock(proc);
-}
-//SAnP]
-
 static int binder_fixup_parent(struct binder_transaction *t,
                                struct binder_thread *thread,
                                struct binder_buffer_object *bp,
@@ -2996,7 +2942,7 @@ static struct binder_node *binder_get_node_refs_for_txn(
                 target_node = node;
                 binder_inc_node_nilocked(node, 1, 0, NULL);
                 binder_inc_node_tmpref_ilocked(node);
-                atomic_inc(&node->proc->tmp_ref);
+                node->proc->tmp_ref++;
                 *procp = node->proc;
         } else
                 *error = BR_DEAD_REPLY;
@@ -3005,76 +2951,6 @@ static struct binder_node *binder_get_node_refs_for_txn(
         return target_node;
 }
 
-#ifdef CONFIG_SAMSUNG_FREECESS
-// 1) Skip first 8(P)/12(Q) bytes (useless data)
-// 2) Make sure that the invalid address issue is not occuring (j=9, j+=2)
-// 3) Java layer uses 2 bytes char. And only the first byte has the data. (p+=2)
-// 4) Parcel::writeInterfaceToken() in frameworks/native/libs/binder/Parcel.cpp
-static void freecess_async_binder_report(struct binder_proc *proc,
-                                        struct binder_proc *target_proc,
-                                        struct binder_transaction_data *tr,
-                                        struct binder_transaction *t)
-{
-        char buf_user[INTERFACETOKEN_BUFF_SIZE] = {0};
-        char buf[INTERFACETOKEN_BUFF_SIZE] = {0};
-        char *p = NULL;
-        int i = 0;
-        int j = 0;
-        int skip_bytes = 8;
-
-        if (!proc || !target_proc || !tr || !t)
-                return;
-
-        // for android P/Q/R verson, skip 8/12/16 bytes;
-        if (freecess_fw_version == 0)
-                skip_bytes = 8;
-        else if (freecess_fw_version == 1)
-                skip_bytes = 12;
-        else if (freecess_fw_version == 2)
-                skip_bytes = 16;
-
-        if ((tr->flags & TF_ONE_WAY) && target_proc
-                && target_proc->tsk && target_proc->tsk->cred
-                && (target_proc->tsk->cred->euid.val > 10000)
-                && (proc->pid != target_proc->pid)) {
-                if (thread_group_is_frozen(target_proc->tsk)) {
-                        if (t->buffer->data_size > skip_bytes) {
-                                if (0 == copy_from_user(buf_user, (const void __user *)(uintptr_t)tr->data.ptr.buffer,
-                                        min_t(binder_size_t, tr->data_size, INTERFACETOKEN_BUFF_SIZE - 2))) {
-                                        p = &buf_user[skip_bytes];
-                                        i = 0;
-                                        j = skip_bytes + 1;
-                                        while (i < INTERFACETOKEN_BUFF_SIZE && j < t->buffer->data_size && *p != '\0') {
-                                                buf[i++] = *p;
-                                                j += 2;
-                                                p += 2;
-                                        }
-                                        if (i == INTERFACETOKEN_BUFF_SIZE) buf[i-1] = '\0';
-                                }
-                                binder_report(target_proc->tsk, tr->code, buf, tr->flags & TF_ONE_WAY);
-                        }
-                }
-        }
-}
-
-static void freecess_sync_binder_report(struct binder_proc *proc,
-                                        struct binder_proc *target_proc,
-                                        struct binder_transaction_data *tr)
-{
-        if (!proc || !target_proc || !tr)
-                return;
-
-        if ((!(tr->flags & TF_ONE_WAY)) && target_proc
-                && target_proc->tsk && target_proc->tsk->cred
-                && (target_proc->tsk->cred->euid.val > 10000)
-                && (proc->pid != target_proc->pid)
-                && thread_group_is_frozen(target_proc->tsk)) {
-                //if sync binder, we don't need detecting info, so set code and interfacename as default value.
-                binder_report(target_proc->tsk, 0, "sync_binder", tr->flags & TF_ONE_WAY);
-        }
-}
-#endif
-
 static void binder_transaction(struct binder_proc *proc,
                                struct binder_thread *thread,
                                struct binder_transaction_data *tr, int reply,
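
Editor's note on the loop just removed: Parcel::writeInterfaceToken() stores the interface name as UTF-16, so only every second byte carries ASCII data, after a firmware-dependent header of 8/12/16 bytes. A hedged userspace illustration of the same extraction (buffer contents and sizes are invented for the example):

    #include <stdio.h>

    #define TOKEN_BUF_SIZE 64   /* stand-in for INTERFACETOKEN_BUFF_SIZE */

    int main(void)
    {
            /* Fake Parcel data: 8 header bytes, then "IFoo" as UTF-16LE. */
            const char raw[] = {
                    0, 0, 0, 0, 0, 0, 0, 0,
                    'I', 0, 'F', 0, 'o', 0, 'o', 0, 0, 0,
            };
            int data_size = sizeof(raw);
            int skip_bytes = 8;             /* 8 (P), 12 (Q) or 16 (R) */
            char buf[TOKEN_BUF_SIZE] = { 0 };
            const char *p = &raw[skip_bytes];
            int i = 0, j = skip_bytes + 1;

            /* Same stride-2 walk as the removed kernel loop. */
            while (i < TOKEN_BUF_SIZE - 1 && j < data_size && *p != '\0') {
                    buf[i++] = *p;
                    j += 2;
                    p += 2;
            }
            printf("interface token: %s\n", buf);   /* prints "IFoo" */
            return 0;
    }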
@@ -3163,7 +3039,7 @@ static void binder_transaction(struct binder_proc *proc,
                         goto err_dead_binder;
                 }
                 target_proc = target_thread->proc;
-                atomic_inc(&target_proc->tmp_ref);
+                target_proc->tmp_ref++;
                 binder_inner_proc_unlock(target_thread->proc);
         } else {
                 if (tr->target.handle) {
@@ -3217,13 +3093,8 @@ static void binder_transaction(struct binder_proc *proc,
                         goto err_dead_binder;
                 }
                 e->to_node = target_node->debug_id;
-#ifdef CONFIG_SAMSUNG_FREECESS
-                freecess_sync_binder_report(proc, target_proc, tr);
-#endif
-                if (security_binder_transaction(proc->cred,
-                                                target_proc->cred) < 0) {
+                if (security_binder_transaction(proc->tsk,
+                                                target_proc->tsk) < 0) {
                         return_error = BR_FAILED_REPLY;
                         return_error_param = -EPERM;
                         return_error_line = __LINE__;
@@ -3332,26 +3203,9 @@
         if (target_node && target_node->txn_security_ctx) {
                 u32 secid;
                 size_t added_size;
-                int retries = 0;
-                int max_retries = 100;
 
                 security_task_getsecid(proc->tsk, &secid);
-retry_lowmem:
                 ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
-                if (ret == -ENOMEM && retries++ < max_retries) {
-                        /*
-                         * security_secid_to_secctx() can fail
-                         * because of a GFP_ATOMIC allocation in
-                         * which case -ENOMEM is returned. This needs
-                         * to be retried, but there is currently no
-                         * way to tell userspace to retry so we do
-                         * it here. Sleep briefly to allow the low
-                         * memory condition to resolve.
-                         */
-                        udelay(100);
-                        goto retry_lowmem;
-                }
                 if (ret) {
                         return_error = BR_FAILED_REPLY;
                         return_error_param = ret;
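
Editor's note: the removed retry_lowmem loop worked around security_secid_to_secctx() failing with -ENOMEM under memory pressure (its SELinux implementation can allocate with GFP_ATOMIC). A hedged userspace analog of that bounded retry-with-pause pattern, using hypothetical names:

    #include <errno.h>
    #include <unistd.h>

    /* call_with_retry() is illustrative, not a kernel API: retry a
     * callback that can fail transiently with -ENOMEM, pausing briefly
     * between attempts. */
    static int call_with_retry(int (*fn)(void *), void *arg)
    {
            int ret, retries = 0;

            do {
                    ret = fn(arg);
                    if (ret != -ENOMEM)
                            return ret;
                    usleep(100);    /* let the low-memory condition resolve */
            } while (++retries < 100);
            return ret;             /* still -ENOMEM after 100 attempts */
    }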
@@ -3383,13 +3237,6 @@ retry_lowmem:
                                 BR_DEAD_REPLY : BR_FAILED_REPLY;
                 return_error_line = __LINE__;
                 t->buffer = NULL;
-                //[SAnP
-                if (return_error_param == -ENOSPC) {
-                        mutex_lock(&binder_procs_lock);
-                        print_binder_proc_inner(target_proc);
-                        mutex_unlock(&binder_procs_lock);
-                }
-                //SAnP]
                 goto err_binder_alloc_buf_failed;
         }
         if (secctx) {
@@ -3454,11 +3301,6 @@ retry_lowmem:
                         return_error_line = __LINE__;
                         goto err_bad_offset;
                 }
-
-#ifdef CONFIG_SAMSUNG_FREECESS
-                freecess_async_binder_report(proc, target_proc, tr, t);
-#endif
-
                 off_start_offset = ALIGN(tr->data_size, sizeof(void *));
                 buffer_offset = off_start_offset;
                 off_end_offset = off_start_offset + tr->offsets_size;
@@ -3550,7 +3392,7 @@ retry_lowmem:
                         binder_size_t parent_offset;
                         struct binder_fd_array_object *fda =
                                 to_binder_fd_array_object(hdr);
-                        size_t num_valid = (buffer_offset - off_start_offset) /
+                        size_t num_valid = (buffer_offset - off_start_offset) /
                                                 sizeof(binder_size_t);
                         struct binder_buffer_object *parent =
                                 binder_validate_ptr(target_proc, t->buffer,
@@ -4615,6 +4457,7 @@ retry:
                 trd->data.ptr.offsets = trd->data.ptr.buffer +
                                         ALIGN(t->buffer->data_size,
                                             sizeof(void *));
+                tr.secctx = t->security_ctx;
 
                 if (t->security_ctx) {
                         cmd = BR_TRANSACTION_SEC_CTX;
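
Editor's note: with 4.14.190's security-context delivery, a looper that registered via BINDER_SET_CONTEXT_MGR_EXT receives BR_TRANSACTION_SEC_CTX instead of BR_TRANSACTION, and tr.secctx points at a NUL-terminated context string. A hedged userspace sketch of consuming one such command, assuming this tree's UAPI header is installed (full read-buffer parsing is trimmed):

    #include <stdint.h>
    #include <stdio.h>
    #include <linux/android/binder.h>

    /* 'ptr' must point at a command the driver just wrote into the
     * BINDER_WRITE_READ read buffer. */
    static void handle_one_cmd(const char *ptr)
    {
            uint32_t cmd = *(const uint32_t *)ptr;
            ptr += sizeof(uint32_t);

            if (cmd == BR_TRANSACTION_SEC_CTX) {
                    const struct binder_transaction_data_secctx *tsc =
                            (const void *)ptr;

                    /* secctx carries a pointer (as binder_uintptr_t) to a
                     * string such as "u:r:untrusted_app:s0". */
                    printf("sender context: %s\n",
                           (const char *)(uintptr_t)tsc->secctx);
            }
    }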
@@ -4697,17 +4540,13 @@ static void binder_release_work(struct binder_proc *proc,
                                 struct list_head *list)
 {
         struct binder_work *w;
-        enum binder_work_type wtype;
 
         while (1) {
-                binder_inner_proc_lock(proc);
-                w = binder_dequeue_work_head_ilocked(list);
-                wtype = w ? w->type : 0;
-                binder_inner_proc_unlock(proc);
+                w = binder_dequeue_work_head(proc, list);
                 if (!w)
                         return;
 
-                switch (wtype) {
+                switch (w->type) {
                 case BINDER_WORK_TRANSACTION: {
                         struct binder_transaction *t;
@@ -4741,11 +4580,9 @@ static void binder_release_work(struct binder_proc *proc,
                         kfree(death);
                         binder_stats_deleted(BINDER_STAT_DEATH);
                 } break;
-                case BINDER_WORK_NODE:
-                        break;
                 default:
                         pr_err("unexpected work type, %d, not freed\n",
-                               wtype);
+                               w->type);
                         break;
                 }
         }
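
Editor's note on what these two hunks revert: the later stable fix (known upstream as "binder: fix UAF when releasing todo list") cached w->type while the inner lock was still held, because the work item can be freed by another thread the moment the lock drops. A hedged pthread sketch of the two patterns, with illustrative types rather than driver code:

    #include <pthread.h>
    #include <stddef.h>

    struct work {
            struct work *next;
            int type;
    };

    /* Pattern this commit reinstates: dequeue under the lock, then read
     * w->type after unlocking. If another thread can free 'w' once the
     * lock is dropped, this read is a potential use-after-free. */
    static int pop_then_read(pthread_mutex_t *lock, struct work **head)
    {
            struct work *w;

            pthread_mutex_lock(lock);
            w = *head;
            if (w)
                    *head = w->next;
            pthread_mutex_unlock(lock);
            return w ? w->type : 0;         /* racy read */
    }

    /* Pattern of the reverted fix: cache the type while still locked. */
    static int pop_cache_type(pthread_mutex_t *lock, struct work **head)
    {
            struct work *w;
            int type = 0;

            pthread_mutex_lock(lock);
            w = *head;
            if (w) {
                    *head = w->next;
                    type = w->type;         /* safe: lock still held */
            }
            pthread_mutex_unlock(lock);
            return type;
    }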
@@ -4826,7 +4663,6 @@ static void binder_free_proc(struct binder_proc *proc)
         }
         binder_alloc_deferred_release(&proc->alloc);
         put_task_struct(proc->tsk);
-        put_cred(proc->cred);
         binder_stats_deleted(BINDER_STAT_PROC);
         kfree(proc);
 }
@@ -4855,7 +4691,7 @@ static int binder_thread_release(struct binder_proc *proc,
          * The corresponding dec is when we actually
          * free the thread in binder_free_thread()
          */
-        atomic_inc(&proc->tmp_ref);
+        proc->tmp_ref++;
         /*
          * take a ref on this thread to ensure it
          * survives while we are releasing it
@@ -4898,20 +4734,23 @@ static int binder_thread_release(struct binder_proc *proc,
         }
 
         /*
-         * If this thread used poll, make sure we remove the waitqueue from any
-         * poll data structures holding it.
+         * If this thread used poll, make sure we remove the waitqueue
+         * from any epoll data structures holding it with POLLFREE.
+         * waitqueue_active() is safe to use here because we're holding
+         * the inner lock.
          */
-        if (thread->looper & BINDER_LOOPER_STATE_POLL)
-                wake_up_pollfree(&thread->wait);
+        if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
+            waitqueue_active(&thread->wait)) {
+                wake_up_poll(&thread->wait, POLLHUP | POLLFREE);
+        }
 
         binder_inner_proc_unlock(thread->proc);
 
         /*
-         * This is needed to avoid races between wake_up_pollfree() above and
-         * someone else removing the last entry from the queue for other reasons
-         * (e.g. ep_remove_wait_queue() being called due to an epoll file
-         * descriptor being closed). Such other users hold an RCU read lock, so
-         * we can be sure they're done after we call synchronize_rcu().
+         * This is needed to avoid races between wake_up_poll() above and
+         * and ep_remove_waitqueue() called for other reasons (eg the epoll file
+         * descriptor being closed); ep_remove_waitqueue() holds an RCU read
+         * lock, so we can be sure it's done after calling synchronize_rcu().
          */
         if (thread->looper & BINDER_LOOPER_STATE_POLL)
                 synchronize_rcu();
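
Editor's note: wake_up_pollfree(), removed here, was the helper introduced by the fix for the binder/epoll waitqueue use-after-free; 4.14.190 instead open-codes wake_up_poll(POLLHUP | POLLFREE) behind a waitqueue_active() check. The path is exercised from userspace by polling a binder fd and then tearing down the binder thread; a hedged sketch of that sequence (error handling omitted, device path may differ, e.g. under binderfs):

    #include <fcntl.h>
    #include <sys/epoll.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/android/binder.h>

    int main(void)
    {
            int bfd = open("/dev/binder", O_RDWR | O_CLOEXEC);
            int epfd = epoll_create1(EPOLL_CLOEXEC);
            struct epoll_event ev = { .events = EPOLLIN };

            /* Registering the binder fd parks a waitqueue entry on the
             * binder thread's ->wait. */
            epoll_ctl(epfd, EPOLL_CTL_ADD, bfd, &ev);

            /* Tear down the binder thread while epoll still references
             * its waitqueue; the driver must flush the entry (POLLFREE
             * above) before freeing the thread. */
            ioctl(bfd, BINDER_THREAD_EXIT, 0);

            close(epfd);
            close(bfd);
            return 0;
    }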
@@ -5029,7 +4868,7 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp,
                 ret = -EBUSY;
                 goto out;
         }
-        ret = security_binder_set_context_mgr(proc->cred);
+        ret = security_binder_set_context_mgr(proc->tsk);
         if (ret < 0)
                 goto out;
         if (uid_valid(context->binder_context_mgr_uid)) {
@@ -5241,14 +5080,6 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
                 }
                 break;
         }
-        case BINDER_SET_SYSTEM_SERVER_PID: {
-                if (copy_from_user(&system_server_pid, ubuf,
-                                   sizeof(system_server_pid))) {
-                        ret = -EINVAL;
-                        goto err;
-                }
-                break;
-        }
         default:
                 ret = -EINVAL;
                 goto err;
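
Editor's note: the Samsung-only ioctl removed here (defined further down in this diff as BINDER_SET_SYSTEM_SERVER_PID, _IOW('b', 14, __u32)) let userspace tell the driver which PID to single out in the binder_alloc logging. A hedged sketch of how a caller would have used it; since the constant no longer exists after this commit, it is spelled out by hand:

    #include <fcntl.h>
    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/ioctl.h>
    #include <unistd.h>

    /* Removed by this commit; reproduced only for illustration. */
    #define BINDER_SET_SYSTEM_SERVER_PID _IOW('b', 14, uint32_t)

    int main(void)
    {
            int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
            uint32_t pid = getpid();   /* hypothetically, system_server's PID */

            if (fd >= 0)
                    ioctl(fd, BINDER_SET_SYSTEM_SERVER_PID, &pid);
            close(fd);
            return 0;
    }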
@@ -5358,11 +5189,9 @@ static int binder_open(struct inode *nodp, struct file *filp)
                 return -ENOMEM;
         spin_lock_init(&proc->inner_lock);
         spin_lock_init(&proc->outer_lock);
-        atomic_set(&proc->tmp_ref, 0);
         get_task_struct(current->group_leader);
         proc->tsk = current->group_leader;
         mutex_init(&proc->files_lock);
-        proc->cred = get_cred(filp->f_cred);
         INIT_LIST_HEAD(&proc->todo);
         if (binder_supported_policy(current->policy)) {
                 proc->default_priority.sched_policy = current->policy;
@@ -5582,7 +5411,7 @@ static void binder_deferred_release(struct binder_proc *proc)
          * Make sure proc stays alive after we
          * remove all the threads
          */
-        atomic_inc(&proc->tmp_ref);
+        proc->tmp_ref++;
 
         proc->is_dead = true;
         threads = 0;
@@ -5932,131 +5761,6 @@ static void print_binder_proc(struct seq_file *m,
                 m->count = start_pos;
 }
 
-#ifdef CONFIG_SAMSUNG_FREECESS
-static void binder_in_transaction(struct binder_proc *proc, int uid)
-{
-        struct rb_node *n = NULL;
-        struct binder_thread *thread = NULL;
-        struct binder_transaction *t = NULL;
-        struct binder_work *w = NULL;
-        bool empty = true;
-        bool found = false;
-
-        //check binder threads todo and transcation_stack list
-        binder_inner_proc_lock(proc);
-        for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
-                thread = rb_entry(n, struct binder_thread, rb_node);
-                empty = binder_worklist_empty_ilocked(&thread->todo);
-                if (!empty) {
-                        list_for_each_entry(w, &thread->todo, entry) {
-                                if (w->type == BINDER_WORK_TRANSACTION) {
-                                        t = container_of(w, struct binder_transaction, work);
-                                        if (!(t->flags & TF_ONE_WAY)) {
-                                                found = true;
-                                                break;
-                                        }
-                                }
-                                else if (w->type != BINDER_WORK_TRANSACTION_COMPLETE && w->type != BINDER_WORK_NODE) {
-                                        found = true;
-                                        break;
-                                }
-                        }
-                        if (found == true) {
-                                binder_inner_proc_unlock(proc);
-                                cfb_report(uid, "thread");
-                                return;
-                        }
-                }
-
-                //processing one binder call
-                t = thread->transaction_stack;
-                if (t) {
-                        if (t->to_thread == thread) {
-                                binder_inner_proc_unlock(proc);
-                                cfb_report(uid, "transaction_stack");
-                                return;
-                        }
-                }
-        }
-
-        //check binder proc todo list
-#ifdef CONFIG_FAST_TRACK
-        empty = binder_proc_worklist_empty_ilocked(proc);
-        if (!empty) {
-                list_for_each_entry(w, &proc->todo, entry) {
-                        if (w->type == BINDER_WORK_TRANSACTION) {
-                                t = container_of(w, struct binder_transaction, work);
-                                if (!(t->flags & TF_ONE_WAY)) {
-                                        found = true;
-                                        break;
-                                }
-                        }
-                        else if (w->type != BINDER_WORK_TRANSACTION_COMPLETE && w->type != BINDER_WORK_NODE) {
-                                found = true;
-                                break;
-                        }
-                }
-                list_for_each_entry(w, &proc->fg_todo, entry) {
-                        if (w->type == BINDER_WORK_TRANSACTION) {
-                                t = container_of(w, struct binder_transaction, work);
-                                if (!(t->flags & TF_ONE_WAY)) {
-                                        found = true;
-                                        break;
-                                }
-                        }
-                        else if (w->type != BINDER_WORK_TRANSACTION_COMPLETE && w->type != BINDER_WORK_NODE) {
-                                found = true;
-                                break;
-                        }
-                }
-                if (found == true) {
-                        binder_inner_proc_unlock(proc);
-                        cfb_report(uid, "proc");
-                        return;
-                }
-        }
-#else
-        empty = binder_worklist_empty_ilocked(&proc->todo);
-        if (!empty) {
-                list_for_each_entry(w, &proc->todo, entry) {
-                        if (w->type == BINDER_WORK_TRANSACTION) {
-                                t = container_of(w, struct binder_transaction, work);
-                                if (!(t->flags & TF_ONE_WAY)) {
-                                        found = true;
-                                        break;
-                                }
-                        }
-                        else if (w->type != BINDER_WORK_TRANSACTION_COMPLETE && w->type != BINDER_WORK_NODE) {
-                                found = true;
-                                break;
-                        }
-                }
-                if (found == true) {
-                        binder_inner_proc_unlock(proc);
-                        cfb_report(uid, "proc");
-                        return;
-                }
-        }
-#endif
-        binder_inner_proc_unlock(proc);
-}
-
-void binders_in_transcation(int uid)
-{
-        struct binder_proc *itr;
-
-        mutex_lock(&binder_procs_lock);
-        hlist_for_each_entry(itr, &binder_procs, proc_node) {
-                if (itr != NULL && (itr->tsk->cred->euid.val == uid)) {
-                        binder_in_transaction(itr, uid);
-                }
-        }
-        mutex_unlock(&binder_procs_lock);
-}
-#endif
-
 static const char * const binder_return_strings[] = {
         "BR_ERROR",
         "BR_OK",

diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
@@ -33,14 +33,8 @@
 #include "binder_alloc.h"
 #include "binder_trace.h"
-#ifdef CONFIG_SAMSUNG_FREECESS
-#include <linux/freecess.h>
-#endif
 
 struct list_lru binder_alloc_lru;
 
-extern int system_server_pid;
-
 static DEFINE_MUTEX(binder_alloc_mmap_lock);
 
 enum {
@@ -365,10 +359,6 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
         size_t size, data_offsets_size;
         int ret;
-#ifdef CONFIG_SAMSUNG_FREECESS
-        struct task_struct *p = NULL;
-#endif
 
         if (!binder_alloc_get_vma(alloc)) {
                 pr_err("%d: binder_alloc_buf, no vma\n",
                        alloc->pid);
@@ -391,27 +381,11 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
                               alloc->pid, extra_buffers_size);
                 return ERR_PTR(-EINVAL);
         }
-
-#ifdef CONFIG_SAMSUNG_FREECESS
-        if (is_async && (alloc->free_async_space < 3*(size + sizeof(struct binder_buffer))
-                || (alloc->free_async_space < ((alloc->buffer_size/2)*9/10)))) {
-                rcu_read_lock();
-                p = find_task_by_vpid(alloc->pid);
-                rcu_read_unlock();
-                if (p != NULL && thread_group_is_frozen(p)) {
-                        binder_report(p, -1, "free_buffer_full", is_async);
-                }
-        }
-#endif
-
         if (is_async &&
             alloc->free_async_space < size + sizeof(struct binder_buffer)) {
-                //binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
-                //           "%d: binder_alloc_buf size %zd failed, no async space left\n",
-                //           alloc->pid, size);
-                pr_info("%d: binder_alloc_buf size %zd(%zd) failed, no async space left\n",
-                        alloc->pid, size, alloc->free_async_space);
+                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+                                   "%d: binder_alloc_buf size %zd failed, no async space left\n",
+                                   alloc->pid, size);
                 return ERR_PTR(-ENOSPC);
         }
@@ -515,14 +489,6 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
         buffer->extra_buffers_size = extra_buffers_size;
         if (is_async) {
                 alloc->free_async_space -= size + sizeof(struct binder_buffer);
-                if ((system_server_pid == alloc->pid) && (alloc->free_async_space <= 153600)) { // 150K
-                        pr_info("%d: [free_size<150K] binder_alloc_buf size %zd async free %zd\n",
-                                alloc->pid, size, alloc->free_async_space);
-                }
-                if ((system_server_pid == alloc->pid) && (size >= 122880)) { // 120K
-                        pr_info("%d: [alloc_size>120K] binder_alloc_buf size %zd async free %zd\n",
-                                alloc->pid, size, alloc->free_async_space);
-                }
                 binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
                                    "%d: binder_alloc_buf size %zd async free %zd\n",
                                    alloc->pid, size, alloc->free_async_space);
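
Editor's note on the thresholds above: each mmap'ed binder region reserves half its size for asynchronous transactions, and every async allocation is charged size + sizeof(struct binder_buffer) against that reserve; the removed Samsung logging fired when system_server's reserve dropped to 150 KiB or a single async allocation reached 120 KiB. A toy calculation of the accounting, assuming a hypothetical 64-byte struct binder_buffer:

    #include <stdio.h>
    #include <stddef.h>

    int main(void)
    {
            size_t buffer_size = 1024 * 1024;          /* mmap'ed region  */
            size_t free_async_space = buffer_size / 2; /* initial reserve */
            size_t metadata = 64;   /* sizeof(struct binder_buffer), assumed */
            size_t size = 150 * 1024;                  /* this allocation */

            if (free_async_space < size + metadata)
                    printf("ENOSPC: no async space left\n");
            else
                    printf("ok, %zu bytes of async space remain\n",
                           free_async_space - (size + metadata));
            return 0;
    }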
@@ -1210,4 +1176,3 @@ void binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
         binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset,
                                     dest, bytes);
 }
-

diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
@@ -448,7 +448,6 @@ static int binderfs_binder_ctl_create(struct super_block *sb)
         inode->i_uid = info->root_uid;
         inode->i_gid = info->root_gid;
 
-        refcount_set(&device->ref, 1);
         device->binderfs_inode = inode;
         device->miscdev.minor = minor;
@@ -658,7 +657,7 @@ static int binderfs_fill_super(struct super_block *sb, void *data, int silent)
         int ret;
         struct binderfs_info *info;
         struct inode *inode = NULL;
-        struct binderfs_device device_info = { { 0 } };
+        struct binderfs_device device_info = { 0 };
         const char *name;
         size_t len;
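
Editor's side note on the initializer change: both spellings zero the whole struct; { { 0 } } merely braces the first member (the name[] array) explicitly, which is what GCC's -Wmissing-braces asks for. A minimal illustration, assuming this tree's UAPI header path:

    #include <linux/android/binderfs.h>

    /* Both fully zero-initialize the structure. */
    struct binderfs_device a = { 0 };       /* first scalar of first member */
    struct binderfs_device b = { { 0 } };   /* name[] braced explicitly */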

diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h
@@ -88,7 +88,6 @@ enum flat_binder_object_flags {
          * scheduling policy from the caller (for synchronous transactions).
          */
         FLAT_BINDER_FLAG_INHERIT_RT = 0x800,
-#ifdef __KERNEL__
 
         /**
          * @FLAT_BINDER_FLAG_TXN_SECURITY_CTX: request security contexts
@@ -97,7 +96,6 @@ enum flat_binder_object_flags {
          * context
          */
         FLAT_BINDER_FLAG_TXN_SECURITY_CTX = 0x1000,
-#endif /* __KERNEL__ */
 };
 
 #ifdef BINDER_IPC_32BIT
@@ -276,7 +274,6 @@ struct binder_node_info_for_ref {
 #define BINDER_GET_NODE_DEBUG_INFO      _IOWR('b', 11, struct binder_node_debug_info)
 #define BINDER_GET_NODE_INFO_FOR_REF    _IOWR('b', 12, struct binder_node_info_for_ref)
 #define BINDER_SET_CONTEXT_MGR_EXT      _IOW('b', 13, struct flat_binder_object)
-#define BINDER_SET_SYSTEM_SERVER_PID    _IOW('b', 14, __u32)
 
 /*
  * NOTE: Two special error codes you should check for when calling
@@ -335,13 +332,11 @@ struct binder_transaction_data {
         } data;
 };
 
-#ifdef __KERNEL__
 struct binder_transaction_data_secctx {
         struct binder_transaction_data transaction_data;
         binder_uintptr_t secctx;
 };
-#endif /* __KERNEL__ */
 
 struct binder_transaction_data_sg {
         struct binder_transaction_data transaction_data;
         binder_size_t buffers_size;
@@ -377,19 +372,16 @@ enum binder_driver_return_protocol {
         BR_OK = _IO('r', 1),
         /* No parameters! */
 
-#ifdef __KERNEL__
         BR_TRANSACTION_SEC_CTX = _IOR('r', 2,
                                       struct binder_transaction_data_secctx),
         /*
          * binder_transaction_data_secctx: the received command.
          */
-#endif /* __KERNEL__ */
         BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data),
         BR_REPLY = _IOR('r', 3, struct binder_transaction_data),
         /*
          * binder_transaction_data: the received command.
          */
 
         BR_ACQUIRE_RESULT = _IOR('r', 4, __s32),
         /*
@@ -548,4 +540,3 @@ enum binder_driver_command_protocol {
 };
 
 #endif /* _UAPI_LINUX_BINDER_H */
-

diff --git a/include/uapi/linux/android/binderfs.h b/include/uapi/linux/android/binderfs.h
@@ -22,8 +22,8 @@
  */
 struct binderfs_device {
         char name[BINDERFS_MAX_NAME + 1];
-        __u32 major;
-        __u32 minor;
+        __u8 major;
+        __u8 minor;
 };
 
 /**
@@ -32,4 +32,3 @@ struct binderfs_device {
 #define BINDER_CTL_ADD _IOWR('b', 1, struct binderfs_device)
 
 #endif /* _UAPI_LINUX_BINDER_CTL_H */
-
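
Editor's note: BINDER_CTL_ADD above is issued against binderfs's binder-control node to allocate a new binder device; after this commit the returned device numbers must fit the narrower __u8 fields. A hedged usage sketch (the mount point is an assumption; binderfs must already be mounted there):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/android/binderfs.h>

    int main(void)
    {
            /* Assumes: mount -t binder binder /dev/binderfs */
            int fd = open("/dev/binderfs/binder-control", O_RDONLY | O_CLOEXEC);
            struct binderfs_device device = { 0 };

            strcpy(device.name, "my-binder");   /* fits BINDERFS_MAX_NAME */
            if (fd >= 0 && ioctl(fd, BINDER_CTL_ADD, &device) == 0)
                    printf("created %s (%u:%u)\n", device.name,
                           device.major, device.minor);
            close(fd);
            return 0;
    }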
