UPSTREAM: binder: tell userspace to dump current backtrace when detected oneway spamming

When the async binder buffer is exhausted, normal oneway transactions
are also discarded and may cause system or application failures. By
that time, the binder debug information we dump may no longer be
relevant to the root cause, and the issue is difficult to debug without
a backtrace of the thread that is sending the spam.

This change sends BR_ONEWAY_SPAM_SUSPECT to userspace when oneway
spamming is detected, requesting that the current backtrace be dumped.
Oneway spamming is reported only once each time the threshold is
exceeded (the target process dips below 80% of its oneway space, and
the current process is responsible for either more than 50 transactions
or more than 50% of the oneway space). Detection restarts once the
async buffer has returned to a healthy state.

Acked-by: Todd Kjos <tkjos@google.com>
Signed-off-by: Hang Lu <hangl@codeaurora.org>
Link: https://lore.kernel.org/r/1617961246-4502-3-git-send-email-hangl@codeaurora.org
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

Bug: 181190340
Change-Id: Id3d2526099bc89f04d8ad3ad6e48141b2a8f2515
(cherry picked from commit a7dc1e6f99df59799ab0128d9c4e47bbeceb934d)
Signed-off-by: Hang Lu <hangl@codeaurora.org>
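
To illustrate the userspace side, here is a minimal sketch of a process
opting in to detection and reacting to the new return code. This is not
Android's actual libbinder code: the open/ioctl sequence is stripped
down, and dump_backtrace() is a hypothetical stand-in for whatever
unwinder the process uses (e.g. libunwindstack).

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

/* Hypothetical stand-in for the process's own unwinder. */
static void dump_backtrace(void)
{
	fprintf(stderr, "oneway spam suspected, dumping backtrace...\n");
}

/* Called for each command parsed out of the binder read buffer. */
void handle_return(uint32_t cmd)
{
	if (cmd == BR_ONEWAY_SPAM_SUSPECT)
		dump_backtrace();	/* _IO('r', 19): no payload follows */
}

int main(void)
{
	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
	uint32_t enable = 1;

	if (fd < 0)
		return 1;

	/* Opt in; the driver only delivers BR_ONEWAY_SPAM_SUSPECT when
	 * proc->oneway_spam_detection_enabled is set via this ioctl. */
	if (ioctl(fd, BINDER_ENABLE_ONEWAY_SPAM_DETECTION, &enable) < 0)
		perror("BINDER_ENABLE_ONEWAY_SPAM_DETECTION");

	/* ... a normal BINDER_WRITE_READ loop would follow, feeding each
	 * returned command through handle_return() ... */

	close(fd);
	return 0;
}

Because detection is opt-in per process, legacy clients that never issue
the ioctl keep seeing plain BR_TRANSACTION_COMPLETE.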
 drivers/android/binder.c            | 33 +++++++++++++++++++++++++++++++++++++----
 drivers/android/binder_alloc.c      | 15 ++++++++++++---
 drivers/android/binder_alloc.h      |  8 +++++++-
 include/uapi/linux/android/binder.h |  8 ++++++++
 4 files changed, 56 insertions(+), 8 deletions(-)

--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -194,7 +194,7 @@ enum binder_stat_types {
 };
 
 struct binder_stats {
-	atomic_t br[_IOC_NR(BR_FROZEN_REPLY) + 1];
+	atomic_t br[_IOC_NR(BR_ONEWAY_SPAM_SUSPECT) + 1];
 	atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
 	atomic_t obj_created[BINDER_STAT_COUNT];
 	atomic_t obj_deleted[BINDER_STAT_COUNT];
@@ -248,6 +248,7 @@ struct binder_work {
 enum binder_work_type {
 	BINDER_WORK_TRANSACTION = 1,
 	BINDER_WORK_TRANSACTION_COMPLETE,
+	BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT,
 	BINDER_WORK_RETURN_ERROR,
 	BINDER_WORK_NODE,
 	BINDER_WORK_DEAD_BINDER,
@@ -515,6 +516,8 @@ struct binder_priority {
  * @outer_lock:           no nesting under innor or node lock
  *                        Lock order: 1) outer, 2) node, 3) inner
  * @binderfs_entry:       process-specific binderfs log file
+ * @oneway_spam_detection_enabled: process enabled oneway spam detection
+ *                        or not
  *
  * Bookkeeping structure for binder processes
  */
@@ -553,6 +556,7 @@ struct binder_proc {
 	spinlock_t inner_lock;
 	spinlock_t outer_lock;
 	struct dentry *binderfs_entry;
+	bool oneway_spam_detection_enabled;
 };
 
 enum {
@@ -3522,7 +3526,10 @@ static void binder_transaction(struct binder_proc *proc,
 			goto err_bad_object_type;
 		}
 	}
-	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
+	if (t->buffer->oneway_spam_suspect)
+		tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
+	else
+		tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
 	t->work.type = BINDER_WORK_TRANSACTION;
 
 	if (reply) {
@@ -4304,9 +4311,14 @@ retry:
 			binder_stat_br(proc, thread, e->cmd);
 		} break;
-		case BINDER_WORK_TRANSACTION_COMPLETE: {
+		case BINDER_WORK_TRANSACTION_COMPLETE:
+		case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: {
+			if (proc->oneway_spam_detection_enabled &&
+				   w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT)
+				cmd = BR_ONEWAY_SPAM_SUSPECT;
+			else
+				cmd = BR_TRANSACTION_COMPLETE;
 			binder_inner_proc_unlock(proc);
-			cmd = BR_TRANSACTION_COMPLETE;
 			kfree(w);
 			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
 			if (put_user(cmd, (uint32_t __user *)ptr))
 				return -EFAULT;
@@ -5290,6 +5302,18 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		}
 		break;
 	}
+	case BINDER_ENABLE_ONEWAY_SPAM_DETECTION: {
+		uint32_t enable;
+
+		if (copy_from_user(&enable, ubuf, sizeof(enable))) {
+			ret = -EINVAL;
+			goto err;
+		}
+		binder_inner_proc_lock(proc);
+		proc->oneway_spam_detection_enabled = (bool)enable;
+		binder_inner_proc_unlock(proc);
+		break;
+	}
 	default:
 		ret = -EINVAL;
 		goto err;
@@ -5996,6 +6020,7 @@ static const char * const binder_return_strings[] = {
 	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
 	"BR_FAILED_REPLY",
 	"BR_FROZEN_REPLY",
+	"BR_ONEWAY_SPAM_SUSPECT",
 };
 
 static const char * const binder_command_strings[] = {

--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -344,7 +344,7 @@ static inline struct vm_area_struct *binder_alloc_get_vma(
 	return vma;
 }
 
-static void debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
+static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
 {
 	/*
 	 * Find the amount and size of buffers allocated by the current caller;
@@ -372,13 +372,19 @@ static void debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
 
 	/*
 	 * Warn if this pid has more than 50 transactions, or more than 50% of
-	 * async space (which is 25% of total buffer size).
+	 * async space (which is 25% of total buffer size). Oneway spam is only
+	 * detected when the threshold is exceeded.
 	 */
 	if (num_buffers > 50 || total_alloc_size > alloc->buffer_size / 4) {
 		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
 			     "%d: pid %d spamming oneway? %zd buffers allocated for a total size of %zd\n",
 			      alloc->pid, pid, num_buffers, total_alloc_size);
+		if (!alloc->oneway_spam_detected) {
+			alloc->oneway_spam_detected = true;
+			return true;
+		}
 	}
+	return false;
 }
 
 static struct binder_buffer *binder_alloc_new_buf_locked(
@@ -527,6 +533,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
 	buffer->async_transaction = is_async;
 	buffer->extra_buffers_size = extra_buffers_size;
 	buffer->pid = pid;
+	buffer->oneway_spam_suspect = false;
 	if (is_async) {
 		alloc->free_async_space -= size + sizeof(struct binder_buffer);
 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
@@ -538,7 +545,9 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
 			 * of async space left (which is less than 10% of total
 			 * buffer size).
 			 */
-			debug_low_async_space_locked(alloc, pid);
+			buffer->oneway_spam_suspect = debug_low_async_space_locked(alloc, pid);
+		} else {
+			alloc->oneway_spam_detected = false;
 		}
 	}
 	return buffer;
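
To make the thresholds in debug_low_async_space_locked() concrete, the
arithmetic below assumes a hypothetical 1 MiB binder mapping (the real
size is whatever the process passes to mmap; free_async_space starts at
half the mapping). It simply restates the buffer_size / 10 and
buffer_size / 4 checks from the hunks above:

#include <stdio.h>

int main(void)
{
	/* Hypothetical mapping size; real processes choose their own. */
	size_t buffer_size = 1024 * 1024;

	/* Async space is half of the mapping (set up at mmap time). */
	size_t async_space = buffer_size / 2;	/* 512 KiB */

	/* Detection runs once free async space drops below 20% of the
	 * async space, i.e. below 10% of the total mapping. */
	size_t detect_below = buffer_size / 10;	/* ~102 KiB free */

	/* A pid is flagged if it holds more than 50 buffers, or more than
	 * 50% of the async space, i.e. more than 25% of the mapping. */
	size_t spam_above = buffer_size / 4;	/* 256 KiB held */

	printf("async space:  %zu bytes\n", async_space);
	printf("detect below: %zu bytes free\n", detect_below);
	printf("flag above:   %zu bytes held by one pid\n", spam_above);
	return 0;
}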

--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -35,6 +35,8 @@ struct binder_transaction;
  * @clear_on_free:      %true if buffer must be zeroed after use
  * @allow_user_free:    %true if user is allowed to free buffer
  * @async_transaction:  %true if buffer is in use for an async txn
+ * @oneway_spam_suspect: %true if total async allocate size just exceed
+ * spamming detect threshold
  * @debug_id:           unique ID for debugging
  * @transaction:        pointer to associated struct binder_transaction
  * @target_node:        struct binder_node associated with this buffer
@@ -54,7 +56,8 @@ struct binder_buffer {
 	unsigned clear_on_free:1;
 	unsigned allow_user_free:1;
 	unsigned async_transaction:1;
-	unsigned debug_id:28;
+	unsigned oneway_spam_suspect:1;
+	unsigned debug_id:27;
 
 	struct binder_transaction *transaction;
 
@@ -96,6 +99,8 @@ struct binder_lru_page {
  * @buffer_size:        size of address space specified via mmap
  * @pid:                pid for associated binder_proc (invariant after init)
  * @pages_high:         high watermark of offset in @pages
+ * @oneway_spam_detected: %true if oneway spam detection fired, clear that
+ * flag once the async buffer has returned to a healthy state
  *
  * Bookkeeping structure for per-proc address space management for binder
  * buffers. It is normally initialized during binder_init() and binder_mmap()
@@ -116,6 +121,7 @@ struct binder_alloc {
 	uint32_t buffer_free;
 	int pid;
 	size_t pages_high;
+	bool oneway_spam_detected;
 };
 
 #ifdef CONFIG_ANDROID_BINDER_IPC_SELFTEST

--- a/include/uapi/linux/android/binder.h
+++ b/include/uapi/linux/android/binder.h
@@ -295,6 +295,7 @@ struct binder_frozen_status_info {
 #define BINDER_SET_CONTEXT_MGR_EXT	_IOW('b', 13, struct flat_binder_object)
 #define BINDER_FREEZE			_IOW('b', 14, struct binder_freeze_info)
 #define BINDER_GET_FROZEN_INFO		_IOWR('b', 15, struct binder_frozen_status_info)
+#define BINDER_ENABLE_ONEWAY_SPAM_DETECTION	_IOW('b', 16, __u32)
 
 /*
  * NOTE: Two special error codes you should check for when calling
@@ -482,6 +483,13 @@ enum binder_driver_return_protocol {
 	 * The target of the last transaction (either a bcTRANSACTION or
 	 * a bcATTEMPT_ACQUIRE) is frozen.  No parameters.
 	 */
+
+	BR_ONEWAY_SPAM_SUSPECT = _IO('r', 19),
+	/*
+	 * Current process sent too many oneway calls to target, and the last
+	 * asynchronous transaction makes the allocated async buffer size exceed
+	 * detection threshold.  No parameters.
+	 */
 };
 
 enum binder_driver_command_protocol {
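
Since BR_ONEWAY_SPAM_SUSPECT is declared with _IO(), it carries no
payload: a reader consumes exactly one 32-bit command word for it. A
rough sketch of walking the BINDER_WRITE_READ read buffer, assuming a
simplified loop (count_spam_suspects() is a hypothetical helper, not
libbinder's IPCThreadState):

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <linux/android/binder.h>

/* Walk the buffer filled in by BINDER_WRITE_READ and pick out the
 * spam-suspect notifications. Payload sizes for the other return
 * commands are encoded in their _IOR() declarations, so _IOC_SIZE()
 * tells us how many payload bytes follow each command word. */
size_t count_spam_suspects(const char *buf, size_t consumed)
{
	size_t off = 0, hits = 0;

	while (off + sizeof(uint32_t) <= consumed) {
		uint32_t cmd;

		memcpy(&cmd, buf + off, sizeof(cmd));
		off += sizeof(cmd);

		if (cmd == BR_ONEWAY_SPAM_SUSPECT)
			hits++;			/* no parameters to skip */
		else
			off += _IOC_SIZE(cmd);	/* skip this command's payload */
	}
	return hits;
}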
