Skip to content

Commit 4fc87c2

Browse files
Carlos Llamas authored and Greg Kroah-Hartman (gregkh) committed
rust_binder: fix oneway spam detection
The spam detection logic in TreeRange was executed before the current request was inserted into the tree. So the new request was not being factored in the spam calculation. Fix this by moving the logic after the new range has been inserted. Also, the detection logic for ArrayRange was missing altogether which meant large spamming transactions could get away without being detected. Fix this by implementing an equivalent low_oneway_space() in ArrayRange. Note that I looked into centralizing this logic in RangeAllocator but iterating through 'state' and 'size' got a bit too complicated (for me) and I abandoned this effort. Cc: stable <stable@kernel.org> Cc: Alice Ryhl <aliceryhl@google.com> Fixes: eafedbc ("rust_binder: add Rust Binder driver") Signed-off-by: Carlos Llamas <cmllamas@google.com> Reviewed-by: Alice Ryhl <aliceryhl@google.com> Link: https://patch.msgid.link/20260210232949.3770644-1-cmllamas@google.com Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
1 parent 994d5df commit 4fc87c2

3 files changed

Lines changed: 44 additions & 13 deletions

File tree

drivers/android/binder/range_alloc/array.rs

Lines changed: 33 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -118,7 +118,7 @@ impl<T> ArrayRangeAllocator<T> {
118118
size: usize,
119119
is_oneway: bool,
120120
pid: Pid,
121-
) -> Result<usize> {
121+
) -> Result<(usize, bool)> {
122122
// Compute new value of free_oneway_space, which is set only on success.
123123
let new_oneway_space = if is_oneway {
124124
match self.free_oneway_space.checked_sub(size) {
@@ -146,7 +146,38 @@ impl<T> ArrayRangeAllocator<T> {
146146
.ok()
147147
.unwrap();
148148

149-
Ok(insert_at_offset)
149+
// Start detecting spammers once we have less than 20%
150+
// of async space left (which is less than 10% of total
151+
// buffer size).
152+
//
153+
// (This will short-circuit, so `low_oneway_space` is
154+
// only called when necessary.)
155+
let oneway_spam_detected =
156+
is_oneway && new_oneway_space < self.size / 10 && self.low_oneway_space(pid);
157+
158+
Ok((insert_at_offset, oneway_spam_detected))
159+
}
160+
161+
/// Find the amount and size of buffers allocated by the current caller.
162+
///
163+
/// The idea is that once we cross the threshold, whoever is responsible
164+
/// for the low async space is likely to try to send another async transaction,
165+
/// and at some point we'll catch them in the act. This is more efficient
166+
/// than keeping a map per pid.
167+
fn low_oneway_space(&self, calling_pid: Pid) -> bool {
168+
let mut total_alloc_size = 0;
169+
let mut num_buffers = 0;
170+
171+
// Warn if this pid has more than 50 transactions, or more than 50% of
172+
// async space (which is 25% of total buffer size). Oneway spam is only
173+
// detected when the threshold is exceeded.
174+
for range in &self.ranges {
175+
if range.state.is_oneway() && range.state.pid() == calling_pid {
176+
total_alloc_size += range.size;
177+
num_buffers += 1;
178+
}
179+
}
180+
num_buffers > 50 || total_alloc_size > self.size / 4
150181
}
151182

152183
pub(crate) fn reservation_abort(&mut self, offset: usize) -> Result<FreedRange> {

drivers/android/binder/range_alloc/mod.rs

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -188,11 +188,11 @@ impl<T> RangeAllocator<T> {
188188
self.reserve_new(args)
189189
}
190190
Impl::Array(array) => {
191-
let offset =
191+
let (offset, oneway_spam_detected) =
192192
array.reserve_new(args.debug_id, args.size, args.is_oneway, args.pid)?;
193193
Ok(ReserveNew::Success(ReserveNewSuccess {
194194
offset,
195-
oneway_spam_detected: false,
195+
oneway_spam_detected,
196196
_empty_array_alloc: args.empty_array_alloc,
197197
_new_tree_alloc: args.new_tree_alloc,
198198
_tree_alloc: args.tree_alloc,

drivers/android/binder/range_alloc/tree.rs

Lines changed: 9 additions & 9 deletions
Original file line number | Diff line number | Diff line change
@@ -164,15 +164,6 @@ impl<T> TreeRangeAllocator<T> {
164164
self.free_oneway_space
165165
};
166166

167-
// Start detecting spammers once we have less than 20%
168-
// of async space left (which is less than 10% of total
169-
// buffer size).
170-
//
171-
// (This will short-circut, so `low_oneway_space` is
172-
// only called when necessary.)
173-
let oneway_spam_detected =
174-
is_oneway && new_oneway_space < self.size / 10 && self.low_oneway_space(pid);
175-
176167
let (found_size, found_off, tree_node, free_tree_node) = match self.find_best_match(size) {
177168
None => {
178169
pr_warn!("ENOSPC from range_alloc.reserve_new - size: {}", size);
@@ -203,6 +194,15 @@ impl<T> TreeRangeAllocator<T> {
203194
self.free_tree.insert(free_tree_node);
204195
}
205196

197+
// Start detecting spammers once we have less than 20%
198+
// of async space left (which is less than 10% of total
199+
// buffer size).
200+
//
201+
// (This will short-circuit, so `low_oneway_space` is
202+
// only called when necessary.)
203+
let oneway_spam_detected =
204+
is_oneway && new_oneway_space < self.size / 10 && self.low_oneway_space(pid);
205+
206206
Ok((found_off, oneway_spam_detected))
207207
}
208208

0 commit comments

Comments (0)