Skip to content

Commit 8ef2c15

Browse files
Darksonn authored and gregkh committed
rust_binder: check ownership before using vma
When installing missing pages (or zapping them), Rust Binder will look up the vma in the mm by address, and then call vm_insert_page (or zap_page_range_single). However, if the vma is closed and replaced with a different vma at the same address, this can lead to Rust Binder installing pages into the wrong vma. By installing the page into a writable vma, it becomes possible to write to your own binder pages, which are normally read-only. Although you're not supposed to be able to write to those pages, the intent behind the design of Rust Binder is that even if you get that ability, it should not lead to anything bad. Unfortunately, due to another bug, that is not the case. To fix this, store a pointer in vm_private_data and check that the vma returned by vma_lookup() has the right vm_ops and vm_private_data before trying to use the vma. This should ensure that Rust Binder will refuse to interact with any other VMA. The plan is to introduce more vma abstractions to avoid this unsafe access to vm_ops and vm_private_data, but for now let's start with the simplest possible fix. C Binder performs the same check in a slightly different way: it provides a vm_ops->close that sets a boolean to true, then checks that boolean after calling vma_lookup(), but this is more fragile than the solution in this patch. (We probably still want to do both, but the vm_ops->close callback will be added later as part of the follow-up vma API changes.) It's still possible to remap the vma so that pages appear in the right vma, but at the wrong offset, but this is a separate issue and will be fixed when Rust Binder gets a vm_ops->close callback. Cc: stable <stable@kernel.org> Fixes: eafedbc ("rust_binder: add Rust Binder driver") Reported-by: Jann Horn <jannh@google.com> Reviewed-by: Jann Horn <jannh@google.com> Signed-off-by: Alice Ryhl <aliceryhl@google.com> Acked-by: Danilo Krummrich <dakr@kernel.org> Acked-by: Liam R. 
Howlett <Liam.Howlett@oracle.com> Link: https://patch.msgid.link/20260218-binder-vma-check-v2-1-60f9d695a990@google.com Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
1 parent 4fc87c2 commit 8ef2c15

1 file changed

Lines changed: 63 additions & 20 deletions

File tree

drivers/android/binder/page_range.rs

Lines changed: 63 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -142,6 +142,30 @@ pub(crate) struct ShrinkablePageRange {
142142
_pin: PhantomPinned,
143143
}
144144

145+
// We do not define any ops. For now, used only to check identity of vmas.
146+
static BINDER_VM_OPS: bindings::vm_operations_struct = pin_init::zeroed();
147+
148+
// To ensure that we do not accidentally install pages into or zap pages from the wrong vma, we
149+
// check its vm_ops and private data before using it.
150+
fn check_vma(vma: &virt::VmaRef, owner: *const ShrinkablePageRange) -> Option<&virt::VmaMixedMap> {
151+
// SAFETY: Just reading the vm_ops pointer of any active vma is safe.
152+
let vm_ops = unsafe { (*vma.as_ptr()).vm_ops };
153+
if !ptr::eq(vm_ops, &BINDER_VM_OPS) {
154+
return None;
155+
}
156+
157+
// SAFETY: Reading the vm_private_data pointer of a binder-owned vma is safe.
158+
let vm_private_data = unsafe { (*vma.as_ptr()).vm_private_data };
159+
// The ShrinkablePageRange is only dropped when the Process is dropped, which only happens once
160+
// the file's ->release handler is invoked, which means the ShrinkablePageRange outlives any
161+
// VMA associated with it, so there can't be any false positives due to pointer reuse here.
162+
if !ptr::eq(vm_private_data, owner.cast()) {
163+
return None;
164+
}
165+
166+
vma.as_mixedmap_vma()
167+
}
168+
145169
struct Inner {
146170
/// Array of pages.
147171
///
@@ -308,6 +332,18 @@ impl ShrinkablePageRange {
308332
inner.size = num_pages;
309333
inner.vma_addr = vma.start();
310334

335+
// This pointer is only used for comparison - it's not dereferenced.
336+
//
337+
// SAFETY: We own the vma, and we don't use any methods on VmaNew that rely on
338+
// `vm_private_data`.
339+
unsafe {
340+
(*vma.as_ptr()).vm_private_data = ptr::from_ref(self).cast_mut().cast::<c_void>()
341+
};
342+
343+
// SAFETY: We own the vma, and we don't use any methods on VmaNew that rely on
344+
// `vm_ops`.
345+
unsafe { (*vma.as_ptr()).vm_ops = &BINDER_VM_OPS };
346+
311347
Ok(num_pages)
312348
}
313349

@@ -399,22 +435,24 @@ impl ShrinkablePageRange {
399435
//
400436
// Using `mmput_async` avoids this, because then the `mm` cleanup is instead queued to a
401437
// workqueue.
402-
MmWithUser::into_mmput_async(self.mm.mmget_not_zero().ok_or(ESRCH)?)
403-
.mmap_read_lock()
404-
.vma_lookup(vma_addr)
405-
.ok_or(ESRCH)?
406-
.as_mixedmap_vma()
407-
.ok_or(ESRCH)?
408-
.vm_insert_page(user_page_addr, &new_page)
409-
.inspect_err(|err| {
410-
pr_warn!(
411-
"Failed to vm_insert_page({}): vma_addr:{} i:{} err:{:?}",
412-
user_page_addr,
413-
vma_addr,
414-
i,
415-
err
416-
)
417-
})?;
438+
check_vma(
439+
MmWithUser::into_mmput_async(self.mm.mmget_not_zero().ok_or(ESRCH)?)
440+
.mmap_read_lock()
441+
.vma_lookup(vma_addr)
442+
.ok_or(ESRCH)?,
443+
self,
444+
)
445+
.ok_or(ESRCH)?
446+
.vm_insert_page(user_page_addr, &new_page)
447+
.inspect_err(|err| {
448+
pr_warn!(
449+
"Failed to vm_insert_page({}): vma_addr:{} i:{} err:{:?}",
450+
user_page_addr,
451+
vma_addr,
452+
i,
453+
err
454+
)
455+
})?;
418456

419457
let inner = self.lock.lock();
420458

@@ -667,12 +705,15 @@ unsafe extern "C" fn rust_shrink_free_page(
667705
let mmap_read;
668706
let mm_mutex;
669707
let vma_addr;
708+
let range_ptr;
670709

671710
{
672711
// CAST: The `list_head` field is first in `PageInfo`.
673712
let info = item as *mut PageInfo;
674713
// SAFETY: The `range` field of `PageInfo` is immutable.
675-
let range = unsafe { &*((*info).range) };
714+
range_ptr = unsafe { (*info).range };
715+
// SAFETY: The `range` outlives its `PageInfo` values.
716+
let range = unsafe { &*range_ptr };
676717

677718
mm = match range.mm.mmget_not_zero() {
678719
Some(mm) => MmWithUser::into_mmput_async(mm),
@@ -717,9 +758,11 @@ unsafe extern "C" fn rust_shrink_free_page(
717758
// SAFETY: The lru lock is locked when this method is called.
718759
unsafe { bindings::spin_unlock(&raw mut (*lru).lock) };
719760

720-
if let Some(vma) = mmap_read.vma_lookup(vma_addr) {
721-
let user_page_addr = vma_addr + (page_index << PAGE_SHIFT);
722-
vma.zap_page_range_single(user_page_addr, PAGE_SIZE);
761+
if let Some(unchecked_vma) = mmap_read.vma_lookup(vma_addr) {
762+
if let Some(vma) = check_vma(unchecked_vma, range_ptr) {
763+
let user_page_addr = vma_addr + (page_index << PAGE_SHIFT);
764+
vma.zap_page_range_single(user_page_addr, PAGE_SIZE);
765+
}
723766
}
724767

725768
drop(mmap_read);

0 commit comments

Comments
 (0)