Skip to content

Commit 4006038

Browse files
committed
FROMLIST: misc: fastrpc: Remove buffer from list prior to unmap operation
fastrpc_req_munmap_impl() is called to unmap any buffer. Currently, the buffer is removed from the list only after it has been unmapped from the DSP. This can create a race condition if another thread removes the entry from the list while the unmap operation is still ongoing. Remove the entry from the list before calling the unmap operation. Link: https://lore.kernel.org/all/20250513042825.2147985-5-ekansh.gupta@oss.qualcomm.com/ Fixes: 2419e55 ("misc: fastrpc: add mmap/unmap support") Cc: stable@kernel.org Signed-off-by: Ekansh Gupta <ekansh.gupta@oss.qualcomm.com>
1 parent 419cf1f commit 4006038

1 file changed

Lines changed: 22 additions & 7 deletions

File tree

drivers/misc/fastrpc.c

Lines changed: 22 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1868,9 +1868,6 @@ static int fastrpc_req_munmap_impl(struct fastrpc_user *fl, struct fastrpc_buf *
18681868
&args[0]);
18691869
if (!err) {
18701870
dev_dbg(dev, "unmmap\tpt 0x%09lx OK\n", buf->raddr);
1871-
spin_lock(&fl->lock);
1872-
list_del(&buf->node);
1873-
spin_unlock(&fl->lock);
18741871
fastrpc_buf_free(buf);
18751872
} else {
18761873
dev_err(dev, "unmmap\tpt 0x%09lx ERROR\n", buf->raddr);
@@ -1884,13 +1881,15 @@ static int fastrpc_req_munmap(struct fastrpc_user *fl, char __user *argp)
18841881
struct fastrpc_buf *buf = NULL, *iter, *b;
18851882
struct fastrpc_req_munmap req;
18861883
struct device *dev = fl->sctx->dev;
1884+
int err;
18871885

18881886
if (copy_from_user(&req, argp, sizeof(req)))
18891887
return -EFAULT;
18901888

18911889
spin_lock(&fl->lock);
18921890
list_for_each_entry_safe(iter, b, &fl->mmaps, node) {
1893-
if ((iter->raddr == req.vaddrout) && (iter->size == req.size)) {
1891+
if (iter->raddr == req.vaddrout && iter->size == req.size) {
1892+
list_del(&iter->node);
18941893
buf = iter;
18951894
break;
18961895
}
@@ -1903,7 +1902,14 @@ static int fastrpc_req_munmap(struct fastrpc_user *fl, char __user *argp)
19031902
return -EINVAL;
19041903
}
19051904

1906-
return fastrpc_req_munmap_impl(fl, buf);
1905+
err = fastrpc_req_munmap_impl(fl, buf);
1906+
if (err) {
1907+
spin_lock(&fl->lock);
1908+
list_add_tail(&buf->node, &fl->mmaps);
1909+
spin_unlock(&fl->lock);
1910+
}
1911+
1912+
return err;
19071913
}
19081914

19091915
static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
@@ -1997,14 +2003,23 @@ static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
19972003

19982004
if (copy_to_user((void __user *)argp, &req, sizeof(req))) {
19992005
err = -EFAULT;
2000-
goto err_assign;
2006+
goto err_copy;
20012007
}
20022008

20032009
dev_dbg(dev, "mmap\t\tpt 0x%09lx OK [len 0x%08llx]\n",
20042010
buf->raddr, buf->size);
20052011

20062012
return 0;
2007-
2013+
err_copy:
2014+
if (req.flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
2015+
spin_lock_irqsave(&fl->cctx->lock, flags);
2016+
list_del(&buf->node);
2017+
spin_unlock_irqrestore(&fl->cctx->lock, flags);
2018+
} else {
2019+
spin_lock(&fl->lock);
2020+
list_del(&buf->node);
2021+
spin_unlock(&fl->lock);
2022+
}
20082023
err_assign:
20092024
fastrpc_req_munmap_impl(fl, buf);
20102025

0 commit comments

Comments
 (0)