Skip to content

Commit ce9d27c

Browse files
Brian Foster authored and cmaiolino committed
xfs: replace zero range flush with folio batch
Now that the zero range pagecache flush is purely isolated to providing zeroing correctness in this case, we can remove it and replace it with the folio batch mechanism that is used for handling unwritten extents. This is still slightly odd in that XFS reports a hole vs. a mapping that reflects the COW fork extents, but that has always been the case in this situation and so a separate issue. We drop the iomap warning that assumes the folio batch is always associated with unwritten mappings, but this is mainly a development assertion as otherwise the core iomap fbatch code doesn't care much about the mapping type if it's handed the set of folios to process. Signed-off-by: Brian Foster <bfoster@redhat.com> Reviewed-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Darrick J. Wong <djwong@kernel.org> Signed-off-by: Carlos Maiolino <cem@kernel.org>
1 parent c770f99 commit ce9d27c

2 files changed

Lines changed: 6 additions & 18 deletions

File tree

fs/iomap/buffered-io.c

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1632,10 +1632,6 @@ iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
16321632
while ((ret = iomap_iter(&iter, ops)) > 0) {
16331633
const struct iomap *srcmap = iomap_iter_srcmap(&iter);
16341634

1635-
if (WARN_ON_ONCE((iter.iomap.flags & IOMAP_F_FOLIO_BATCH) &&
1636-
srcmap->type != IOMAP_UNWRITTEN))
1637-
return -EIO;
1638-
16391635
if (!(iter.iomap.flags & IOMAP_F_FOLIO_BATCH) &&
16401636
(srcmap->type == IOMAP_HOLE ||
16411637
srcmap->type == IOMAP_UNWRITTEN)) {

fs/xfs/xfs_iomap.c

Lines changed: 6 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -1781,7 +1781,6 @@ xfs_buffered_write_iomap_begin(
17811781
{
17821782
struct iomap_iter *iter = container_of(iomap, struct iomap_iter,
17831783
iomap);
1784-
struct address_space *mapping = inode->i_mapping;
17851784
struct xfs_inode *ip = XFS_I(inode);
17861785
struct xfs_mount *mp = ip->i_mount;
17871786
xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset);
@@ -1813,7 +1812,6 @@ xfs_buffered_write_iomap_begin(
18131812
if (error)
18141813
return error;
18151814

1816-
restart:
18171815
error = xfs_ilock_for_iomap(ip, flags, &lockmode);
18181816
if (error)
18191817
return error;
@@ -1866,8 +1864,8 @@ xfs_buffered_write_iomap_begin(
18661864

18671865
/*
18681866
* We may need to zero over a hole in the data fork if it's fronted by
1869-
* COW blocks and dirty pagecache. To make sure zeroing occurs, force
1870-
* writeback to remap pending blocks and restart the lookup.
1867+
* COW blocks and dirty pagecache. Scan such file ranges for dirty
1868+
* cache and fill the iomap batch with folios that need zeroing.
18711869
*/
18721870
if ((flags & IOMAP_ZERO) && imap.br_startoff > offset_fsb) {
18731871
loff_t start, end;
@@ -1889,16 +1887,10 @@ xfs_buffered_write_iomap_begin(
18891887
xfs_trim_extent(&imap, offset_fsb,
18901888
cmap.br_startoff + cmap.br_blockcount - offset_fsb);
18911889
start = XFS_FSB_TO_B(mp, imap.br_startoff);
1892-
end = XFS_FSB_TO_B(mp,
1893-
imap.br_startoff + imap.br_blockcount) - 1;
1894-
if (filemap_range_needs_writeback(mapping, start, end)) {
1895-
xfs_iunlock(ip, lockmode);
1896-
error = filemap_write_and_wait_range(mapping, start,
1897-
end);
1898-
if (error)
1899-
return error;
1900-
goto restart;
1901-
}
1890+
end = XFS_FSB_TO_B(mp, imap.br_startoff + imap.br_blockcount);
1891+
iomap_fill_dirty_folios(iter, &start, end, &iomap_flags);
1892+
xfs_trim_extent(&imap, offset_fsb,
1893+
XFS_B_TO_FSB(mp, start) - offset_fsb);
19021894

19031895
goto found_imap;
19041896
}

0 commit comments

Comments (0)