Skip to content

Commit 3b312a6

Browse files
zhangyi089 authored and tytso committed
ext4: factor out journalled block zeroing range
Refactor __ext4_block_zero_page_range() by separating the block zeroing operations for ordered data mode and journal data mode into two distinct functions: - ext4_block_do_zero_range(): handles non-journal data mode with ordered data support - ext4_block_journalled_zero_range(): handles journal data mode Also extract a common helper, ext4_load_tail_bh(), to handle buffer head and folio retrieval, along with the associated error handling. This prepares for converting the partial block zero range to the iomap infrastructure. Signed-off-by: Zhang Yi <yi.zhang@huawei.com> Reviewed-by: Jan Kara <jack@suse.cz> Link: https://patch.msgid.link/20260327102939.1095257-4-yi.zhang@huaweicloud.com Signed-off-by: Theodore Ts'o <tytso@mit.edu>
1 parent bd099a0 commit 3b312a6

1 file changed

Lines changed: 69 additions & 29 deletions

File tree

fs/ext4/inode.c

Lines changed: 69 additions & 29 deletions
Original file line number | Diff line number | Diff line change
@@ -4032,13 +4032,11 @@ void ext4_set_aops(struct inode *inode)
40324032
* ext4_punch_hole, etc) which needs to be properly zeroed out. Otherwise a
40334033
* racing writeback can come later and flush the stale pagecache to disk.
40344034
*/
4035-
static int __ext4_block_zero_page_range(handle_t *handle,
4036-
struct address_space *mapping, loff_t from, loff_t length,
4037-
bool *did_zero)
4035+
static struct buffer_head *ext4_load_tail_bh(struct inode *inode, loff_t from)
40384036
{
40394037
unsigned int offset, blocksize, pos;
40404038
ext4_lblk_t iblock;
4041-
struct inode *inode = mapping->host;
4039+
struct address_space *mapping = inode->i_mapping;
40424040
struct buffer_head *bh;
40434041
struct folio *folio;
40444042
int err = 0;
@@ -4047,7 +4045,7 @@ static int __ext4_block_zero_page_range(handle_t *handle,
40474045
FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
40484046
mapping_gfp_constraint(mapping, ~__GFP_FS));
40494047
if (IS_ERR(folio))
4050-
return PTR_ERR(folio);
4048+
return ERR_CAST(folio);
40514049

40524050
blocksize = inode->i_sb->s_blocksize;
40534051

@@ -4099,33 +4097,73 @@ static int __ext4_block_zero_page_range(handle_t *handle,
40994097
}
41004098
}
41014099
}
4102-
if (ext4_should_journal_data(inode)) {
4103-
BUFFER_TRACE(bh, "get write access");
4104-
err = ext4_journal_get_write_access(handle, inode->i_sb, bh,
4105-
EXT4_JTR_NONE);
4106-
if (err)
4107-
goto unlock;
4108-
}
4109-
folio_zero_range(folio, offset, length);
4100+
return bh;
4101+
4102+
unlock:
4103+
folio_unlock(folio);
4104+
folio_put(folio);
4105+
return err ? ERR_PTR(err) : NULL;
4106+
}
4107+
4108+
static int ext4_block_do_zero_range(handle_t *handle, struct inode *inode,
4109+
loff_t from, loff_t length, bool *did_zero)
4110+
{
4111+
struct buffer_head *bh;
4112+
struct folio *folio;
4113+
int err = 0;
4114+
4115+
bh = ext4_load_tail_bh(inode, from);
4116+
if (IS_ERR_OR_NULL(bh))
4117+
return PTR_ERR_OR_ZERO(bh);
4118+
4119+
folio = bh->b_folio;
4120+
folio_zero_range(folio, offset_in_folio(folio, from), length);
41104121
BUFFER_TRACE(bh, "zeroed end of block");
41114122

4112-
if (ext4_should_journal_data(inode)) {
4113-
err = ext4_dirty_journalled_data(handle, bh);
4114-
} else {
4115-
mark_buffer_dirty(bh);
4116-
/*
4117-
* Only the written block requires ordered data to prevent
4118-
* exposing stale data.
4119-
*/
4120-
if (!buffer_unwritten(bh) && !buffer_delay(bh) &&
4121-
ext4_should_order_data(inode))
4122-
err = ext4_jbd2_inode_add_write(handle, inode, from,
4123-
length);
4124-
}
4123+
mark_buffer_dirty(bh);
4124+
/*
4125+
* Only the written block requires ordered data to prevent exposing
4126+
* stale data.
4127+
*/
4128+
if (ext4_should_order_data(inode) &&
4129+
!buffer_unwritten(bh) && !buffer_delay(bh))
4130+
err = ext4_jbd2_inode_add_write(handle, inode, from, length);
41254131
if (!err && did_zero)
41264132
*did_zero = true;
41274133

4128-
unlock:
4134+
folio_unlock(folio);
4135+
folio_put(folio);
4136+
return err;
4137+
}
4138+
4139+
static int ext4_block_journalled_zero_range(handle_t *handle,
4140+
struct inode *inode, loff_t from, loff_t length, bool *did_zero)
4141+
{
4142+
struct buffer_head *bh;
4143+
struct folio *folio;
4144+
int err;
4145+
4146+
bh = ext4_load_tail_bh(inode, from);
4147+
if (IS_ERR_OR_NULL(bh))
4148+
return PTR_ERR_OR_ZERO(bh);
4149+
folio = bh->b_folio;
4150+
4151+
BUFFER_TRACE(bh, "get write access");
4152+
err = ext4_journal_get_write_access(handle, inode->i_sb, bh,
4153+
EXT4_JTR_NONE);
4154+
if (err)
4155+
goto out;
4156+
4157+
folio_zero_range(folio, offset_in_folio(folio, from), length);
4158+
BUFFER_TRACE(bh, "zeroed end of block");
4159+
4160+
err = ext4_dirty_journalled_data(handle, bh);
4161+
if (err)
4162+
goto out;
4163+
4164+
if (did_zero)
4165+
*did_zero = true;
4166+
out:
41294167
folio_unlock(folio);
41304168
folio_put(folio);
41314169
return err;
@@ -4156,9 +4194,11 @@ static int ext4_block_zero_page_range(handle_t *handle,
41564194
if (IS_DAX(inode)) {
41574195
return dax_zero_range(inode, from, length, did_zero,
41584196
&ext4_iomap_ops);
4197+
} else if (ext4_should_journal_data(inode)) {
4198+
return ext4_block_journalled_zero_range(handle, inode, from,
4199+
length, did_zero);
41594200
}
4160-
return __ext4_block_zero_page_range(handle, mapping, from, length,
4161-
did_zero);
4201+
return ext4_block_do_zero_range(handle, inode, from, length, did_zero);
41624202
}
41634203

41644204
/*

0 commit comments

Comments
 (0)