
Commit cb36c5f

Replace L1 dirty walk with DNODE_FIND_DIRTY
This walk is inherently racy w.r.t. dbuf eviction and sync. Consider:

0. A large sparse file with 3 levels of indirection.
1. A new L1 block is added to a brand new L2 block.
2. The L1 block syncs out and is immediately evicted.
3. Before the L3->L2 BP is updated in the L3 block, dnode_free_range attempts to free the new L1.

In this case neither dnode_dirty_l1range nor dnode_next_offset can find the newly synced-out L1 block and its L0 blocks:

- dnode_dirty_l1range uses the in-memory index, but the L1 is evicted
- dnode_next_offset considers on-disk BPs, but the L3->L2 BP is missing

free_children will then later PANIC because the L1 was not dirtied during open context when freeing the range.

This case was found while testing llseek(SEEK_HOLE/SEEK_DATA) without txg sync, and is distinct from the _other_ free_children panic found and addressed by openzfs#16025.

The fix is to replace dnode_dirty_l1range with dnode_next_offset(DNODE_FIND_DIRTY), which knows how to find all dirty L1 blocks. This PR also switches to minlvl=1 to avoid redirtying L2 blocks that are dirty only in a prior txg; successive frees otherwise needlessly redirty already-empty L1s, which wastes time during txg sync turning them back into holes.

Signed-off-by: Robert Evans <[email protected]>
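For context, a minimal sketch of the interior-block loop in dnode_free_range() as it reads after this change. It assumes the upstream dnode_next_offset() argument order (dn, flags, offset, minlvl, blkfill, txg) and condenses the loop's existing error handling; the exact change is in the diff below.

	/*
	 * Sketch: dirty each level-1 indirect block covering the interior
	 * of the freed range. dnode_next_offset() works in L0-equivalent
	 * bytes, so block ids are shifted by (datablkshift + indblkshift -
	 * SPA_BLKPTRSHIFT). DNODE_FIND_DIRTY also matches L1s that are
	 * dirty only in memory; minlvl=1 requires a matching L0 under the
	 * L1, so L1s already emptied in a prior txg are skipped rather
	 * than redirtied.
	 */
	int shift = dn->dn_datablkshift + dn->dn_indblkshift -
	    SPA_BLKPTRSHIFT;
	for (uint64_t i = first + 1; i < last; i++) {
		uint64_t ibyte = i << shift;
		int err = dnode_next_offset(
		    dn, DNODE_FIND_HAVELOCK | DNODE_FIND_DIRTY,
		    &ibyte, 1, 1, 0);
		i = ibyte >> shift;
		if (i >= last)
			break;
		if (err != 0)	/* ESRCH: no more matching L1s */
			break;
		dnode_dirty_l1(dn, i, tx);
	}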
1 parent 8756866 commit cb36c5f

File tree

1 file changed (+8, -74)

module/zfs/dnode.c

Lines changed: 8 additions & 74 deletions
@@ -2079,76 +2079,6 @@ dnode_dirty_l1(dnode_t *dn, uint64_t l1blkid, dmu_tx_t *tx)
 	}
 }
 
-/*
- * Dirty all the in-core level-1 dbufs in the range specified by start_blkid
- * and end_blkid.
- */
-static void
-dnode_dirty_l1range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
-    dmu_tx_t *tx)
-{
-	dmu_buf_impl_t *db_search;
-	dmu_buf_impl_t *db;
-	avl_index_t where;
-
-	db_search = kmem_zalloc(sizeof (dmu_buf_impl_t), KM_SLEEP);
-
-	mutex_enter(&dn->dn_dbufs_mtx);
-
-	db_search->db_level = 1;
-	db_search->db_blkid = start_blkid + 1;
-	db_search->db_state = DB_SEARCH;
-	for (;;) {
-
-		db = avl_find(&dn->dn_dbufs, db_search, &where);
-		if (db == NULL)
-			db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);
-
-		if (db == NULL || db->db_level != 1 ||
-		    db->db_blkid >= end_blkid) {
-			break;
-		}
-
-		/*
-		 * Setup the next blkid we want to search for.
-		 */
-		db_search->db_blkid = db->db_blkid + 1;
-		ASSERT3U(db->db_blkid, >=, start_blkid);
-
-		/*
-		 * If the dbuf transitions to DB_EVICTING while we're trying
-		 * to dirty it, then we will be unable to discover it in
-		 * the dbuf hash table. This will result in a call to
-		 * dbuf_create() which needs to acquire the dn_dbufs_mtx
-		 * lock. To avoid a deadlock, we drop the lock before
-		 * dirtying the level-1 dbuf.
-		 */
-		mutex_exit(&dn->dn_dbufs_mtx);
-		dnode_dirty_l1(dn, db->db_blkid, tx);
-		mutex_enter(&dn->dn_dbufs_mtx);
-	}
-
-#ifdef ZFS_DEBUG
-	/*
-	 * Walk all the in-core level-1 dbufs and verify they have been dirtied.
-	 */
-	db_search->db_level = 1;
-	db_search->db_blkid = start_blkid + 1;
-	db_search->db_state = DB_SEARCH;
-	db = avl_find(&dn->dn_dbufs, db_search, &where);
-	if (db == NULL)
-		db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);
-	for (; db != NULL; db = AVL_NEXT(&dn->dn_dbufs, db)) {
-		if (db->db_level != 1 || db->db_blkid >= end_blkid)
-			break;
-		if (db->db_state != DB_EVICTING)
-			ASSERT(db->db_dirtycnt > 0);
-	}
-#endif
-	kmem_free(db_search, sizeof (dmu_buf_impl_t));
-	mutex_exit(&dn->dn_dbufs_mtx);
-}
-
 void
 dnode_set_dirtyctx(dnode_t *dn, dmu_tx_t *tx, const void *tag)
 {
@@ -2332,8 +2262,6 @@ dnode_free_range(dnode_t *dn, uint64_t off, uint64_t len, dmu_tx_t *tx)
 	if (last != first)
 		dnode_dirty_l1(dn, last, tx);
 
-	dnode_dirty_l1range(dn, first, last, tx);
-
 	int shift = dn->dn_datablkshift + dn->dn_indblkshift -
 	    SPA_BLKPTRSHIFT;
 	for (uint64_t i = first + 1; i < last; i++) {
@@ -2342,10 +2270,16 @@ dnode_free_range(dnode_t *dn, uint64_t off, uint64_t len, dmu_tx_t *tx)
 		 * level-1 indirect block at or after i. Note
 		 * that dnode_next_offset() operates in terms of
 		 * level-0-equivalent bytes.
+		 * N.B. this uses minlvl=1 to avoid redirtying L1s
+		 * freed in prior txgs as minlvl=1 checks L0s and skips
+		 * dirty L1s containing no L0 BPs or only freed L0s.
+		 * minlvl=2 would also work, but that would then match
+		 * every dirty L1 pointer unconditionally.
 		 */
 		uint64_t ibyte = i << shift;
-		int err = dnode_next_offset(dn, DNODE_FIND_HAVELOCK,
-		    &ibyte, 2, 1, 0);
+		int err = dnode_next_offset(
+		    dn, DNODE_FIND_HAVELOCK | DNODE_FIND_DIRTY,
+		    &ibyte, 1, 1, 0);
 		i = ibyte >> shift;
 		if (i >= last)
 			break;
