@@ -31,20 +31,20 @@ struct kmem_cache *xfs_buf_cache;
3131 *
3232 * xfs_buf_stale:
3333 * b_sema (caller holds)
34- * b_lock
34+ * b_lockref.lock
3535 * lru_lock
3636 *
3737 * xfs_buf_rele:
38- * b_lock
38+ * b_lockref.lock
3939 * lru_lock
4040 *
4141 * xfs_buftarg_drain_rele
4242 * lru_lock
43- * b_lock (trylock due to inversion)
43+ * b_lockref.lock (trylock due to inversion)
4444 *
4545 * xfs_buftarg_isolate
4646 * lru_lock
47- * b_lock (trylock due to inversion)
47+ * b_lockref.lock (trylock due to inversion)
4848 */
4949
5050static void xfs_buf_submit (struct xfs_buf * bp );
@@ -78,11 +78,11 @@ xfs_buf_stale(
7878 */
7979 bp -> b_flags &= ~_XBF_DELWRI_Q ;
8080
81- spin_lock (& bp -> b_lock );
81+ spin_lock (& bp -> b_lockref . lock );
8282 atomic_set (& bp -> b_lru_ref , 0 );
83- if (bp -> b_hold >= 0 )
83+ if (! __lockref_is_dead ( & bp -> b_lockref ) )
8484 list_lru_del_obj (& bp -> b_target -> bt_lru , & bp -> b_lru );
85- spin_unlock (& bp -> b_lock );
85+ spin_unlock (& bp -> b_lockref . lock );
8686}
8787
8888static void
@@ -274,10 +274,8 @@ xfs_buf_alloc(
274274 * inserting into the hash table are safe (and will have to wait for
275275 * the unlock to do anything non-trivial).
276276 */
277- bp -> b_hold = 1 ;
277+ lockref_init ( & bp -> b_lockref ) ;
278278 sema_init (& bp -> b_sema , 0 ); /* held, no waiters */
279-
280- spin_lock_init (& bp -> b_lock );
281279 atomic_set (& bp -> b_lru_ref , 1 );
282280 init_completion (& bp -> b_iowait );
283281 INIT_LIST_HEAD (& bp -> b_lru );
@@ -434,20 +432,6 @@ xfs_buf_find_lock(
434432 return 0 ;
435433}
436434
437- static bool
438- xfs_buf_try_hold (
439- struct xfs_buf * bp )
440- {
441- spin_lock (& bp -> b_lock );
442- if (bp -> b_hold == -1 ) {
443- spin_unlock (& bp -> b_lock );
444- return false;
445- }
446- bp -> b_hold ++ ;
447- spin_unlock (& bp -> b_lock );
448- return true;
449- }
450-
451435static inline int
452436xfs_buf_lookup (
453437 struct xfs_buf_cache * bch ,
@@ -460,7 +444,7 @@ xfs_buf_lookup(
460444
461445 rcu_read_lock ();
462446 bp = rhashtable_lookup (& bch -> bc_hash , map , xfs_buf_hash_params );
463- if (!bp || !xfs_buf_try_hold ( bp )) {
447+ if (!bp || !lockref_get_not_dead ( & bp -> b_lockref )) {
464448 rcu_read_unlock ();
465449 return - ENOENT ;
466450 }
@@ -511,7 +495,7 @@ xfs_buf_find_insert(
511495 error = PTR_ERR (bp );
512496 goto out_free_buf ;
513497 }
514- if (bp && xfs_buf_try_hold ( bp )) {
498+ if (bp && lockref_get_not_dead ( & bp -> b_lockref )) {
515499 /* found an existing buffer */
516500 rcu_read_unlock ();
517501 error = xfs_buf_find_lock (bp , flags );
@@ -853,16 +837,14 @@ xfs_buf_hold(
853837{
854838 trace_xfs_buf_hold (bp , _RET_IP_ );
855839
856- spin_lock (& bp -> b_lock );
857- bp -> b_hold ++ ;
858- spin_unlock (& bp -> b_lock );
840+ lockref_get (& bp -> b_lockref );
859841}
860842
861843static void
862844xfs_buf_destroy (
863845 struct xfs_buf * bp )
864846{
865- ASSERT (bp -> b_hold < 0 );
847+ ASSERT (__lockref_is_dead ( & bp -> b_lockref ) );
866848 ASSERT (!(bp -> b_flags & _XBF_DELWRI_Q ));
867849
868850 if (!xfs_buf_is_uncached (bp )) {
@@ -888,19 +870,20 @@ xfs_buf_rele(
888870{
889871 trace_xfs_buf_rele (bp , _RET_IP_ );
890872
891- spin_lock (& bp -> b_lock );
892- if (!-- bp -> b_hold ) {
873+ if (lockref_put_or_lock (& bp -> b_lockref ))
874+ return ;
875+ if (!-- bp -> b_lockref .count ) {
893876 if (xfs_buf_is_uncached (bp ) || !atomic_read (& bp -> b_lru_ref ))
894877 goto kill ;
895878 list_lru_add_obj (& bp -> b_target -> bt_lru , & bp -> b_lru );
896879 }
897- spin_unlock (& bp -> b_lock );
880+ spin_unlock (& bp -> b_lockref . lock );
898881 return ;
899882
900883kill :
901- bp -> b_hold = -1 ;
884+ lockref_mark_dead ( & bp -> b_lockref ) ;
902885 list_lru_del_obj (& bp -> b_target -> bt_lru , & bp -> b_lru );
903- spin_unlock (& bp -> b_lock );
886+ spin_unlock (& bp -> b_lockref . lock );
904887
905888 xfs_buf_destroy (bp );
906889}
@@ -1471,18 +1454,18 @@ xfs_buftarg_drain_rele(
14711454 struct xfs_buf * bp = container_of (item , struct xfs_buf , b_lru );
14721455 struct list_head * dispose = arg ;
14731456
1474- if (!spin_trylock (& bp -> b_lock ))
1457+ if (!spin_trylock (& bp -> b_lockref . lock ))
14751458 return LRU_SKIP ;
1476- if (bp -> b_hold > 0 ) {
1459+ if (bp -> b_lockref . count > 0 ) {
14771460 /* need to wait, so skip it this pass */
1478- spin_unlock (& bp -> b_lock );
1461+ spin_unlock (& bp -> b_lockref . lock );
14791462 trace_xfs_buf_drain_buftarg (bp , _RET_IP_ );
14801463 return LRU_SKIP ;
14811464 }
14821465
1483- bp -> b_hold = -1 ;
1466+ lockref_mark_dead ( & bp -> b_lockref ) ;
14841467 list_lru_isolate_move (lru , item , dispose );
1485- spin_unlock (& bp -> b_lock );
1468+ spin_unlock (& bp -> b_lockref . lock );
14861469 return LRU_REMOVED ;
14871470}
14881471
@@ -1564,34 +1547,35 @@ xfs_buftarg_isolate(
15641547 struct list_head * dispose = arg ;
15651548
15661549 /*
1567- * we are inverting the lru lock/ bp->b_lock here, so use a trylock.
1568- * If we fail to get the lock, just skip it .
1550+ * We are inverting the lru lock vs bp->b_lockref.lock order here, so
1551+ * use a trylock. If we fail to get the lock, just skip the buffer .
15691552 */
1570- if (!spin_trylock (& bp -> b_lock ))
1553+ if (!spin_trylock (& bp -> b_lockref . lock ))
15711554 return LRU_SKIP ;
1555+
15721556 /*
15731557 * Decrement the b_lru_ref count unless the value is already
15741558 * zero. If the value is already zero, we need to reclaim the
15751559 * buffer, otherwise it gets another trip through the LRU.
15761560 */
15771561 if (atomic_add_unless (& bp -> b_lru_ref , -1 , 0 )) {
1578- spin_unlock (& bp -> b_lock );
1562+ spin_unlock (& bp -> b_lockref . lock );
15791563 return LRU_ROTATE ;
15801564 }
15811565
15821566 /*
15831567 * If the buffer is in use, remove it from the LRU for now as we can't
15841568 * free it. It will be freed when the last reference drops.
15851569 */
1586- if (bp -> b_hold > 0 ) {
1570+ if (bp -> b_lockref . count > 0 ) {
15871571 list_lru_isolate (lru , & bp -> b_lru );
1588- spin_unlock (& bp -> b_lock );
1572+ spin_unlock (& bp -> b_lockref . lock );
15891573 return LRU_REMOVED ;
15901574 }
15911575
1592- bp -> b_hold = -1 ;
1576+ lockref_mark_dead ( & bp -> b_lockref ) ;
15931577 list_lru_isolate_move (lru , item , dispose );
1594- spin_unlock (& bp -> b_lock );
1578+ spin_unlock (& bp -> b_lockref . lock );
15951579 return LRU_REMOVED ;
15961580}
15971581