@@ -759,6 +759,7 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
 	if (err)
 		goto err_destroy_rq;
 
+	set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
 	err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
 	if (err)
 		goto err_disable_rq;
@@ -773,6 +774,7 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
 	return 0;
 
 err_disable_rq:
+	clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
 	mlx5e_disable_rq(rq);
 err_destroy_rq:
 	mlx5e_destroy_rq(rq);
@@ -782,7 +784,7 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
 
 static void mlx5e_close_rq(struct mlx5e_rq *rq)
 {
-	set_bit(MLX5E_RQ_STATE_FLUSH, &rq->state);
+	clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
 	napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
 	cancel_work_sync(&rq->am.work);
 
@@ -1006,7 +1008,6 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
 	MLX5_SET(sqc, sqc, min_wqe_inline_mode, sq->min_inline_mode);
 	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
 	MLX5_SET(sqc, sqc, tis_lst_sz, param->type == MLX5E_SQ_ICO ? 0 : 1);
-	MLX5_SET(sqc, sqc, flush_in_error_en, 1);
 
 	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
 	MLX5_SET(wq, wq, uar_page, sq->uar.index);
@@ -1083,6 +1084,7 @@ static int mlx5e_open_sq(struct mlx5e_channel *c,
 	if (err)
 		goto err_destroy_sq;
 
+	set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
 	err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY,
 			      false, 0);
 	if (err)
@@ -1096,6 +1098,7 @@ static int mlx5e_open_sq(struct mlx5e_channel *c,
 	return 0;
 
 err_disable_sq:
+	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
 	mlx5e_disable_sq(sq);
 err_destroy_sq:
 	mlx5e_destroy_sq(sq);
@@ -1112,7 +1115,7 @@ static inline void netif_tx_disable_queue(struct netdev_queue *txq)
 
 static void mlx5e_close_sq(struct mlx5e_sq *sq)
 {
-	set_bit(MLX5E_SQ_STATE_FLUSH, &sq->state);
+	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
 	/* prevent netif_tx_wake_queue */
 	napi_synchronize(&sq->channel->napi);
 
@@ -3092,7 +3095,7 @@ static void mlx5e_tx_timeout(struct net_device *dev)
 		if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i)))
 			continue;
 		sched_work = true;
-		set_bit(MLX5E_SQ_STATE_FLUSH, &sq->state);
+		clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
 		netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x\n",
 			   i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc);
 	}
@@ -3147,13 +3150,13 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
 	for (i = 0; i < priv->params.num_channels; i++) {
 		struct mlx5e_channel *c = priv->channel[i];
 
-		set_bit(MLX5E_RQ_STATE_FLUSH, &c->rq.state);
+		clear_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
 		napi_synchronize(&c->napi);
 		/* prevent mlx5e_poll_rx_cq from accessing rq->xdp_prog */
 
 		old_prog = xchg(&c->rq.xdp_prog, prog);
 
-		clear_bit(MLX5E_RQ_STATE_FLUSH, &c->rq.state);
+		set_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
 		/* napi_schedule in case we have missed anything */
 		set_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags);
 		napi_schedule(&c->napi);
0 commit comments