@@ -304,14 +304,20 @@ static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
304 304 	schedule_delayed_work(&bp->fw_reset_task, delay);
305 305 }
306 306 
307     - static void bnxt_queue_sp_work(struct bnxt *bp)
    307 + static void __bnxt_queue_sp_work(struct bnxt *bp)
308 308 {
309 309 	if (BNXT_PF(bp))
310 310 		queue_work(bnxt_pf_wq, &bp->sp_task);
311 311 	else
312 312 		schedule_work(&bp->sp_task);
313 313 }
314 314 
    315 + static void bnxt_queue_sp_work(struct bnxt *bp, unsigned int event)
    316 + {
    317 + 	set_bit(event, &bp->sp_event);
    318 + 	__bnxt_queue_sp_work(bp);
    319 + }
    320 + 
315 321 static void bnxt_sched_reset_rxr(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
316 322 {
317 323 	if (!rxr->bnapi->in_reset) {
@@ -320,7 +326,7 @@ static void bnxt_sched_reset_rxr(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
320 326 			set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
321 327 		else
322 328 			set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
323     - 		bnxt_queue_sp_work(bp);
    329 + 		__bnxt_queue_sp_work(bp);
324 330 	}
325 331 	rxr->rx_next_cons = 0xffff;
326 332 }
@@ -2384,7 +2390,7 @@ static int bnxt_async_event_process(struct bnxt *bp,
2384 2390 	default:
2385 2391 		goto async_event_process_exit;
2386 2392 	}
2387      - 	bnxt_queue_sp_work(bp);
     2393 + 	__bnxt_queue_sp_work(bp);
2388 2394 async_event_process_exit:
2389 2395 	return 0;
2390 2396 }
@@ -2413,8 +2419,7 @@ static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
2413 2419 	}
2414 2420 
2415 2421 		set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
2416      - 		set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
2417      - 		bnxt_queue_sp_work(bp);
     2422 + 		bnxt_queue_sp_work(bp, BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT);
2418 2423 		break;
2419 2424 
2420 2425 	case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
@@ -11031,8 +11036,7 @@ static void bnxt_set_rx_mode(struct net_device *dev)
11031 11036 	if (mask != vnic->rx_mask || uc_update || mc_update) {
11032 11037 		vnic->rx_mask = mask;
11033 11038 
11034       - 		set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
11035       - 		bnxt_queue_sp_work(bp);
      11039 + 		bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT);
11036 11040 	}
11037 11041 }
11038 11042 
1103811042
@@ -11597,8 +11601,7 @@ static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue)
11597 11601 	struct bnxt *bp = netdev_priv(dev);
11598 11602 
11599 11603 	netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
11600       - 	set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
11601       - 	bnxt_queue_sp_work(bp);
      11604 + 	bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT);
11602 11605 }
11603 11606 
11604 11607 static void bnxt_fw_health_check(struct bnxt *bp)
@@ -11635,8 +11638,7 @@ static void bnxt_fw_health_check(struct bnxt *bp)
11635 11638 		return;
11636 11639 
11637 11640 fw_reset:
11638       - 	set_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event);
11639       - 	bnxt_queue_sp_work(bp);
      11641 + 	bnxt_queue_sp_work(bp, BNXT_FW_EXCEPTION_SP_EVENT);
11640 11642 }
11641 11643 
11642 11644 static void bnxt_timer(struct timer_list *t)
@@ -11653,43 +11655,33 @@ static void bnxt_timer(struct timer_list *t)
11653 11655 	if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
11654 11656 		bnxt_fw_health_check(bp);
11655 11657 
11656       - 	if (BNXT_LINK_IS_UP(bp) && bp->stats_coal_ticks) {
11657       - 		set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
11658       - 		bnxt_queue_sp_work(bp);
11659       - 	}
      11658 + 	if (BNXT_LINK_IS_UP(bp) && bp->stats_coal_ticks)
      11659 + 		bnxt_queue_sp_work(bp, BNXT_PERIODIC_STATS_SP_EVENT);
11660 11660 
11661       - 	if (bnxt_tc_flower_enabled(bp)) {
11662       - 		set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event);
11663       - 		bnxt_queue_sp_work(bp);
11664       - 	}
      11661 + 	if (bnxt_tc_flower_enabled(bp))
      11662 + 		bnxt_queue_sp_work(bp, BNXT_FLOW_STATS_SP_EVENT);
11665 11663 
11666 11664 #ifdef CONFIG_RFS_ACCEL
11667       - 	if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count) {
11668       - 		set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
11669       - 		bnxt_queue_sp_work(bp);
11670       - 	}
      11665 + 	if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count)
      11666 + 		bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);
11671 11667 #endif /*CONFIG_RFS_ACCEL*/
11672 11668 
11673 11669 	if (bp->link_info.phy_retry) {
11674 11670 		if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
11675 11671 			bp->link_info.phy_retry = false;
11676 11672 			netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
11677 11673 		} else {
11678       - 			set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event);
11679       - 			bnxt_queue_sp_work(bp);
      11674 + 			bnxt_queue_sp_work(bp, BNXT_UPDATE_PHY_SP_EVENT);
11680 11675 		}
11681 11676 	}
11682 11677 
11683       - 	if (test_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state)) {
11684       - 		set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
11685       - 		bnxt_queue_sp_work(bp);
11686       - 	}
      11678 + 	if (test_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
      11679 + 		bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT);
11687 11680 
11688 11681 	if ((bp->flags & BNXT_FLAG_CHIP_P5) && !bp->chip_rev &&
11689       - 	    netif_carrier_ok(dev)) {
11690       - 		set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
11691       - 		bnxt_queue_sp_work(bp);
11692       - 	}
      11682 + 	    netif_carrier_ok(dev))
      11683 + 		bnxt_queue_sp_work(bp, BNXT_RING_COAL_NOW_SP_EVENT);
      11684 + 
11693 11685 bnxt_restart_timer:
11694 11686 	mod_timer(&bp->timer, jiffies + bp->current_interval);
11695 11687 }
@@ -12968,8 +12960,7 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
12968 12960 	bp->ntp_fltr_count++;
12969 12961 	spin_unlock_bh(&bp->ntp_fltr_lock);
12970 12962 
12971       - 	set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
12972       - 	bnxt_queue_sp_work(bp);
      12963 + 	bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);
12973 12964 
12974 12965 	return new_fltr->sw_id;
12975 12966 
0 commit comments