Diffstat (limited to 'drivers/net/wireless/intel/iwlwifi/mvm')
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/d3.c          |  64
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c     |  13
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/fw.c          |  21
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c    |  12
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c    |  42
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mvm.h         |  54
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/nvm.c         |  25
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/ops.c         |  39
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/rs.c          |  24
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/rx.c          |  29
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c        | 373
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/scan.c        | 118
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/sta.c         | 837
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/sta.h         |   8
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/time-event.c  |  11
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/tx.c          |  49
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/utils.c       | 436
17 files changed, 1089 insertions, 1066 deletions
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 210be26aadaa..843f3b41b72e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -722,8 +722,10 @@ int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
 {
     struct iwl_wowlan_kek_kck_material_cmd kek_kck_cmd = {};
     struct iwl_wowlan_tkip_params_cmd tkip_cmd = {};
+    bool unified = fw_has_capa(&mvm->fw->ucode_capa,
+                               IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
     struct wowlan_key_data key_data = {
-        .configure_keys = !d0i3,
+        .configure_keys = !d0i3 && !unified,
         .use_rsc_tsc = false,
         .tkip = &tkip_cmd,
         .use_tkip = false,
@@ -1636,32 +1638,10 @@ out_free_resp:
 }
 
 static struct iwl_wowlan_status *
-iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm)
 {
-    u32 base = mvm->error_event_table[0];
-    struct error_table_start {
-        /* cf. struct iwl_error_event_table */
-        u32 valid;
-        u32 error_id;
-    } err_info;
     int ret;
 
-    iwl_trans_read_mem_bytes(mvm->trans, base,
-                             &err_info, sizeof(err_info));
-
-    if (err_info.valid) {
-        IWL_INFO(mvm, "error table is valid (%d) with error (%d)\n",
-                 err_info.valid, err_info.error_id);
-        if (err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
-            struct cfg80211_wowlan_wakeup wakeup = {
-                .rfkill_release = true,
-            };
-            ieee80211_report_wowlan_wakeup(vif, &wakeup,
-                                           GFP_KERNEL);
-        }
-        return ERR_PTR(-EIO);
-    }
-
     /* only for tracing for now */
     ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, 0, 0, NULL);
     if (ret)
@@ -1680,7 +1660,7 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
     bool keep;
     struct iwl_mvm_sta *mvm_ap_sta;
 
-    fw_status = iwl_mvm_get_wakeup_status(mvm, vif);
+    fw_status = iwl_mvm_get_wakeup_status(mvm);
     if (IS_ERR_OR_NULL(fw_status))
         goto out_unlock;
 
@@ -1805,7 +1785,7 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
     u32 reasons = 0;
     int i, j, n_matches, ret;
 
-    fw_status = iwl_mvm_get_wakeup_status(mvm, vif);
+    fw_status = iwl_mvm_get_wakeup_status(mvm);
     if (!IS_ERR_OR_NULL(fw_status)) {
         reasons = le32_to_cpu(fw_status->wakeup_reasons);
         kfree(fw_status);
@@ -1918,6 +1898,29 @@ static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac,
         ieee80211_resume_disconnect(vif);
 }
 
+static int iwl_mvm_check_rt_status(struct iwl_mvm *mvm,
+                                   struct ieee80211_vif *vif)
+{
+    u32 base = mvm->error_event_table[0];
+    struct error_table_start {
+        /* cf. struct iwl_error_event_table */
+        u32 valid;
+        u32 error_id;
+    } err_info;
+
+    iwl_trans_read_mem_bytes(mvm->trans, base,
+                             &err_info, sizeof(err_info));
+
+    if (err_info.valid &&
+        err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
+        struct cfg80211_wowlan_wakeup wakeup = {
+            .rfkill_release = true,
+        };
+        ieee80211_report_wowlan_wakeup(vif, &wakeup, GFP_KERNEL);
+    }
+    return err_info.valid;
+}
+
 static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
 {
     struct ieee80211_vif *vif = NULL;
@@ -1949,6 +1952,15 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
     /* query SRAM first in case we want event logging */
     iwl_mvm_read_d3_sram(mvm);
 
+    if (iwl_mvm_check_rt_status(mvm, vif)) {
+        set_bit(STATUS_FW_ERROR, &mvm->trans->status);
+        iwl_mvm_dump_nic_error_log(mvm);
+        iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
+                                NULL, 0);
+        ret = 1;
+        goto err;
+    }
+
     if (d0i3_first) {
         ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, 0, 0, NULL);
         if (ret < 0) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index de40752aa67e..3b6b3d8fb961 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -666,16 +666,11 @@ iwl_dbgfs_bt_force_ant_write(struct iwl_mvm *mvm, char *buf,
     };
     int ret, bt_force_ant_mode;
 
-    for (bt_force_ant_mode = 0;
-         bt_force_ant_mode < ARRAY_SIZE(modes_str);
-         bt_force_ant_mode++) {
-        if (!strcmp(buf, modes_str[bt_force_ant_mode]))
-            break;
-    }
-
-    if (bt_force_ant_mode >= ARRAY_SIZE(modes_str))
-        return -EINVAL;
+    ret = match_string(modes_str, ARRAY_SIZE(modes_str), buf);
+    if (ret < 0)
+        return ret;
+    bt_force_ant_mode = ret;
 
     ret = 0;
     mutex_lock(&mvm->mutex);
     if (mvm->bt_force_ant_mode == bt_force_ant_mode)
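For context: match_string(), from lib/string.c, returns the index of the matching array entry or -EINVAL when nothing matches, which is exactly what the rewritten iwl_dbgfs_bt_force_ant_write() relies on. A minimal, self-contained sketch of the pattern (the mode table below is illustrative, not the driver's actual list):

    #include <linux/kernel.h>
    #include <linux/string.h>

    static const char * const example_modes[] = { "dis", "bt", "wifi" };

    static int example_parse_mode(const char *buf)
    {
        /* scans the array with strcmp(); -EINVAL when nothing matches */
        int idx = match_string(example_modes, ARRAY_SIZE(example_modes), buf);

        return idx; /* >= 0: index of the matching entry */
    }

Besides being shorter, this avoids the post-loop "counter reached ARRAY_SIZE()" check that the open-coded version needed to detect a failed lookup.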
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 96d26b749952..dade206d5511 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -299,6 +299,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
     enum iwl_ucode_type old_type = mvm->fwrt.cur_fw_img;
     static const u16 alive_cmd[] = { MVM_ALIVE };
 
+    set_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &mvm->fwrt.status);
     if (ucode_type == IWL_UCODE_REGULAR &&
         iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) &&
         !(fw_has_capa(&mvm->fw->ucode_capa,
@@ -363,12 +364,20 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
      */
     memset(&mvm->queue_info, 0, sizeof(mvm->queue_info));
-    mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].hw_queue_refcount = 1;
+    /*
+     * Set a 'fake' TID for the command queue, since we use the
+     * hweight() of the tid_bitmap as a refcount now. Not that
+     * we ever even consider the command queue as one we might
+     * want to reuse, but be safe nevertheless.
+     */
+    mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].tid_bitmap =
+        BIT(IWL_MAX_TID_COUNT + 2);
 
     for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
         atomic_set(&mvm->mac80211_queue_stop_count[i], 0);
 
     set_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
+    clear_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &mvm->fwrt.status);
 
     return 0;
 }
@@ -699,8 +708,12 @@ static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
     enabled = !!(wifi_pkg->package.elements[1].integer.value);
     n_profiles = wifi_pkg->package.elements[2].integer.value;
 
-    /* in case of BIOS bug */
-    if (n_profiles <= 0) {
+    /*
+     * Check the validity of n_profiles. The EWRD profiles start
+     * from index 1, so the maximum value allowed here is
+     * ACPI_SAR_PROFILES_NUM - 1.
+     */
+    if (n_profiles <= 0 || n_profiles >= ACPI_SAR_PROFILE_NUM) {
         ret = -EINVAL;
         goto out_free;
     }
@@ -1022,7 +1035,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
     mvm->fwrt.dump.conf = FW_DBG_INVALID;
     /* if we have a destination, assume EARLY START */
-    if (mvm->fw->dbg_dest_tlv)
+    if (mvm->fw->dbg.dest_tlv)
         mvm->fwrt.dump.conf = FW_DBG_START_FROM_ALIVE;
     iwl_fw_start_dbg_conf(&mvm->fwrt, FW_DBG_START_FROM_ALIVE);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index 781f30356720..6486cfb33f40 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -1487,12 +1487,11 @@ static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac,
         IWL_MVM_MISSED_BEACONS_THRESHOLD)
         ieee80211_beacon_loss(vif);
 
-    if (!iwl_fw_dbg_trigger_enabled(mvm->fw,
-                                    FW_DBG_TRIGGER_MISSED_BEACONS))
+    trigger = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
+                                    FW_DBG_TRIGGER_MISSED_BEACONS);
+    if (!trigger)
         return;
 
-    trigger = iwl_fw_dbg_get_trigger(mvm->fw,
-                                     FW_DBG_TRIGGER_MISSED_BEACONS);
     bcon_trig = (void *)trigger->data;
     stop_trig_missed_bcon = le32_to_cpu(bcon_trig->stop_consec_missed_bcon);
     stop_trig_missed_bcon_since_rx =
@@ -1500,11 +1499,6 @@ static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac,
 
     /* TODO: implement start trigger */
 
-    if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
-                                       ieee80211_vif_to_wdev(vif),
-                                       trigger))
-        return;
-
     if (rx_missed_bcon_since_rx >= stop_trig_missed_bcon_since_rx ||
         rx_missed_bcon >= stop_trig_missed_bcon)
         iwl_fw_dbg_collect_trig(&mvm->fwrt, trigger, NULL);
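The 'fake' TID above is needed because this series drops the per-queue hw_queue_refcount and instead derives a queue's state from its tid_bitmap: the hweight() of the bitmap is the refcount. A toy sketch of the invariant (field names follow the new struct iwl_mvm_dqa_txq_info in mvm.h below; the helper functions are hypothetical):

    #include <linux/bitops.h>
    #include <linux/types.h>

    /* a queue is busy iff at least one TID is mapped to it */
    static bool example_queue_in_use(u16 tid_bitmap)
    {
        return tid_bitmap != 0;
    }

    /* and shared iff more than one TID is mapped; cf. the
     * hweight16(mvm->queue_info[queue].tid_bitmap) > 1 test in sta.c below
     */
    static bool example_queue_shared(u16 tid_bitmap)
    {
        return hweight16(tid_bitmap) > 1;
    }

Giving the command queue the out-of-range bit IWL_MAX_TID_COUNT + 2 keeps its bitmap non-zero, so it can never look "free" to the queue allocator.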
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index c78d017749d3..505b0385d800 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -857,16 +857,13 @@ iwl_mvm_ampdu_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
     struct iwl_fw_dbg_trigger_tlv *trig;
     struct iwl_fw_dbg_trigger_ba *ba_trig;
 
-    if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
+    trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
+                                 FW_DBG_TRIGGER_BA);
+    if (!trig)
         return;
 
-    trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
     ba_trig = (void *)trig->data;
 
-    if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
-                                       ieee80211_vif_to_wdev(vif), trig))
-        return;
-
     switch (action) {
     case IEEE80211_AMPDU_TX_OPERATIONAL: {
         struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
@@ -1231,12 +1228,15 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
     iwl_mvm_del_aux_sta(mvm);
 
     /*
-     * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
-     * won't be called in this case).
+     * Clear IN_HW_RESTART and HW_RESTART_REQUESTED flag when stopping the
+     * hw (as restart_complete() won't be called in this case) and mac80211
+     * won't execute the restart.
      * But make sure to cleanup interfaces that have gone down before/during
      * HW restart was requested.
      */
-    if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+    if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
+        test_and_clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
+                           &mvm->status))
         ieee80211_iterate_interfaces(mvm->hw, 0,
                                      iwl_mvm_cleanup_iterator, mvm);
 
@@ -2802,14 +2802,12 @@ iwl_mvm_tdls_check_trigger(struct iwl_mvm *mvm,
     struct iwl_fw_dbg_trigger_tlv *trig;
     struct iwl_fw_dbg_trigger_tdls *tdls_trig;
 
-    if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TDLS))
+    trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
+                                 FW_DBG_TRIGGER_TDLS);
+    if (!trig)
         return;
 
-    trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TDLS);
     tdls_trig = (void *)trig->data;
-    if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
-                                       ieee80211_vif_to_wdev(vif), trig))
-        return;
 
     if (!(tdls_trig->action_bitmap & BIT(action)))
         return;
@@ -4491,14 +4489,12 @@ static void iwl_mvm_event_mlme_callback(struct iwl_mvm *mvm,
     struct iwl_fw_dbg_trigger_tlv *trig;
     struct iwl_fw_dbg_trigger_mlme *trig_mlme;
 
-    if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME))
+    trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
+                                 FW_DBG_TRIGGER_MLME);
+    if (!trig)
         return;
 
-    trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME);
     trig_mlme = (void *)trig->data;
-    if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
-                                       ieee80211_vif_to_wdev(vif), trig))
-        return;
 
     if (event->u.mlme.data == ASSOC_EVENT) {
         if (event->u.mlme.status == MLME_DENIED)
@@ -4533,14 +4529,12 @@ static void iwl_mvm_event_bar_rx_callback(struct iwl_mvm *mvm,
     struct iwl_fw_dbg_trigger_tlv *trig;
     struct iwl_fw_dbg_trigger_ba *ba_trig;
 
-    if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
+    trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
+                                 FW_DBG_TRIGGER_BA);
+    if (!trig)
         return;
 
-    trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
     ba_trig = (void *)trig->data;
-    if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
-                                       ieee80211_vif_to_wdev(vif), trig))
-        return;
 
     if (!(le16_to_cpu(ba_trig->rx_bar) & BIT(event->u.ba.tid)))
         return;
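All of the debug-trigger hunks in this series (mac-ctxt.c and mac80211.c above, and ops.c, rx.c and rxmq.c below) apply the same mechanical conversion: iwl_fw_dbg_trigger_on() folds the old enabled-check, trigger lookup and check_stop test into a single call that returns the trigger, or NULL if it is disabled or stopped for this wdev. Schematically (FW_DBG_TRIGGER_FOO is a placeholder, not a real trigger ID):

    /* before: three steps, with the check_stop easy to misplace */
    if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_FOO))
        return;
    trig = iwl_fw_dbg_trigger_get_trigger(mvm->fw, FW_DBG_TRIGGER_FOO);
    if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, wdev, trig))
        return;

    /* after: one step; NULL means "don't collect" */
    trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, wdev, FW_DBG_TRIGGER_FOO);
    if (!trig)
        return;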
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 8f71eeed50d9..7ba5bc2ed1c4 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -512,6 +512,7 @@ enum iwl_mvm_scan_type {
     IWL_SCAN_TYPE_WILD,
     IWL_SCAN_TYPE_MILD,
     IWL_SCAN_TYPE_FRAGMENTED,
+    IWL_SCAN_TYPE_FAST_BALANCE,
 };
 
 enum iwl_mvm_sched_scan_pass_all_states {
@@ -753,24 +754,12 @@ iwl_mvm_baid_data_from_reorder_buf(struct iwl_mvm_reorder_buffer *buf)
  *	This is a state in which a single queue serves more than one TID, all of
  *	which are not aggregated. Note that the queue is only associated to one
  *	RA.
- * @IWL_MVM_QUEUE_INACTIVE: queue is allocated but no traffic on it
- *	This is a state of a queue that has had traffic on it, but during the
- *	last %IWL_MVM_DQA_QUEUE_TIMEOUT time period there has been no traffic on
- *	it. In this state, when a new queue is needed to be allocated but no
- *	such free queue exists, an inactive queue might be freed and given to
- *	the new RA/TID.
- * @IWL_MVM_QUEUE_RECONFIGURING: queue is being reconfigured
- *	This is the state of a queue that has had traffic pass through it, but
- *	needs to be reconfigured for some reason, e.g. the queue needs to
- *	become unshared and aggregations re-enabled on.
  */
 enum iwl_mvm_queue_status {
     IWL_MVM_QUEUE_FREE,
     IWL_MVM_QUEUE_RESERVED,
     IWL_MVM_QUEUE_READY,
     IWL_MVM_QUEUE_SHARED,
-    IWL_MVM_QUEUE_INACTIVE,
-    IWL_MVM_QUEUE_RECONFIGURING,
 };
 
 #define IWL_MVM_DQA_QUEUE_TIMEOUT	(5 * HZ)
@@ -787,6 +776,17 @@ struct iwl_mvm_geo_profile {
     u8 values[ACPI_GEO_TABLE_SIZE];
 };
 
+struct iwl_mvm_dqa_txq_info {
+    u8 ra_sta_id; /* The RA this queue is mapped to, if exists */
+    bool reserved; /* Is this the TXQ reserved for a STA */
+    u8 mac80211_ac; /* The mac80211 AC this queue is mapped to */
+    u8 txq_tid; /* The TID "owner" of this queue*/
+    u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */
+    /* Timestamp for inactivation per TID of this queue */
+    unsigned long last_frame_time[IWL_MAX_TID_COUNT + 1];
+    enum iwl_mvm_queue_status status;
+};
+
 struct iwl_mvm {
     /* for logger access */
     struct device *dev;
@@ -843,17 +843,7 @@ struct iwl_mvm {
 
     u16 hw_queue_to_mac80211[IWL_MAX_TVQM_QUEUES];
 
-    struct {
-        u8 hw_queue_refcount;
-        u8 ra_sta_id; /* The RA this queue is mapped to, if exists */
-        bool reserved; /* Is this the TXQ reserved for a STA */
-        u8 mac80211_ac; /* The mac80211 AC this queue is mapped to */
-        u8 txq_tid; /* The TID "owner" of this queue*/
-        u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */
-        /* Timestamp for inactivation per TID of this queue */
-        unsigned long last_frame_time[IWL_MAX_TID_COUNT + 1];
-        enum iwl_mvm_queue_status status;
-    } queue_info[IWL_MAX_HW_QUEUES];
+    struct iwl_mvm_dqa_txq_info queue_info[IWL_MAX_HW_QUEUES];
     spinlock_t queue_info_lock; /* For syncing queue mgmt operations */
     struct work_struct add_stream_wk; /* To add streams to queues */
@@ -1883,17 +1873,6 @@ void iwl_mvm_vif_set_low_latency(struct iwl_mvm_vif *mvmvif, bool set,
     mvmvif->low_latency &= ~cause;
 }
 
-/* hw scheduler queue config */
-bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
-			u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
-			unsigned int wdg_timeout);
-int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
-			    u8 sta_id, u8 tid, unsigned int timeout);
-
-int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
-			u8 tid, u8 flags);
-int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq);
-
 /* Return a bitmask with all the hw supported queues, except for the
  * command queue, which can't be flushed.
  */
@@ -1905,6 +1884,11 @@ static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm)
 
 static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm)
 {
+    lockdep_assert_held(&mvm->mutex);
+    /* calling this function without using dump_start/end since at this
+     * point we already hold the op mode mutex
+     */
+    iwl_fw_dbg_collect_sync(&mvm->fwrt);
     iwl_fw_cancel_timestamp(&mvm->fwrt);
     iwl_free_fw_paging(&mvm->fwrt);
     clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
@@ -1990,8 +1974,6 @@ void iwl_mvm_reorder_timer_expired(struct timer_list *t);
 struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm);
 bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm);
 
-void iwl_mvm_inactivity_check(struct iwl_mvm *mvm);
-
 #define MVM_TCM_PERIOD_MSEC 500
 #define MVM_TCM_PERIOD (HZ * MVM_TCM_PERIOD_MSEC / 1000)
 #define MVM_LL_PERIOD (10 * HZ)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
index fff98fed35ed..3633f27d048a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
@@ -477,15 +477,11 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
     u32 status;
     int resp_len, n_channels;
     u16 mcc;
-    bool resp_v2 = fw_has_capa(&mvm->fw->ucode_capa,
-                               IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
 
     if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm)))
         return ERR_PTR(-EOPNOTSUPP);
 
     cmd.len[0] = sizeof(struct iwl_mcc_update_cmd);
-    if (!resp_v2)
-        cmd.len[0] = sizeof(struct iwl_mcc_update_cmd_v1);
 
     IWL_DEBUG_LAR(mvm, "send MCC update to FW with '%c%c' src = %d\n",
                   alpha2[0], alpha2[1], src_id);
@@ -497,7 +493,8 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
     pkt = cmd.resp_pkt;
 
     /* Extract MCC response */
-    if (resp_v2) {
+    if (fw_has_capa(&mvm->fw->ucode_capa,
+                    IWL_UCODE_TLV_CAPA_MCC_UPDATE_11AX_SUPPORT)) {
         struct iwl_mcc_update_resp *mcc_resp = (void *)pkt->data;
 
         n_channels = __le32_to_cpu(mcc_resp->n_channels);
@@ -509,9 +506,9 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
             goto exit;
         }
     } else {
-        struct iwl_mcc_update_resp_v1 *mcc_resp_v1 = (void *)pkt->data;
+        struct iwl_mcc_update_resp_v3 *mcc_resp_v3 = (void *)pkt->data;
 
-        n_channels = __le32_to_cpu(mcc_resp_v1->n_channels);
+        n_channels = __le32_to_cpu(mcc_resp_v3->n_channels);
         resp_len = sizeof(struct iwl_mcc_update_resp) +
                    n_channels * sizeof(__le32);
         resp_cp = kzalloc(resp_len, GFP_KERNEL);
@@ -520,12 +517,14 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
             goto exit;
         }
 
-        resp_cp->status = mcc_resp_v1->status;
-        resp_cp->mcc = mcc_resp_v1->mcc;
-        resp_cp->cap = mcc_resp_v1->cap;
-        resp_cp->source_id = mcc_resp_v1->source_id;
-        resp_cp->n_channels = mcc_resp_v1->n_channels;
-        memcpy(resp_cp->channels, mcc_resp_v1->channels,
+        resp_cp->status = mcc_resp_v3->status;
+        resp_cp->mcc = mcc_resp_v3->mcc;
+        resp_cp->cap = cpu_to_le16(mcc_resp_v3->cap);
+        resp_cp->source_id = mcc_resp_v3->source_id;
+        resp_cp->time = mcc_resp_v3->time;
+        resp_cp->geo_info = mcc_resp_v3->geo_info;
+        resp_cp->n_channels = mcc_resp_v3->n_channels;
+        memcpy(resp_cp->channels, mcc_resp_v3->channels,
                n_channels * sizeof(__le32));
     }
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 0599d323cbeb..0e2092526fae 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -565,10 +565,23 @@ static bool iwl_mvm_fwrt_fw_running(void *ctx)
     return iwl_mvm_firmware_running(ctx);
 }
 
+static int iwl_mvm_fwrt_send_hcmd(void *ctx, struct iwl_host_cmd *host_cmd)
+{
+    struct iwl_mvm *mvm = (struct iwl_mvm *)ctx;
+    int ret;
+
+    mutex_lock(&mvm->mutex);
+    ret = iwl_mvm_send_cmd(mvm, host_cmd);
+    mutex_unlock(&mvm->mutex);
+
+    return ret;
+}
+
 static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = {
     .dump_start = iwl_mvm_fwrt_dump_start,
     .dump_end = iwl_mvm_fwrt_dump_end,
     .fw_running = iwl_mvm_fwrt_fw_running,
+    .send_hcmd = iwl_mvm_fwrt_send_hcmd,
 };
 
 static struct iwl_op_mode *
@@ -604,9 +617,13 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
     if (cfg->max_rx_agg_size)
         hw->max_rx_aggregation_subframes = cfg->max_rx_agg_size;
+    else
+        hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
 
     if (cfg->max_tx_agg_size)
         hw->max_tx_aggregation_subframes = cfg->max_tx_agg_size;
+    else
+        hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
 
     op_mode = hw->priv;
@@ -748,12 +765,12 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
     iwl_trans_configure(mvm->trans, &trans_cfg);
 
     trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
-    trans->dbg_dest_tlv = mvm->fw->dbg_dest_tlv;
-    trans->dbg_dest_reg_num = mvm->fw->dbg_dest_reg_num;
-    memcpy(trans->dbg_conf_tlv, mvm->fw->dbg_conf_tlv,
+    trans->dbg_dest_tlv = mvm->fw->dbg.dest_tlv;
+    trans->dbg_n_dest_reg = mvm->fw->dbg.n_dest_reg;
+    memcpy(trans->dbg_conf_tlv, mvm->fw->dbg.conf_tlv,
            sizeof(trans->dbg_conf_tlv));
-    trans->dbg_trigger_tlv = mvm->fw->dbg_trigger_tlv;
-    trans->dbg_dump_mask = mvm->fw->dbg_dump_mask;
+    trans->dbg_trigger_tlv = mvm->fw->dbg.trigger_tlv;
+    trans->dbg_dump_mask = mvm->fw->dbg.dump_mask;
 
     trans->iml = mvm->fw->iml;
     trans->iml_len = mvm->fw->iml_len;
@@ -784,6 +801,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
     mutex_lock(&mvm->mutex);
     iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE);
     err = iwl_run_init_mvm_ucode(mvm, true);
+    if (test_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &mvm->fwrt.status))
+        iwl_fw_alive_error_dump(&mvm->fwrt);
     if (!iwlmvm_mod_params.init_dbg || !err)
         iwl_mvm_stop_device(mvm);
     iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE);
@@ -953,15 +972,13 @@ static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm,
     struct iwl_fw_dbg_trigger_cmd *cmds_trig;
     int i;
 
-    if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_FW_NOTIF))
+    trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL,
+                                 FW_DBG_TRIGGER_FW_NOTIF);
+    if (!trig)
         return;
 
-    trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_FW_NOTIF);
     cmds_trig = (void *)trig->data;
 
-    if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig))
-        return;
-
     for (i = 0; i < ARRAY_SIZE(cmds_trig->cmds); i++) {
         /* don't collect on CMD 0 */
         if (!cmds_trig->cmds[i].cmd_id)
@@ -1223,7 +1240,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
      */
     if (!mvm->fw_restart && fw_error) {
         iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
-                                NULL);
+                                NULL, 0);
     } else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
         struct iwl_mvm_reprobe *reprobe;
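The new .send_hcmd op gives common firmware-runtime code (under fw/) a way to issue host commands without knowing the op mode's locking rules; the mvm implementation above simply wraps iwl_mvm_send_cmd() in mvm->mutex. A hedged sketch of a runtime-side caller, assuming the ops/ops_ctx fields of struct iwl_fw_runtime as registered at init time (the command ID is a placeholder):

    struct iwl_host_cmd hcmd = {
        .id = SOME_CMD_ID,  /* placeholder */
    };
    int ret;

    /* ops_ctx is the opaque op-mode pointer; for mvm it is the
     * struct iwl_mvm itself, so the callback can take mvm->mutex.
     */
    ret = fwrt->ops->send_hcmd(fwrt->ops_ctx, &hcmd);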
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index 2c75f51a04e4..089972280daa 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -1239,7 +1239,11 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
         !(info->flags & IEEE80211_TX_STAT_AMPDU))
         return;
 
-    rs_rate_from_ucode_rate(tx_resp_hwrate, info->band, &tx_resp_rate);
+    if (rs_rate_from_ucode_rate(tx_resp_hwrate, info->band,
+                                &tx_resp_rate)) {
+        WARN_ON_ONCE(1);
+        return;
+    }
 
 #ifdef CONFIG_MAC80211_DEBUGFS
     /* Disable last tx check if we are debugging with fixed rate but
@@ -1290,7 +1294,10 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
      */
     table = &lq_sta->lq;
     lq_hwrate = le32_to_cpu(table->rs_table[0]);
-    rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate);
+    if (rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate)) {
+        WARN_ON_ONCE(1);
+        return;
+    }
 
     /* Here we actually compare this rate to the latest LQ command */
     if (lq_color != LQ_FLAG_COLOR_GET(table->flags)) {
@@ -1392,8 +1399,12 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
     /* Collect data for each rate used during failed TX attempts */
     for (i = 0; i <= retries; ++i) {
         lq_hwrate = le32_to_cpu(table->rs_table[i]);
-        rs_rate_from_ucode_rate(lq_hwrate, info->band,
-                                &lq_rate);
+        if (rs_rate_from_ucode_rate(lq_hwrate, info->band,
+                                    &lq_rate)) {
+            WARN_ON_ONCE(1);
+            return;
+        }
+
         /*
          * Only collect stats if retried rate is in the same RS
          * table as active/search.
@@ -3260,7 +3271,10 @@ static void rs_build_rates_table_from_fixed(struct iwl_mvm *mvm,
     for (i = 0; i < num_rates; i++)
         lq_cmd->rs_table[i] = ucode_rate_le32;
 
-    rs_rate_from_ucode_rate(ucode_rate, band, &rate);
+    if (rs_rate_from_ucode_rate(ucode_rate, band, &rate)) {
+        WARN_ON_ONCE(1);
+        return;
+    }
 
     if (is_mimo(&rate))
         lq_cmd->mimo_delim = num_rates - 1;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
index a050220da678..ef624833cf1b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -433,13 +433,14 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
         struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
         struct ieee80211_vif *tx_blocked_vif =
             rcu_dereference(mvm->csa_tx_blocked_vif);
+        struct iwl_fw_dbg_trigger_tlv *trig;
+        struct ieee80211_vif *vif = mvmsta->vif;
 
         /* We have tx blocked stations (with CS bit). If we heard
          * frames from a blocked station on a new channel we can
          * TX to it again.
          */
-        if (unlikely(tx_blocked_vif) &&
-            mvmsta->vif == tx_blocked_vif) {
+        if (unlikely(tx_blocked_vif) && vif == tx_blocked_vif) {
             struct iwl_mvm_vif *mvmvif =
                 iwl_mvm_vif_from_mac80211(tx_blocked_vif);
@@ -450,23 +451,18 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
 
         rs_update_last_rssi(mvm, mvmsta, rx_status);
 
-        if (iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_RSSI) &&
-            ieee80211_is_beacon(hdr->frame_control)) {
-            struct iwl_fw_dbg_trigger_tlv *trig;
+        trig = iwl_fw_dbg_trigger_on(&mvm->fwrt,
+                                     ieee80211_vif_to_wdev(vif),
+                                     FW_DBG_TRIGGER_RSSI);
+
+        if (trig && ieee80211_is_beacon(hdr->frame_control)) {
             struct iwl_fw_dbg_trigger_low_rssi *rssi_trig;
-            bool trig_check;
             s32 rssi;
 
-            trig = iwl_fw_dbg_get_trigger(mvm->fw,
-                                          FW_DBG_TRIGGER_RSSI);
             rssi_trig = (void *)trig->data;
             rssi = le32_to_cpu(rssi_trig->rssi);
 
-            trig_check =
-                iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
-                                              ieee80211_vif_to_wdev(mvmsta->vif),
-                                              trig);
-            if (trig_check && rx_status->signal < rssi)
+            if (rx_status->signal < rssi)
                 iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, NULL);
         }
@@ -693,15 +689,12 @@ iwl_mvm_rx_stats_check_trigger(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt)
     struct iwl_fw_dbg_trigger_stats *trig_stats;
     u32 trig_offset, trig_thold;
 
-    if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_STATS))
+    trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL, FW_DBG_TRIGGER_STATS);
+    if (!trig)
         return;
 
-    trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_STATS);
     trig_stats = (void *)trig->data;
 
-    if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig))
-        return;
-
     trig_offset = le32_to_cpu(trig_stats->stop_offset);
     trig_thold = le32_to_cpu(trig_stats->stop_threshold);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 894dd6379b9a..26ac9402568d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -923,6 +923,185 @@ static void iwl_mvm_decode_he_sigb(struct iwl_mvm *mvm,
     }
 }
 
+static void
+iwl_mvm_decode_he_phy_ru_alloc(u64 he_phy_data, u32 rate_n_flags,
+                               struct ieee80211_radiotap_he *he,
+                               struct ieee80211_radiotap_he_mu *he_mu,
+                               struct ieee80211_rx_status *rx_status)
+{
+    /*
+     * Unfortunately, we have to leave the mac80211 data
+     * incorrect for the case that we receive an HE-MU
+     * transmission and *don't* have the HE phy data (due
+     * to the bits being used for TSF). This shouldn't
+     * happen though as management frames where we need
+     * the TSF/timers are not transmitted in HE-MU.
+     */
+    u8 ru = FIELD_GET(IWL_RX_HE_PHY_RU_ALLOC_MASK, he_phy_data);
+    u8 offs = 0;
+
+    rx_status->bw = RATE_INFO_BW_HE_RU;
+
+    he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN);
+
+    switch (ru) {
+    case 0 ... 36:
+        rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26;
+        offs = ru;
+        break;
+    case 37 ... 52:
+        rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52;
+        offs = ru - 37;
+        break;
+    case 53 ... 60:
+        rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+        offs = ru - 53;
+        break;
+    case 61 ... 64:
+        rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242;
+        offs = ru - 61;
+        break;
+    case 65 ... 66:
+        rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484;
+        offs = ru - 65;
+        break;
+    case 67:
+        rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996;
+        break;
+    case 68:
+        rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
+        break;
+    }
+    he->data2 |= le16_encode_bits(offs,
+                                  IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET);
+    he->data2 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_KNOWN |
+                             IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET_KNOWN);
+    if (he_phy_data & IWL_RX_HE_PHY_RU_ALLOC_SEC80)
+        he->data2 |=
+            cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_SEC);
+
+    if (he_mu) {
+#define CHECK_BW(bw) \
+    BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_ ## bw ## MHZ != \
+                 RATE_MCS_CHAN_WIDTH_##bw >> RATE_MCS_CHAN_WIDTH_POS)
+        CHECK_BW(20);
+        CHECK_BW(40);
+        CHECK_BW(80);
+        CHECK_BW(160);
+        he_mu->flags2 |=
+            le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK,
+                                       rate_n_flags),
+                             IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW);
+    }
+}
+
+static void iwl_mvm_decode_he_phy_data(struct iwl_mvm *mvm,
+                                       struct iwl_rx_mpdu_desc *desc,
+                                       struct ieee80211_radiotap_he *he,
+                                       struct ieee80211_radiotap_he_mu *he_mu,
+                                       struct ieee80211_rx_status *rx_status,
+                                       u64 he_phy_data, u32 rate_n_flags,
+                                       int queue)
+{
+    u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
+    bool sigb_data;
+    u16 d1known = IEEE80211_RADIOTAP_HE_DATA1_LDPC_XSYMSEG_KNOWN |
+                  IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN |
+                  IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE_KNOWN |
+                  IEEE80211_RADIOTAP_HE_DATA1_DOPPLER_KNOWN |
+                  IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN;
+    u16 d2known = IEEE80211_RADIOTAP_HE_DATA2_PRE_FEC_PAD_KNOWN |
+                  IEEE80211_RADIOTAP_HE_DATA2_PE_DISAMBIG_KNOWN |
+                  IEEE80211_RADIOTAP_HE_DATA2_TXOP_KNOWN;
+
+    he->data1 |= cpu_to_le16(d1known);
+    he->data2 |= cpu_to_le16(d2known);
+    he->data3 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_BSS_COLOR_MASK,
+                                            he_phy_data),
+                                  IEEE80211_RADIOTAP_HE_DATA3_BSS_COLOR);
+    he->data3 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_UPLINK,
+                                            he_phy_data),
+                                  IEEE80211_RADIOTAP_HE_DATA3_UL_DL);
+    he->data3 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_LDPC_EXT_SYM,
+                                            he_phy_data),
+                                  IEEE80211_RADIOTAP_HE_DATA3_LDPC_XSYMSEG);
+    he->data4 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SPATIAL_REUSE_MASK,
+                                            he_phy_data),
+                                  IEEE80211_RADIOTAP_HE_DATA4_SU_MU_SPTL_REUSE);
+    he->data5 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_PRE_FEC_PAD_MASK,
+                                            he_phy_data),
+                                  IEEE80211_RADIOTAP_HE_DATA5_PRE_FEC_PAD);
+    he->data5 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_PE_DISAMBIG,
+                                            he_phy_data),
+                                  IEEE80211_RADIOTAP_HE_DATA5_PE_DISAMBIG);
+    he->data6 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_TXOP_DUR_MASK,
+                                            he_phy_data),
+                                  IEEE80211_RADIOTAP_HE_DATA6_TXOP);
+    he->data6 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_DOPPLER,
+                                            he_phy_data),
+                                  IEEE80211_RADIOTAP_HE_DATA6_DOPPLER);
+
+    switch (he_type) {
+    case RATE_MCS_HE_TYPE_MU:
+        he_mu->flags1 |=
+            le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_SIGB_DCM,
+                                       he_phy_data),
+                             IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM);
+        he_mu->flags1 |=
+            le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_SIGB_MCS_MASK,
+                                       he_phy_data),
+                             IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS);
+        he_mu->flags2 |=
+            le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_SIBG_SYM_OR_USER_NUM_MASK,
+                                       he_phy_data),
+                             IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS);
+        he_mu->flags2 |=
+            le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_SIGB_COMPRESSION,
+                                       he_phy_data),
+                             IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP);
+        he_mu->flags2 |=
+            le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_PREAMBLE_PUNC_TYPE_MASK,
+                                       he_phy_data),
+                             IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW);
+
+        sigb_data = FIELD_GET(IWL_RX_HE_PHY_INFO_TYPE_MASK,
+                              he_phy_data) ==
+                IWL_RX_HE_PHY_INFO_TYPE_MU_EXT_INFO;
+        if (sigb_data)
+            iwl_mvm_decode_he_sigb(mvm, desc, rate_n_flags, he_mu);
+        /* fall through */
+    case RATE_MCS_HE_TYPE_TRIG:
+        he->data2 |=
+            cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN);
+        he->data5 |=
+            le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_HE_LTF_NUM_MASK,
+                                       he_phy_data),
+                             IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS);
+        break;
+    case RATE_MCS_HE_TYPE_SU:
+    case RATE_MCS_HE_TYPE_EXT_SU:
+        he->data1 |=
+            cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BEAM_CHANGE_KNOWN);
+        he->data3 |=
+            le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_BEAM_CHNG,
+                                       he_phy_data),
+                             IEEE80211_RADIOTAP_HE_DATA3_BEAM_CHANGE);
+        break;
+    }
+
+    switch (FIELD_GET(IWL_RX_HE_PHY_INFO_TYPE_MASK, he_phy_data)) {
+    case IWL_RX_HE_PHY_INFO_TYPE_MU:
+    case IWL_RX_HE_PHY_INFO_TYPE_MU_EXT_INFO:
+    case IWL_RX_HE_PHY_INFO_TYPE_TB:
+        iwl_mvm_decode_he_phy_ru_alloc(he_phy_data, rate_n_flags,
+                                       he, he_mu, rx_status);
+        break;
+    default:
+        /* nothing */
+        break;
+    }
+}
+
 static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb,
                           struct iwl_rx_mpdu_desc *desc,
                           u32 rate_n_flags, u16 phy_info, int queue)
@@ -933,9 +1112,8 @@ static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb,
     u64 he_phy_data = HE_PHY_DATA_INVAL;
     struct ieee80211_radiotap_he *he = NULL;
     struct ieee80211_radiotap_he_mu *he_mu = NULL;
-    u32 he_type = 0xffffffff;
+    u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
     u8 stbc, ltf;
-
     static const struct ieee80211_radiotap_he known = {
         .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
                              IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN |
@@ -953,25 +1131,19 @@ static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb,
                              IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN),
     };
     unsigned int radiotap_len = 0;
-    bool overload = phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD;
-    bool sigb_data = false;
 
     he = skb_put_data(skb, &known, sizeof(known));
     radiotap_len += sizeof(known);
     rx_status->flag |= RX_FLAG_RADIOTAP_HE;
 
-    he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
-
     if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) {
-        if (mvm->trans->cfg->device_family >=
-            IWL_DEVICE_FAMILY_22560)
+        if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
             he_phy_data = le64_to_cpu(desc->v3.he_phy_data);
         else
             he_phy_data = le64_to_cpu(desc->v1.he_phy_data);
 
         if (he_type == RATE_MCS_HE_TYPE_MU) {
-            he_mu = skb_put_data(skb, &mu_known,
-                                 sizeof(mu_known));
+            he_mu = skb_put_data(skb, &mu_known, sizeof(mu_known));
             radiotap_len += sizeof(mu_known);
             rx_status->flag |= RX_FLAG_RADIOTAP_HE_MU;
         }
@@ -980,60 +1152,21 @@ static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb,
     /* temporarily hide the radiotap data */
     __skb_pull(skb, radiotap_len);
 
-    if (overload && he_type == RATE_MCS_HE_TYPE_SU) {
-        he->data1 |=
-            cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN);
-        if (FIELD_GET(IWL_RX_HE_PHY_UPLINK, he_phy_data))
-            he->data3 |=
-                cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA3_UL_DL);
-
+    if (he_phy_data != HE_PHY_DATA_INVAL &&
+        he_type == RATE_MCS_HE_TYPE_SU) {
+        /* report the AMPDU-EOF bit on single frames */
         if (!queue && !(phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
             rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
             rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
             if (FIELD_GET(IWL_RX_HE_PHY_DELIM_EOF, he_phy_data))
                 rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT;
         }
-    } else if (overload && he_mu && he_phy_data != HE_PHY_DATA_INVAL) {
-        he_mu->flags1 |=
-            le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_SIBG_SYM_OR_USER_NUM_MASK,
-                                       he_phy_data),
-                             IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS);
-        he_mu->flags1 |=
-            le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_SIGB_DCM,
-                                       he_phy_data),
-                             IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM);
-        he_mu->flags1 |=
-            le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_SIGB_MCS_MASK,
-                                       he_phy_data),
-                             IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS);
-        he_mu->flags2 |=
-            le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_SIGB_COMPRESSION,
-                                       he_phy_data),
-                             IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP);
-        he_mu->flags2 |=
-            le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_PREAMBLE_PUNC_TYPE_MASK,
-                                       he_phy_data),
-                             IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW);
-
-        sigb_data = FIELD_GET(IWL_RX_HE_PHY_INFO_TYPE_MASK,
-                              he_phy_data) ==
-                IWL_RX_HE_PHY_INFO_TYPE_MU_EXT_INFO;
-        if (sigb_data)
-            iwl_mvm_decode_he_sigb(mvm, desc, rate_n_flags, he_mu);
-    }
-
-    if (he_phy_data != HE_PHY_DATA_INVAL &&
-        (he_type == RATE_MCS_HE_TYPE_SU ||
-         he_type == RATE_MCS_HE_TYPE_MU)) {
-        u8 bss_color = FIELD_GET(IWL_RX_HE_PHY_BSS_COLOR_MASK,
-                                 he_phy_data);
-
-        if (bss_color) {
-            he->data1 |=
-                cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN);
-            he->data3 |= cpu_to_le16(bss_color);
-        }
     }
 
+    if (he_phy_data != HE_PHY_DATA_INVAL)
+        iwl_mvm_decode_he_phy_data(mvm, desc, he, he_mu, rx_status,
+                                   he_phy_data, rate_n_flags, queue);
+
     /* update aggregation data for monitor sake on default queue */
     if (!queue && (phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
         bool toggle_bit = phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE;
@@ -1056,84 +1189,12 @@ static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb,
         rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
     }
 
-    if (he_phy_data != HE_PHY_DATA_INVAL &&
-        (FIELD_GET(IWL_RX_HE_PHY_INFO_TYPE_MASK, he_phy_data) ==
-         IWL_RX_HE_PHY_INFO_TYPE_MU_EXT_INFO ||
-         FIELD_GET(IWL_RX_HE_PHY_INFO_TYPE_MASK, he_phy_data) ==
-         IWL_RX_HE_PHY_INFO_TYPE_TB_EXT_INFO)) {
-        /*
-         * Unfortunately, we have to leave the mac80211 data
-         * incorrect for the case that we receive an HE-MU
-         * transmission and *don't* have the HE phy data (due
-         * to the bits being used for TSF). This shouldn't
-         * happen though as management frames where we need
-         * the TSF/timers are not transmitted in HE-MU.
-         */
-        u8 ru = FIELD_GET(IWL_RX_HE_PHY_RU_ALLOC_MASK, he_phy_data);
-        u8 offs = 0;
-
-        rx_status->bw = RATE_INFO_BW_HE_RU;
-
+    /* actually data is filled in mac80211 */
+    if (he_type == RATE_MCS_HE_TYPE_SU ||
+        he_type == RATE_MCS_HE_TYPE_EXT_SU)
         he->data1 |=
             cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN);
 
-        switch (ru) {
-        case 0 ... 36:
-            rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26;
-            offs = ru;
-            break;
-        case 37 ... 52:
-            rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52;
-            offs = ru - 37;
-            break;
-        case 53 ... 60:
-            rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
-            offs = ru - 53;
-            break;
-        case 61 ... 64:
-            rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242;
-            offs = ru - 61;
-            break;
-        case 65 ... 66:
-            rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484;
-            offs = ru - 65;
-            break;
-        case 67:
-            rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996;
-            break;
-        case 68:
-            rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
-            break;
-        }
-        he->data2 |=
-            le16_encode_bits(offs,
-                             IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET);
-        he->data2 |=
-            cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_KNOWN |
-                        IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET_KNOWN);
-        if (he_phy_data & IWL_RX_HE_PHY_RU_ALLOC_SEC80)
-            he->data2 |=
-                cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_SEC);
-
-        if (he_mu) {
-#define CHECK_BW(bw) \
-    BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_ ## bw ## MHZ != \
-                 RATE_MCS_CHAN_WIDTH_##bw >> RATE_MCS_CHAN_WIDTH_POS)
-            CHECK_BW(20);
-            CHECK_BW(40);
-            CHECK_BW(80);
-            CHECK_BW(160);
-            he->data2 |=
-                le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK,
-                                           rate_n_flags),
-                                 IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW);
-        }
-    } else if (he_type == RATE_MCS_HE_TYPE_SU ||
-               he_type == RATE_MCS_HE_TYPE_EXT_SU) {
-        he->data1 |=
-            cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN);
-    }
-
     stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> RATE_MCS_STBC_POS;
     rx_status->nss =
         ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
@@ -1202,9 +1263,8 @@ static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb,
     he->data5 |= le16_encode_bits(ltf,
                                   IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE);
 
-    switch (he_type) {
-    case RATE_MCS_HE_TYPE_SU:
-    case RATE_MCS_HE_TYPE_EXT_SU: {
+    if (he_type == RATE_MCS_HE_TYPE_SU ||
+        he_type == RATE_MCS_HE_TYPE_EXT_SU) {
         u16 val;
 
         /* LTF syms correspond to streams */
@@ -1234,31 +1294,10 @@ static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb,
                 rx_status->nss);
             val = 0;
         }
+
         he->data5 |=
             le16_encode_bits(val,
                              IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS);
-    }
-        break;
-    case RATE_MCS_HE_TYPE_MU: {
-        u16 val;
-
-        if (he_phy_data == HE_PHY_DATA_INVAL)
-            break;
-
-        val = FIELD_GET(IWL_RX_HE_PHY_HE_LTF_NUM_MASK,
-                        he_phy_data);
-
-        he->data2 |=
-            cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN);
-        he->data5 |=
-            cpu_to_le16(FIELD_PREP(
-                IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS,
-                val));
-    }
-        break;
-    case RATE_MCS_HE_TYPE_TRIG:
-        /* not supported */
-        break;
     }
 }
@@ -1424,6 +1463,8 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
         u8 baid = (u8)((le32_to_cpu(desc->reorder_data) &
                        IWL_RX_MPDU_REORDER_BAID_MASK) >>
                        IWL_RX_MPDU_REORDER_BAID_SHIFT);
+        struct iwl_fw_dbg_trigger_tlv *trig;
+        struct ieee80211_vif *vif = mvmsta->vif;
 
         if (!mvm->tcm.paused && len >= sizeof(*hdr) &&
             !is_multicast_ether_addr(hdr->addr1) &&
@@ -1436,8 +1477,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
          * frames from a blocked station on a new channel we can
          * TX to it again.
          */
-        if (unlikely(tx_blocked_vif) &&
-            tx_blocked_vif == mvmsta->vif) {
+        if (unlikely(tx_blocked_vif) && tx_blocked_vif == vif) {
             struct iwl_mvm_vif *mvmvif =
                 iwl_mvm_vif_from_mac80211(tx_blocked_vif);
@@ -1448,23 +1488,18 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
 
         rs_update_last_rssi(mvm, mvmsta, rx_status);
 
-        if (iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_RSSI) &&
-            ieee80211_is_beacon(hdr->frame_control)) {
-            struct iwl_fw_dbg_trigger_tlv *trig;
+        trig = iwl_fw_dbg_trigger_on(&mvm->fwrt,
+                                     ieee80211_vif_to_wdev(vif),
+                                     FW_DBG_TRIGGER_RSSI);
+
+        if (trig && ieee80211_is_beacon(hdr->frame_control)) {
             struct iwl_fw_dbg_trigger_low_rssi *rssi_trig;
-            bool trig_check;
             s32 rssi;
 
-            trig = iwl_fw_dbg_get_trigger(mvm->fw,
-                                          FW_DBG_TRIGGER_RSSI);
             rssi_trig = (void *)trig->data;
             rssi = le32_to_cpu(rssi_trig->rssi);
 
-            trig_check =
-                iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
-                                              ieee80211_vif_to_wdev(mvmsta->vif),
-                                              trig);
-            if (trig_check && rx_status->signal < rssi)
+            if (rx_status->signal < rssi)
                 iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, NULL);
         }
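A worked example of the RU-allocation decode above: a firmware ru index of 55 falls in the 53..60 range, so the frame is reported as a 106-tone RU at offset 55 - 53 = 2, while index 68 carries no offset and simply selects the 2x996-tone allocation. The CHECK_BW() BUILD_BUG_ON()s pin the radiotap SIG-A bandwidth encoding to the firmware's RATE_MCS_CHAN_WIDTH values, so the FIELD_GET() result can be used without conversion.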
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index e9048a98e793..cfb784fea77b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -110,6 +110,10 @@ static struct iwl_mvm_scan_timing_params scan_timing[] = {
         .suspend_time = 95,
         .max_out_time = 44,
     },
+    [IWL_SCAN_TYPE_FAST_BALANCE] = {
+        .suspend_time = 30,
+        .max_out_time = 37,
+    },
 };
 
 struct iwl_mvm_scan_params {
@@ -235,8 +239,32 @@ iwl_mvm_get_traffic_load_band(struct iwl_mvm *mvm, enum nl80211_band band)
     return mvm->tcm.result.band_load[band];
 }
 
+struct iwl_is_dcm_with_go_iterator_data {
+    struct ieee80211_vif *current_vif;
+    bool is_dcm_with_p2p_go;
+};
+
+static void iwl_mvm_is_dcm_with_go_iterator(void *_data, u8 *mac,
+                                            struct ieee80211_vif *vif)
+{
+    struct iwl_is_dcm_with_go_iterator_data *data = _data;
+    struct iwl_mvm_vif *other_mvmvif = iwl_mvm_vif_from_mac80211(vif);
+    struct iwl_mvm_vif *curr_mvmvif =
+        iwl_mvm_vif_from_mac80211(data->current_vif);
+
+    /* exclude the given vif */
+    if (vif == data->current_vif)
+        return;
+
+    if (vif->type == NL80211_IFTYPE_AP && vif->p2p &&
+        other_mvmvif->phy_ctxt && curr_mvmvif->phy_ctxt &&
+        other_mvmvif->phy_ctxt->id != curr_mvmvif->phy_ctxt->id)
+        data->is_dcm_with_p2p_go = true;
+}
+
 static enum
-iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device,
+iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm,
+                                         struct ieee80211_vif *vif,
                                          enum iwl_mvm_traffic_load load,
                                          bool low_latency)
 {
@@ -249,9 +277,30 @@ iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm,
     if (!global_cnt)
         return IWL_SCAN_TYPE_UNASSOC;
 
-    if ((load == IWL_MVM_TRAFFIC_HIGH || low_latency) && !p2p_device &&
-        fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_FRAGMENTED_SCAN))
-        return IWL_SCAN_TYPE_FRAGMENTED;
+    if (fw_has_api(&mvm->fw->ucode_capa,
+                   IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) {
+        if ((load == IWL_MVM_TRAFFIC_HIGH || low_latency) &&
+            (!vif || vif->type != NL80211_IFTYPE_P2P_DEVICE))
+            return IWL_SCAN_TYPE_FRAGMENTED;
+
+        /* in case of DCM with GO where BSS DTIM interval < 220msec
+         * set all scan requests as fast-balance scan
+         */
+        if (vif && vif->type == NL80211_IFTYPE_STATION &&
+            vif->bss_conf.dtim_period < 220) {
+            struct iwl_is_dcm_with_go_iterator_data data = {
+                .current_vif = vif,
+                .is_dcm_with_p2p_go = false,
+            };
+
+            ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+                        IEEE80211_IFACE_ITER_NORMAL,
+                        iwl_mvm_is_dcm_with_go_iterator,
+                        &data);
+            if (data.is_dcm_with_p2p_go)
+                return IWL_SCAN_TYPE_FAST_BALANCE;
+        }
+    }
 
     if (load >= IWL_MVM_TRAFFIC_MEDIUM || low_latency)
         return IWL_SCAN_TYPE_MILD;
@@ -260,7 +309,8 @@ iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm,
 }
 
 static enum
-iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device)
+iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm,
+                                        struct ieee80211_vif *vif)
 {
     enum iwl_mvm_traffic_load load;
     bool low_latency;
@@ -268,12 +318,12 @@ iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm,
     load = iwl_mvm_get_traffic_load(mvm);
     low_latency = iwl_mvm_low_latency(mvm);
 
-    return _iwl_mvm_get_scan_type(mvm, p2p_device, load, low_latency);
+    return _iwl_mvm_get_scan_type(mvm, vif, load, low_latency);
 }
 
 static enum
 iwl_mvm_scan_type iwl_mvm_get_scan_type_band(struct iwl_mvm *mvm,
-                                             bool p2p_device,
+                                             struct ieee80211_vif *vif,
                                              enum nl80211_band band)
 {
     enum iwl_mvm_traffic_load load;
@@ -282,7 +332,7 @@ iwl_mvm_scan_type iwl_mvm_get_scan_type_band(struct iwl_mvm *mvm,
     load = iwl_mvm_get_traffic_load_band(mvm, band);
     low_latency = iwl_mvm_low_latency_band(mvm, band);
 
-    return _iwl_mvm_get_scan_type(mvm, p2p_device, load, low_latency);
+    return _iwl_mvm_get_scan_type(mvm, vif, load, low_latency);
 }
 
 static int
@@ -860,6 +910,12 @@ static inline bool iwl_mvm_is_regular_scan(struct iwl_mvm_scan_params *params)
         params->scan_plans[0].iterations == 1;
 }
 
+static bool iwl_mvm_is_scan_fragmented(enum iwl_mvm_scan_type type)
+{
+    return (type == IWL_SCAN_TYPE_FRAGMENTED ||
+            type == IWL_SCAN_TYPE_FAST_BALANCE);
+}
+
 static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
                                    struct iwl_mvm_scan_params *params,
                                    struct ieee80211_vif *vif)
@@ -872,7 +928,7 @@ static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
     if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
         flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION;
 
-    if (params->type == IWL_SCAN_TYPE_FRAGMENTED)
+    if (iwl_mvm_is_scan_fragmented(params->type))
         flags |= IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED;
 
     if (iwl_mvm_rrm_scan_needed(mvm) &&
@@ -895,7 +951,7 @@ static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
     if (iwl_mvm_is_regular_scan(params) &&
         vif->type != NL80211_IFTYPE_P2P_DEVICE &&
-        params->type != IWL_SCAN_TYPE_FRAGMENTED)
+        !iwl_mvm_is_scan_fragmented(params->type))
         flags |= IWL_MVM_LMAC_SCAN_FLAG_EXTENDED_DWELL;
 
     return flags;
@@ -1044,7 +1100,7 @@ static void iwl_mvm_fill_channels(struct iwl_mvm *mvm, u8 *channels)
 static void iwl_mvm_fill_scan_config_v1(struct iwl_mvm *mvm, void *config,
                                         u32 flags, u8 channel_flags)
 {
-    enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, false);
+    enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, NULL);
     struct iwl_scan_config_v1 *cfg = config;
 
     cfg->flags = cpu_to_le32(flags);
@@ -1077,9 +1133,9 @@ static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config,
     if (iwl_mvm_is_cdb_supported(mvm)) {
         enum iwl_mvm_scan_type lb_type, hb_type;
 
-        lb_type = iwl_mvm_get_scan_type_band(mvm, false,
+        lb_type = iwl_mvm_get_scan_type_band(mvm, NULL,
                                              NL80211_BAND_2GHZ);
-        hb_type = iwl_mvm_get_scan_type_band(mvm, false,
+        hb_type = iwl_mvm_get_scan_type_band(mvm, NULL,
                                              NL80211_BAND_5GHZ);
 
         cfg->out_of_channel_time[SCAN_LB_LMAC_IDX] =
@@ -1093,7 +1149,7 @@ static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config,
             cpu_to_le32(scan_timing[hb_type].suspend_time);
     } else {
         enum iwl_mvm_scan_type type =
-            iwl_mvm_get_scan_type(mvm, false);
+            iwl_mvm_get_scan_type(mvm, NULL);
 
         cfg->out_of_channel_time[SCAN_LB_LMAC_IDX] =
             cpu_to_le32(scan_timing[type].max_out_time);
@@ -1130,14 +1186,14 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
         return -ENOBUFS;
 
     if (iwl_mvm_is_cdb_supported(mvm)) {
-        type = iwl_mvm_get_scan_type_band(mvm, false,
+        type = iwl_mvm_get_scan_type_band(mvm, NULL,
                                           NL80211_BAND_2GHZ);
-        hb_type = iwl_mvm_get_scan_type_band(mvm, false,
+        hb_type = iwl_mvm_get_scan_type_band(mvm, NULL,
                                              NL80211_BAND_5GHZ);
         if (type == mvm->scan_type && hb_type == mvm->hb_scan_type)
             return 0;
     } else {
-        type = iwl_mvm_get_scan_type(mvm, false);
+        type = iwl_mvm_get_scan_type(mvm, NULL);
         if (type == mvm->scan_type)
             return 0;
     }
@@ -1162,7 +1218,7 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
             SCAN_CONFIG_FLAG_SET_MAC_ADDR |
             SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS |
             SCAN_CONFIG_N_CHANNELS(num_channels) |
-            (type == IWL_SCAN_TYPE_FRAGMENTED ?
+            (iwl_mvm_is_scan_fragmented(type) ?
              SCAN_CONFIG_FLAG_SET_FRAGMENTED :
              SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED);
 
@@ -1177,7 +1233,7 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
      */
     if (iwl_mvm_cdb_scan_api(mvm)) {
         if (iwl_mvm_is_cdb_supported(mvm))
-            flags |= (hb_type == IWL_SCAN_TYPE_FRAGMENTED) ?
+            flags |= (iwl_mvm_is_scan_fragmented(hb_type)) ?
                 SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED :
                 SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED;
         iwl_mvm_fill_scan_config(mvm, cfg, flags, channel_flags);
@@ -1338,11 +1394,11 @@ static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
     if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
         flags |= IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT;
 
-    if (params->type == IWL_SCAN_TYPE_FRAGMENTED)
+    if (iwl_mvm_is_scan_fragmented(params->type))
         flags |= IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED;
 
     if (iwl_mvm_is_cdb_supported(mvm) &&
-        params->hb_type == IWL_SCAN_TYPE_FRAGMENTED)
+        iwl_mvm_is_scan_fragmented(params->hb_type))
         flags |= IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED;
 
     if (iwl_mvm_rrm_scan_needed(mvm) &&
@@ -1380,7 +1436,7 @@ static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
      */
     if (iwl_mvm_is_regular_scan(params) &&
         vif->type != NL80211_IFTYPE_P2P_DEVICE &&
-        params->type != IWL_SCAN_TYPE_FRAGMENTED &&
+        !iwl_mvm_is_scan_fragmented(params->type) &&
         !iwl_mvm_is_adaptive_dwell_supported(mvm) &&
         !iwl_mvm_is_oce_supported(mvm))
         flags |= IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL;
@@ -1448,6 +1504,9 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
         if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED)
             cmd->v8.num_of_fragments[SCAN_HB_LMAC_IDX] =
                 IWL_SCAN_NUM_OF_FRAGS;
+
+        cmd->v8.general_flags2 =
+            IWL_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER;
     }
 
     cmd->scan_start_mac_id = scan_vif->id;
@@ -1586,19 +1645,20 @@ void iwl_mvm_scan_timeout_wk(struct work_struct *work)
 
 static void iwl_mvm_fill_scan_type(struct iwl_mvm *mvm,
                                    struct iwl_mvm_scan_params *params,
-                                   bool p2p)
+                                   struct ieee80211_vif *vif)
 {
     if (iwl_mvm_is_cdb_supported(mvm)) {
         params->type =
-            iwl_mvm_get_scan_type_band(mvm, p2p,
+            iwl_mvm_get_scan_type_band(mvm, vif,
                                        NL80211_BAND_2GHZ);
         params->hb_type =
-            iwl_mvm_get_scan_type_band(mvm, p2p,
+            iwl_mvm_get_scan_type_band(mvm, vif,
                                        NL80211_BAND_5GHZ);
     } else {
-        params->type = iwl_mvm_get_scan_type(mvm, p2p);
+        params->type = iwl_mvm_get_scan_type(mvm, vif);
     }
 }
+
 int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                            struct cfg80211_scan_request *req,
                            struct ieee80211_scan_ies *ies)
@@ -1646,8 +1706,7 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
     params.scan_plans = &scan_plan;
     params.n_scan_plans = 1;
 
-    iwl_mvm_fill_scan_type(mvm, &params,
-                           vif->type == NL80211_IFTYPE_P2P_DEVICE);
+    iwl_mvm_fill_scan_type(mvm, &params, vif);
 
     ret = iwl_mvm_get_measurement_dwell(mvm, req, &params);
     if (ret < 0)
@@ -1742,8 +1801,7 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
     params.n_scan_plans = req->n_scan_plans;
     params.scan_plans = req->scan_plans;
 
-    iwl_mvm_fill_scan_type(mvm, &params,
-                           vif->type == NL80211_IFTYPE_P2P_DEVICE);
+    iwl_mvm_fill_scan_type(mvm, &params, vif);
 
     /* In theory, LMAC scans can handle a 32-bit delay, but since
      * waiting for over 18 hours to start the scan is a bit silly
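The numbers behind the new fast-balance type (suspend_time = 30, max_out_time = 37 in the scan_timing table above) keep each off-channel excursion short enough that a station whose BSS has a DTIM interval below 220 ms does not miss DTIM beacons while the shared radio also serves a P2P GO on a different channel, which is exactly the DCM case the new iterator detects.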
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 8f929c774e70..1887d2b9f185 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -358,6 +358,108 @@ static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
     return ret;
 }
 
+static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue,
+                               int mac80211_queue, u8 tid, u8 flags)
+{
+    struct iwl_scd_txq_cfg_cmd cmd = {
+        .scd_queue = queue,
+        .action = SCD_CFG_DISABLE_QUEUE,
+    };
+    bool remove_mac_queue = mac80211_queue != IEEE80211_INVAL_HW_QUEUE;
+    int ret;
+
+    if (WARN_ON(remove_mac_queue && mac80211_queue >= IEEE80211_MAX_QUEUES))
+        return -EINVAL;
+
+    if (iwl_mvm_has_new_tx_api(mvm)) {
+        spin_lock_bh(&mvm->queue_info_lock);
+
+        if (remove_mac_queue)
+            mvm->hw_queue_to_mac80211[queue] &=
+                ~BIT(mac80211_queue);
+
+        spin_unlock_bh(&mvm->queue_info_lock);
+
+        iwl_trans_txq_free(mvm->trans, queue);
+
+        return 0;
+    }
+
+    spin_lock_bh(&mvm->queue_info_lock);
+
+    if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0)) {
+        spin_unlock_bh(&mvm->queue_info_lock);
+        return 0;
+    }
+
+    mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
+
+    /*
+     * If there is another TID with the same AC - don't remove the MAC queue
+     * from the mapping
+     */
+    if (tid < IWL_MAX_TID_COUNT) {
+        unsigned long tid_bitmap =
+            mvm->queue_info[queue].tid_bitmap;
+        int ac = tid_to_mac80211_ac[tid];
+        int i;
+
+        for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT) {
+            if (tid_to_mac80211_ac[i] == ac)
+                remove_mac_queue = false;
+        }
+    }
+
+    if (remove_mac_queue)
+        mvm->hw_queue_to_mac80211[queue] &=
+            ~BIT(mac80211_queue);
+
+    cmd.action = mvm->queue_info[queue].tid_bitmap ?
+        SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
+    if (cmd.action == SCD_CFG_DISABLE_QUEUE)
+        mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;
+
+    IWL_DEBUG_TX_QUEUES(mvm,
+                        "Disabling TXQ #%d tids=0x%x (mac80211 map:0x%x)\n",
+                        queue,
+                        mvm->queue_info[queue].tid_bitmap,
+                        mvm->hw_queue_to_mac80211[queue]);
+
+    /* If the queue is still enabled - nothing left to do in this func */
+    if (cmd.action == SCD_CFG_ENABLE_QUEUE) {
+        spin_unlock_bh(&mvm->queue_info_lock);
+        return 0;
+    }
+
+    cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
+    cmd.tid = mvm->queue_info[queue].txq_tid;
+
+    /* Make sure queue info is correct even though we overwrite it */
+    WARN(mvm->queue_info[queue].tid_bitmap ||
+         mvm->hw_queue_to_mac80211[queue],
+         "TXQ #%d info out-of-sync - mac map=0x%x, tids=0x%x\n",
+         queue, mvm->hw_queue_to_mac80211[queue],
+         mvm->queue_info[queue].tid_bitmap);
+
+    /* If we are here - the queue is freed and we can zero out these vals */
+    mvm->queue_info[queue].tid_bitmap = 0;
+    mvm->hw_queue_to_mac80211[queue] = 0;
+
+    /* Regardless if this is a reserved TXQ for a STA - mark it as false */
+    mvm->queue_info[queue].reserved = false;
+
+    spin_unlock_bh(&mvm->queue_info_lock);
+
+    iwl_trans_txq_disable(mvm->trans, queue, false);
+    ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
+                               sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);
+
+    if (ret)
+        IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
+                queue, ret);
+    return ret;
+}
+
 static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
 {
     struct ieee80211_sta *sta;
@@ -447,11 +549,12 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
 }
 
 static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
-                                       bool same_sta)
+                                       u8 new_sta_id)
 {
     struct iwl_mvm_sta *mvmsta;
     u8 txq_curr_ac, sta_id, tid;
     unsigned long disable_agg_tids = 0;
+    bool same_sta;
     int ret;
 
     lockdep_assert_held(&mvm->mutex);
@@ -465,6 +568,8 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
     tid = mvm->queue_info[queue].txq_tid;
     spin_unlock_bh(&mvm->queue_info_lock);
 
+    same_sta = sta_id == new_sta_id;
+
     mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
     if (WARN_ON(!mvmsta))
         return -EINVAL;
@@ -479,10 +584,6 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
                               mvmsta->vif->hw_queue[txq_curr_ac],
                               tid, 0);
     if (ret) {
-        /* Re-mark the inactive queue as inactive */
-        spin_lock_bh(&mvm->queue_info_lock);
-        mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
-        spin_unlock_bh(&mvm->queue_info_lock);
         IWL_ERR(mvm,
                 "Failed to free inactive queue %d (ret=%d)\n",
                 queue, ret);
@@ -504,7 +605,13 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
     u8 ac_to_queue[IEEE80211_NUM_ACS];
     int i;
 
+    /*
+     * This protects us against grabbing a queue that's being reconfigured
+     * by the inactivity checker.
+     */
+    lockdep_assert_held(&mvm->mutex);
     lockdep_assert_held(&mvm->queue_info_lock);
+
     if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
         return -EINVAL;
@@ -517,11 +624,6 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
             i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
             continue;
 
-        /* Don't try and take queues being reconfigured */
-        if (mvm->queue_info[queue].status ==
-            IWL_MVM_QUEUE_RECONFIGURING)
-            continue;
-
         ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
     }
@@ -562,14 +664,6 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
         return -ENOSPC;
     }
 
-    /* Make sure the queue isn't in the middle of being reconfigured */
-    if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_RECONFIGURING) {
-        IWL_ERR(mvm,
-            "TXQ %d is in the middle of re-config - try again\n",
-            queue);
-        return -EBUSY;
-    }
-
     return queue;
 }
@@ -579,9 +673,9 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
  * in such a case, otherwise - if no redirection required - it does nothing,
  * unless the %force param is true.
  */
-int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
-                               int ac, int ssn, unsigned int wdg_timeout,
-                               bool force)
+static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
+                                      int ac, int ssn, unsigned int wdg_timeout,
+                                      bool force)
 {
     struct iwl_scd_txq_cfg_cmd cmd = {
         .scd_queue = queue,
@@ -616,7 +710,7 @@ static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
     cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
     cmd.tid = mvm->queue_info[queue].txq_tid;
     mq = mvm->hw_queue_to_mac80211[queue];
-    shared_queue = (mvm->queue_info[queue].hw_queue_refcount > 1);
+    shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1;
     spin_unlock_bh(&mvm->queue_info_lock);
 
     IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
@@ -674,6 +768,57 @@ out:
     return ret;
 }
 
+static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
+                                   u8 minq, u8 maxq)
+{
+    int i;
+
+    lockdep_assert_held(&mvm->queue_info_lock);
+
+    /* This should not be hit with new TX path */
+    if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+        return -ENOSPC;
+
+    /* Start by looking for a free queue */
+    for (i = minq; i <= maxq; i++)
+        if (mvm->queue_info[i].tid_bitmap == 0 &&
+            mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
+            return i;
+
+    return -ENOSPC;
+}
+
+static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
+                                   u8 sta_id, u8 tid, unsigned int timeout)
+{
+    int queue, size = IWL_DEFAULT_QUEUE_SIZE;
+
+    if (tid == IWL_MAX_TID_COUNT) {
+        tid = IWL_MGMT_TID;
+        size = IWL_MGMT_QUEUE_SIZE;
+    }
+    queue = iwl_trans_txq_alloc(mvm->trans,
+                                cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE),
+                                sta_id, tid, SCD_QUEUE_CFG, size, timeout);
+
+    if (queue < 0) {
+        IWL_DEBUG_TX_QUEUES(mvm,
+                            "Failed allocating TXQ for sta %d tid %d, ret: %d\n",
+                            sta_id, tid, queue);
+        return queue;
+    }
+
+    IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n",
+                        queue, sta_id, tid);
+
+    mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
+    IWL_DEBUG_TX_QUEUES(mvm,
+                        "Enabling TXQ #%d (mac80211 map:0x%x)\n",
+                        queue, mvm->hw_queue_to_mac80211[queue]);
+
+    return queue;
+}
+
 static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
                                         struct ieee80211_sta *sta, u8 ac,
                                         int tid)
@@ -698,12 +843,428 @@ static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
 
     spin_lock_bh(&mvmsta->lock);
     mvmsta->tid_data[tid].txq_id = queue;
-    mvmsta->tid_data[tid].is_tid_active = true;
     spin_unlock_bh(&mvmsta->lock);
 
     return 0;
 }
 
+static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int
queue, + int mac80211_queue, u8 sta_id, u8 tid) +{ + bool enable_queue = true; + + spin_lock_bh(&mvm->queue_info_lock); + + /* Make sure this TID isn't already enabled */ + if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) { + spin_unlock_bh(&mvm->queue_info_lock); + IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n", + queue, tid); + return false; + } + + /* Update mappings and refcounts */ + if (mvm->queue_info[queue].tid_bitmap) + enable_queue = false; + + if (mac80211_queue != IEEE80211_INVAL_HW_QUEUE) { + WARN(mac80211_queue >= + BITS_PER_BYTE * sizeof(mvm->hw_queue_to_mac80211[0]), + "cannot track mac80211 queue %d (queue %d, sta %d, tid %d)\n", + mac80211_queue, queue, sta_id, tid); + mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue); + } + + mvm->queue_info[queue].tid_bitmap |= BIT(tid); + mvm->queue_info[queue].ra_sta_id = sta_id; + + if (enable_queue) { + if (tid != IWL_MAX_TID_COUNT) + mvm->queue_info[queue].mac80211_ac = + tid_to_mac80211_ac[tid]; + else + mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO; + + mvm->queue_info[queue].txq_tid = tid; + } + + IWL_DEBUG_TX_QUEUES(mvm, + "Enabling TXQ #%d tids=0x%x (mac80211 map:0x%x)\n", + queue, mvm->queue_info[queue].tid_bitmap, + mvm->hw_queue_to_mac80211[queue]); + + spin_unlock_bh(&mvm->queue_info_lock); + + return enable_queue; +} + +static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, + int mac80211_queue, u16 ssn, + const struct iwl_trans_txq_scd_cfg *cfg, + unsigned int wdg_timeout) +{ + struct iwl_scd_txq_cfg_cmd cmd = { + .scd_queue = queue, + .action = SCD_CFG_ENABLE_QUEUE, + .window = cfg->frame_limit, + .sta_id = cfg->sta_id, + .ssn = cpu_to_le16(ssn), + .tx_fifo = cfg->fifo, + .aggregate = cfg->aggregate, + .tid = cfg->tid, + }; + bool inc_ssn; + + if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) + return false; + + /* Send the enabling command if we need to */ + if (!iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue, + cfg->sta_id, cfg->tid)) + return false; + + inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, + NULL, wdg_timeout); + if (inc_ssn) + le16_add_cpu(&cmd.ssn, 1); + + WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd), + "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo); + + return inc_ssn; +} + +static void iwl_mvm_change_queue_tid(struct iwl_mvm *mvm, int queue) +{ + struct iwl_scd_txq_cfg_cmd cmd = { + .scd_queue = queue, + .action = SCD_CFG_UPDATE_QUEUE_TID, + }; + int tid; + unsigned long tid_bitmap; + int ret; + + lockdep_assert_held(&mvm->mutex); + + if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) + return; + + spin_lock_bh(&mvm->queue_info_lock); + tid_bitmap = mvm->queue_info[queue].tid_bitmap; + spin_unlock_bh(&mvm->queue_info_lock); + + if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue)) + return; + + /* Find any TID for queue */ + tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1); + cmd.tid = tid; + cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]]; + + ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd); + if (ret) { + IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n", + queue, ret); + return; + } + + spin_lock_bh(&mvm->queue_info_lock); + mvm->queue_info[queue].txq_tid = tid; + spin_unlock_bh(&mvm->queue_info_lock); + IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n", + queue, tid); +} + +static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue) +{ + struct ieee80211_sta *sta; + struct iwl_mvm_sta *mvmsta; + u8 sta_id; + int tid = -1; + unsigned long 
tid_bitmap;
+ unsigned int wdg_timeout;
+ int ssn;
+ int ret = true;
+
+ /* queue sharing is disabled on new TX path */
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ spin_lock_bh(&mvm->queue_info_lock);
+ sta_id = mvm->queue_info[queue].ra_sta_id;
+ tid_bitmap = mvm->queue_info[queue].tid_bitmap;
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ /* Find TID for queue, and make sure it is the only one on the queue */
+ tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
+ if (tid_bitmap != BIT(tid)) {
+ IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
+ queue, tid_bitmap);
+ return;
+ }
+
+ IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
+ tid);
+
+ sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+ lockdep_is_held(&mvm->mutex));
+
+ if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
+ return;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
+
+ ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
+
+ ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid,
+ tid_to_mac80211_ac[tid], ssn,
+ wdg_timeout, true);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
+ return;
+ }
+
+ /* If aggs should be turned back on - do it */
+ if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
+ struct iwl_mvm_add_sta_cmd cmd = {0};
+
+ mvmsta->tid_disable_agg &= ~BIT(tid);
+
+ cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
+ cmd.sta_id = mvmsta->sta_id;
+ cmd.add_modify = STA_MODE_MODIFY;
+ cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
+ cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
+ cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
+ iwl_mvm_add_sta_cmd_size(mvm), &cmd);
+ if (!ret) {
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "TXQ #%d is now aggregated again\n",
+ queue);
+
+ /* Mark queue internally as aggregating again */
+ iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
+ }
+ }
+
+ spin_lock_bh(&mvm->queue_info_lock);
+ mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
+ spin_unlock_bh(&mvm->queue_info_lock);
+}
+
+/*
+ * Remove inactive TIDs of a given queue.
+ * If all queue TIDs are inactive - mark the queue as inactive
+ * If only some of the queue TIDs are inactive - unmap them from the queue
+ *
+ * Returns %true if all TIDs were removed and the queue could be reused.
+ */
+static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvmsta, int queue,
+ unsigned long tid_bitmap,
+ unsigned long *unshare_queues,
+ unsigned long *changetid_queues)
+{
+ int tid;
+
+ lockdep_assert_held(&mvmsta->lock);
+ lockdep_assert_held(&mvm->queue_info_lock);
+
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return false;
+
+ /* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
+ for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
+ /* If some TFDs are still queued - don't mark TID as inactive */
+ if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid]))
+ tid_bitmap &= ~BIT(tid);
+
+ /* Don't mark as inactive any TID that has an active BA */
+ if (mvmsta->tid_data[tid].state != IWL_AGG_OFF)
+ tid_bitmap &= ~BIT(tid);
+ }
+
+ /* If all TIDs in the queue are inactive - return that it can be reused */
+ if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) {
+ IWL_DEBUG_TX_QUEUES(mvm, "Queue %d is inactive\n", queue);
+ return true;
+ }
+
+ /*
+ * If we are here, this is a shared queue and not all TIDs timed-out.
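+ * (some TIDs still have queued TFDs or an active BA session).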
+ * Remove the ones that did. + */ + for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { + int mac_queue = mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]; + u16 tid_bitmap; + + mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE; + mvm->hw_queue_to_mac80211[queue] &= ~BIT(mac_queue); + mvm->queue_info[queue].tid_bitmap &= ~BIT(tid); + + tid_bitmap = mvm->queue_info[queue].tid_bitmap; + + /* + * We need to take into account a situation in which a TXQ was + * allocated to TID x, and then turned shared by adding TIDs y + * and z. If TID x becomes inactive and is removed from the TXQ, + * ownership must be given to one of the remaining TIDs. + * This is mainly because if TID x continues - a new queue can't + * be allocated for it as long as it is an owner of another TXQ. + * + * Mark this queue in the right bitmap, we'll send the command + * to the firmware later. + */ + if (!(tid_bitmap & BIT(mvm->queue_info[queue].txq_tid))) + set_bit(queue, changetid_queues); + + IWL_DEBUG_TX_QUEUES(mvm, + "Removing inactive TID %d from shared Q:%d\n", + tid, queue); + } + + IWL_DEBUG_TX_QUEUES(mvm, + "TXQ #%d left with tid bitmap 0x%x\n", queue, + mvm->queue_info[queue].tid_bitmap); + + /* + * There may be different TIDs with the same mac queues, so make + * sure all TIDs have existing corresponding mac queues enabled + */ + tid_bitmap = mvm->queue_info[queue].tid_bitmap; + for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { + mvm->hw_queue_to_mac80211[queue] |= + BIT(mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]); + } + + /* If the queue is marked as shared - "unshare" it */ + if (hweight16(mvm->queue_info[queue].tid_bitmap) == 1 && + mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) { + IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n", + queue); + set_bit(queue, unshare_queues); + } + + return false; +} + +/* + * Check for inactivity - this includes checking if any queue + * can be unshared and finding one (and only one) that can be + * reused. + * This function is also invoked as a sort of clean-up task, + * in which case @alloc_for_sta is IWL_MVM_INVALID_STA. + * + * Returns the queue number, or -ENOSPC. + */ +static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta) +{ + unsigned long now = jiffies; + unsigned long unshare_queues = 0; + unsigned long changetid_queues = 0; + int i, ret, free_queue = -ENOSPC; + + lockdep_assert_held(&mvm->mutex); + + if (iwl_mvm_has_new_tx_api(mvm)) + return -ENOSPC; + + spin_lock_bh(&mvm->queue_info_lock); + + rcu_read_lock(); + + /* we skip the CMD queue below by starting at 1 */ + BUILD_BUG_ON(IWL_MVM_DQA_CMD_QUEUE != 0); + + for (i = 1; i < IWL_MAX_HW_QUEUES; i++) { + struct ieee80211_sta *sta; + struct iwl_mvm_sta *mvmsta; + u8 sta_id; + int tid; + unsigned long inactive_tid_bitmap = 0; + unsigned long queue_tid_bitmap; + + queue_tid_bitmap = mvm->queue_info[i].tid_bitmap; + if (!queue_tid_bitmap) + continue; + + /* If TXQ isn't in active use anyway - nothing to do here... 
*/
+ if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY &&
+ mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED)
+ continue;
+
+ /* Check to see if there are inactive TIDs on this queue */
+ for_each_set_bit(tid, &queue_tid_bitmap,
+ IWL_MAX_TID_COUNT + 1) {
+ if (time_after(mvm->queue_info[i].last_frame_time[tid] +
+ IWL_MVM_DQA_QUEUE_TIMEOUT, now))
+ continue;
+
+ inactive_tid_bitmap |= BIT(tid);
+ }
+
+ /* If all TIDs are active - finish check on this queue */
+ if (!inactive_tid_bitmap)
+ continue;
+
+ /*
+ * If we are here - the queue hadn't been served recently and is
+ * in use
+ */
+
+ sta_id = mvm->queue_info[i].ra_sta_id;
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+
+ /*
+ * If the STA doesn't exist anymore, it isn't an error. It could
+ * be that it was removed since getting the queues, and in this
+ * case it should've inactivated its queues anyway.
+ */
+ if (IS_ERR_OR_NULL(sta))
+ continue;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+ /* this isn't so nice, but works OK due to the way we loop */
+ spin_unlock(&mvm->queue_info_lock);
+
+ /* and we need this locking order */
+ spin_lock(&mvmsta->lock);
+ spin_lock(&mvm->queue_info_lock);
+ ret = iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
+ inactive_tid_bitmap,
+ &unshare_queues,
+ &changetid_queues);
+ if (ret >= 0 && free_queue < 0)
+ free_queue = ret;
+ /* only unlock sta lock - we still need the queue info lock */
+ spin_unlock(&mvmsta->lock);
+ }
+
+ rcu_read_unlock();
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ /* Reconfigure queues requiring reconfiguration */
+ for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES)
+ iwl_mvm_unshare_queue(mvm, i);
+ for_each_set_bit(i, &changetid_queues, IWL_MAX_HW_QUEUES)
+ iwl_mvm_change_queue_tid(mvm, i);
+
+ if (free_queue >= 0 && alloc_for_sta != IWL_MVM_INVALID_STA) {
+ ret = iwl_mvm_free_inactive_queue(mvm, free_queue,
+ alloc_for_sta);
+ if (ret)
+ return ret;
+ }
+
+ return free_queue;
+}
+
 static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
 struct ieee80211_sta *sta, u8 ac, int tid,
 struct ieee80211_hdr *hdr)
@@ -719,7 +1280,6 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
 iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
 u8 mac_queue = mvmsta->vif->hw_queue[ac];
 int queue = -1;
- bool using_inactive_queue = false, same_sta = false;
 unsigned long disable_agg_tids = 0;
 enum iwl_mvm_agg_state queue_state;
 bool shared_queue = false, inc_ssn;
@@ -756,9 +1316,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
 if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
 (mvm->queue_info[mvmsta->reserved_queue].status ==
- IWL_MVM_QUEUE_RESERVED ||
- mvm->queue_info[mvmsta->reserved_queue].status ==
- IWL_MVM_QUEUE_INACTIVE)) {
+ IWL_MVM_QUEUE_RESERVED)) {
 queue = mvmsta->reserved_queue;
 mvm->queue_info[queue].reserved = true;
 IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
@@ -768,21 +1326,13 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
 queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
 IWL_MVM_DQA_MIN_DATA_QUEUE,
 IWL_MVM_DQA_MAX_DATA_QUEUE);
+ if (queue < 0) {
+ spin_unlock_bh(&mvm->queue_info_lock);

- /*
- * Check if this queue is already allocated but inactive.
- * In such a case, we'll need to first free this queue before enabling - * it again, so we'll mark it as reserved to make sure no new traffic - * arrives on it - */ - if (queue > 0 && - mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) { - mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED; - using_inactive_queue = true; - same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id; - IWL_DEBUG_TX_QUEUES(mvm, - "Re-assigning TXQ %d: sta_id=%d, tid=%d\n", - queue, mvmsta->sta_id, tid); + /* try harder - perhaps kill an inactive queue */ + queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id); + + spin_lock_bh(&mvm->queue_info_lock); } /* No free queue - we'll have to share */ @@ -800,7 +1350,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, * This will allow avoiding re-acquiring the lock at the end of the * configuration. On error we'll mark it back as free. */ - if ((queue > 0) && !shared_queue) + if (queue > 0 && !shared_queue) mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY; spin_unlock_bh(&mvm->queue_info_lock); @@ -821,16 +1371,6 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE || queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE); - /* - * If this queue was previously inactive (idle) - we need to free it - * first - */ - if (using_inactive_queue) { - ret = iwl_mvm_free_inactive_queue(mvm, queue, same_sta); - if (ret) - return ret; - } - IWL_DEBUG_TX_QUEUES(mvm, "Allocating %squeue #%d to sta %d on tid %d\n", shared_queue ? "shared " : "", queue, @@ -874,7 +1414,6 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, if (inc_ssn) mvmsta->tid_data[tid].seq_number += 0x10; mvmsta->tid_data[tid].txq_id = queue; - mvmsta->tid_data[tid].is_tid_active = true; mvmsta->tfd_queue_msk |= BIT(queue); queue_state = mvmsta->tid_data[tid].state; @@ -909,129 +1448,6 @@ out_err: return ret; } -static void iwl_mvm_change_queue_owner(struct iwl_mvm *mvm, int queue) -{ - struct iwl_scd_txq_cfg_cmd cmd = { - .scd_queue = queue, - .action = SCD_CFG_UPDATE_QUEUE_TID, - }; - int tid; - unsigned long tid_bitmap; - int ret; - - lockdep_assert_held(&mvm->mutex); - - if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) - return; - - spin_lock_bh(&mvm->queue_info_lock); - tid_bitmap = mvm->queue_info[queue].tid_bitmap; - spin_unlock_bh(&mvm->queue_info_lock); - - if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue)) - return; - - /* Find any TID for queue */ - tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1); - cmd.tid = tid; - cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]]; - - ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd); - if (ret) { - IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n", - queue, ret); - return; - } - - spin_lock_bh(&mvm->queue_info_lock); - mvm->queue_info[queue].txq_tid = tid; - spin_unlock_bh(&mvm->queue_info_lock); - IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n", - queue, tid); -} - -static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue) -{ - struct ieee80211_sta *sta; - struct iwl_mvm_sta *mvmsta; - u8 sta_id; - int tid = -1; - unsigned long tid_bitmap; - unsigned int wdg_timeout; - int ssn; - int ret = true; - - /* queue sharing is disabled on new TX path */ - if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) - return; - - lockdep_assert_held(&mvm->mutex); - - spin_lock_bh(&mvm->queue_info_lock); - sta_id = mvm->queue_info[queue].ra_sta_id; - tid_bitmap = mvm->queue_info[queue].tid_bitmap; - 
spin_unlock_bh(&mvm->queue_info_lock); - - /* Find TID for queue, and make sure it is the only one on the queue */ - tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1); - if (tid_bitmap != BIT(tid)) { - IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n", - queue, tid_bitmap); - return; - } - - IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue, - tid); - - sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], - lockdep_is_held(&mvm->mutex)); - - if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) - return; - - mvmsta = iwl_mvm_sta_from_mac80211(sta); - wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false); - - ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number); - - ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid, - tid_to_mac80211_ac[tid], ssn, - wdg_timeout, true); - if (ret) { - IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue); - return; - } - - /* If aggs should be turned back on - do it */ - if (mvmsta->tid_data[tid].state == IWL_AGG_ON) { - struct iwl_mvm_add_sta_cmd cmd = {0}; - - mvmsta->tid_disable_agg &= ~BIT(tid); - - cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color); - cmd.sta_id = mvmsta->sta_id; - cmd.add_modify = STA_MODE_MODIFY; - cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX; - cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk); - cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg); - - ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, - iwl_mvm_add_sta_cmd_size(mvm), &cmd); - if (!ret) { - IWL_DEBUG_TX_QUEUES(mvm, - "TXQ #%d is now aggregated again\n", - queue); - - /* Mark queue intenally as aggregating again */ - iwl_trans_txq_set_shared_mode(mvm->trans, queue, false); - } - } - - spin_lock_bh(&mvm->queue_info_lock); - mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY; - spin_unlock_bh(&mvm->queue_info_lock); -} - static inline u8 iwl_mvm_tid_to_ac_queue(int tid) { if (tid == IWL_MAX_TID_COUNT) @@ -1100,47 +1516,12 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk) struct ieee80211_sta *sta; struct iwl_mvm_sta *mvmsta; unsigned long deferred_tid_traffic; - int queue, sta_id, tid; - - /* Check inactivity of queues */ - iwl_mvm_inactivity_check(mvm); + int sta_id, tid; mutex_lock(&mvm->mutex); - /* No queue reconfiguration in TVQM mode */ - if (iwl_mvm_has_new_tx_api(mvm)) - goto alloc_queues; - - /* Reconfigure queues requiring reconfiguation */ - for (queue = 0; queue < ARRAY_SIZE(mvm->queue_info); queue++) { - bool reconfig; - bool change_owner; - - spin_lock_bh(&mvm->queue_info_lock); - reconfig = (mvm->queue_info[queue].status == - IWL_MVM_QUEUE_RECONFIGURING); + iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA); - /* - * We need to take into account a situation in which a TXQ was - * allocated to TID x, and then turned shared by adding TIDs y - * and z. If TID x becomes inactive and is removed from the TXQ, - * ownership must be given to one of the remaining TIDs. - * This is mainly because if TID x continues - a new queue can't - * be allocated for it as long as it is an owner of another TXQ. 
- */ - change_owner = !(mvm->queue_info[queue].tid_bitmap & - BIT(mvm->queue_info[queue].txq_tid)) && - (mvm->queue_info[queue].status == - IWL_MVM_QUEUE_SHARED); - spin_unlock_bh(&mvm->queue_info_lock); - - if (reconfig) - iwl_mvm_unshare_queue(mvm, queue); - else if (change_owner) - iwl_mvm_change_queue_owner(mvm, queue); - } - -alloc_queues: /* Go over all stations with deferred traffic */ for_each_set_bit(sta_id, mvm->sta_deferred_frames, IWL_MVM_STATION_COUNT) { @@ -1167,23 +1548,19 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm, { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); int queue; - bool using_inactive_queue = false, same_sta = false; /* queue reserving is disabled on new TX path */ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return 0; - /* - * Check for inactive queues, so we don't reach a situation where we - * can't add a STA due to a shortage in queues that doesn't really exist - */ - iwl_mvm_inactivity_check(mvm); + /* run the general cleanup/unsharing of queues */ + iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA); spin_lock_bh(&mvm->queue_info_lock); /* Make sure we have free resources for this STA */ if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls && - !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].hw_queue_refcount && + !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].tid_bitmap && (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status == IWL_MVM_QUEUE_FREE)) queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE; @@ -1193,16 +1570,13 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm, IWL_MVM_DQA_MAX_DATA_QUEUE); if (queue < 0) { spin_unlock_bh(&mvm->queue_info_lock); - IWL_ERR(mvm, "No available queues for new station\n"); - return -ENOSPC; - } else if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) { - /* - * If this queue is already allocated but inactive we'll need to - * first free this queue before enabling it again, we'll mark - * it as reserved to make sure no new traffic arrives on it - */ - using_inactive_queue = true; - same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id; + /* try again - this time kick out a queue if needed */ + queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id); + if (queue < 0) { + IWL_ERR(mvm, "No available queues for new station\n"); + return -ENOSPC; + } + spin_lock_bh(&mvm->queue_info_lock); } mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED; @@ -1210,9 +1584,6 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm, mvmsta->reserved_queue = queue; - if (using_inactive_queue) - iwl_mvm_free_inactive_queue(mvm, queue, same_sta); - IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n", queue, mvmsta->sta_id); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index 0fc211108149..de1a0a2d8723 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h @@ -312,9 +312,6 @@ enum iwl_mvm_agg_state { * Basically when next_reclaimed reaches ssn, we can tell mac80211 that * we are ready to finish the Tx AGG stop / start flow. * @tx_time: medium time consumed by this A-MPDU - * @is_tid_active: has this TID sent traffic in the last - * %IWL_MVM_DQA_QUEUE_TIMEOUT time period. If %txq_id is invalid, this - * field should be ignored. 
* @tpt_meas_start: time of the throughput measurements start, is reset every HZ * @tx_count_last: number of frames transmitted during the last second * @tx_count: counts the number of frames transmitted since the last reset of @@ -332,7 +329,6 @@ struct iwl_mvm_tid_data { u16 txq_id; u16 ssn; u16 tx_time; - bool is_tid_active; unsigned long tpt_meas_start; u32 tx_count_last; u32 tx_count; @@ -572,8 +568,4 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm, void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif); void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk); -int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid, - int ac, int ssn, unsigned int wdg_timeout, - bool force); - #endif /* __sta_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c index cd91bc44259c..e1a6f4e22253 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c @@ -254,17 +254,14 @@ static void iwl_mvm_te_check_trigger(struct iwl_mvm *mvm, struct iwl_fw_dbg_trigger_time_event *te_trig; int i; - if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TIME_EVENT)) + trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, + ieee80211_vif_to_wdev(te_data->vif), + FW_DBG_TRIGGER_TIME_EVENT); + if (!trig) return; - trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TIME_EVENT); te_trig = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, - ieee80211_vif_to_wdev(te_data->vif), - trig)) - return; - for (i = 0; i < ARRAY_SIZE(te_trig->time_events); i++) { u32 trig_te_id = le32_to_cpu(te_trig->time_events[i].id); u32 trig_action_bitmap = diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index a6877b3f8037..ec57682efe54 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -79,15 +79,12 @@ iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr, struct iwl_fw_dbg_trigger_tlv *trig; struct iwl_fw_dbg_trigger_ba *ba_trig; - if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA)) + trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL, FW_DBG_TRIGGER_BA); + if (!trig) return; - trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA); ba_trig = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig)) - return; - if (!(le16_to_cpu(ba_trig->tx_bar) & BIT(tid))) return; @@ -1143,32 +1140,16 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM); /* Check if TXQ needs to be allocated or re-activated */ - if (unlikely(txq_id == IWL_MVM_INVALID_QUEUE || - !mvmsta->tid_data[tid].is_tid_active)) { - /* If TXQ needs to be allocated... */ - if (txq_id == IWL_MVM_INVALID_QUEUE) { - iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb); - - /* - * The frame is now deferred, and the worker scheduled - * will re-allocate it, so we can free it for now. 
- */ - iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); - spin_unlock(&mvmsta->lock); - return 0; - } - - /* queue should always be active in new TX path */ - WARN_ON(iwl_mvm_has_new_tx_api(mvm)); + if (unlikely(txq_id == IWL_MVM_INVALID_QUEUE)) { + iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb); - /* If we are here - TXQ exists and needs to be re-activated */ - spin_lock(&mvm->queue_info_lock); - mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY; - mvmsta->tid_data[tid].is_tid_active = true; - spin_unlock(&mvm->queue_info_lock); - - IWL_DEBUG_TX_QUEUES(mvm, "Re-activating queue %d for TX\n", - txq_id); + /* + * The frame is now deferred, and the worker scheduled + * will re-allocate it, so we can free it for now. + */ + iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); + spin_unlock(&mvmsta->lock); + return 0; } if (!iwl_mvm_has_new_tx_api(mvm)) { @@ -1414,15 +1395,13 @@ static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm, struct iwl_fw_dbg_trigger_tx_status *status_trig; int i; - if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TX_STATUS)) + trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL, + FW_DBG_TRIGGER_TX_STATUS); + if (!trig) return; - trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TX_STATUS); status_trig = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig)) - return; - for (i = 0; i < ARRAY_SIZE(status_trig->statuses); i++) { /* don't collect on status 0 */ if (!status_trig->statuses[i].status) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index dcacc4d11abc..818e1180bbdd 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -599,36 +599,6 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm) iwl_mvm_dump_umac_error_log(mvm); } -int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq) -{ - int i; - - lockdep_assert_held(&mvm->queue_info_lock); - - /* This should not be hit with new TX path */ - if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) - return -ENOSPC; - - /* Start by looking for a free queue */ - for (i = minq; i <= maxq; i++) - if (mvm->queue_info[i].hw_queue_refcount == 0 && - mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE) - return i; - - /* - * If no free queue found - settle for an inactive one to reconfigure - * Make sure that the inactive queue either already belongs to this STA, - * or that if it belongs to another one - it isn't the reserved queue - */ - for (i = minq; i <= maxq; i++) - if (mvm->queue_info[i].status == IWL_MVM_QUEUE_INACTIVE && - (sta_id == mvm->queue_info[i].ra_sta_id || - !mvm->queue_info[i].reserved)) - return i; - - return -ENOSPC; -} - int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id, int tid, int frame_limit, u16 ssn) { @@ -649,7 +619,7 @@ int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id, return -EINVAL; spin_lock_bh(&mvm->queue_info_lock); - if (WARN(mvm->queue_info[queue].hw_queue_refcount == 0, + if (WARN(mvm->queue_info[queue].tid_bitmap == 0, "Trying to reconfig unallocated queue %d\n", queue)) { spin_unlock_bh(&mvm->queue_info_lock); return -ENXIO; @@ -665,229 +635,6 @@ int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id, return ret; } -static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue, - int mac80211_queue, u8 sta_id, u8 tid) -{ - bool enable_queue = true; - - spin_lock_bh(&mvm->queue_info_lock); - - /* Make sure this TID isn't already enabled */ - 
if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) { - spin_unlock_bh(&mvm->queue_info_lock); - IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n", - queue, tid); - return false; - } - - /* Update mappings and refcounts */ - if (mvm->queue_info[queue].hw_queue_refcount > 0) - enable_queue = false; - - if (mac80211_queue != IEEE80211_INVAL_HW_QUEUE) { - WARN(mac80211_queue >= - BITS_PER_BYTE * sizeof(mvm->hw_queue_to_mac80211[0]), - "cannot track mac80211 queue %d (queue %d, sta %d, tid %d)\n", - mac80211_queue, queue, sta_id, tid); - mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue); - } - - mvm->queue_info[queue].hw_queue_refcount++; - mvm->queue_info[queue].tid_bitmap |= BIT(tid); - mvm->queue_info[queue].ra_sta_id = sta_id; - - if (enable_queue) { - if (tid != IWL_MAX_TID_COUNT) - mvm->queue_info[queue].mac80211_ac = - tid_to_mac80211_ac[tid]; - else - mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO; - - mvm->queue_info[queue].txq_tid = tid; - } - - IWL_DEBUG_TX_QUEUES(mvm, - "Enabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n", - queue, mvm->queue_info[queue].hw_queue_refcount, - mvm->hw_queue_to_mac80211[queue]); - - spin_unlock_bh(&mvm->queue_info_lock); - - return enable_queue; -} - -int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue, - u8 sta_id, u8 tid, unsigned int timeout) -{ - int queue, size = IWL_DEFAULT_QUEUE_SIZE; - - if (tid == IWL_MAX_TID_COUNT) { - tid = IWL_MGMT_TID; - size = IWL_MGMT_QUEUE_SIZE; - } - queue = iwl_trans_txq_alloc(mvm->trans, - cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE), - sta_id, tid, SCD_QUEUE_CFG, size, timeout); - - if (queue < 0) { - IWL_DEBUG_TX_QUEUES(mvm, - "Failed allocating TXQ for sta %d tid %d, ret: %d\n", - sta_id, tid, queue); - return queue; - } - - IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n", - queue, sta_id, tid); - - mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue); - IWL_DEBUG_TX_QUEUES(mvm, - "Enabling TXQ #%d (mac80211 map:0x%x)\n", - queue, mvm->hw_queue_to_mac80211[queue]); - - return queue; -} - -bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, - u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg, - unsigned int wdg_timeout) -{ - struct iwl_scd_txq_cfg_cmd cmd = { - .scd_queue = queue, - .action = SCD_CFG_ENABLE_QUEUE, - .window = cfg->frame_limit, - .sta_id = cfg->sta_id, - .ssn = cpu_to_le16(ssn), - .tx_fifo = cfg->fifo, - .aggregate = cfg->aggregate, - .tid = cfg->tid, - }; - bool inc_ssn; - - if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) - return false; - - /* Send the enabling command if we need to */ - if (!iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue, - cfg->sta_id, cfg->tid)) - return false; - - inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, - NULL, wdg_timeout); - if (inc_ssn) - le16_add_cpu(&cmd.ssn, 1); - - WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd), - "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo); - - return inc_ssn; -} - -int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, - u8 tid, u8 flags) -{ - struct iwl_scd_txq_cfg_cmd cmd = { - .scd_queue = queue, - .action = SCD_CFG_DISABLE_QUEUE, - }; - bool remove_mac_queue = mac80211_queue != IEEE80211_INVAL_HW_QUEUE; - int ret; - - if (WARN_ON(remove_mac_queue && mac80211_queue >= IEEE80211_MAX_QUEUES)) - return -EINVAL; - - if (iwl_mvm_has_new_tx_api(mvm)) { - spin_lock_bh(&mvm->queue_info_lock); - - if (remove_mac_queue) - mvm->hw_queue_to_mac80211[queue] &= - ~BIT(mac80211_queue); - - 
spin_unlock_bh(&mvm->queue_info_lock); - - iwl_trans_txq_free(mvm->trans, queue); - - return 0; - } - - spin_lock_bh(&mvm->queue_info_lock); - - if (WARN_ON(mvm->queue_info[queue].hw_queue_refcount == 0)) { - spin_unlock_bh(&mvm->queue_info_lock); - return 0; - } - - mvm->queue_info[queue].tid_bitmap &= ~BIT(tid); - - /* - * If there is another TID with the same AC - don't remove the MAC queue - * from the mapping - */ - if (tid < IWL_MAX_TID_COUNT) { - unsigned long tid_bitmap = - mvm->queue_info[queue].tid_bitmap; - int ac = tid_to_mac80211_ac[tid]; - int i; - - for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT) { - if (tid_to_mac80211_ac[i] == ac) - remove_mac_queue = false; - } - } - - if (remove_mac_queue) - mvm->hw_queue_to_mac80211[queue] &= - ~BIT(mac80211_queue); - mvm->queue_info[queue].hw_queue_refcount--; - - cmd.action = mvm->queue_info[queue].hw_queue_refcount ? - SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE; - if (cmd.action == SCD_CFG_DISABLE_QUEUE) - mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE; - - IWL_DEBUG_TX_QUEUES(mvm, - "Disabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n", - queue, - mvm->queue_info[queue].hw_queue_refcount, - mvm->hw_queue_to_mac80211[queue]); - - /* If the queue is still enabled - nothing left to do in this func */ - if (cmd.action == SCD_CFG_ENABLE_QUEUE) { - spin_unlock_bh(&mvm->queue_info_lock); - return 0; - } - - cmd.sta_id = mvm->queue_info[queue].ra_sta_id; - cmd.tid = mvm->queue_info[queue].txq_tid; - - /* Make sure queue info is correct even though we overwrite it */ - WARN(mvm->queue_info[queue].hw_queue_refcount || - mvm->queue_info[queue].tid_bitmap || - mvm->hw_queue_to_mac80211[queue], - "TXQ #%d info out-of-sync - refcount=%d, mac map=0x%x, tid=0x%x\n", - queue, mvm->queue_info[queue].hw_queue_refcount, - mvm->hw_queue_to_mac80211[queue], - mvm->queue_info[queue].tid_bitmap); - - /* If we are here - the queue is freed and we can zero out these vals */ - mvm->queue_info[queue].hw_queue_refcount = 0; - mvm->queue_info[queue].tid_bitmap = 0; - mvm->hw_queue_to_mac80211[queue] = 0; - - /* Regardless if this is a reserved TXQ for a STA - mark it as false */ - mvm->queue_info[queue].reserved = false; - - spin_unlock_bh(&mvm->queue_info_lock); - - iwl_trans_txq_disable(mvm->trans, queue, false); - ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags, - sizeof(struct iwl_scd_txq_cfg_cmd), &cmd); - - if (ret) - IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n", - queue, ret); - return ret; -} - /** * iwl_mvm_send_lq_cmd() - Send link quality command * @sync: This command can be sent synchronously. @@ -1238,14 +985,12 @@ void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_fw_dbg_trigger_tlv *trig; struct iwl_fw_dbg_trigger_mlme *trig_mlme; - if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME)) + trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), + FW_DBG_TRIGGER_MLME); + if (!trig) goto out; - trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME); trig_mlme = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, - ieee80211_vif_to_wdev(vif), trig)) - goto out; if (trig_mlme->stop_connection_loss && --trig_mlme->stop_connection_loss) @@ -1257,171 +1002,6 @@ out: ieee80211_connection_loss(vif); } -/* - * Remove inactive TIDs of a given queue. 
- * If all queue TIDs are inactive - mark the queue as inactive - * If only some the queue TIDs are inactive - unmap them from the queue - */ -static void iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm, - struct iwl_mvm_sta *mvmsta, int queue, - unsigned long tid_bitmap) -{ - int tid; - - lockdep_assert_held(&mvmsta->lock); - lockdep_assert_held(&mvm->queue_info_lock); - - if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) - return; - - /* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */ - for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { - /* If some TFDs are still queued - don't mark TID as inactive */ - if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid])) - tid_bitmap &= ~BIT(tid); - - /* Don't mark as inactive any TID that has an active BA */ - if (mvmsta->tid_data[tid].state != IWL_AGG_OFF) - tid_bitmap &= ~BIT(tid); - } - - /* If all TIDs in the queue are inactive - mark queue as inactive. */ - if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) { - mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE; - - for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) - mvmsta->tid_data[tid].is_tid_active = false; - - IWL_DEBUG_TX_QUEUES(mvm, "Queue %d marked as inactive\n", - queue); - return; - } - - /* - * If we are here, this is a shared queue and not all TIDs timed-out. - * Remove the ones that did. - */ - for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { - int mac_queue = mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]; - - mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE; - mvm->hw_queue_to_mac80211[queue] &= ~BIT(mac_queue); - mvm->queue_info[queue].hw_queue_refcount--; - mvm->queue_info[queue].tid_bitmap &= ~BIT(tid); - mvmsta->tid_data[tid].is_tid_active = false; - - IWL_DEBUG_TX_QUEUES(mvm, - "Removing inactive TID %d from shared Q:%d\n", - tid, queue); - } - - IWL_DEBUG_TX_QUEUES(mvm, - "TXQ #%d left with tid bitmap 0x%x\n", queue, - mvm->queue_info[queue].tid_bitmap); - - /* - * There may be different TIDs with the same mac queues, so make - * sure all TIDs have existing corresponding mac queues enabled - */ - tid_bitmap = mvm->queue_info[queue].tid_bitmap; - for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { - mvm->hw_queue_to_mac80211[queue] |= - BIT(mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]); - } - - /* If the queue is marked as shared - "unshare" it */ - if (mvm->queue_info[queue].hw_queue_refcount == 1 && - mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) { - mvm->queue_info[queue].status = IWL_MVM_QUEUE_RECONFIGURING; - IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n", - queue); - } -} - -void iwl_mvm_inactivity_check(struct iwl_mvm *mvm) -{ - unsigned long timeout_queues_map = 0; - unsigned long now = jiffies; - int i; - - if (iwl_mvm_has_new_tx_api(mvm)) - return; - - spin_lock_bh(&mvm->queue_info_lock); - for (i = 0; i < IWL_MAX_HW_QUEUES; i++) - if (mvm->queue_info[i].hw_queue_refcount > 0) - timeout_queues_map |= BIT(i); - spin_unlock_bh(&mvm->queue_info_lock); - - rcu_read_lock(); - - /* - * If a queue time outs - mark it as INACTIVE (don't remove right away - * if we don't have to.) 
This is an optimization in case traffic comes - * later, and we don't HAVE to use a currently-inactive queue - */ - for_each_set_bit(i, &timeout_queues_map, IWL_MAX_HW_QUEUES) { - struct ieee80211_sta *sta; - struct iwl_mvm_sta *mvmsta; - u8 sta_id; - int tid; - unsigned long inactive_tid_bitmap = 0; - unsigned long queue_tid_bitmap; - - spin_lock_bh(&mvm->queue_info_lock); - queue_tid_bitmap = mvm->queue_info[i].tid_bitmap; - - /* If TXQ isn't in active use anyway - nothing to do here... */ - if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY && - mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED) { - spin_unlock_bh(&mvm->queue_info_lock); - continue; - } - - /* Check to see if there are inactive TIDs on this queue */ - for_each_set_bit(tid, &queue_tid_bitmap, - IWL_MAX_TID_COUNT + 1) { - if (time_after(mvm->queue_info[i].last_frame_time[tid] + - IWL_MVM_DQA_QUEUE_TIMEOUT, now)) - continue; - - inactive_tid_bitmap |= BIT(tid); - } - spin_unlock_bh(&mvm->queue_info_lock); - - /* If all TIDs are active - finish check on this queue */ - if (!inactive_tid_bitmap) - continue; - - /* - * If we are here - the queue hadn't been served recently and is - * in use - */ - - sta_id = mvm->queue_info[i].ra_sta_id; - sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); - - /* - * If the STA doesn't exist anymore, it isn't an error. It could - * be that it was removed since getting the queues, and in this - * case it should've inactivated its queues anyway. - */ - if (IS_ERR_OR_NULL(sta)) - continue; - - mvmsta = iwl_mvm_sta_from_mac80211(sta); - - spin_lock_bh(&mvmsta->lock); - spin_lock(&mvm->queue_info_lock); - iwl_mvm_remove_inactive_tids(mvm, mvmsta, i, - inactive_tid_bitmap); - spin_unlock(&mvm->queue_info_lock); - spin_unlock_bh(&mvmsta->lock); - } - - rcu_read_unlock(); -} - void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm, struct ieee80211_vif *vif, const struct ieee80211_sta *sta, @@ -1430,14 +1010,12 @@ void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm, struct iwl_fw_dbg_trigger_tlv *trig; struct iwl_fw_dbg_trigger_ba *ba_trig; - if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA)) + trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), + FW_DBG_TRIGGER_BA); + if (!trig) return; - trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA); ba_trig = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, - ieee80211_vif_to_wdev(vif), trig)) - return; if (!(le16_to_cpu(ba_trig->frame_timeout) & BIT(tid))) return; |