Merge branch 'for-john' of git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-next

Conflicts:
	drivers/net/wireless/iwlwifi/mvm/mac80211.c
John W. Linville 2013-02-12 12:51:08 -05:00
commit 5171f7a0b7
25 changed files with 578 additions and 309 deletions

View File

@ -3897,6 +3897,24 @@ struct iwlagn_wowlan_kek_kck_material_cmd {
__le64 replay_ctr;
} __packed;
#define RF_KILL_INDICATOR_FOR_WOWLAN 0x87
/*
* REPLY_WOWLAN_GET_STATUS = 0xe5
*/
struct iwlagn_wowlan_status {
__le64 replay_ctr;
__le32 rekey_status;
__le32 wakeup_reason;
u8 pattern_number;
u8 reserved1;
__le16 qos_seq_ctr[8];
__le16 non_qos_seq_ctr;
__le16 reserved2;
union iwlagn_all_tsc_rsc tsc_rsc;
__le16 reserved3;
} __packed;
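
The struct above mirrors the firmware's REPLY_WOWLAN_GET_STATUS payload byte for byte: __packed forbids compiler padding and all multi-byte fields are little-endian on the wire, so the driver converts on access. A minimal sketch of host-side access; iwlagn_parse_wowlan_status is a hypothetical helper for illustration, not part of this commit:

    /* Hypothetical helper, for illustration only - not in the commit. */
    static void iwlagn_parse_wowlan_status(const struct iwlagn_wowlan_status *s)
    {
        u64 replay_ctr = le64_to_cpu(s->replay_ctr);  /* wire -> host order */
        u32 reason = le32_to_cpu(s->wakeup_reason);

        pr_debug("replay_ctr=%llu wakeup_reason=0x%x pattern=%u\n",
                 replay_ctr, reason, s->pattern_number);
    }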
/*
* REPLY_WIPAN_PARAMS = 0xb2 (Commands and Notification)
*/

View File

@ -441,53 +441,155 @@ static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
return ret;
}
struct iwl_resume_data {
struct iwl_priv *priv;
struct iwlagn_wowlan_status *cmd;
bool valid;
};
static bool iwl_resume_status_fn(struct iwl_notif_wait_data *notif_wait,
struct iwl_rx_packet *pkt, void *data)
{
struct iwl_resume_data *resume_data = data;
struct iwl_priv *priv = resume_data->priv;
u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
if (len - 4 != sizeof(*resume_data->cmd)) {
IWL_ERR(priv, "rx wrong size data\n");
return true;
}
memcpy(resume_data->cmd, pkt->data, sizeof(*resume_data->cmd));
resume_data->valid = true;
return true;
}
static int iwlagn_mac_resume(struct ieee80211_hw *hw)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
struct ieee80211_vif *vif;
-unsigned long flags;
-u32 base, status = 0xffffffff;
-int ret = -EIO;
+u32 base;
+int ret;
enum iwl_d3_status d3_status;
struct error_table_start {
/* cf. struct iwl_error_event_table */
u32 valid;
u32 error_id;
} err_info;
struct iwl_notification_wait status_wait;
static const u8 status_cmd[] = {
REPLY_WOWLAN_GET_STATUS,
};
struct iwlagn_wowlan_status status_data = {};
struct iwl_resume_data resume_data = {
.priv = priv,
.cmd = &status_data,
.valid = false,
};
struct cfg80211_wowlan_wakeup wakeup = {
.pattern_idx = -1,
};
#ifdef CONFIG_IWLWIFI_DEBUGFS
const struct fw_img *img;
#endif
IWL_DEBUG_MAC80211(priv, "enter\n");
mutex_lock(&priv->mutex);
-iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_CLR,
-CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
-base = priv->device_pointers.error_event_table;
-if (iwlagn_hw_valid_rtc_data_addr(base)) {
-if (iwl_trans_grab_nic_access(priv->trans, true, &flags)) {
-iwl_write32(priv->trans, HBUS_TARG_MEM_RADDR, base);
-status = iwl_read32(priv->trans, HBUS_TARG_MEM_RDAT);
-iwl_trans_release_nic_access(priv->trans, &flags);
-ret = 0;
-}
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-if (ret == 0) {
-const struct fw_img *img;
-img = &(priv->fw->img[IWL_UCODE_WOWLAN]);
-if (!priv->wowlan_sram) {
-priv->wowlan_sram =
-kzalloc(img->sec[IWL_UCODE_SECTION_DATA].len,
-GFP_KERNEL);
-}
-if (priv->wowlan_sram)
-iwl_trans_read_mem(
-priv->trans, 0x800000,
-priv->wowlan_sram,
-img->sec[IWL_UCODE_SECTION_DATA].len / 4);
-}
-#endif
-}
/* we'll clear ctx->vif during iwlagn_prepare_restart() */
vif = ctx->vif;
ret = iwl_trans_d3_resume(priv->trans, &d3_status);
if (ret)
goto out_unlock;
if (d3_status != IWL_D3_STATUS_ALIVE) {
IWL_INFO(priv, "Device was reset during suspend\n");
goto out_unlock;
}
base = priv->device_pointers.error_event_table;
if (!iwlagn_hw_valid_rtc_data_addr(base)) {
IWL_WARN(priv, "Invalid error table during resume!\n");
goto out_unlock;
}
iwl_trans_read_mem_bytes(priv->trans, base,
&err_info, sizeof(err_info));
if (err_info.valid) {
IWL_INFO(priv, "error table is valid (%d, 0x%x)\n",
err_info.valid, err_info.error_id);
if (err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
wakeup.rfkill_release = true;
ieee80211_report_wowlan_wakeup(vif, &wakeup,
GFP_KERNEL);
}
goto out_unlock;
}
#ifdef CONFIG_IWLWIFI_DEBUGFS
img = &priv->fw->img[IWL_UCODE_WOWLAN];
if (!priv->wowlan_sram)
priv->wowlan_sram =
kzalloc(img->sec[IWL_UCODE_SECTION_DATA].len,
GFP_KERNEL);
if (priv->wowlan_sram)
iwl_trans_read_mem(priv->trans, 0x800000,
priv->wowlan_sram,
img->sec[IWL_UCODE_SECTION_DATA].len / 4);
#endif
/*
* This is very strange. The GET_STATUS command is sent but the device
* doesn't reply properly, it seems it doesn't close the RBD so one is
* always left open ... As a result, we need to send another command
* and have to reset the driver afterwards. As we need to switch to
* runtime firmware again that'll happen.
*/
iwl_init_notification_wait(&priv->notif_wait, &status_wait, status_cmd,
ARRAY_SIZE(status_cmd), iwl_resume_status_fn,
&resume_data);
iwl_dvm_send_cmd_pdu(priv, REPLY_WOWLAN_GET_STATUS, CMD_ASYNC, 0, NULL);
iwl_dvm_send_cmd_pdu(priv, REPLY_ECHO, CMD_ASYNC, 0, NULL);
/* an RBD is left open in the firmware now! */
ret = iwl_wait_notification(&priv->notif_wait, &status_wait, HZ/5);
if (ret)
goto out_unlock;
if (resume_data.valid && priv->contexts[IWL_RXON_CTX_BSS].vif) {
u32 reasons = le32_to_cpu(status_data.wakeup_reason);
struct cfg80211_wowlan_wakeup *wakeup_report;
IWL_INFO(priv, "WoWLAN wakeup reason(s): 0x%.8x\n", reasons);
if (reasons) {
if (reasons & IWLAGN_WOWLAN_WAKEUP_MAGIC_PACKET)
wakeup.magic_pkt = true;
if (reasons & IWLAGN_WOWLAN_WAKEUP_PATTERN_MATCH)
wakeup.pattern_idx = status_data.pattern_number;
if (reasons & (IWLAGN_WOWLAN_WAKEUP_BEACON_MISS |
IWLAGN_WOWLAN_WAKEUP_LINK_CHANGE))
wakeup.disconnect = true;
if (reasons & IWLAGN_WOWLAN_WAKEUP_GTK_REKEY_FAIL)
wakeup.gtk_rekey_failure = true;
if (reasons & IWLAGN_WOWLAN_WAKEUP_EAP_IDENT_REQ)
wakeup.eap_identity_req = true;
if (reasons & IWLAGN_WOWLAN_WAKEUP_4WAY_HANDSHAKE)
wakeup.four_way_handshake = true;
wakeup_report = &wakeup;
} else {
wakeup_report = NULL;
}
ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL);
}
priv->wowlan = false;
iwlagn_prepare_restart(priv);
@ -496,6 +598,7 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
iwl_connection_init_rx_config(priv, ctx);
iwlagn_set_rxon_chain(priv, ctx);
out_unlock:
mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
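
The flow above is the notification-wait pattern: register a waiter for REPLY_WOWLAN_GET_STATUS before anything is sent, fire the command (plus the echo that works around the open RBD) asynchronously, then block briefly while the handler runs in the RX path. Condensed from the code above into a fragment, error handling trimmed:

    struct iwl_notification_wait status_wait;
    static const u8 status_cmd[] = { REPLY_WOWLAN_GET_STATUS };

    /* 1. register the waiter before the command can possibly complete */
    iwl_init_notification_wait(&priv->notif_wait, &status_wait, status_cmd,
                               ARRAY_SIZE(status_cmd), iwl_resume_status_fn,
                               &resume_data);
    /* 2. fire the query (and the echo workaround) without blocking */
    iwl_dvm_send_cmd_pdu(priv, REPLY_WOWLAN_GET_STATUS, CMD_ASYNC, 0, NULL);
    iwl_dvm_send_cmd_pdu(priv, REPLY_ECHO, CMD_ASYNC, 0, NULL);
    /* 3. sleep until iwl_resume_status_fn has copied the reply (or HZ/5) */
    ret = iwl_wait_notification(&priv->notif_wait, &status_wait, HZ / 5);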

View File

@ -790,7 +790,7 @@ static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv,
memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
-ieee80211_rx(priv->hw, skb);
+ieee80211_rx_ni(priv->hw, skb);
}
static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)

View File

@ -1545,10 +1545,9 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
bss_conf->bssid);
}
-if (changes & BSS_CHANGED_BEACON && vif->type == NL80211_IFTYPE_ADHOC &&
-priv->beacon_ctx) {
+if (changes & BSS_CHANGED_BEACON && priv->beacon_ctx == ctx) {
if (iwlagn_update_beacon(priv, vif))
-IWL_ERR(priv, "Error sending IBSS beacon\n");
+IWL_ERR(priv, "Error updating beacon\n");
}
mutex_unlock(&priv->mutex);

View File

@ -77,7 +77,7 @@ static int iwl_process_add_sta_resp(struct iwl_priv *priv,
IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n",
sta_id);
-spin_lock(&priv->sta_lock);
+spin_lock_bh(&priv->sta_lock);
switch (add_sta_resp->status) {
case ADD_STA_SUCCESS_MSK:
@ -119,7 +119,7 @@ static int iwl_process_add_sta_resp(struct iwl_priv *priv,
priv->stations[sta_id].sta.mode ==
STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
addsta->sta.addr);
-spin_unlock(&priv->sta_lock);
+spin_unlock_bh(&priv->sta_lock);
return ret;
}

View File

@ -1117,7 +1117,7 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >>
IWLAGN_TX_RES_RA_POS;
-spin_lock(&priv->sta_lock);
+spin_lock_bh(&priv->sta_lock);
if (is_agg)
iwl_rx_reply_tx_agg(priv, tx_resp);
@ -1239,11 +1239,11 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
le16_to_cpu(tx_resp->seq_ctl));
iwl_check_abort_status(priv, tx_resp->frame_count, status);
-spin_unlock(&priv->sta_lock);
+spin_unlock_bh(&priv->sta_lock);
while (!skb_queue_empty(&skbs)) {
skb = __skb_dequeue(&skbs);
-ieee80211_tx_status(priv->hw, skb);
+ieee80211_tx_status_ni(priv->hw, skb);
}
if (is_offchannel_skb)
@ -1290,12 +1290,12 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
tid = ba_resp->tid;
agg = &priv->tid_data[sta_id][tid].agg;
-spin_lock(&priv->sta_lock);
+spin_lock_bh(&priv->sta_lock);
if (unlikely(!agg->wait_for_ba)) {
if (unlikely(ba_resp->bitmap))
IWL_ERR(priv, "Received BA when not expected\n");
-spin_unlock(&priv->sta_lock);
+spin_unlock_bh(&priv->sta_lock);
return 0;
}
@ -1309,7 +1309,7 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
IWL_DEBUG_TX_QUEUES(priv,
"Bad queue mapping txq_id=%d, agg_txq[sta:%d,tid:%d]=%d\n",
scd_flow, sta_id, tid, agg->txq_id);
-spin_unlock(&priv->sta_lock);
+spin_unlock_bh(&priv->sta_lock);
return 0;
}
@ -1378,11 +1378,11 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
}
}
-spin_unlock(&priv->sta_lock);
+spin_unlock_bh(&priv->sta_lock);
while (!skb_queue_empty(&reclaimed_skbs)) {
skb = __skb_dequeue(&reclaimed_skbs);
-ieee80211_tx_status(priv->hw, skb);
+ieee80211_tx_status_ni(priv->hw, skb);
}
return 0;

View File

@ -113,13 +113,13 @@ struct iwl_cfg;
* May sleep
* @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is the
* HCMD this Rx responds to.
-* Must be atomic and called with BH disabled.
+* This callback may sleep, it is called from a threaded IRQ handler.
* @queue_full: notifies that a HW queue is full.
* Must be atomic and called with BH disabled.
* @queue_not_full: notifies that a HW queue is not full any more.
* Must be atomic and called with BH disabled.
* @hw_rf_kill: notifies of a change in the HW rf kill switch. True means that
-* the radio is killed. Must be atomic.
+* the radio is killed. May sleep.
* @free_skb: allows the transport layer to free skbs that haven't been
* reclaimed by the op_mode. This can happen when the driver is freed and
* there are Tx packets pending in the transport layer.
@ -130,8 +130,7 @@ struct iwl_cfg;
* called with BH disabled.
* @nic_config: configure NIC, called before firmware is started.
* May sleep
-* @wimax_active: invoked when WiMax becomes active. Must be atomic and called
-* with BH disabled.
+* @wimax_active: invoked when WiMax becomes active. May sleep
*/
struct iwl_op_mode_ops {
struct iwl_op_mode *(*start)(struct iwl_trans *trans,
@ -178,6 +177,7 @@ static inline int iwl_op_mode_rx(struct iwl_op_mode *op_mode,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
might_sleep();
return op_mode->ops->rx(op_mode, rxb, cmd);
}
@ -196,6 +196,7 @@ static inline void iwl_op_mode_queue_not_full(struct iwl_op_mode *op_mode,
static inline void iwl_op_mode_hw_rf_kill(struct iwl_op_mode *op_mode,
bool state)
{
might_sleep();
op_mode->ops->hw_rf_kill(op_mode, state);
}
@ -223,6 +224,7 @@ static inline void iwl_op_mode_nic_config(struct iwl_op_mode *op_mode)
static inline void iwl_op_mode_wimax_active(struct iwl_op_mode *op_mode)
{
might_sleep();
op_mode->ops->wimax_active(op_mode);
}
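
The might_sleep() calls added to these inline wrappers are what enforce the relaxed contract documented above: with CONFIG_DEBUG_ATOMIC_SLEEP, any op_mode callback invoked from atomic context now splats immediately instead of deadlocking later. A generic sketch of the same pattern, with illustrative names that are not from iwlwifi:

    static inline void bus_notify_event(struct bus_dev *dev)
    {
        /* document and enforce: callers must be in sleepable context */
        might_sleep();
        dev->ops->handle_event(dev);  /* may take mutexes, do sync I/O */
    }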

View File

@ -65,6 +65,7 @@
#include <linux/ieee80211.h>
#include <linux/mm.h> /* for page_address */
#include <linux/lockdep.h>
#include "iwl-debug.h"
#include "iwl-config.h"
@ -526,6 +527,10 @@ struct iwl_trans {
struct dentry *dbgfs_dir;
#ifdef CONFIG_LOCKDEP
struct lockdep_map sync_cmd_lockdep_map;
#endif
/* pointer to trans specific struct */
/* Ensure that this pointer will always be aligned to sizeof pointer */
char trans_specific[0] __aligned(sizeof(void *));
@ -602,12 +607,22 @@ static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
}
static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd)
{
+int ret;
WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
"%s bad state = %d", __func__, trans->state);
-return trans->ops->send_cmd(trans, cmd);
+if (!(cmd->flags & CMD_ASYNC))
+lock_map_acquire_read(&trans->sync_cmd_lockdep_map);
+ret = trans->ops->send_cmd(trans, cmd);
+if (!(cmd->flags & CMD_ASYNC))
+lock_map_release(&trans->sync_cmd_lockdep_map);
+return ret;
}
static inline struct iwl_device_cmd *
@ -791,4 +806,14 @@ iwl_trans_release_nic_access(struct iwl_trans *trans, unsigned long *flags)
int __must_check iwl_pci_register_driver(void);
void iwl_pci_unregister_driver(void);
static inline void trans_lockdep_init(struct iwl_trans *trans)
{
#ifdef CONFIG_LOCKDEP
static struct lock_class_key __key;
lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map",
&__key, 0);
#endif
}
#endif /* __iwl_trans_h__ */
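
The new lockdep map turns "a synchronous host command is in flight" into a virtual lock: iwl_trans_send_cmd() takes it for read around every non-CMD_ASYNC command, and the PCIe interrupt thread (later in this merge) takes it for write while it processes responses. Lockdep can then flag the classic deadlock of issuing a sync command from the only context able to complete it. A minimal sketch of the same technique, under the assumption that the waiter can only be woken by the worker:

    static struct lockdep_map cmd_map;  /* set up via lockdep_init_map() */

    void send_and_wait(void)
    {
        lock_map_acquire_read(&cmd_map);  /* we depend on the worker */
        /* ... sleep until the worker posts the answer ... */
        lock_map_release(&cmd_map);
    }

    void worker(void)
    {
        lock_map_acquire(&cmd_map);  /* worker "owns" completion duty */
        /* calling send_and_wait() from here would recurse on cmd_map,
         * and lockdep reports the self-deadlock up front */
        lock_map_release(&cmd_map);
    }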

View File

@ -97,14 +97,14 @@ void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw,
struct inet6_ifaddr *ifa;
int idx = 0;
-read_lock(&idev->lock);
+read_lock_bh(&idev->lock);
list_for_each_entry(ifa, &idev->addr_list, if_list) {
mvmvif->target_ipv6_addrs[idx] = ifa->addr;
idx++;
if (idx >= IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS)
break;
}
-read_unlock(&idev->lock);
+read_unlock_bh(&idev->lock);
mvmvif->num_target_ipv6_addrs = idx;
}
@ -490,7 +490,7 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return -EIO;
}
-ret = iwl_mvm_sta_add_to_fw(mvm, ap_sta);
+ret = iwl_mvm_sta_send_to_fw(mvm, ap_sta, false);
if (ret)
return ret;
rcu_assign_pointer(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], ap_sta);
@ -763,6 +763,146 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
return ret;
}
static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
u32 base = mvm->error_event_table;
struct error_table_start {
/* cf. struct iwl_error_event_table */
u32 valid;
u32 error_id;
} err_info;
struct cfg80211_wowlan_wakeup wakeup = {
.pattern_idx = -1,
};
struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup;
struct iwl_host_cmd cmd = {
.id = WOWLAN_GET_STATUSES,
.flags = CMD_SYNC | CMD_WANT_SKB,
};
struct iwl_wowlan_status *status;
u32 reasons;
int ret, len;
bool pkt8023 = false;
struct sk_buff *pkt = NULL;
iwl_trans_read_mem_bytes(mvm->trans, base,
&err_info, sizeof(err_info));
if (err_info.valid) {
IWL_INFO(mvm, "error table is valid (%d)\n",
err_info.valid);
if (err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
wakeup.rfkill_release = true;
ieee80211_report_wowlan_wakeup(vif, &wakeup,
GFP_KERNEL);
}
return;
}
/* only for tracing for now */
ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, CMD_SYNC, 0, NULL);
if (ret)
IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret);
ret = iwl_mvm_send_cmd(mvm, &cmd);
if (ret) {
IWL_ERR(mvm, "failed to query status (%d)\n", ret);
return;
}
/* RF-kill already asserted again... */
if (!cmd.resp_pkt)
return;
len = le32_to_cpu(cmd.resp_pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
if (len - sizeof(struct iwl_cmd_header) < sizeof(*status)) {
IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
goto out;
}
status = (void *)cmd.resp_pkt->data;
if (len - sizeof(struct iwl_cmd_header) !=
sizeof(*status) + le32_to_cpu(status->wake_packet_bufsize)) {
IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
goto out;
}
reasons = le32_to_cpu(status->wakeup_reasons);
if (reasons == IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) {
wakeup_report = NULL;
goto report;
}
if (reasons & IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET) {
wakeup.magic_pkt = true;
pkt8023 = true;
}
if (reasons & IWL_WOWLAN_WAKEUP_BY_PATTERN) {
wakeup.pattern_idx =
le16_to_cpu(status->pattern_number);
pkt8023 = true;
}
if (reasons & (IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH))
wakeup.disconnect = true;
if (reasons & IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE) {
wakeup.gtk_rekey_failure = true;
pkt8023 = true;
}
if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED) {
wakeup.rfkill_release = true;
pkt8023 = true;
}
if (reasons & IWL_WOWLAN_WAKEUP_BY_EAPOL_REQUEST) {
wakeup.eap_identity_req = true;
pkt8023 = true;
}
if (reasons & IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE) {
wakeup.four_way_handshake = true;
pkt8023 = true;
}
if (status->wake_packet_bufsize) {
u32 pktsize = le32_to_cpu(status->wake_packet_bufsize);
u32 pktlen = le32_to_cpu(status->wake_packet_length);
if (pkt8023) {
pkt = alloc_skb(pktsize, GFP_KERNEL);
if (!pkt)
goto report;
memcpy(skb_put(pkt, pktsize), status->wake_packet,
pktsize);
if (ieee80211_data_to_8023(pkt, vif->addr, vif->type))
goto report;
wakeup.packet = pkt->data;
wakeup.packet_present_len = pkt->len;
wakeup.packet_len = pkt->len - (pktlen - pktsize);
wakeup.packet_80211 = false;
} else {
wakeup.packet = status->wake_packet;
wakeup.packet_present_len = pktsize;
wakeup.packet_len = pktlen;
wakeup.packet_80211 = true;
}
}
report:
ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL);
kfree_skb(pkt);
out:
iwl_free_resp(&cmd);
}
int iwl_mvm_resume(struct ieee80211_hw *hw)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
@ -770,14 +910,8 @@ int iwl_mvm_resume(struct ieee80211_hw *hw)
.mvm = mvm,
};
struct ieee80211_vif *vif = NULL;
-u32 base;
int ret;
enum iwl_d3_status d3_status;
-struct error_table_start {
-/* cf. struct iwl_error_event_table */
-u32 valid;
-u32 error_id;
-} err_info;
mutex_lock(&mvm->mutex);
@ -800,27 +934,7 @@ int iwl_mvm_resume(struct ieee80211_hw *hw)
goto out_unlock;
}
-base = mvm->error_event_table;
-iwl_trans_read_mem_bytes(mvm->trans, base,
-&err_info, sizeof(err_info));
-if (err_info.valid) {
-IWL_INFO(mvm, "error table is valid (%d)\n",
-err_info.valid);
-if (err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN)
-IWL_ERR(mvm, "this was due to RF-kill\n");
-goto out_unlock;
-}
-/* TODO: get status and whatever else ... */
-ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_GET_STATUSES, CMD_SYNC, 0, NULL);
-if (ret)
-IWL_ERR(mvm, "failed to query status (%d)\n", ret);
-ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, CMD_SYNC, 0, NULL);
-if (ret)
-IWL_ERR(mvm, "failed to query offloads (%d)\n", ret);
+iwl_mvm_query_wakeup_reasons(mvm, vif);
out_unlock:
mutex_unlock(&mvm->mutex);
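
A hedged reading of the two wake-packet length fields used in iwl_mvm_query_wakeup_reasons() above, with made-up numbers: wake_packet_bufsize is how much of the frame the firmware actually buffered (it is what the response length is validated against), while wake_packet_length is the size of the original frame. For the un-converted (packet_80211) branch this maps directly:

    u32 pktsize = 64;    /* wake_packet_bufsize: bytes present in the buffer */
    u32 pktlen = 1200;   /* wake_packet_length: the original frame's size */

    wakeup.packet = status->wake_packet;
    wakeup.packet_present_len = pktsize;  /* 64 bytes reportable */
    wakeup.packet_len = pktlen;           /* of an originally 1200-byte frame */
    wakeup.packet_80211 = true;

In the 802.3 branch the code adjusts by the same (pktlen - pktsize) delta after ieee80211_data_to_8023() rewrites the header, since the converted skb length alone no longer reflects the original frame size.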

View File

@ -633,6 +633,9 @@ struct iwl_binding_cmd {
__le32 phy;
} __packed; /* BINDING_CMD_API_S_VER_1 */
/* The maximal number of fragments in the FW's schedule session */
#define IWL_MVM_MAX_QUOTA 128
/**
* struct iwl_time_quota_data - configuration of time quota per binding
* @id_and_color: ID and color of the relevant Binding

View File

@ -621,10 +621,6 @@ int iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
(flags & CT_KILL_CARD_DISABLED) ?
"Reached" : "Not reached");
-if (flags & CARD_DISABLED_MSK)
-iwl_write32(mvm->trans, CSR_UCODE_DRV_GP1_SET,
-CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
return 0;
}

View File

@ -584,7 +584,11 @@ static void iwl_mvm_mac_ctxt_cmd_fill_sta(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct iwl_mac_data_sta *ctxt_sta)
{
-ctxt_sta->is_assoc = cpu_to_le32(vif->bss_conf.assoc ? 1 : 0);
+/* We need the dtim_period to set the MAC as associated */
+if (vif->bss_conf.assoc && vif->bss_conf.dtim_period)
+ctxt_sta->is_assoc = cpu_to_le32(1);
+else
+ctxt_sta->is_assoc = cpu_to_le32(0);
ctxt_sta->bi = cpu_to_le32(vif->bss_conf.beacon_int);
ctxt_sta->bi_reciprocal =

View File

@ -474,7 +474,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
if (mvm->vif_count > 1) {
IWL_DEBUG_MAC80211(mvm,
"Disable power on existing interfaces\n");
-ieee80211_iterate_active_interfaces(
+ieee80211_iterate_active_interfaces_atomic(
mvm->hw,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_pm_disable_iterator, mvm);
@ -670,8 +670,6 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
IWL_ERR(mvm, "failed to update quotas\n");
return;
}
-iwl_mvm_remove_time_event(mvm, mvmvif,
-&mvmvif->time_event_data);
} else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
/* remove AP station now that the MAC is unassoc */
ret = iwl_mvm_rm_sta_id(mvm, vif, mvmvif->ap_sta_id);
@ -683,6 +681,13 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
if (ret)
IWL_ERR(mvm, "failed to update quotas\n");
}
} else if (changes & BSS_CHANGED_DTIM_PERIOD) {
/*
* We received a beacon _after_ association so
* remove the session protection.
*/
iwl_mvm_remove_time_event(mvm, mvmvif,
&mvmvif->time_event_data);
} else if (changes & BSS_CHANGED_PS) {
/*
* TODO: remove this temporary code.
@ -921,8 +926,10 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
ret = 0;
} else if (old_state == IEEE80211_STA_AUTH &&
new_state == IEEE80211_STA_ASSOC) {
-iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band);
-ret = 0;
+ret = iwl_mvm_update_sta(mvm, vif, sta);
+if (ret == 0)
+iwl_mvm_rs_rate_init(mvm, sta,
+mvmvif->phy_ctxt->channel->band);
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTHORIZED) {
ret = 0;

View File

@ -536,25 +536,28 @@ static int iwl_mvm_rx_dispatch(struct iwl_op_mode *op_mode,
for (i = 0; i < ARRAY_SIZE(iwl_mvm_rx_handlers); i++) {
const struct iwl_rx_handlers *rx_h = &iwl_mvm_rx_handlers[i];
-if (rx_h->cmd_id == pkt->hdr.cmd) {
-struct iwl_async_handler_entry *entry;
-if (!rx_h->async)
-return rx_h->fn(mvm, rxb, cmd);
+struct iwl_async_handler_entry *entry;
-entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
-/* we can't do much... */
-if (!entry)
-return 0;
+if (rx_h->cmd_id != pkt->hdr.cmd)
+continue;
-entry->rxb._page = rxb_steal_page(rxb);
-entry->rxb._offset = rxb->_offset;
-entry->rxb._rx_page_order = rxb->_rx_page_order;
-entry->fn = rx_h->fn;
-spin_lock(&mvm->async_handlers_lock);
-list_add_tail(&entry->list, &mvm->async_handlers_list);
-spin_unlock(&mvm->async_handlers_lock);
-schedule_work(&mvm->async_handlers_wk);
-}
+if (!rx_h->async)
+return rx_h->fn(mvm, rxb, cmd);
+entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+/* we can't do much... */
+if (!entry)
+return 0;
+entry->rxb._page = rxb_steal_page(rxb);
+entry->rxb._offset = rxb->_offset;
+entry->rxb._rx_page_order = rxb->_rx_page_order;
+entry->fn = rx_h->fn;
+spin_lock(&mvm->async_handlers_lock);
+list_add_tail(&entry->list, &mvm->async_handlers_list);
+spin_unlock(&mvm->async_handlers_lock);
+schedule_work(&mvm->async_handlers_wk);
+break;
}
return 0;
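
Handlers flagged async cannot run in this RX path, so the dispatcher snapshots the RX buffer (rxb_steal_page() keeps the page from being recycled) and defers to a work item. The draining side is not part of this hunk; a sketch of what it plausibly looks like, where details such as the exact free helper are assumptions:

    static void async_handlers_wk_sketch(struct work_struct *wk)
    {
        struct iwl_mvm *mvm =
            container_of(wk, struct iwl_mvm, async_handlers_wk);
        struct iwl_async_handler_entry *entry, *tmp;
        LIST_HEAD(local);

        /* detach the whole list under the lock, run handlers lock-free */
        spin_lock_bh(&mvm->async_handlers_lock);
        list_splice_init(&mvm->async_handlers_list, &local);
        spin_unlock_bh(&mvm->async_handlers_lock);

        list_for_each_entry_safe(entry, tmp, &local, list) {
            entry->fn(mvm, &entry->rxb, NULL);
            iwl_free_rxb(&entry->rxb);  /* assumed free helper */
            list_del(&entry->list);
            kfree(entry);
        }
    }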

View File

@ -194,7 +194,7 @@ int iwl_mvm_power_disable(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
cmd.id_and_color, iwlmvm_mod_params.power_scheme,
le16_to_cpu(cmd.flags));
-return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_SYNC,
+return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_ASYNC,
sizeof(cmd), &cmd);
}

View File

@ -131,7 +131,7 @@ static void iwl_mvm_quota_iterator(void *_data, u8 *mac,
int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
{
struct iwl_time_quota_cmd cmd;
-int i, idx, ret;
+int i, idx, ret, num_active_bindings, quota, quota_rem;
struct iwl_mvm_quota_iterator_data data = {
.n_interfaces = {},
.colors = { -1, -1, -1, -1 },
@ -156,20 +156,39 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
iwl_mvm_quota_iterator(&data, newvif->addr, newvif);
}
/*
* The FW's scheduling session consists of
* IWL_MVM_MAX_QUOTA fragments. Divide these fragments
* equally between all the bindings that require quota
*/
num_active_bindings = 0;
for (i = 0; i < MAX_BINDINGS; i++) {
cmd.quotas[i].id_and_color = cpu_to_le32(FW_CTXT_INVALID);
if (data.n_interfaces[i] > 0)
num_active_bindings++;
}
if (!num_active_bindings)
goto send_cmd;
quota = IWL_MVM_MAX_QUOTA / num_active_bindings;
quota_rem = IWL_MVM_MAX_QUOTA % num_active_bindings;
for (idx = 0, i = 0; i < MAX_BINDINGS; i++) {
if (data.n_interfaces[i] <= 0)
continue;
cmd.quotas[idx].id_and_color =
cpu_to_le32(FW_CMD_ID_AND_COLOR(i, data.colors[i]));
-cmd.quotas[idx].quota = cpu_to_le32(100);
-cmd.quotas[idx].max_duration = cpu_to_le32(1000);
+cmd.quotas[idx].quota = cpu_to_le32(quota);
+cmd.quotas[idx].max_duration = cpu_to_le32(IWL_MVM_MAX_QUOTA);
idx++;
}
-for (i = idx; i < MAX_BINDINGS; i++)
-cmd.quotas[i].id_and_color = cpu_to_le32(FW_CTXT_INVALID);
/* Give the remainder of the session to the first binding */
le32_add_cpu(&cmd.quotas[0].quota, quota_rem);
send_cmd:
ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, CMD_SYNC,
sizeof(cmd), &cmd);
if (ret)
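Worked example of the split above: with IWL_MVM_MAX_QUOTA = 128 and three active bindings, quota = 128 / 3 = 42 fragments per binding and quota_rem = 128 % 3 = 2; the le32_add_cpu() call gives the first binding 44, so 44 + 42 + 42 accounts for all 128 fragments of the firmware's scheduling session.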

View File

@ -121,7 +121,7 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
-ieee80211_rx(mvm->hw, skb);
+ieee80211_rx_ni(mvm->hw, skb);
}
/*

View File

@ -81,8 +81,9 @@ static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm)
return IWL_MVM_STATION_COUNT;
}
-/* add a NEW station to fw */
-int iwl_mvm_sta_add_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta)
+/* send station add/update command to firmware */
+int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+bool update)
{
struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
struct iwl_mvm_add_sta_cmd add_sta_cmd;
@ -94,8 +95,11 @@ int iwl_mvm_sta_add_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta)
add_sta_cmd.sta_id = mvm_sta->sta_id;
add_sta_cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
-add_sta_cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
-memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);
+if (!update) {
+add_sta_cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
+memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);
+}
+add_sta_cmd.add_modify = update ? 1 : 0;
/* STA_FLG_FAT_EN_MSK ? */
/* STA_FLG_MIMO_EN_MSK ? */
@ -181,7 +185,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
/* for HW restart - need to reset the seq_number etc... */
memset(mvm_sta->tid_data, 0, sizeof(mvm_sta->tid_data));
-ret = iwl_mvm_sta_add_to_fw(mvm, sta);
+ret = iwl_mvm_sta_send_to_fw(mvm, sta, false);
if (ret)
return ret;
@ -195,6 +199,13 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
return 0;
}
int iwl_mvm_update_sta(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
return iwl_mvm_sta_send_to_fw(mvm, sta, true);
}
int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
bool drain)
{
@ -1116,7 +1127,8 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
if (WARN_ON_ONCE(mvm_sta->vif != vif))
return -EINVAL;
-key_flags = cpu_to_le16(keyconf->keyidx & STA_KEY_FLG_KEYID_MSK);
+key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
+STA_KEY_FLG_KEYID_MSK);
key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);
@ -1154,14 +1166,26 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
struct ieee80211_sta *sta, u32 iv32,
u16 *phase1key)
{
-struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
+struct iwl_mvm_sta *mvm_sta;
u8 sta_id = iwl_mvm_get_key_sta_id(vif, sta);
-if (sta_id == IWL_INVALID_STATION)
+if (WARN_ON_ONCE(sta_id == IWL_INVALID_STATION))
return;
rcu_read_lock();
if (!sta) {
sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
if (WARN_ON(IS_ERR_OR_NULL(sta))) {
rcu_read_unlock();
return;
}
}
mvm_sta = (void *)sta->drv_priv;
iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, sta_id,
iv32, phase1key, CMD_ASYNC);
rcu_read_unlock();
}
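
The key-flags change above matters because the old code masked the raw index instead of shifting it into the key-ID field. For illustration, assuming STA_KEY_FLG_KEYID_POS is 8 and STA_KEY_FLG_KEYID_MSK is 0x300 (the real values are not shown in this diff): keyidx 1 used to produce 1 & 0x300 = 0, silently addressing key 0, while the fixed expression produces (1 << 8) & 0x300 = 0x100, addressing key 1 as intended.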
void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm, int sta_id)

View File

@ -309,10 +309,14 @@ struct iwl_mvm_int_sta {
u32 tfd_queue_msk;
};
-int iwl_mvm_sta_add_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta);
+int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+bool update);
int iwl_mvm_add_sta(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
int iwl_mvm_update_sta(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta);

View File

@ -76,6 +76,15 @@
#define TU_TO_JIFFIES(_tu) (usecs_to_jiffies((_tu) * 1024))
#define MSEC_TO_TU(_msec) (_msec*1000/1024)
/* For ROC use a TE type which has priority high enough to be scheduled when
* there is a concurrent BSS or GO/AP. Currently, use a TE type that has
* priority similar to the TE priority used for action scans by the FW.
* TODO: This needs to be changed, based on the reason for the ROC, i.e., use
* TE_P2P_DEVICE_DISCOVERABLE for remain on channel without mgmt skb, and use
* TE_P2P_DEVICE_ACTION_SCAN for remain on channel with mgmt skb.
*/
#define IWL_MVM_ROC_TE_TYPE TE_P2P_DEVICE_ACTION_SCAN
void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
struct iwl_mvm_time_event_data *te_data)
{
@ -175,9 +184,11 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
*/
if (te_data->vif->type == NL80211_IFTYPE_STATION &&
(!te_data->vif->bss_conf.assoc ||
-!te_data->vif->bss_conf.dtim_period))
+!te_data->vif->bss_conf.dtim_period)) {
IWL_ERR(mvm,
"No association and the time event is over already...\n");
ieee80211_connection_loss(te_data->vif);
}
iwl_mvm_te_clear_data(mvm, te_data);
} else if (le32_to_cpu(notif->action) == TE_NOTIF_HOST_START) {
@ -219,57 +230,86 @@ int iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
return 0;
}
-static bool iwl_mvm_time_event_notif(struct iwl_notif_wait_data *notif_wait,
-struct iwl_rx_packet *pkt, void *data)
+static bool iwl_mvm_time_event_response(struct iwl_notif_wait_data *notif_wait,
+struct iwl_rx_packet *pkt, void *data)
{
struct iwl_mvm *mvm =
container_of(notif_wait, struct iwl_mvm, notif_wait);
struct iwl_mvm_time_event_data *te_data = data;
-struct ieee80211_vif *vif = te_data->vif;
-struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-struct iwl_time_event_notif *notif;
struct iwl_time_event_resp *resp;
+int resp_len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
-u32 mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
-/* until we do something else */
-WARN_ON(te_data->id != TE_BSS_STA_AGGRESSIVE_ASSOC);
-switch (pkt->hdr.cmd) {
-case TIME_EVENT_CMD:
-resp = (void *)pkt->data;
-/* TODO: I can't check that since the fw is buggy - it doesn't
-* put the right values when we remove a TE. We can be here
-* when we remove a TE because the remove TE command is sent in
-* ASYNC...
-* WARN_ON(mac_id_n_color != le32_to_cpu(resp->id_and_color));
-*/
-te_data->uid = le32_to_cpu(resp->unique_id);
-IWL_DEBUG_TE(mvm, "Got response - UID = 0x%x\n", te_data->uid);
-return false;
-case TIME_EVENT_NOTIFICATION:
-notif = (void *)pkt->data;
-WARN_ON(le32_to_cpu(notif->status) != 1);
-WARN_ON(mac_id_n_color != le32_to_cpu(notif->id_and_color));
-/* check if this is our Time Event that is starting */
-if (le32_to_cpu(notif->unique_id) != te_data->uid)
-return false;
-IWL_DEBUG_TE(mvm, "Event %d is starting - time is %d\n",
-te_data->uid, le32_to_cpu(notif->timestamp));
-WARN_ONCE(!le32_to_cpu(notif->status),
-"Failed to schedule protected session TE\n");
-te_data->running = true;
-te_data->end_jiffies = jiffies +
-TU_TO_JIFFIES(te_data->duration);
+if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_CMD))
+return true;
-default:
-WARN_ON(1);
-return false;
-};
+if (WARN_ON_ONCE(resp_len != sizeof(pkt->hdr) + sizeof(*resp))) {
+IWL_ERR(mvm, "Invalid TIME_EVENT_CMD response\n");
+return true;
+}
+resp = (void *)pkt->data;
+te_data->uid = le32_to_cpu(resp->unique_id);
+IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n",
+te_data->uid);
+return true;
}
static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct iwl_mvm_time_event_data *te_data,
struct iwl_time_event_cmd *te_cmd)
{
static const u8 time_event_response[] = { TIME_EVENT_CMD };
struct iwl_notification_wait wait_time_event;
int ret;
lockdep_assert_held(&mvm->mutex);
spin_lock_bh(&mvm->time_event_lock);
if (WARN_ON(te_data->id != TE_MAX)) {
spin_unlock_bh(&mvm->time_event_lock);
return -EIO;
}
te_data->vif = vif;
te_data->duration = le32_to_cpu(te_cmd->duration);
te_data->id = le32_to_cpu(te_cmd->id);
list_add_tail(&te_data->list, &mvm->time_event_list);
spin_unlock_bh(&mvm->time_event_lock);
/*
* Use a notification wait, which really just processes the
* command response and doesn't wait for anything, in order
* to be able to process the response and get the UID inside
* the RX path. Using CMD_WANT_SKB doesn't work because it
* stores the buffer and then wakes up this thread, by which
* time another notification (that the time event started)
* might already be processed unsuccessfully.
*/
iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event,
time_event_response,
ARRAY_SIZE(time_event_response),
iwl_mvm_time_event_response, te_data);
ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_SYNC,
sizeof(*te_cmd), te_cmd);
if (ret) {
IWL_ERR(mvm, "Couldn't send TIME_EVENT_CMD: %d\n", ret);
iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
goto out_clear_te;
}
/* No need to wait for anything, so just pass 1 (0 isn't valid) */
ret = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1);
/* should never fail */
WARN_ON_ONCE(ret);
if (ret) {
out_clear_te:
spin_lock_bh(&mvm->time_event_lock);
iwl_mvm_te_clear_data(mvm, te_data);
spin_unlock_bh(&mvm->time_event_lock);
}
return ret;
}
void iwl_mvm_protect_session(struct iwl_mvm *mvm,
@ -278,11 +318,7 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
-static const u8 time_event_notif[] = { TIME_EVENT_CMD,
-TIME_EVENT_NOTIFICATION };
-struct iwl_notification_wait wait_time_event;
struct iwl_time_event_cmd time_cmd = {};
-int ret;
lockdep_assert_held(&mvm->mutex);
@ -309,12 +345,6 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
iwl_mvm_stop_session_protection(mvm, vif);
}
-iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event,
-time_event_notif,
-ARRAY_SIZE(time_event_notif),
-iwl_mvm_time_event_notif,
-&mvmvif->time_event_data);
time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
time_cmd.id_and_color =
cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
@ -322,6 +352,7 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
time_cmd.apply_time =
cpu_to_le32(iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG));
time_cmd.dep_policy = TE_INDEPENDENT;
time_cmd.is_present = cpu_to_le32(1);
time_cmd.max_frags = cpu_to_le32(TE_FRAG_NONE);
@ -333,33 +364,7 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
time_cmd.repeat = cpu_to_le32(1);
time_cmd.notify = cpu_to_le32(TE_NOTIF_HOST_START | TE_NOTIF_HOST_END);
-te_data->vif = vif;
-te_data->duration = duration;
-spin_lock_bh(&mvm->time_event_lock);
-te_data->id = le32_to_cpu(time_cmd.id);
-list_add_tail(&te_data->list, &mvm->time_event_list);
-spin_unlock_bh(&mvm->time_event_lock);
-ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_SYNC,
-sizeof(time_cmd), &time_cmd);
-if (ret) {
-IWL_ERR(mvm, "Couldn't send TIME_EVENT_CMD: %d\n", ret);
-goto out_remove_notif;
-}
-ret = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1 * HZ);
-if (ret) {
-IWL_ERR(mvm, "%s - failed on timeout\n", __func__);
-spin_lock_bh(&mvm->time_event_lock);
-iwl_mvm_te_clear_data(mvm, te_data);
-spin_unlock_bh(&mvm->time_event_lock);
-}
-return;
-out_remove_notif:
-iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
+iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
}
/*
@ -424,43 +429,12 @@ void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm,
iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
}
-static bool iwl_mvm_roc_te_notif(struct iwl_notif_wait_data *notif_wait,
-struct iwl_rx_packet *pkt, void *data)
-{
-struct iwl_mvm *mvm =
-container_of(notif_wait, struct iwl_mvm, notif_wait);
-struct iwl_mvm_time_event_data *te_data = data;
-struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
-struct iwl_time_event_resp *resp;
-u32 mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
-/* until we do something else */
-WARN_ON(te_data->id != TE_P2P_DEVICE_DISCOVERABLE);
-switch (pkt->hdr.cmd) {
-case TIME_EVENT_CMD:
-resp = (void *)pkt->data;
-WARN_ON(mac_id_n_color != le32_to_cpu(resp->id_and_color));
-te_data->uid = le32_to_cpu(resp->unique_id);
-IWL_DEBUG_TE(mvm, "Got response - UID = 0x%x\n", te_data->uid);
-return true;
-default:
-WARN_ON(1);
-return false;
-};
-}
int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
int duration)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
-static const u8 roc_te_notif[] = { TIME_EVENT_CMD };
-struct iwl_notification_wait wait_time_event;
struct iwl_time_event_cmd time_cmd = {};
-int ret;
lockdep_assert_held(&mvm->mutex);
if (te_data->running) {
@ -474,16 +448,10 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
*/
flush_work(&mvm->roc_done_wk);
-iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event,
-roc_te_notif,
-ARRAY_SIZE(roc_te_notif),
-iwl_mvm_roc_te_notif,
-&mvmvif->time_event_data);
time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
time_cmd.id_and_color =
cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
-time_cmd.id = cpu_to_le32(TE_P2P_DEVICE_DISCOVERABLE);
+time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE);
time_cmd.apply_time = cpu_to_le32(0);
time_cmd.dep_policy = cpu_to_le32(TE_INDEPENDENT);
@ -492,7 +460,7 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
time_cmd.interval = cpu_to_le32(1);
/*
-* TE_P2P_DEVICE_DISCOVERABLE can have lower priority than other events
+* IWL_MVM_ROC_TE_TYPE can have lower priority than other events
* that are being scheduled by the driver/fw, and thus it might not be
* scheduled. To improve the chances of it being scheduled, allow it to
* be fragmented.
@ -505,33 +473,7 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
time_cmd.repeat = cpu_to_le32(1);
time_cmd.notify = cpu_to_le32(TE_NOTIF_HOST_START | TE_NOTIF_HOST_END);
-/* Push the te data to the tracked te list */
-te_data->vif = vif;
-te_data->duration = MSEC_TO_TU(duration);
-spin_lock_bh(&mvm->time_event_lock);
-te_data->id = le32_to_cpu(time_cmd.id);
-list_add_tail(&te_data->list, &mvm->time_event_list);
-spin_unlock_bh(&mvm->time_event_lock);
-ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_SYNC,
-sizeof(time_cmd), &time_cmd);
-if (ret) {
-IWL_ERR(mvm, "Couldn't send TIME_EVENT_CMD: %d\n", ret);
-goto out_remove_notif;
-}
-ret = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1 * HZ);
-if (ret) {
-IWL_ERR(mvm, "%s - failed on timeout\n", __func__);
-iwl_mvm_te_clear_data(mvm, te_data);
-}
-return ret;
-out_remove_notif:
-iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
-return ret;
+return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
}
void iwl_mvm_stop_p2p_roc(struct iwl_mvm *mvm)

View File

@ -620,7 +620,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
seq_ctl = le16_to_cpu(hdr->seq_ctrl);
}
-ieee80211_tx_status(mvm->hw, skb);
+ieee80211_tx_status_ni(mvm->hw, skb);
}
if (txq_id >= IWL_FIRST_AMPDU_QUEUE) {
@ -663,12 +663,12 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
struct iwl_mvm_tid_data *tid_data =
&mvmsta->tid_data[tid];
-spin_lock(&mvmsta->lock);
+spin_lock_bh(&mvmsta->lock);
tid_data->next_reclaimed = next_reclaimed;
IWL_DEBUG_TX_REPLY(mvm, "Next reclaimed packet:%d\n",
next_reclaimed);
iwl_mvm_check_ratid_empty(mvm, sta, tid);
-spin_unlock(&mvmsta->lock);
+spin_unlock_bh(&mvmsta->lock);
}
#ifdef CONFIG_PM_SLEEP
@ -832,7 +832,7 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
return 0;
}
-spin_lock(&mvmsta->lock);
+spin_lock_bh(&mvmsta->lock);
__skb_queue_head_init(&reclaimed_skbs);
@ -886,13 +886,13 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
}
}
-spin_unlock(&mvmsta->lock);
+spin_unlock_bh(&mvmsta->lock);
rcu_read_unlock();
while (!skb_queue_empty(&reclaimed_skbs)) {
skb = __skb_dequeue(&reclaimed_skbs);
-ieee80211_tx_status(mvm->hw, skb);
+ieee80211_tx_status_ni(mvm->hw, skb);
}
return 0;

View File

@ -249,7 +249,6 @@ struct iwl_trans_pcie {
int ict_index;
u32 inta;
bool use_ict;
-struct tasklet_struct irq_tasklet;
struct isr_statistics isr_stats;
spinlock_t irq_lock;
@ -330,7 +329,7 @@ void iwl_trans_pcie_free(struct iwl_trans *trans);
* RX
******************************************************/
int iwl_pcie_rx_init(struct iwl_trans *trans);
-void iwl_pcie_tasklet(struct iwl_trans *trans);
+irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
int iwl_pcie_rx_stop(struct iwl_trans *trans);
void iwl_pcie_rx_free(struct iwl_trans *trans);

View File

@ -81,10 +81,10 @@
* 'processed' and 'read' driver indexes as well)
* + A received packet is processed and handed to the kernel network stack,
* detached from the iwl->rxq. The driver 'processed' index is updated.
-* + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
-* list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
-* INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
-* were enough free buffers and RX_STALLED is set it is cleared.
+* + The Host/Firmware iwl->rxq is replenished at irq thread time from the
+* rx_free list. If there are no allocated buffers in iwl->rxq->rx_free,
+* the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
+* If there were enough free buffers and RX_STALLED is set it is cleared.
*
*
* Driver sequence:
@ -214,9 +214,9 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
/*
* If the device isn't enabled - no need to try to add buffers...
* This can happen when we stop the device and still have an interrupt
-* pending. We stop the APM before we sync the interrupts / tasklets
-* because we have to (see comment there). On the other hand, since
-* the APM is stopped, we cannot access the HW (in particular not prph).
+* pending. We stop the APM before we sync the interrupts because we
+* have to (see comment there). On the other hand, since the APM is
+* stopped, we cannot access the HW (in particular not prph).
* So don't try to restock if the APM has been already stopped.
*/
if (!test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status))
@ -796,11 +796,14 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
wake_up(&trans_pcie->wait_command_queue);
local_bh_disable();
iwl_op_mode_nic_error(trans->op_mode);
local_bh_enable();
}
-void iwl_pcie_tasklet(struct iwl_trans *trans)
+irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
{
+struct iwl_trans *trans = dev_id;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
u32 inta = 0;
@ -811,6 +814,8 @@ void iwl_pcie_tasklet(struct iwl_trans *trans)
u32 inta_mask;
#endif
lock_map_acquire(&trans->sync_cmd_lockdep_map);
spin_lock_irqsave(&trans_pcie->irq_lock, flags);
/* Ack/clear/reset pending uCode interrupts.
@ -855,7 +860,7 @@ void iwl_pcie_tasklet(struct iwl_trans *trans)
handled |= CSR_INT_BIT_HW_ERR;
-return;
+goto out;
}
#ifdef CONFIG_IWLWIFI_DEBUG
@ -1005,6 +1010,10 @@ void iwl_pcie_tasklet(struct iwl_trans *trans)
/* Re-enable RF_KILL if it occurred */
else if (handled & CSR_INT_BIT_RF_KILL)
iwl_enable_rfkill_int(trans);
out:
lock_map_release(&trans->sync_cmd_lockdep_map);
return IRQ_HANDLED;
}
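
This is the standard request_threaded_irq() split: a small hard-IRQ handler that only acknowledges the device and returns IRQ_WAKE_THREAD, and a thread handler that runs the heavy lifting in sleepable process context. A generic sketch with illustrative names that are not from iwlwifi:

    static irqreturn_t quick_check(int irq, void *dev_id)
    {
        struct my_dev *dev = dev_id;

        /* hard-IRQ context: no sleeping; just claim or disclaim the line */
        if (!my_dev_irq_pending(dev))  /* hypothetical helper */
            return IRQ_NONE;           /* shared line, not our interrupt */
        my_dev_mask_irqs(dev);         /* hypothetical helper */
        return IRQ_WAKE_THREAD;        /* defer the real work */
    }

    static irqreturn_t thread_fn(int irq, void *dev_id)
    {
        /* process context: may sleep, take mutexes, call *_ni() APIs */
        return IRQ_HANDLED;
    }

    /* at probe time: */
    err = request_threaded_irq(irq, quick_check, thread_fn,
                               IRQF_SHARED, "mydrv", dev);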
/******************************************************************************
@ -1127,7 +1136,7 @@ static irqreturn_t iwl_pcie_isr(int irq, void *data)
/* Disable (but don't clear!) interrupts here to avoid
* back-to-back ISRs and sporadic interrupts from our NIC.
-* If we have something to service, the tasklet will re-enable ints.
+* If we have something to service, the irq thread will re-enable ints.
* If we *don't* have something, we'll re-enable before leaving here. */
inta_mask = iwl_read32(trans, CSR_INT_MASK);
iwl_write32(trans, CSR_INT_MASK, 0x00000000);
@ -1167,9 +1176,9 @@ static irqreturn_t iwl_pcie_isr(int irq, void *data)
#endif
trans_pcie->inta |= inta;
-/* iwl_pcie_tasklet() will service interrupts and re-enable them */
+/* the thread will service interrupts and re-enable them */
if (likely(inta))
-tasklet_schedule(&trans_pcie->irq_tasklet);
+return IRQ_WAKE_THREAD;
else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
!trans_pcie->inta)
iwl_enable_interrupts(trans);
@ -1277,9 +1286,10 @@ irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
trans_pcie->inta |= inta;
/* iwl_pcie_tasklet() will service interrupts and re-enable them */
-if (likely(inta))
-tasklet_schedule(&trans_pcie->irq_tasklet);
-else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
+if (likely(inta)) {
+spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+return IRQ_WAKE_THREAD;
+} else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
!trans_pcie->inta) {
/* Allow interrupt if it was disabled by this handler and
* no tasklet was scheduled. We should not enable interrupts,

View File

@ -760,7 +760,6 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
synchronize_irq(trans_pcie->pci_dev->irq);
-tasklet_kill(&trans_pcie->irq_tasklet);
iwl_pcie_tx_free(trans);
iwl_pcie_rx_free(trans);
@ -1480,6 +1479,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
trans->ops = &trans_ops_pcie;
trans->cfg = cfg;
trans_lockdep_init(trans);
trans_pcie->trans = trans;
spin_lock_init(&trans_pcie->irq_lock);
spin_lock_init(&trans_pcie->reg_lock);
@ -1567,15 +1567,12 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
trans_pcie->inta_mask = CSR_INI_SET_MASK;
-tasklet_init(&trans_pcie->irq_tasklet, (void (*)(unsigned long))
-iwl_pcie_tasklet, (unsigned long)trans);
if (iwl_pcie_alloc_ict(trans))
goto out_free_cmd_pool;
-err = request_irq(pdev->irq, iwl_pcie_isr_ict,
-IRQF_SHARED, DRV_NAME, trans);
-if (err) {
+if (request_threaded_irq(pdev->irq, iwl_pcie_isr_ict,
+iwl_pcie_irq_handler,
+IRQF_SHARED, DRV_NAME, trans)) {
IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
goto out_free_ict;
}

View File

@ -926,7 +926,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
if (WARN_ON(txq_id == trans_pcie->cmd_queue))
return;
-spin_lock(&txq->lock);
+spin_lock_bh(&txq->lock);
if (txq->q.read_ptr == tfd_num)
goto out;
@ -970,7 +970,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
if (iwl_queue_space(&txq->q) > txq->q.low_mark)
iwl_wake_queue(trans, txq);
out:
-spin_unlock(&txq->lock);
+spin_unlock_bh(&txq->lock);
}
/*
@ -1371,7 +1371,7 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
return;
}
-spin_lock(&txq->lock);
+spin_lock_bh(&txq->lock);
cmd_index = get_cmd_index(&txq->q, index);
cmd = txq->entries[cmd_index].cmd;
@ -1405,7 +1405,7 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
meta->flags = 0;
-spin_unlock(&txq->lock);
+spin_unlock_bh(&txq->lock);
}
#define HOST_COMPLETE_TIMEOUT (2 * HZ)
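
A closing note on the recurring substitutions in this merge, summarized from the changes themselves rather than from mac80211 documentation: the driver used to run its RX and TX-completion work in a tasklet (softirq context), where plain spin_lock(), ieee80211_rx() and ieee80211_tx_status() were correct. With the threaded IRQ, that work runs in process context, so the _ni variants (which disable bottom halves around the mac80211 core) and spin_lock_bh() (which keeps locks shared with softirq users deadlock-free) are required instead:

    softirq (tasklet) context:   spin_lock(),    ieee80211_rx(),    ieee80211_tx_status()
    process (IRQ thread) context: spin_lock_bh(), ieee80211_rx_ni(), ieee80211_tx_status_ni()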