lists.openwall.net | lists / announce owl-users owl-dev john-users john-dev passwdqc-users yescrypt popa3d-users / oss-security kernel-hardening musl sabotage tlsify passwords / crypt-dev xvendor / Bugtraq Full-Disclosure linux-kernel linux-netdev linux-ext4 linux-hardening PHC | |
Open Source and information security mailing list archives
| ||
|
Date: Wed, 15 Jan 2020 13:54:07 +0000 From: Jérôme Pouiller <Jerome.Pouiller@...abs.com> To: "devel@...verdev.osuosl.org" <devel@...verdev.osuosl.org>, "linux-wireless@...r.kernel.org" <linux-wireless@...r.kernel.org> CC: "netdev@...r.kernel.org" <netdev@...r.kernel.org>, "linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>, Greg Kroah-Hartman <gregkh@...uxfoundation.org>, Kalle Valo <kvalo@...eaurora.org>, "David S . Miller" <davem@...emloft.net>, Jérôme Pouiller <Jerome.Pouiller@...abs.com> Subject: [PATCH v2 04/65] staging: wfx: send rate policies one by one From: Jérôme Pouiller <jerome.pouiller@...abs.com> Rate policies (a.k.a. tx_rate_retry_policy in the hardware API) are sent to the device asynchronously from tx requests. So, the device maintains a list of active rate policies and the tx requests only reference an existing rate policy. The device API allows sending multiple rate policies at once. However, this property is very rarely used. We prefer to send rate policies one by one and simplify the architecture. 
Signed-off-by: Jérôme Pouiller <jerome.pouiller@...abs.com> --- drivers/staging/wfx/data_tx.c | 53 +++++++++++++++++------------------ 1 file changed, 25 insertions(+), 28 deletions(-) diff --git a/drivers/staging/wfx/data_tx.c b/drivers/staging/wfx/data_tx.c index b2a325c47b2d..fb51c5910ace 100644 --- a/drivers/staging/wfx/data_tx.c +++ b/drivers/staging/wfx/data_tx.c @@ -217,37 +217,34 @@ static void wfx_tx_policy_put(struct wfx_vif *wvif, int idx) static int wfx_tx_policy_upload(struct wfx_vif *wvif) { - int i; - struct tx_policy_cache *cache = &wvif->tx_policy_cache; struct hif_mib_set_tx_rate_retry_policy *arg = - kzalloc(struct_size(arg, - tx_rate_retry_policy, - HIF_MIB_NUM_TX_RATE_RETRY_POLICIES), - GFP_KERNEL); - struct hif_mib_tx_rate_retry_policy *dst; + kzalloc(struct_size(arg, tx_rate_retry_policy, 1), GFP_KERNEL); + struct tx_policy *policies = wvif->tx_policy_cache.cache; + int i; - spin_lock_bh(&cache->lock); - /* Upload only modified entries. */ - for (i = 0; i < HIF_MIB_NUM_TX_RATE_RETRY_POLICIES; ++i) { - struct tx_policy *src = &cache->cache[i]; - - if (!src->uploaded && memzcmp(src->rates, sizeof(src->rates))) { - dst = arg->tx_rate_retry_policy + - arg->num_tx_rate_policies; - - dst->policy_index = i; - dst->short_retry_count = 255; - dst->long_retry_count = 255; - dst->first_rate_sel = 1; - dst->terminate = 1; - dst->count_init = 1; - memcpy(&dst->rates, src->rates, sizeof(src->rates)); - src->uploaded = true; - arg->num_tx_rate_policies++; + do { + spin_lock_bh(&wvif->tx_policy_cache.lock); + for (i = 0; i < HIF_MIB_NUM_TX_RATE_RETRY_POLICIES; ++i) + if (!policies[i].uploaded && + memzcmp(policies[i].rates, sizeof(policies[i].rates))) + break; + if (i < HIF_MIB_NUM_TX_RATE_RETRY_POLICIES) { + policies[i].uploaded = 1; + arg->num_tx_rate_policies = 1; + arg->tx_rate_retry_policy[0].policy_index = i; + arg->tx_rate_retry_policy[0].short_retry_count = 255; + arg->tx_rate_retry_policy[0].long_retry_count = 255; + 
arg->tx_rate_retry_policy[0].first_rate_sel = 1; + arg->tx_rate_retry_policy[0].terminate = 1; + arg->tx_rate_retry_policy[0].count_init = 1; + memcpy(&arg->tx_rate_retry_policy[0].rates, + policies[i].rates, sizeof(policies[i].rates)); + spin_unlock_bh(&wvif->tx_policy_cache.lock); + hif_set_tx_rate_retry_policy(wvif, arg); + } else { + spin_unlock_bh(&wvif->tx_policy_cache.lock); } - } - spin_unlock_bh(&cache->lock); - hif_set_tx_rate_retry_policy(wvif, arg); + } while (i < HIF_MIB_NUM_TX_RATE_RETRY_POLICIES); kfree(arg); return 0; } -- 2.25.0
Powered by blists - more mailing lists