Message-Id: <1271940702-17064-8-git-send-email-amit.salecha@qlogic.com>
Date: Thu, 22 Apr 2010 05:51:41 -0700
From: Amit Kumar Salecha <amit.salecha@...gic.com>
To: davem@...emloft.net
Cc: netdev@...r.kernel.org, ameen.rahman@...gic.com
Subject: [PATCH NEXT 7/8] qlcnic: protect resource access

netif_device_attach() is called even when resource allocation fails.
Once the device is attached, driver callbacks can be invoked, so each
of these callbacks must be guarded by an ADAPTER_UP_MAGIC check before
it touches resources that may never have been set up.

Signed-off-by: Amit Kumar Salecha <amit.salecha@...gic.com>
---
drivers/net/qlcnic/qlcnic_main.c | 11 +++++++++++
1 files changed, 11 insertions(+), 0 deletions(-)
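
For reviewers, the guard in the first two hunks reduces to the pattern
sketched below. This is an illustrative stand-in only, not driver code:
ctx, UP_MAGIC and teardown_rings are hypothetical names playing the
roles of struct qlcnic_adapter, QLCNIC_ADAPTER_UP_MAGIC and the NAPI
enable/disable paths.

/* Sketch of the guard pattern (hypothetical names, not qlcnic code). */
#include <stddef.h>

#define UP_MAGIC 0x55555555u	/* stands in for QLCNIC_ADAPTER_UP_MAGIC */

struct ctx {
	unsigned int is_up;	/* set to UP_MAGIC once resources exist */
};

static void teardown_rings(struct ctx *c)
{
	/* Bail out if the device was attached but resource allocation
	 * failed: nothing was brought up, so there is nothing to touch. */
	if (c == NULL || c->is_up != UP_MAGIC)
		return;

	/* ... safe to walk rings / NAPI contexts here ... */
}
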
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index bfc5510..ee573fe 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -208,6 +208,9 @@ qlcnic_napi_enable(struct qlcnic_adapter *adapter)
 	struct qlcnic_host_sds_ring *sds_ring;
 	struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
 
+	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+		return;
+
 	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
 		sds_ring = &recv_ctx->sds_rings[ring];
 		napi_enable(&sds_ring->napi);
@@ -222,6 +225,9 @@ qlcnic_napi_disable(struct qlcnic_adapter *adapter)
 	struct qlcnic_host_sds_ring *sds_ring;
 	struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
 
+	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+		return;
+
 	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
 		sds_ring = &recv_ctx->sds_rings[ring];
 		qlcnic_disable_int(sds_ring);
@@ -1573,6 +1579,11 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	int frag_count, no_of_desc;
 	u32 num_txd = tx_ring->num_desc;
 
+	if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
+		netif_stop_queue(netdev);
+		return NETDEV_TX_BUSY;
+	}
+
 	frag_count = skb_shinfo(skb)->nr_frags + 1;
 
 	/* 4 fragments per cmd des */
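
One note on the xmit hunk: stopping the queue before returning
NETDEV_TX_BUSY means the core requeues the skb and stops calling
ndo_start_xmit until the queue is woken again, so no traffic is
dropped while the device is not fully up. A minimal guarded
start_xmit, again with hypothetical names (my_priv, MY_DEV_UP)
rather than the driver's own, would look like:

/* Sketch only: a guarded ndo_start_xmit (hypothetical names). */
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/bitops.h>

#define MY_DEV_UP	0	/* bit in priv->state, like __QLCNIC_DEV_UP */

struct my_priv {
	unsigned long state;
};

static netdev_tx_t my_start_xmit(struct sk_buff *skb,
				 struct net_device *netdev)
{
	struct my_priv *priv = netdev_priv(netdev);

	if (!test_bit(MY_DEV_UP, &priv->state)) {
		/* Not up yet: quiesce the queue and have the stack
		 * requeue this skb instead of dropping it. */
		netif_stop_queue(netdev);
		return NETDEV_TX_BUSY;
	}

	/* ... map fragments and post TX descriptors ... */
	return NETDEV_TX_OK;
}
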
--
1.6.0.2