Message-ID: <1326473949-22389-7-git-send-email-wei.liu2@citrix.com>
Date: Fri, 13 Jan 2012 16:59:09 +0000
From: Wei Liu <wei.liu2@...rix.com>
To: ian.campbell@...rix.com, konrad.wilk@...cle.com,
xen-devel@...ts.xensource.com, netdev@...r.kernel.org
CC: Wei Liu <wei.liu2@...rix.com>
Subject: [RFC PATCH 6/6] netback: alter internal function/structure names.
Now that xen_netbk has been merged into xenvif, give the internal
functions and structures clearer xenvif_* names.

Also alter the NAPI poll handler related function prototypes slightly:
xenvif_tx_action() now returns the amount of work done instead of
filling in a work_done pointer argument.
Signed-off-by: Wei Liu <wei.liu2@...rix.com>
---
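Note for reviewers: the NAPI-related prototype change, condensed from the
common.h and interface.c hunks below, is that the tx action now follows the
usual ->poll() convention of returning the amount of work done rather than
filling in a pointer argument:

    /* Old prototype: work done was returned through a pointer argument. */
    void xen_netbk_tx_action(struct xenvif *vif, int *work_done, int budget);

    /* New prototype: the poll handler consumes the return value directly. */
    int xenvif_tx_action(struct xenvif *vif, int budget);

    /* In xenvif_poll(): */
    work_done = xenvif_tx_action(vif, budget);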
drivers/net/xen-netback/common.h | 28 +++---
drivers/net/xen-netback/interface.c | 20 ++--
drivers/net/xen-netback/netback.c | 210 ++++++++++++++++++-----------------
3 files changed, 130 insertions(+), 128 deletions(-)
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 6b99246..f7ec35c 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -52,7 +52,7 @@ struct pending_tx_info {
};
typedef unsigned int pending_ring_idx_t;
-struct netbk_rx_meta {
+struct xenvif_rx_meta {
int id;
int size;
int gso_size;
@@ -135,7 +135,7 @@ struct xenvif {
* straddles two buffers in the frontend.
*/
struct gnttab_copy grant_copy_op[2*XEN_NETIF_RX_RING_SIZE];
- struct netbk_rx_meta meta[2*XEN_NETIF_RX_RING_SIZE];
+ struct xenvif_rx_meta meta[2*XEN_NETIF_RX_RING_SIZE];
};
static inline struct xenbus_device *xenvif_to_xenbus_device(struct xenvif *vif)
@@ -156,32 +156,32 @@ void xenvif_xenbus_exit(void);
int xenvif_schedulable(struct xenvif *vif);
-int xen_netbk_rx_ring_full(struct xenvif *vif);
+int xenvif_rx_ring_full(struct xenvif *vif);
-int xen_netbk_must_stop_queue(struct xenvif *vif);
+int xenvif_must_stop_queue(struct xenvif *vif);
/* (Un)Map communication rings. */
-void xen_netbk_unmap_frontend_rings(struct xenvif *vif);
-int xen_netbk_map_frontend_rings(struct xenvif *vif,
- grant_ref_t tx_ring_ref,
- grant_ref_t rx_ring_ref);
+void xenvif_unmap_frontend_rings(struct xenvif *vif);
+int xenvif_map_frontend_rings(struct xenvif *vif,
+ grant_ref_t tx_ring_ref,
+ grant_ref_t rx_ring_ref);
/* Check for SKBs from frontend and schedule backend processing */
-void xen_netbk_check_rx_xenvif(struct xenvif *vif);
+void xenvif_check_rx_xenvif(struct xenvif *vif);
/* Receive an SKB from the frontend */
void xenvif_receive_skb(struct xenvif *vif, struct sk_buff *skb);
/* Queue an SKB for transmission to the frontend */
-void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb);
+void xenvif_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb);
/* Notify xenvif that ring now has space to send an skb to the frontend */
void xenvif_notify_tx_completion(struct xenvif *vif);
/* Returns number of ring slots required to send an skb to the frontend */
-unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb);
+unsigned int xenvif_count_skb_slots(struct xenvif *vif, struct sk_buff *skb);
-void xen_netbk_tx_action(struct xenvif *vif, int *work_done, int budget);
-void xen_netbk_rx_action(struct xenvif *vif);
+int xenvif_tx_action(struct xenvif *vif, int budget);
+void xenvif_rx_action(struct xenvif *vif);
-int xen_netbk_kthread(void *data);
+int xenvif_kthread(void *data);
#endif /* __XEN_NETBACK__COMMON_H__ */
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 69184d1..a71039e 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -48,7 +48,7 @@ int xenvif_schedulable(struct xenvif *vif)
static int xenvif_rx_schedulable(struct xenvif *vif)
{
- return xenvif_schedulable(vif) && !xen_netbk_rx_ring_full(vif);
+ return xenvif_schedulable(vif) && !xenvif_rx_ring_full(vif);
}
static irqreturn_t xenvif_interrupt(int irq, void *dev_id)
@@ -72,7 +72,7 @@ static int xenvif_poll(struct napi_struct *napi, int budget)
struct xenvif *vif = container_of(napi, struct xenvif, napi);
int work_done = 0;
- xen_netbk_tx_action(vif, &work_done, budget);
+ work_done = xenvif_tx_action(vif, budget);
if (work_done < budget) {
int more_to_do = 0;
@@ -100,12 +100,12 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
goto drop;
/* Reserve ring slots for the worst-case number of fragments. */
- vif->rx_req_cons_peek += xen_netbk_count_skb_slots(vif, skb);
+ vif->rx_req_cons_peek += xenvif_count_skb_slots(vif, skb);
- if (vif->can_queue && xen_netbk_must_stop_queue(vif))
+ if (vif->can_queue && xenvif_must_stop_queue(vif))
netif_stop_queue(dev);
- xen_netbk_queue_tx_skb(vif, skb);
+ xenvif_queue_tx_skb(vif, skb);
return NETDEV_TX_OK;
@@ -136,7 +136,7 @@ static void xenvif_up(struct xenvif *vif)
{
napi_enable(&vif->napi);
enable_irq(vif->irq);
- xen_netbk_check_rx_xenvif(vif);
+ xenvif_check_rx_xenvif(vif);
}
static void xenvif_down(struct xenvif *vif)
@@ -333,7 +333,7 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
__module_get(THIS_MODULE);
- err = xen_netbk_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref);
+ err = xenvif_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref);
if (err < 0)
goto err;
@@ -346,7 +346,7 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
disable_irq(vif->irq);
init_waitqueue_head(&vif->wq);
- vif->task = kthread_create(xen_netbk_kthread,
+ vif->task = kthread_create(xenvif_kthread,
(void *)vif,
"vif%d.%d", vif->domid, vif->handle);
if (IS_ERR(vif->task)) {
@@ -370,7 +370,7 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
err_unbind:
unbind_from_irqhandler(vif->irq, vif);
err_unmap:
- xen_netbk_unmap_frontend_rings(vif);
+ xenvif_unmap_frontend_rings(vif);
err:
return err;
}
@@ -399,7 +399,7 @@ void xenvif_disconnect(struct xenvif *vif)
unregister_netdev(vif->dev);
- xen_netbk_unmap_frontend_rings(vif);
+ xenvif_unmap_frontend_rings(vif);
free_netdev(vif->dev);
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 133ebb3..6a9b412 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -47,7 +47,7 @@
#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>
-static void xen_netbk_idx_release(struct xenvif *vif, u16 pending_idx);
+static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx);
static void make_tx_response(struct xenvif *vif,
struct xen_netif_tx_request *txp,
s8 st);
@@ -115,7 +115,7 @@ static int max_required_rx_slots(struct xenvif *vif)
return max;
}
-int xen_netbk_rx_ring_full(struct xenvif *vif)
+int xenvif_rx_ring_full(struct xenvif *vif)
{
RING_IDX peek = vif->rx_req_cons_peek;
RING_IDX needed = max_required_rx_slots(vif);
@@ -124,16 +124,16 @@ int xen_netbk_rx_ring_full(struct xenvif *vif)
((vif->rx.rsp_prod_pvt + XEN_NETIF_RX_RING_SIZE - peek) < needed);
}
-int xen_netbk_must_stop_queue(struct xenvif *vif)
+int xenvif_must_stop_queue(struct xenvif *vif)
{
- if (!xen_netbk_rx_ring_full(vif))
+ if (!xenvif_rx_ring_full(vif))
return 0;
vif->rx.sring->req_event = vif->rx_req_cons_peek +
max_required_rx_slots(vif);
mb(); /* request notification /then/ check the queue */
- return xen_netbk_rx_ring_full(vif);
+ return xenvif_rx_ring_full(vif);
}
/*
@@ -179,9 +179,9 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
/*
* Figure out how many ring slots we're going to need to send @skb to
* the guest. This function is essentially a dry run of
- * netbk_gop_frag_copy.
+ * xenvif_gop_frag_copy.
*/
-unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
+unsigned int xenvif_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
{
unsigned int count;
int i, copy_off;
@@ -220,15 +220,15 @@ struct netrx_pending_operations {
unsigned copy_prod, copy_cons;
unsigned meta_prod, meta_cons;
struct gnttab_copy *copy;
- struct netbk_rx_meta *meta;
+ struct xenvif_rx_meta *meta;
int copy_off;
grant_ref_t copy_gref;
};
-static struct netbk_rx_meta *get_next_rx_buffer(struct xenvif *vif,
- struct netrx_pending_operations *npo)
+static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif,
+ struct netrx_pending_operations *npo)
{
- struct netbk_rx_meta *meta;
+ struct xenvif_rx_meta *meta;
struct xen_netif_rx_request *req;
req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
@@ -248,13 +248,13 @@ static struct netbk_rx_meta *get_next_rx_buffer(struct xenvif *vif,
* Set up the grant operations for this fragment. If it's a flipping
* interface, we also set up the unmap request from here.
*/
-static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
- struct netrx_pending_operations *npo,
- struct page *page, unsigned long size,
- unsigned long offset, int *head)
+static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
+ struct netrx_pending_operations *npo,
+ struct page *page, unsigned long size,
+ unsigned long offset, int *head)
{
struct gnttab_copy *copy_gop;
- struct netbk_rx_meta *meta;
+ struct xenvif_rx_meta *meta;
/*
* These variables are used iff get_page_ext returns true,
* in which case they are guaranteed to be initialized.
@@ -335,14 +335,14 @@ static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
* zero GSO descriptors (for non-GSO packets) or one descriptor (for
* frontend-side LRO).
*/
-static int netbk_gop_skb(struct sk_buff *skb,
- struct netrx_pending_operations *npo)
+static int xenvif_gop_skb(struct sk_buff *skb,
+ struct netrx_pending_operations *npo)
{
struct xenvif *vif = netdev_priv(skb->dev);
int nr_frags = skb_shinfo(skb)->nr_frags;
int i;
struct xen_netif_rx_request *req;
- struct netbk_rx_meta *meta;
+ struct xenvif_rx_meta *meta;
unsigned char *data;
int head = 1;
int old_meta_prod;
@@ -379,30 +379,30 @@ static int netbk_gop_skb(struct sk_buff *skb,
if (data + len > skb_tail_pointer(skb))
len = skb_tail_pointer(skb) - data;
- netbk_gop_frag_copy(vif, skb, npo,
- virt_to_page(data), len, offset, &head);
+ xenvif_gop_frag_copy(vif, skb, npo,
+ virt_to_page(data), len, offset, &head);
data += len;
}
for (i = 0; i < nr_frags; i++) {
- netbk_gop_frag_copy(vif, skb, npo,
- skb_frag_page(&skb_shinfo(skb)->frags[i]),
- skb_frag_size(&skb_shinfo(skb)->frags[i]),
- skb_shinfo(skb)->frags[i].page_offset,
- &head);
+ xenvif_gop_frag_copy(vif, skb, npo,
+ skb_frag_page(&skb_shinfo(skb)->frags[i]),
+ skb_frag_size(&skb_shinfo(skb)->frags[i]),
+ skb_shinfo(skb)->frags[i].page_offset,
+ &head);
}
return npo->meta_prod - old_meta_prod;
}
/*
- * This is a twin to netbk_gop_skb. Assume that netbk_gop_skb was
+ * This is a twin to xenvif_gop_skb. Assume that xenvif_gop_skb was
* used to set up the operations on the top of
* netrx_pending_operations, which have since been done. Check that
* they didn't give any errors and advance over them.
*/
-static int netbk_check_gop(struct xenvif *vif, int nr_meta_slots,
- struct netrx_pending_operations *npo)
+static int xenvif_check_gop(struct xenvif *vif, int nr_meta_slots,
+ struct netrx_pending_operations *npo)
{
struct gnttab_copy *copy_op;
int status = XEN_NETIF_RSP_OKAY;
@@ -421,9 +421,9 @@ static int netbk_check_gop(struct xenvif *vif, int nr_meta_slots,
return status;
}
-static void netbk_add_frag_responses(struct xenvif *vif, int status,
- struct netbk_rx_meta *meta,
- int nr_meta_slots)
+static void xenvif_add_frag_responses(struct xenvif *vif, int status,
+ struct xenvif_rx_meta *meta,
+ int nr_meta_slots)
{
int i;
unsigned long offset;
@@ -451,12 +451,12 @@ struct skb_cb_overlay {
int meta_slots_used;
};
-static void xen_netbk_kick_thread(struct xenvif *vif)
+static void xenvif_kick_thread(struct xenvif *vif)
{
wake_up(&vif->wq);
}
-void xen_netbk_rx_action(struct xenvif *vif)
+void xenvif_rx_action(struct xenvif *vif)
{
s8 status;
u16 flags;
@@ -485,7 +485,7 @@ void xen_netbk_rx_action(struct xenvif *vif)
nr_frags = skb_shinfo(skb)->nr_frags;
sco = (struct skb_cb_overlay *)skb->cb;
- sco->meta_slots_used = netbk_gop_skb(skb, &npo);
+ sco->meta_slots_used = xenvif_gop_skb(skb, &npo);
count += nr_frags + 1;
@@ -529,7 +529,7 @@ void xen_netbk_rx_action(struct xenvif *vif)
vif->dev->stats.tx_bytes += skb->len;
vif->dev->stats.tx_packets++;
- status = netbk_check_gop(vif, sco->meta_slots_used, &npo);
+ status = xenvif_check_gop(vif, sco->meta_slots_used, &npo);
if (sco->meta_slots_used == 1)
flags = 0;
@@ -565,7 +565,7 @@ void xen_netbk_rx_action(struct xenvif *vif)
gso->flags = 0;
}
- netbk_add_frag_responses(vif, status,
+ xenvif_add_frag_responses(vif, status,
vif->meta + npo.meta_cons + 1,
sco->meta_slots_used);
@@ -583,17 +583,17 @@ void xen_netbk_rx_action(struct xenvif *vif)
notify_remote_via_irq(vif->irq);
if (!skb_queue_empty(&vif->rx_queue))
- xen_netbk_kick_thread(vif);
+ xenvif_kick_thread(vif);
}
-void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb)
+void xenvif_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb)
{
skb_queue_tail(&vif->rx_queue, skb);
- xen_netbk_kick_thread(vif);
+ xenvif_kick_thread(vif);
}
-void xen_netbk_check_rx_xenvif(struct xenvif *vif)
+void xenvif_check_rx_xenvif(struct xenvif *vif)
{
int more_to_do;
@@ -630,11 +630,11 @@ static void tx_credit_callback(unsigned long data)
{
struct xenvif *vif = (struct xenvif *)data;
tx_add_credit(vif);
- xen_netbk_check_rx_xenvif(vif);
+ xenvif_check_rx_xenvif(vif);
}
-static void netbk_tx_err(struct xenvif *vif,
- struct xen_netif_tx_request *txp, RING_IDX end)
+static void xenvif_tx_err(struct xenvif *vif,
+ struct xen_netif_tx_request *txp, RING_IDX end)
{
RING_IDX cons = vif->tx.req_cons;
@@ -645,10 +645,10 @@ static void netbk_tx_err(struct xenvif *vif,
txp = RING_GET_REQUEST(&vif->tx, cons++);
} while (1);
vif->tx.req_cons = cons;
- xen_netbk_check_rx_xenvif(vif);
+ xenvif_check_rx_xenvif(vif);
}
-static int netbk_count_requests(struct xenvif *vif,
+static int xenvif_count_requests(struct xenvif *vif,
struct xen_netif_tx_request *first,
struct xen_netif_tx_request *txp,
int work_to_do)
@@ -689,9 +689,9 @@ static int netbk_count_requests(struct xenvif *vif,
return frags;
}
-static struct page *xen_netbk_alloc_page(struct xenvif *vif,
- struct sk_buff *skb,
- u16 pending_idx)
+static struct page *xenvif_alloc_page(struct xenvif *vif,
+ struct sk_buff *skb,
+ u16 pending_idx)
{
struct page *page;
int idx;
@@ -702,10 +702,10 @@ static struct page *xen_netbk_alloc_page(struct xenvif *vif,
return page;
}
-static struct gnttab_copy *xen_netbk_get_requests(struct xenvif *vif,
- struct sk_buff *skb,
- struct xen_netif_tx_request *txp,
- struct gnttab_copy *gop)
+static struct gnttab_copy *xenvif_get_requests(struct xenvif *vif,
+ struct sk_buff *skb,
+ struct xen_netif_tx_request *txp,
+ struct gnttab_copy *gop)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
skb_frag_t *frags = shinfo->frags;
@@ -723,7 +723,7 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xenvif *vif,
index = pending_index(vif->pending_cons++);
pending_idx = vif->pending_ring[index];
- page = xen_netbk_alloc_page(vif, skb, pending_idx);
+ page = xenvif_alloc_page(vif, skb, pending_idx);
if (!page)
return NULL;
@@ -747,9 +747,9 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xenvif *vif,
return gop;
}
-static int xen_netbk_tx_check_gop(struct xenvif *vif,
- struct sk_buff *skb,
- struct gnttab_copy **gopp)
+static int xenvif_tx_check_gop(struct xenvif *vif,
+ struct sk_buff *skb,
+ struct gnttab_copy **gopp)
{
struct gnttab_copy *gop = *gopp;
u16 pending_idx = *((u16 *)skb->data);
@@ -783,7 +783,7 @@ static int xen_netbk_tx_check_gop(struct xenvif *vif,
if (likely(!newerr)) {
/* Had a previous error? Invalidate this fragment. */
if (unlikely(err))
- xen_netbk_idx_release(vif, pending_idx);
+ xenvif_idx_release(vif, pending_idx);
continue;
}
@@ -799,10 +799,10 @@ static int xen_netbk_tx_check_gop(struct xenvif *vif,
/* First error: invalidate header and preceding fragments. */
pending_idx = *((u16 *)skb->data);
- xen_netbk_idx_release(vif, pending_idx);
+ xenvif_idx_release(vif, pending_idx);
for (j = start; j < i; j++) {
pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
- xen_netbk_idx_release(vif, pending_idx);
+ xenvif_idx_release(vif, pending_idx);
}
/* Remember the error: invalidate all subsequent fragments. */
@@ -813,7 +813,7 @@ static int xen_netbk_tx_check_gop(struct xenvif *vif,
return err;
}
-static void xen_netbk_fill_frags(struct xenvif *vif, struct sk_buff *skb)
+static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
int nr_frags = shinfo->nr_frags;
@@ -834,15 +834,15 @@ static void xen_netbk_fill_frags(struct xenvif *vif, struct sk_buff *skb)
skb->data_len += txp->size;
skb->truesize += txp->size;
- /* Take an extra reference to offset xen_netbk_idx_release */
+ /* Take an extra reference to offset xenvif_idx_release */
get_page(to_page(vif->mmap_pages[pending_idx]));
- xen_netbk_idx_release(vif, pending_idx);
+ xenvif_idx_release(vif, pending_idx);
}
}
-static int xen_netbk_get_extras(struct xenvif *vif,
- struct xen_netif_extra_info *extras,
- int work_to_do)
+static int xenvif_get_extras(struct xenvif *vif,
+ struct xen_netif_extra_info *extras,
+ int work_to_do)
{
struct xen_netif_extra_info extra;
RING_IDX cons = vif->tx.req_cons;
@@ -870,9 +870,9 @@ static int xen_netbk_get_extras(struct xenvif *vif,
return work_to_do;
}
-static int netbk_set_skb_gso(struct xenvif *vif,
- struct sk_buff *skb,
- struct xen_netif_extra_info *gso)
+static int xenvif_set_skb_gso(struct xenvif *vif,
+ struct sk_buff *skb,
+ struct xen_netif_extra_info *gso)
{
if (!gso->u.gso.size) {
netdev_dbg(vif->dev, "GSO size must not be zero.\n");
@@ -997,7 +997,7 @@ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
return false;
}
-static unsigned xen_netbk_tx_build_gops(struct xenvif *vif)
+static unsigned xenvif_tx_build_gops(struct xenvif *vif)
{
struct gnttab_copy *gop = vif->tx_copy_ops, *request_gop;
struct sk_buff *skb;
@@ -1036,18 +1036,18 @@ static unsigned xen_netbk_tx_build_gops(struct xenvif *vif)
memset(extras, 0, sizeof(extras));
if (txreq.flags & XEN_NETTXF_extra_info) {
- work_to_do = xen_netbk_get_extras(vif, extras,
+ work_to_do = xenvif_get_extras(vif, extras,
work_to_do);
idx = vif->tx.req_cons;
if (unlikely(work_to_do < 0)) {
- netbk_tx_err(vif, &txreq, idx);
+ xenvif_tx_err(vif, &txreq, idx);
break;
}
}
- ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do);
+ ret = xenvif_count_requests(vif, &txreq, txfrags, work_to_do);
if (unlikely(ret < 0)) {
- netbk_tx_err(vif, &txreq, idx - ret);
+ xenvif_tx_err(vif, &txreq, idx - ret);
break;
}
idx += ret;
@@ -1055,7 +1055,7 @@ static unsigned xen_netbk_tx_build_gops(struct xenvif *vif)
if (unlikely(txreq.size < ETH_HLEN)) {
netdev_dbg(vif->dev,
"Bad packet size: %d\n", txreq.size);
- netbk_tx_err(vif, &txreq, idx);
+ xenvif_tx_err(vif, &txreq, idx);
break;
}
@@ -1065,7 +1065,7 @@ static unsigned xen_netbk_tx_build_gops(struct xenvif *vif)
"txreq.offset: %x, size: %u, end: %lu\n",
txreq.offset, txreq.size,
(txreq.offset&~PAGE_MASK) + txreq.size);
- netbk_tx_err(vif, &txreq, idx);
+ xenvif_tx_err(vif, &txreq, idx);
break;
}
@@ -1081,7 +1081,7 @@ static unsigned xen_netbk_tx_build_gops(struct xenvif *vif)
if (unlikely(skb == NULL)) {
netdev_dbg(vif->dev,
"Can't allocate a skb in start_xmit.\n");
- netbk_tx_err(vif, &txreq, idx);
+ xenvif_tx_err(vif, &txreq, idx);
break;
}
@@ -1092,18 +1092,18 @@ static unsigned xen_netbk_tx_build_gops(struct xenvif *vif)
struct xen_netif_extra_info *gso;
gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
- if (netbk_set_skb_gso(vif, skb, gso)) {
+ if (xenvif_set_skb_gso(vif, skb, gso)) {
kfree_skb(skb);
- netbk_tx_err(vif, &txreq, idx);
+ xenvif_tx_err(vif, &txreq, idx);
break;
}
}
/* XXX could copy straight to head */
- page = xen_netbk_alloc_page(vif, skb, pending_idx);
+ page = xenvif_alloc_page(vif, skb, pending_idx);
if (!page) {
kfree_skb(skb);
- netbk_tx_err(vif, &txreq, idx);
+ xenvif_tx_err(vif, &txreq, idx);
break;
}
@@ -1140,17 +1140,17 @@ static unsigned xen_netbk_tx_build_gops(struct xenvif *vif)
vif->pending_cons++;
- request_gop = xen_netbk_get_requests(vif,
+ request_gop = xenvif_get_requests(vif,
skb, txfrags, gop);
if (request_gop == NULL) {
kfree_skb(skb);
- netbk_tx_err(vif, &txreq, idx);
+ xenvif_tx_err(vif, &txreq, idx);
break;
}
gop = request_gop;
vif->tx.req_cons = idx;
- xen_netbk_check_rx_xenvif(vif);
+ xenvif_check_rx_xenvif(vif);
if ((gop-vif->tx_copy_ops) >= ARRAY_SIZE(vif->tx_copy_ops))
break;
@@ -1159,13 +1159,13 @@ static unsigned xen_netbk_tx_build_gops(struct xenvif *vif)
return gop - vif->tx_copy_ops;
}
-static void xen_netbk_tx_submit(struct xenvif *vif,
- int *work_done, int budget)
+static int xenvif_tx_submit(struct xenvif *vif, int budget)
{
struct gnttab_copy *gop = vif->tx_copy_ops;
struct sk_buff *skb;
+ int work_done = 0;
- while ((*work_done < budget) &&
+ while ((work_done < budget) &&
(skb = __skb_dequeue(&vif->tx_queue)) != NULL) {
struct xen_netif_tx_request *txp;
u16 pending_idx;
@@ -1175,7 +1175,7 @@ static void xen_netbk_tx_submit(struct xenvif *vif,
txp = &vif->pending_tx_info[pending_idx].req;
/* Check the remap error code. */
- if (unlikely(xen_netbk_tx_check_gop(vif, skb, &gop))) {
+ if (unlikely(xenvif_tx_check_gop(vif, skb, &gop))) {
netdev_dbg(vif->dev, "netback grant failed.\n");
skb_shinfo(skb)->nr_frags = 0;
kfree_skb(skb);
@@ -1192,7 +1192,7 @@ static void xen_netbk_tx_submit(struct xenvif *vif,
txp->size -= data_len;
} else {
/* Schedule a response immediately. */
- xen_netbk_idx_release(vif, pending_idx);
+ xenvif_idx_release(vif, pending_idx);
}
if (txp->flags & XEN_NETTXF_csum_blank)
@@ -1200,7 +1200,7 @@ static void xen_netbk_tx_submit(struct xenvif *vif,
else if (txp->flags & XEN_NETTXF_data_validated)
skb->ip_summed = CHECKSUM_UNNECESSARY;
- xen_netbk_fill_frags(vif, skb);
+ xenvif_fill_frags(vif, skb);
/*
* If the initial fragment was < PKT_PROT_LEN then
@@ -1225,33 +1225,35 @@ static void xen_netbk_tx_submit(struct xenvif *vif,
vif->dev->stats.rx_bytes += skb->len;
vif->dev->stats.rx_packets++;
- (*work_done)++;
+ work_done++;
xenvif_receive_skb(vif, skb);
}
+
+ return work_done;
}
/* Called after netfront has transmitted */
-void xen_netbk_tx_action(struct xenvif *vif, int *work_done, int budget)
+int xenvif_tx_action(struct xenvif *vif, int budget)
{
unsigned nr_gops;
int ret;
if (unlikely(!tx_work_todo(vif)))
- return;
+ return 0;
- nr_gops = xen_netbk_tx_build_gops(vif);
+ nr_gops = xenvif_tx_build_gops(vif);
if (nr_gops == 0)
- return;
+ return 0;
ret = HYPERVISOR_grant_table_op(GNTTABOP_copy,
vif->tx_copy_ops, nr_gops);
BUG_ON(ret);
- xen_netbk_tx_submit(vif, work_done, budget);
+ return xenvif_tx_submit(vif, budget);
}
-static void xen_netbk_idx_release(struct xenvif *vif, u16 pending_idx)
+static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx)
{
struct pending_tx_info *pending_tx_info;
pending_ring_idx_t index;
@@ -1330,7 +1332,7 @@ static inline int tx_work_todo(struct xenvif *vif)
return 0;
}
-void xen_netbk_unmap_frontend_rings(struct xenvif *vif)
+void xenvif_unmap_frontend_rings(struct xenvif *vif)
{
if (vif->tx.sring)
xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
@@ -1340,9 +1342,9 @@ void xen_netbk_unmap_frontend_rings(struct xenvif *vif)
vif->rx.sring);
}
-int xen_netbk_map_frontend_rings(struct xenvif *vif,
- grant_ref_t tx_ring_ref,
- grant_ref_t rx_ring_ref)
+int xenvif_map_frontend_rings(struct xenvif *vif,
+ grant_ref_t tx_ring_ref,
+ grant_ref_t rx_ring_ref)
{
void *addr;
struct xen_netif_tx_sring *txs;
@@ -1371,11 +1373,11 @@ int xen_netbk_map_frontend_rings(struct xenvif *vif,
return 0;
err:
- xen_netbk_unmap_frontend_rings(vif);
+ xenvif_unmap_frontend_rings(vif);
return err;
}
-int xen_netbk_kthread(void *data)
+int xenvif_kthread(void *data)
{
struct xenvif *vif = data;
@@ -1389,7 +1391,7 @@ int xen_netbk_kthread(void *data)
break;
if (rx_work_todo(vif))
- xen_netbk_rx_action(vif);
+ xenvif_rx_action(vif);
}
return 0;
--
1.7.2.5