From: Einar Lueck

This patch exploits the QDIO support for asynchronous delivery of
storage blocks for HiperSockets. The feature is not enabled by default
and may be enabled via the function qeth_configure_cq().

Signed-off-by: Einar Lueck
Signed-off-by: Frank Blaschka
---
 drivers/s390/net/qeth_core.h      |   28 +-
 drivers/s390/net/qeth_core_main.c |  495 ++++++++++++++++++++++++++++++++++----
 2 files changed, 480 insertions(+), 43 deletions(-)

--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -110,6 +110,10 @@ struct qeth_perf_stats {
 	unsigned int sc_dp_p;
 	unsigned int sc_p_dp;
+	/* qdio_cq_handler: number of times called, time spent in */
+	__u64 cq_start_time;
+	unsigned int cq_cnt;
+	unsigned int cq_time;
 	/* qdio_input_handler: number of times called, time spent in */
 	__u64 inbound_start_time;
 	unsigned int inbound_cnt;
@@ -376,6 +380,11 @@ enum qeth_qdio_buffer_states {
 	 * outbound: filled by driver; owned by hardware in order to be sent
 	 */
 	QETH_QDIO_BUF_PRIMED,
+	/*
+	 * inbound: not applicable
+	 * outbound: handled via transfer pending / completion queue
+	 */
+	QETH_QDIO_BUF_HANDLED_DELAYED,
 };

 enum qeth_qdio_info_states {
@@ -413,8 +422,11 @@ struct qeth_qdio_out_buffer {
 	atomic_t state;
 	int next_element_to_fill;
 	struct sk_buff_head skb_list;
-	struct list_head ctx_list;
 	int is_header[16];
+
+	struct qaob *aob;
+	struct qeth_qdio_out_q *q;
+	struct qeth_qdio_out_buffer *next_pending;
 };

 struct qeth_card;
@@ -427,7 +439,8 @@ enum qeth_out_q_states {

 struct qeth_qdio_out_q {
 	struct qdio_buffer qdio_bufs[QDIO_MAX_BUFFERS_PER_Q];
-	struct qeth_qdio_out_buffer bufs[QDIO_MAX_BUFFERS_PER_Q];
+	struct qeth_qdio_out_buffer *bufs[QDIO_MAX_BUFFERS_PER_Q];
+	struct qdio_outbuf_state *bufstates; /* convenience pointer */
 	int queue_no;
 	struct qeth_card *card;
 	atomic_t state;
@@ -448,7 +461,9 @@ struct qeth_qdio_out_q {
 struct qeth_qdio_info {
 	atomic_t state;
 	/* input */
+	int no_in_queues;
 	struct qeth_qdio_q *in_q;
+	struct qeth_qdio_q *c_q;
 	struct qeth_qdio_buffer_pool in_buf_pool;
 	struct qeth_qdio_buffer_pool init_pool;
 	int in_buf_size;
@@ -456,6 +471,7 @@ struct qeth_qdio_info {
 	/* output */
 	int no_out_queues;
 	struct qeth_qdio_out_q **out_qs;
+	struct qdio_outbuf_state *out_bufstates;

 	/* priority queueing */
 	int do_prio_queueing;
@@ -527,6 +543,12 @@ enum qeth_cmd_buffer_state {
 	BUF_STATE_PROCESSED,
 };

+enum qeth_cq {
+	QETH_CQ_DISABLED = 0,
+	QETH_CQ_ENABLED = 1,
+	QETH_CQ_NOTAVAILABLE = 2,
+};
+
 struct qeth_ipato {
 	int enabled;
 	int invert4;
@@ -651,6 +673,7 @@ struct qeth_card_options {
 	int rx_sg_cb;
 	enum qeth_ipa_isolation_modes isolation;
 	int sniffer;
+	enum qeth_cq cq;
 };

 /*
@@ -888,6 +911,7 @@ void qeth_dbf_longtext(debug_info_t *id,
 int qeth_core_ethtool_get_settings(struct net_device *, struct ethtool_cmd *);
 int qeth_set_access_ctrl_online(struct qeth_card *card);
 int qeth_hdr_chk_and_bounce(struct sk_buff *, int);
+int qeth_configure_cq(struct qeth_card *, enum qeth_cq);
 int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action);
 int qeth_query_ipassists(struct qeth_card *, enum qeth_prot_versions prot);
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -44,6 +44,7 @@ struct qeth_card_list_struct qeth_core_c
 EXPORT_SYMBOL_GPL(qeth_core_card_list);
 struct kmem_cache *qeth_core_header_cache;
 EXPORT_SYMBOL_GPL(qeth_core_header_cache);
+static struct kmem_cache *qeth_qdio_outbuf_cache;
 static struct device *qeth_core_root_dev;
 static unsigned int known_devices[][6] = QETH_MODELLIST_ARRAY;
@@ -56,6 +57,10 @@ static struct qeth_cmd_buffer *qeth_get_
 static void qeth_setup_ccw(struct qeth_channel *, unsigned char *, __u32);
 static void qeth_free_buffer_pool(struct qeth_card *);
 static int qeth_qdio_establish(struct qeth_card *);
+static void qeth_free_qdio_buffers(struct qeth_card *);
+static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
+		struct qeth_qdio_out_buffer *buf,
+		enum qeth_qdio_buffer_states newbufstate);

 static inline const char *qeth_get_cardname(struct qeth_card *card)
@@ -239,6 +244,150 @@ int qeth_realloc_buffer_pool(struct qeth
 }
 EXPORT_SYMBOL_GPL(qeth_realloc_buffer_pool);

+static inline int qeth_cq_init(struct qeth_card *card)
+{
+	int rc;
+
+	if (card->options.cq == QETH_CQ_ENABLED) {
+		QETH_DBF_TEXT(SETUP, 2, "cqinit");
+		memset(card->qdio.c_q->qdio_bufs, 0,
+		       QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer));
+		card->qdio.c_q->next_buf_to_init = 127;
+		rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT,
+			     card->qdio.no_in_queues - 1, 0,
+			     127);
+		if (rc) {
+			QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
+			goto out;
+		}
+	}
+	rc = 0;
+out:
+	return rc;
+}
+
+static inline int qeth_alloc_cq(struct qeth_card *card)
+{
+	int rc;
+
+	if (card->options.cq == QETH_CQ_ENABLED) {
+		int i;
+		struct qdio_outbuf_state *outbuf_states;
+
+		QETH_DBF_TEXT(SETUP, 2, "cqon");
+		card->qdio.c_q = kzalloc(sizeof(struct qeth_qdio_q),
+					 GFP_KERNEL);
+		if (!card->qdio.c_q) {
+			rc = -1;
+			goto kmsg_out;
+		}
+		QETH_DBF_HEX(SETUP, 2, &card->qdio.c_q, sizeof(void *));
+
+		for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) {
+			card->qdio.c_q->bufs[i].buffer =
+				&card->qdio.c_q->qdio_bufs[i];
+		}
+
+		card->qdio.no_in_queues = 2;
+
+		card->qdio.out_bufstates = (struct qdio_outbuf_state *)
+			kzalloc(card->qdio.no_out_queues *
+				QDIO_MAX_BUFFERS_PER_Q *
+				sizeof(struct qdio_outbuf_state), GFP_KERNEL);
+		outbuf_states = card->qdio.out_bufstates;
+		if (outbuf_states == NULL) {
+			rc = -1;
+			goto free_cq_out;
+		}
+		for (i = 0; i < card->qdio.no_out_queues; ++i) {
+			card->qdio.out_qs[i]->bufstates = outbuf_states;
+			outbuf_states += QDIO_MAX_BUFFERS_PER_Q;
+		}
+	} else {
+		QETH_DBF_TEXT(SETUP, 2, "nocq");
+		card->qdio.c_q = NULL;
+		card->qdio.no_in_queues = 1;
+	}
+	QETH_DBF_TEXT_(SETUP, 2, "iqc%d", card->qdio.no_in_queues);
+	rc = 0;
+out:
+	return rc;
+free_cq_out:
+	kfree(card->qdio.c_q);
+	card->qdio.c_q = NULL;
+kmsg_out:
+	dev_err(&card->gdev->dev, "Failed to create completion queue\n");
+	goto out;
+}
+
+static inline void qeth_free_cq(struct qeth_card *card)
+{
+	if (card->qdio.c_q) {
+		--card->qdio.no_in_queues;
+		kfree(card->qdio.c_q);
+		card->qdio.c_q = NULL;
+	}
+	kfree(card->qdio.out_bufstates);
+	card->qdio.out_bufstates = NULL;
+}
+
+static inline void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q,
+	int bidx, int forced_cleanup)
+{
+	if (q->bufs[bidx]->next_pending != NULL) {
+		struct qeth_qdio_out_buffer *head = q->bufs[bidx];
+		struct qeth_qdio_out_buffer *c = q->bufs[bidx]->next_pending;
+
+		while (c) {
+			if (forced_cleanup ||
+			    atomic_read(&c->state) ==
+			      QETH_QDIO_BUF_HANDLED_DELAYED) {
+				struct qeth_qdio_out_buffer *f = c;
+				QETH_CARD_TEXT(f->q->card, 5, "fp");
+				QETH_CARD_TEXT_(f->q->card, 5, "%lx", (long) f);
+				c = f->next_pending;
+				BUG_ON(head->next_pending != f);
+				head->next_pending = c;
+				kmem_cache_free(qeth_qdio_outbuf_cache, f);
+			} else {
+				head = c;
+				c = c->next_pending;
+			}
+
+		}
+	}
+}
+
+
+static inline void qeth_qdio_handle_aob(struct qeth_card *card,
+		unsigned long phys_aob_addr) {
+	struct qaob *aob;
+	struct qeth_qdio_out_buffer *buffer;
+
+	aob = (struct qaob *) phys_to_virt(phys_aob_addr);
+	QETH_CARD_TEXT(card, 5, "haob");
+	QETH_CARD_TEXT_(card, 5, "%lx", phys_aob_addr);
+	buffer = (struct qeth_qdio_out_buffer *) aob->user1;
+	QETH_CARD_TEXT_(card, 5, "%lx", aob->user1);
+
+	BUG_ON(buffer == NULL);
+
+	buffer->aob = NULL;
+	qeth_clear_output_buffer(buffer->q, buffer,
+				QETH_QDIO_BUF_HANDLED_DELAYED);
+	/* from here on: do not touch buffer anymore */
+	qdio_release_aob(aob);
+}
+
+static inline int qeth_is_cq(struct qeth_card *card, unsigned int queue)
+{
+	return card->options.cq == QETH_CQ_ENABLED &&
+	    card->qdio.c_q != NULL &&
+	    queue != 0 &&
+	    queue == card->qdio.no_in_queues - 1;
+}
+
+
 static int qeth_issue_next_read(struct qeth_card *card)
 {
 	int rc;
@@ -681,6 +830,7 @@ EXPORT_SYMBOL_GPL(qeth_do_run_thread);
 void qeth_schedule_recovery(struct qeth_card *card)
 {
 	QETH_CARD_TEXT(card, 2, "startrec");
+	WARN_ON(1);
 	if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0)
 		schedule_work(&card->kernel_thread_starter);
 }
@@ -884,7 +1034,8 @@ out:
 }

 static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
-		struct qeth_qdio_out_buffer *buf)
+		struct qeth_qdio_out_buffer *buf,
+		enum qeth_qdio_buffer_states newbufstate)
 {
 	int i;
 	struct sk_buff *skb;
@@ -912,21 +1063,36 @@ static void qeth_clear_output_buffer(str
 	buf->buffer->element[15].eflags = 0;
 	buf->buffer->element[15].sflags = 0;
 	buf->next_element_to_fill = 0;
-	atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
+	atomic_set(&buf->state, newbufstate);
+}
+
+static void qeth_clear_outq_buffers(struct qeth_qdio_out_q *q, int free)
+{
+	int j;
+
+	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
+		if (!q->bufs[j])
+			continue;
+		qeth_cleanup_handled_pending(q, j, free);
+		qeth_clear_output_buffer(q, q->bufs[j], QETH_QDIO_BUF_EMPTY);
+		if (free) {
+			kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]);
+			q->bufs[j] = NULL;
+		}
+	}
 }

 void qeth_clear_qdio_buffers(struct qeth_card *card)
 {
-	int i, j;
+	int i;

 	QETH_CARD_TEXT(card, 2, "clearqdbf");
 	/* clear outbound buffers to free skbs */
-	for (i = 0; i < card->qdio.no_out_queues; ++i)
+	for (i = 0; i < card->qdio.no_out_queues; ++i) {
 		if (card->qdio.out_qs[i]) {
-			for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
-				qeth_clear_output_buffer(card->qdio.out_qs[i],
-						&card->qdio.out_qs[i]->bufs[j]);
+			qeth_clear_outq_buffers(card->qdio.out_qs[i], 0);
 		}
+	}
 }
 EXPORT_SYMBOL_GPL(qeth_clear_qdio_buffers);
@@ -945,11 +1111,14 @@ static void qeth_free_buffer_pool(struct

 static void qeth_free_qdio_buffers(struct qeth_card *card)
 {
-	int i, j;
+	int i;

 	if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
 		QETH_QDIO_UNINITIALIZED)
 		return;
+
+	qeth_free_cq(card);
+
 	kfree(card->qdio.in_q);
 	card->qdio.in_q = NULL;
 	/* inbound buffer pool */
@@ -957,9 +1126,7 @@ static void qeth_free_qdio_buffers(struc
 	/* free outbound qdio_qs */
 	if (card->qdio.out_qs) {
 		for (i = 0; i < card->qdio.no_out_queues; ++i) {
-			for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
-				qeth_clear_output_buffer(card->qdio.out_qs[i],
-						&card->qdio.out_qs[i]->bufs[j]);
+			qeth_clear_outq_buffers(card->qdio.out_qs[i], 1);
 			kfree(card->qdio.out_qs[i]);
 		}
 		kfree(card->qdio.out_qs);
@@ -1051,6 +1218,7 @@ static void qeth_set_intial_options(stru
 	card->options.performance_stats = 0;
 	card->options.rx_sg_cb = QETH_RX_SG_CB;
 	card->options.isolation = ISOLATION_MODE_NONE;
+	card->options.cq = QETH_CQ_DISABLED;
 }

 static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
@@ -1180,6 +1348,7 @@ static int qeth_determine_card_type(stru
 		card->info.type = known_devices[i][QETH_DEV_MODEL_IND];
 		card->qdio.no_out_queues =
 			known_devices[i][QETH_QUEUE_NO_IND];
+		card->qdio.no_in_queues = 1;
 		card->info.is_multicast_different =
 			known_devices[i][QETH_MULTICAST_IND];
 		qeth_get_channel_path_desc(card);
@@ -2027,6 +2196,37 @@ static int qeth_ulp_setup(struct qeth_ca
 	return rc;
 }

+static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx)
+{
+	int rc;
+	struct qeth_qdio_out_buffer *newbuf;
+
+	rc = 0;
+	newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, GFP_ATOMIC);
+	if (!newbuf) {
+		rc = -ENOMEM;
+		goto out;
+	}
+	newbuf->buffer = &q->qdio_bufs[bidx];
+	skb_queue_head_init(&newbuf->skb_list);
+	lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key);
+	newbuf->q = q;
+	newbuf->aob = NULL;
+	newbuf->next_pending = q->bufs[bidx];
+	atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY);
+	q->bufs[bidx] = newbuf;
+	if (q->bufstates) {
+		q->bufstates[bidx].user = newbuf;
+		QETH_CARD_TEXT_(q->card, 2, "nbs%d", bidx);
+		QETH_CARD_TEXT_(q->card, 2, "%lx", (long) newbuf);
+		QETH_CARD_TEXT_(q->card, 2, "%lx",
+				(long) newbuf->next_pending);
+	}
+out:
+	return rc;
+}
+
+
 static int qeth_alloc_qdio_buffers(struct qeth_card *card)
 {
 	int i, j;
@@ -2038,7 +2238,7 @@ static int qeth_alloc_qdio_buffers(struc
 		return 0;

 	card->qdio.in_q = kmalloc(sizeof(struct qeth_qdio_q),
-				  GFP_KERNEL);
+				   GFP_KERNEL);
 	if (!card->qdio.in_q)
 		goto out_nomem;
 	QETH_DBF_TEXT(SETUP, 2, "inq");
@@ -2051,6 +2251,7 @@ static int qeth_alloc_qdio_buffers(struc
 	/* inbound buffer pool */
 	if (qeth_alloc_buffer_pool(card))
 		goto out_freeinq;
+
 	/* outbound */
 	card->qdio.out_qs = kmalloc(card->qdio.no_out_queues *
@@ -2068,21 +2269,30 @@ static int qeth_alloc_qdio_buffers(struc
 		card->qdio.out_qs[i]->queue_no = i;
 		/* give outbound qeth_qdio_buffers their qdio_buffers */
 		for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
-			card->qdio.out_qs[i]->bufs[j].buffer =
-				&card->qdio.out_qs[i]->qdio_bufs[j];
-			skb_queue_head_init(&card->qdio.out_qs[i]->bufs[j].
-					    skb_list);
-			lockdep_set_class(
-				&card->qdio.out_qs[i]->bufs[j].skb_list.lock,
-				&qdio_out_skb_queue_key);
-			INIT_LIST_HEAD(&card->qdio.out_qs[i]->bufs[j].ctx_list);
+			BUG_ON(card->qdio.out_qs[i]->bufs[j] != NULL);
+			if (qeth_init_qdio_out_buf(card->qdio.out_qs[i], j))
+				goto out_freeoutqbufs;
 		}
 	}
+
+	/* completion */
+	if (qeth_alloc_cq(card))
+		goto out_freeoutq;
+
 	return 0;
+out_freeoutqbufs:
+	while (j > 0) {
+		--j;
+		kmem_cache_free(qeth_qdio_outbuf_cache,
+				card->qdio.out_qs[i]->bufs[j]);
+		card->qdio.out_qs[i]->bufs[j] = NULL;
+	}
 out_freeoutq:
-	while (i > 0)
+	while (i > 0) {
 		kfree(card->qdio.out_qs[--i]);
+		qeth_clear_outq_buffers(card->qdio.out_qs[i], 1);
+	}
 	kfree(card->qdio.out_qs);
 	card->qdio.out_qs = NULL;
 out_freepool:
@@ -2399,13 +2609,21 @@ int qeth_init_qdio_queues(struct qeth_ca
 		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
 		return rc;
 	}
+
+	/* completion */
+	rc = qeth_cq_init(card);
+	if (rc) {
+		return rc;
+	}
+
 	/* outbound queue */
 	for (i = 0; i < card->qdio.no_out_queues; ++i) {
 		memset(card->qdio.out_qs[i]->qdio_bufs, 0,
 		       QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer));
 		for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
 			qeth_clear_output_buffer(card->qdio.out_qs[i],
-					&card->qdio.out_qs[i]->bufs[j]);
+					card->qdio.out_qs[i]->bufs[j],
+					QETH_QDIO_BUF_EMPTY);
 		}
 		card->qdio.out_qs[i]->card = card;
 		card->qdio.out_qs[i]->next_buf_to_fill = 0;
@@ -2787,8 +3005,6 @@ void qeth_queue_input_buffer(struct qeth
 			qeth_get_micros() -
 			card->perf_stats.inbound_do_qdio_start_time;
 	if (rc) {
-		dev_warn(&card->gdev->dev,
-			"QDIO reported an error, rc=%i\n", rc);
 		QETH_CARD_TEXT(card, 2, "qinberr");
 	}
 	queue->next_buf_to_init = (queue->next_buf_to_init + count) %
@@ -2862,12 +3078,12 @@ static int qeth_switch_to_nonpacking_if_
 			queue->card->perf_stats.sc_p_dp++;
 		queue->do_pack = 0;
 		/* flush packing buffers */
-		buffer = &queue->bufs[queue->next_buf_to_fill];
+		buffer = queue->bufs[queue->next_buf_to_fill];
 		if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
 		    (buffer->next_element_to_fill > 0)) {
 			atomic_set(&buffer->state,
-					QETH_QDIO_BUF_PRIMED);
+				   QETH_QDIO_BUF_PRIMED);
 			flush_count++;
 			queue->next_buf_to_fill =
 				(queue->next_buf_to_fill + 1) %
@@ -2878,6 +3094,7 @@ static int qeth_switch_to_nonpacking_if_
 	return flush_count;
 }

+
 /*
  * Called to flush a packing buffer if no more pci flags are on the queue.
  * Checks if there is a packing buffer and prepares it to be flushed.
@@ -2887,7 +3104,7 @@ static int qeth_flush_buffers_on_no_pci(
 {
 	struct qeth_qdio_out_buffer *buffer;

-	buffer = &queue->bufs[queue->next_buf_to_fill];
+	buffer = queue->bufs[queue->next_buf_to_fill];
 	if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
 	    (buffer->next_element_to_fill > 0)) {
 		/* it's a packing buffer */
@@ -2908,10 +3125,14 @@ static void qeth_flush_buffers(struct qe
 	unsigned int qdio_flags;

 	for (i = index; i < index + count; ++i) {
-		buf = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
+		int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
+		buf = queue->bufs[bidx];
 		buf->buffer->element[buf->next_element_to_fill - 1].eflags |=
 				SBAL_EFLAGS_LAST_ENTRY;

+		if (queue->bufstates)
+			queue->bufstates[bidx].user = buf;
+
 		if (queue->card->info.type == QETH_CARD_TYPE_IQD)
 			continue;
@@ -2963,6 +3184,9 @@ static void qeth_flush_buffers(struct qe
 		if (rc == QDIO_ERROR_SIGA_TARGET)
 			return;
 		QETH_CARD_TEXT(queue->card, 2, "flushbuf");
+		QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no);
+		QETH_CARD_TEXT_(queue->card, 2, " idx%d", index);
+		QETH_CARD_TEXT_(queue->card, 2, " c%d", count);
 		QETH_CARD_TEXT_(queue->card, 2, " err%d", rc);

 		/* this must not happen under normal circumstances. if it
@@ -3024,14 +3248,120 @@ void qeth_qdio_start_poll(struct ccw_dev
 }
 EXPORT_SYMBOL_GPL(qeth_qdio_start_poll);

+int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
+{
+	int rc;
+
+	if (card->options.cq == QETH_CQ_NOTAVAILABLE) {
+		rc = -1;
+		goto out;
+	} else {
+		if (card->options.cq == cq) {
+			rc = 0;
+			goto out;
+		}
+
+		if (card->state != CARD_STATE_DOWN &&
+		    card->state != CARD_STATE_RECOVER) {
+			rc = -1;
+			goto out;
+		}
+
+		qeth_free_qdio_buffers(card);
+		card->options.cq = cq;
+		rc = 0;
+	}
+out:
+	return rc;
+
+}
+EXPORT_SYMBOL_GPL(qeth_configure_cq);
+
+
+static void qeth_qdio_cq_handler(struct qeth_card *card,
+		unsigned int qdio_err,
+		unsigned int queue, int first_element, int count) {
+	struct qeth_qdio_q *cq = card->qdio.c_q;
+	int i;
+	int rc;
+
+	if (!qeth_is_cq(card, queue))
+		goto out;
+
+	QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element);
+	QETH_CARD_TEXT_(card, 5, "qcqhc%d", count);
+	QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err);
+
+	if (qdio_err) {
+		netif_stop_queue(card->dev);
+		qeth_schedule_recovery(card);
+		goto out;
+	}
+
+	if (card->options.performance_stats) {
+		card->perf_stats.cq_cnt++;
+		card->perf_stats.cq_start_time = qeth_get_micros();
+	}
+
+	for (i = first_element; i < first_element + count; ++i) {
+		int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
+		struct qdio_buffer *buffer = &cq->qdio_bufs[bidx];
+		int e;
+
+		e = 0;
+		while (buffer->element[e].addr) {
+			unsigned long phys_aob_addr;
+
+			phys_aob_addr = (unsigned long) buffer->element[e].addr;
+			qeth_qdio_handle_aob(card, phys_aob_addr);
+			buffer->element[e].addr = NULL;
+			buffer->element[e].eflags = 0;
+			buffer->element[e].sflags = 0;
+			buffer->element[e].length = 0;
+
+			++e;
+		}
+
+		buffer->element[15].eflags = 0;
+		buffer->element[15].sflags = 0;
+	}
+	rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue,
+		    card->qdio.c_q->next_buf_to_init,
+		    count);
+	if (rc) {
+		dev_warn(&card->gdev->dev,
+			"QDIO reported an error, rc=%i\n", rc);
+		QETH_CARD_TEXT(card, 2, "qcqherr");
+	}
+	card->qdio.c_q->next_buf_to_init = (card->qdio.c_q->next_buf_to_init
+				   + count) % QDIO_MAX_BUFFERS_PER_Q;
+
+	netif_wake_queue(card->dev);
+
+	if (card->options.performance_stats) {
+		int delta_t = qeth_get_micros();
+		delta_t -= card->perf_stats.cq_start_time;
+		card->perf_stats.cq_time += delta_t;
+	}
+out:
+	return;
+}
+
 void qeth_qdio_input_handler(struct ccw_device *ccwdev, unsigned int qdio_err,
-		unsigned int queue, int first_element, int count,
+		unsigned int queue, int first_elem, int count,
 		unsigned long card_ptr)
 {
 	struct qeth_card *card = (struct qeth_card *)card_ptr;

-	if (qdio_err)
+	QETH_CARD_TEXT_(card, 2, "qihq%d", queue);
+	QETH_CARD_TEXT_(card, 2, "qiec%d", qdio_err);
+
+	if (qeth_is_cq(card, queue))
+		qeth_qdio_cq_handler(card, qdio_err, queue, first_elem, count);
+	else if (qdio_err)
 		qeth_schedule_recovery(card);
+
+
 }
 EXPORT_SYMBOL_GPL(qeth_qdio_input_handler);
@@ -3057,9 +3387,26 @@ void qeth_qdio_output_handler(struct ccw
 			qeth_get_micros();
 	}
 	for (i = first_element; i < (first_element + count); ++i) {
-		buffer = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
+		int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
+		buffer = queue->bufs[bidx];
 		qeth_handle_send_error(card, buffer, qdio_error);
-		qeth_clear_output_buffer(queue, buffer);
+
+		if (queue->bufstates &&
+		    (queue->bufstates[bidx].flags &
+		     QDIO_OUTBUF_STATE_FLAG_PENDING) != 0) {
+			buffer->aob = queue->bufstates[bidx].aob;
+			QETH_CARD_TEXT_(queue->card, 5, "pel%d", bidx);
+			QETH_CARD_TEXT_(queue->card, 5, "aob");
+			QETH_CARD_TEXT_(queue->card, 5, "%lx",
+					virt_to_phys(buffer->aob));
+			BUG_ON(bidx < 0 || bidx >= QDIO_MAX_BUFFERS_PER_Q);
+			if (qeth_init_qdio_out_buf(queue, bidx))
+				qeth_schedule_recovery(card);
+		} else {
+			qeth_clear_output_buffer(queue, buffer,
+						QETH_QDIO_BUF_EMPTY);
+		}
+		qeth_cleanup_handled_pending(queue, bidx, 0);
 	}
 	atomic_sub(count, &queue->used_buffers);
 	/* check if we need to do something on this outbound queue */
@@ -3291,7 +3638,7 @@ int qeth_do_send_packet_fast(struct qeth
 			QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
 	/* ... now we've got the queue */
 	index = queue->next_buf_to_fill;
-	buffer = &queue->bufs[queue->next_buf_to_fill];
+	buffer = queue->bufs[queue->next_buf_to_fill];
 	/*
 	 * check if buffer is empty to make sure that we do not 'overtake'
 	 * ourselves and try to fill a buffer that is already primed
@@ -3325,7 +3672,7 @@ int qeth_do_send_packet(struct qeth_card
 	while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
 			      QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
 	start_index = queue->next_buf_to_fill;
-	buffer = &queue->bufs[queue->next_buf_to_fill];
+	buffer = queue->bufs[queue->next_buf_to_fill];
 	/*
 	 * check if buffer is empty to make sure that we do not 'overtake'
 	 * ourselves and try to fill a buffer that is already primed
@@ -3347,7 +3694,7 @@ int qeth_do_send_packet(struct qeth_card
 			queue->next_buf_to_fill =
 				(queue->next_buf_to_fill + 1) %
 				QDIO_MAX_BUFFERS_PER_Q;
-			buffer = &queue->bufs[queue->next_buf_to_fill];
+			buffer = queue->bufs[queue->next_buf_to_fill];
 			/* we did a step forward, so check buffer state
 			 * again */
 			if (atomic_read(&buffer->state) !=
@@ -3925,6 +4272,20 @@ static void qeth_determine_capabilities(
 	if (rc)
 		QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);

+	QETH_DBF_TEXT_(SETUP, 2, "qfmt%d", card->ssqd.qfmt);
+	QETH_DBF_TEXT_(SETUP, 2, "%d", card->ssqd.qdioac1);
+	QETH_DBF_TEXT_(SETUP, 2, "%d", card->ssqd.qdioac3);
+	QETH_DBF_TEXT_(SETUP, 2, "icnt%d", card->ssqd.icnt);
+	if (!((card->ssqd.qfmt != QDIO_IQDIO_QFMT) ||
+	    ((card->ssqd.qdioac1 & CHSC_AC1_INITIATE_INPUTQ) == 0) ||
+	    ((card->ssqd.qdioac3 & CHSC_AC3_FORMAT2_CQ_AVAILABLE) == 0))) {
+		dev_info(&card->gdev->dev,
+			"Completion Queueing supported\n");
+	} else {
+		card->options.cq = QETH_CQ_NOTAVAILABLE;
+	}
+
+
 out_offline:
 	if (ddev_offline == 1)
 		ccw_device_set_offline(ddev);
@@ -3932,6 +4293,24 @@ out:
 	return;
 }

+static inline void qeth_qdio_establish_cq(struct qeth_card *card,
+	struct qdio_buffer **in_sbal_ptrs,
+	void (**queue_start_poll) (struct ccw_device *, int, unsigned long)) {
+	int i;
+
+	if (card->options.cq == QETH_CQ_ENABLED) {
+		int offset = QDIO_MAX_BUFFERS_PER_Q *
+			     (card->qdio.no_in_queues - 1);
+		i = QDIO_MAX_BUFFERS_PER_Q * (card->qdio.no_in_queues - 1);
+		for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) {
+			in_sbal_ptrs[offset + i] = (struct qdio_buffer *)
+				virt_to_phys(card->qdio.c_q->bufs[i].buffer);
+		}
+
+		queue_start_poll[card->qdio.no_in_queues - 1] = NULL;
+	}
+}
+
 static int qeth_qdio_establish(struct qeth_card *card)
 {
 	struct qdio_initialize init_data;
@@ -3954,22 +4333,28 @@ static int qeth_qdio_establish(struct qe
 	qeth_create_qib_param_field(card, qib_param_field);
 	qeth_create_qib_param_field_blkt(card, qib_param_field);

-	in_sbal_ptrs = kmalloc(QDIO_MAX_BUFFERS_PER_Q * sizeof(void *),
+	in_sbal_ptrs = kmalloc(card->qdio.no_in_queues *
+			       QDIO_MAX_BUFFERS_PER_Q * sizeof(void *),
 			       GFP_KERNEL);
 	if (!in_sbal_ptrs) {
 		rc = -ENOMEM;
 		goto out_free_qib_param;
 	}
-	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
+	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) {
 		in_sbal_ptrs[i] = (struct qdio_buffer *)
 			virt_to_phys(card->qdio.in_q->bufs[i].buffer);
+	}

-	queue_start_poll = kmalloc(sizeof(void *) * 1, GFP_KERNEL);
+	queue_start_poll = kzalloc(sizeof(void *) * card->qdio.no_in_queues,
+				   GFP_KERNEL);
 	if (!queue_start_poll) {
 		rc = -ENOMEM;
 		goto out_free_in_sbals;
 	}
-	queue_start_poll[0] = card->discipline.start_poll;
+	for (i = 0; i < card->qdio.no_in_queues; ++i)
+		queue_start_poll[i] = card->discipline.start_poll;
+
+	qeth_qdio_establish_cq(card, in_sbal_ptrs, queue_start_poll);

 	out_sbal_ptrs = kmalloc(card->qdio.no_out_queues *
 				QDIO_MAX_BUFFERS_PER_Q *
@@ -3981,7 +4366,7 @@ static int qeth_qdio_establish(struct qe
 	for (i = 0, k = 0; i < card->qdio.no_out_queues; ++i)
 		for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j, ++k) {
 			out_sbal_ptrs[k] = (struct qdio_buffer *)virt_to_phys(
-				card->qdio.out_qs[i]->bufs[j].buffer);
+				card->qdio.out_qs[i]->bufs[j]->buffer);
 		}

 	memset(&init_data, 0, sizeof(struct qdio_initialize));
@@ -3989,7 +4374,7 @@ static int qeth_qdio_establish(struct qe
 	init_data.q_format = qeth_get_qdio_q_format(card);
 	init_data.qib_param_field_format = 0;
 	init_data.qib_param_field = qib_param_field;
-	init_data.no_input_qs = 1;
+	init_data.no_input_qs = card->qdio.no_in_queues;
 	init_data.no_output_qs = card->qdio.no_out_queues;
 	init_data.input_handler = card->discipline.input_handler;
 	init_data.output_handler = card->discipline.output_handler;
@@ -3997,6 +4382,7 @@ static int qeth_qdio_establish(struct qe
 	init_data.int_parm = (unsigned long) card;
 	init_data.input_sbal_addr_array = (void **) in_sbal_ptrs;
 	init_data.output_sbal_addr_array = (void **) out_sbal_ptrs;
+	init_data.output_sbal_state_array = card->qdio.out_bufstates;
 	init_data.scan_threshold =
 		(card->info.type == QETH_CARD_TYPE_IQD) ? 8 : 32;
@@ -4013,6 +4399,17 @@ static int qeth_qdio_establish(struct qe
 			qdio_free(CARD_DDEV(card));
 		}
 	}
+
+	switch (card->options.cq) {
+	case QETH_CQ_ENABLED:
+		dev_info(&card->gdev->dev, "Completion Queue support enabled");
+		break;
+	case QETH_CQ_DISABLED:
+		dev_info(&card->gdev->dev, "Completion Queue support disabled");
+		break;
+	default:
+		break;
+	}
 out:
 	kfree(out_sbal_ptrs);
 out_free_queue_start_poll:
@@ -4191,6 +4588,8 @@ static inline int qeth_create_skb_frag(s
 		(*pskb)->truesize += data_len;
 		(*pfrag)++;
 	}
+
+
 	return 0;
 }
@@ -4664,6 +5063,8 @@ static struct {
 	{"tx do_QDIO count"},
 	{"tx csum"},
 	{"tx lin"},
+	{"cq handler count"},
+	{"cq handler time"}
 };

 int qeth_core_get_sset_count(struct net_device *dev, int stringset)
@@ -4722,6 +5123,8 @@ void qeth_core_get_ethtool_stats(struct
 	data[32] = card->perf_stats.outbound_do_qdio_cnt;
 	data[33] = card->perf_stats.tx_csum;
 	data[34] = card->perf_stats.tx_lin;
+	data[35] = card->perf_stats.cq_cnt;
+	data[36] = card->perf_stats.cq_time;
 }
 EXPORT_SYMBOL_GPL(qeth_core_get_ethtool_stats);
@@ -4880,7 +5283,16 @@ static int __init qeth_core_init(void)
 		goto slab_err;
 	}
+	qeth_qdio_outbuf_cache = kmem_cache_create("qeth_buf",
+			sizeof(struct qeth_qdio_out_buffer), 0, 0, NULL);
+	if (!qeth_qdio_outbuf_cache) {
+		rc = -ENOMEM;
+		goto cqslab_err;
+	}
+
 	return 0;
+cqslab_err:
+	kmem_cache_destroy(qeth_core_header_cache);
 slab_err:
 	root_device_unregister(qeth_core_root_dev);
 register_err:
@@ -4905,6 +5317,7 @@ static void __exit qeth_core_exit(void)
 			   &driver_attr_group);
 	ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
 	ccw_driver_unregister(&qeth_ccw_driver);
+	kmem_cache_destroy(qeth_qdio_outbuf_cache);
 	kmem_cache_destroy(qeth_core_header_cache);
 	qeth_unregister_dbf_views();
 	pr_info("core functions removed\n");
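
For reference, a caller could switch the feature on while the device is
offline roughly as sketched below. This is illustrative only and not part
of the patch; the helper name example_enable_cq() is made up, while
qeth_configure_cq() and the qeth_cq values come from the patch itself.

/*
 * Illustrative sketch (not part of this patch): enable the completion
 * queue from a discipline while the card is DOWN or in RECOVER state.
 */
static int example_enable_cq(struct qeth_card *card)
{
	int rc;

	rc = qeth_configure_cq(card, QETH_CQ_ENABLED);
	if (rc)
		dev_warn(&card->gdev->dev,
			 "enabling the completion queue failed\n");
	return rc;
}

qeth_configure_cq() itself only updates card->options.cq and frees the
QDIO buffers; the completion queue is then allocated and established on
the next hard setup of the device.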