Message-ID: <CAF8QhUiKmRoEk8pCaYDi9DAfsYU2W2BcO4fbh3c=CL3A=6e_VA@mail.gmail.com>
Date: Fri, 15 Feb 2019 11:03:47 +0800
From: fei phung <feiphung27@...il.com>
To: "Michael S. Tsirkin" <mst@...hat.com>
Cc: feiphung@...mail.com, netdev@...r.kernel.org
Subject: Re: Question on ptr_ring linux header
Hi Michael,
> If I had to guess I'd say the way you play with indices is probably racy
> so you are producing an invalid index.
You are probably right.
I suspect that item_recv_push_index and item_send_push_index in
https://gist.github.com/promach/7716ee8addcaa33fda140d74d1ad94d6#file-riffa_driver_ptr_ring-c-L254-L404
are causing a data race (and hence an invalid index) for the consume() function.
Note that this interrupt handler can itself be interrupted by another
hardware interrupt, so the unlocked index updates below can interleave.
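
For example, each push in the handler is a non-atomic read/modify/write
of the shared index. A sketch of one bad interleaving, reusing the names
from the snippet below:

~~~
/* Invocation A starts a push ... */
idx_a = item_recv_push_index;      /* reads N */
/* ... a nested interrupt runs invocation B to completion ... */
idx_b = item_recv_push_index;      /* also reads N */
item_recv_push[idx_b].val1 = ...;  /* fills slot N, queues &item_recv_push[N] */
item_recv_push_index = idx_b + 1;  /* index becomes N+1 */
/* ... invocation A resumes with its stale index ... */
item_recv_push[idx_a].val1 = ...;  /* overwrites slot N, already on the ring */
item_recv_push_index = idx_a + 1;  /* still N+1: two events, one slot */
~~~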
I do not think we can use a lock within an interrupt handler, right?
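
Actually, from another look at include/linux/ptr_ring.h,
ptr_ring_produce_any() itself takes the ring's producer_lock with
spin_lock_irqsave(), so a spinlock (as opposed to a sleeping lock like a
mutex) does seem to be allowed in hardirq context. If that is right, only
my own item arrays and indices are unprotected, and the index update
could be guarded the same way. A minimal, untested sketch; msgs_lock and
next_recv_slot() are made-up names, and the modulo wrap at
CIRC_BUFF_SIZE is something my current code does not do:

~~~
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(msgs_lock);	/* hypothetical, for illustration */

/* Reserve the next slot in item_recv_push[] even if the handler is
 * re-entered: the read/increment pair runs under a spinlock that also
 * disables local interrupts. */
static struct item *next_recv_slot(void)
{
	struct item *it;
	unsigned long flags;

	spin_lock_irqsave(&msgs_lock, flags);
	it = &item_recv_push[item_recv_push_index];
	item_recv_push_index = (item_recv_push_index + 1) % CIRC_BUFF_SIZE;
	spin_unlock_irqrestore(&msgs_lock, flags);

	return it;
}
~~~

Each branch in the handler below would then call next_recv_slot(), fill
val1/val2, and hand the pointer to ptr_ring_produce_any().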
Code:
~~~
struct item item_recv_push[CIRC_BUFF_SIZE];
struct item item_send_push[CIRC_BUFF_SIZE];
/* Shared across handler invocations: updated with no locking, and never
 * wrapped at CIRC_BUFF_SIZE anywhere in this snippet. */
unsigned int item_send_push_index;
unsigned int item_recv_push_index;
///////////////////////////////////////////////////////
// INTERRUPT HANDLER
///////////////////////////////////////////////////////
/**
* Reads the interrupt vector and processes it. If processing VECT0, off will
* be 0. If processing VECT1, off will be 6.
*/
static inline void process_intr_vector(struct fpga_state * sc, int off,
		unsigned int vect)
{
	// VECT_0/VECT_1 are organized from right to left (LSB to MSB) as:
	// [ 0] TX_TXN for channel 0 in VECT_0, channel 6 in VECT_1
	// [ 1] TX_SG_BUF_RECVD for channel 0 in VECT_0, channel 6 in VECT_1
	// [ 2] TX_TXN_DONE for channel 0 in VECT_0, channel 6 in VECT_1
	// [ 3] RX_SG_BUF_RECVD for channel 0 in VECT_0, channel 6 in VECT_1
	// [ 4] RX_TXN_DONE for channel 0 in VECT_0, channel 6 in VECT_1
	// ...
	// [25] TX_TXN for channel 5 in VECT_0, channel 11 in VECT_1
	// [26] TX_SG_BUF_RECVD for channel 5 in VECT_0, channel 11 in VECT_1
	// [27] TX_TXN_DONE for channel 5 in VECT_0, channel 11 in VECT_1
	// [28] RX_SG_BUF_RECVD for channel 5 in VECT_0, channel 11 in VECT_1
	// [29] RX_TXN_DONE for channel 5 in VECT_0, channel 11 in VECT_1
	// Positions 30 - 31 in both VECT_0 and VECT_1 are zero.
	unsigned int offlast;
	unsigned int len;
	int recv;
	int send;
	int chnl;
	int i;

	offlast = 0;
	len = 0;

	//printk(KERN_INFO "riffa: intrpt_handler received:%08x\n", vect);
	if (vect & 0xC0000000) {
		printk(KERN_ERR "riffa: fpga:%d, received bad interrupt vector:%08x\n",
			sc->id, vect);
		return;
	}
	for (i = 0; i < 6 && (i+off) < sc->num_chnls; ++i) {
		chnl = i + off;
		recv = 0;
		send = 0;
		// TX (PC receive) scatter gather buffer is read.
		if (vect & (1<<((5*i)+1))) {
			recv = 1;
			item_recv_push[item_recv_push_index].val1 = EVENT_SG_BUF_READ;
			item_recv_push[item_recv_push_index].val2 = 0;
			// Keep track so the thread can handle this.
			if (ptr_ring_produce_any(sc->recv[chnl]->msgs,
					&item_recv_push[item_recv_push_index])) {
				printk(KERN_ERR "riffa: fpga:%d chnl:%d, recv sg buf read msg queue full\n",
					sc->id, chnl);
			}
			DEBUG_MSG(KERN_INFO "riffa: fpga:%d chnl:%d, recv sg buf read\n", sc->id, chnl);
			item_recv_push_index++;
		}
		// TX (PC receive) transaction done.
		if (vect & (1<<((5*i)+2))) {
			recv = 1;
			// Read the transferred amount before queueing it.
			len = read_reg(sc, CHNL_REG(chnl, TX_TNFR_LEN_REG_OFF));
			item_recv_push[item_recv_push_index].val1 = EVENT_TXN_DONE;
			item_recv_push[item_recv_push_index].val2 = len;
			// Notify the thread.
			if (ptr_ring_produce_any(sc->recv[chnl]->msgs,
					&item_recv_push[item_recv_push_index])) {
				printk(KERN_ERR "riffa: fpga:%d chnl:%d, recv txn done msg queue full\n",
					sc->id, chnl);
			}
			DEBUG_MSG(KERN_INFO "riffa: fpga:%d chnl:%d, recv txn done\n", sc->id, chnl);
			item_recv_push_index++;
		}
		// New TX (PC receive) transaction.
		if (vect & (1<<((5*i)+0))) {
			recv = 1;
			// recv_sg_buf_populated and tx_len are globals defined
			// elsewhere in the driver.
			recv_sg_buf_populated = 0; // reset for the new transaction
			// Read the offset/last and length.
			offlast = read_reg(sc, CHNL_REG(chnl, TX_OFFLAST_REG_OFF));
			tx_len = read_reg(sc, CHNL_REG(chnl, TX_LEN_REG_OFF));
			item_recv_push[item_recv_push_index].val1 = EVENT_TXN_OFFLAST;
			item_recv_push[item_recv_push_index].val2 = offlast;
			// Keep track of this transaction.
			if (ptr_ring_produce_any(sc->recv[chnl]->msgs,
					&item_recv_push[item_recv_push_index])) {
				printk(KERN_ERR "riffa: fpga:%d chnl:%d, recv txn offlast msg queue full\n",
					sc->id, chnl);
			}
			/*if (push_circ_queue(sc->recv[chnl]->msgs, EVENT_TXN_LEN, len)) {
				printk(KERN_ERR "riffa: fpga:%d chnl:%d, recv txn len msg queue full\n",
					sc->id, chnl);
			}*/
			DEBUG_MSG(KERN_INFO "riffa: fpga:%d chnl:%d, recv txn (len:%d off:%d last:%d)\n",
				sc->id, chnl, tx_len, (offlast>>1), (offlast & 0x1));
			item_recv_push_index++;
		}
		// RX (PC send) scatter gather buffer is read.
		if (vect & (1<<((5*i)+3))) {
			send = 1;
			item_send_push[item_send_push_index].val1 = EVENT_SG_BUF_READ;
			item_send_push[item_send_push_index].val2 = 0;
			// Keep track so the thread can handle this.
			if (ptr_ring_produce_any(sc->send[chnl]->msgs,
					&item_send_push[item_send_push_index])) {
				printk(KERN_ERR "riffa: fpga:%d chnl:%d, send sg buf read msg queue full\n",
					sc->id, chnl);
			}
			DEBUG_MSG(KERN_INFO "riffa: fpga:%d chnl:%d, send sg buf read\n", sc->id, chnl);
			item_send_push_index++;
		}
		// RX (PC send) transaction done.
		if (vect & (1<<((5*i)+4))) {
			send = 1;
			// Read the transferred amount before queueing it.
			len = read_reg(sc, CHNL_REG(chnl, RX_TNFR_LEN_REG_OFF));
			item_send_push[item_send_push_index].val1 = EVENT_TXN_DONE;
			item_send_push[item_send_push_index].val2 = len;
			// Notify the thread.
			if (ptr_ring_produce_any(sc->send[chnl]->msgs,
					&item_send_push[item_send_push_index])) {
				printk(KERN_ERR "riffa: fpga:%d chnl:%d, send txn done msg queue full\n",
					sc->id, chnl);
			}
			DEBUG_MSG(KERN_INFO "riffa: fpga:%d chnl:%d, send txn done\n", sc->id, chnl);
			item_send_push_index++;
		}
		// Wake up the thread?
		if (recv)
			wake_up(&sc->recv[chnl]->waitq);
		if (send)
			wake_up(&sc->send[chnl]->waitq);
	}
}
~~~
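
For completeness, this is roughly how the reader thread consumes these
entries; chnl_thread() and handle_event() are stand-ins for my real
code, not the actual functions:

~~~
#include <linux/kthread.h>
#include <linux/ptr_ring.h>
#include <linux/wait.h>

static int chnl_thread(void *data)	/* hypothetical reader thread */
{
	struct fpga_state *sc = data;
	struct item *it;

	while (!kthread_should_stop()) {
		wait_event_interruptible(sc->recv[0]->waitq,
				!ptr_ring_empty_any(sc->recv[0]->msgs));
		/* ptr_ring_consume_any() takes consumer_lock internally;
		 * the danger is that, after the producer race above, 'it'
		 * can point at a slot that was overwritten or never filled. */
		while ((it = ptr_ring_consume_any(sc->recv[0]->msgs)) != NULL)
			handle_event(sc, it->val1, it->val2);
	}
	return 0;
}
~~~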
Regards,
Cheng Fei