Message-ID: <CAF8QhUj9m0-Sm9i7Z7X2jtSs9fJ5Dmkg0LeTpwrJRcGD=wVCtQ@mail.gmail.com>
Date: Fri, 1 Mar 2019 11:20:30 +0800
From: fei phung <feiphung27@...il.com>
To: "Michael S. Tsirkin" <mst@...hat.com>, netdev@...r.kernel.org,
feiphung@...mail.com
Subject: Re: Question on ptr_ring linux header
Hi Michael,

It seems I have solved all the data race issues. The culprit was the
extra ring index I was keeping alongside ptr_ring, which I have removed
in the latest data-race-free code.

The diff can be found at https://www.diffchecker.com/w0Pxp2mF

I plan to study the internal implementation of the ptr_ring C code in
the coming weeks and months.
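As a starting point, my mental model of the core API is just
init/produce/consume. A minimal sketch (ptr_ring_init, ptr_ring_produce
and ptr_ring_consume are the real API from include/linux/ptr_ring.h; the
example_* wrappers and the ring size are hypothetical):

/* Minimal ptr_ring usage sketch; example_* names are made up. */
#include <linux/ptr_ring.h>

static struct ptr_ring example_ring;

static int example_setup(void)
{
	/* 16-slot ring of void * entries; returns -ENOMEM on failure */
	return ptr_ring_init(&example_ring, 16, GFP_KERNEL);
}

static void example_roundtrip(void *msg)
{
	void *p;

	/* entries must be non-NULL, since NULL marks an empty slot;
	 * returns 0 on success, -ENOSPC when the ring is full */
	if (ptr_ring_produce(&example_ring, msg))
		pr_err("example: ring full\n");

	/* returns the oldest entry, or NULL when the ring is empty */
	p = ptr_ring_consume(&example_ring);
	(void)p;
}

ptr_ring keeps its producer/consumer indices internally, behind separate
producer and consumer locks, which is why the external index I had been
keeping was unnecessary.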
Here is my driver code that uses ptr_ring:
/* Statically allocated event messages; their addresses are what the
 * interrupt handler below pushes into the per-channel ptr_rings. */
struct item item_recv_push_EVENT_SG_BUF_READ = {EVENT_SG_BUF_READ, 0};
struct item item_recv_push_EVENT_TXN_DONE = {EVENT_TXN_DONE, 0};
struct item item_recv_push_EVENT_TXN_OFFLAST = {EVENT_TXN_OFFLAST, 0};
struct item item_send_push_EVENT_SG_BUF_READ = {EVENT_SG_BUF_READ, 0};
struct item item_send_push_EVENT_TXN_DONE = {EVENT_TXN_DONE, 0};
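(For context: the definition of struct item is not in this excerpt.
Judging by the initializers above and the .val2 assignments below, it is
presumably something like the following; the first member's name is a
guess, only val2 appears in the code.)

struct item {
	unsigned int val1; /* event code, e.g. EVENT_TXN_DONE (name assumed) */
	unsigned int val2; /* payload, e.g. transferred length (name from code) */
};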
///////////////////////////////////////////////////////
// INTERRUPT HANDLER
///////////////////////////////////////////////////////
/**
* Reads the interrupt vector and processes it. If processing VECT0, off will
* be 0. If processing VECT1, off will be 6.
*/
static inline void process_intr_vector(struct fpga_state *sc, int off,
	unsigned int vect)
{
	// VECT_0/VECT_1 are organized from right to left (LSB to MSB) as:
	// [ 0] TX_TXN          for channel 0 in VECT_0, channel 6 in VECT_1
	// [ 1] TX_SG_BUF_RECVD for channel 0 in VECT_0, channel 6 in VECT_1
	// [ 2] TX_TXN_DONE     for channel 0 in VECT_0, channel 6 in VECT_1
	// [ 3] RX_SG_BUF_RECVD for channel 0 in VECT_0, channel 6 in VECT_1
	// [ 4] RX_TXN_DONE     for channel 0 in VECT_0, channel 6 in VECT_1
	// ...
	// [25] TX_TXN          for channel 5 in VECT_0, channel 11 in VECT_1
	// [26] TX_SG_BUF_RECVD for channel 5 in VECT_0, channel 11 in VECT_1
	// [27] TX_TXN_DONE     for channel 5 in VECT_0, channel 11 in VECT_1
	// [28] RX_SG_BUF_RECVD for channel 5 in VECT_0, channel 11 in VECT_1
	// [29] RX_TXN_DONE     for channel 5 in VECT_0, channel 11 in VECT_1
	// Positions 30 - 31 in both VECT_0 and VECT_1 are zero.
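	// So event e (0..4, in the order listed above) for local channel i
	// sits at vector bit (5*i)+e; the (1<<((5*i)+e)) tests below rely on this.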
	unsigned int offlast;
	unsigned int len;
	unsigned int tx_len; // assumed local; its declaration was missing from the excerpt
	int recv;
	int send;
	int chnl;
	int i;

	offlast = 0;
	len = 0;
	//printk(KERN_INFO "riffa: intrpt_handler received:%08x\n", vect);
	// Bits 30-31 must be zero (see the layout above).
	if (vect & 0xC0000000) {
		printk(KERN_ERR "riffa: fpga:%d, received bad interrupt vector:%08x\n",
			sc->id, vect);
		return;
	}
	for (i = 0; i < 6 && (i+off) < sc->num_chnls; ++i) {
		chnl = i + off;
		recv = 0;
		send = 0;
		// TX (PC receive) scatter gather buffer is read.
		if (vect & (1<<((5*i)+1))) {
			recv = 1;
			// Keep track so the thread can handle this.
			if (ptr_ring_produce_any(sc->recv[chnl]->msgs,
					&item_recv_push_EVENT_SG_BUF_READ)) {
				printk(KERN_ERR "riffa: fpga:%d chnl:%d, recv sg buf read msg queue full\n",
					sc->id, chnl);
			}
			DEBUG_MSG(KERN_INFO "riffa: fpga:%d chnl:%d, recv sg buf read\n",
				sc->id, chnl);
		}
		// TX (PC receive) transaction done.
		if (vect & (1<<((5*i)+2))) {
			recv = 1;
			// Read the transferred amount.
			len = read_reg(sc, CHNL_REG(chnl, TX_TNFR_LEN_REG_OFF));
			item_recv_push_EVENT_TXN_DONE.val2 = len;
			// Notify the thread.
			if (ptr_ring_produce_any(sc->recv[chnl]->msgs,
					&item_recv_push_EVENT_TXN_DONE)) {
				printk(KERN_ERR "riffa: fpga:%d chnl:%d, recv txn done msg queue full\n",
					sc->id, chnl);
			}
			DEBUG_MSG(KERN_INFO "riffa: fpga:%d chnl:%d, recv txn done\n",
				sc->id, chnl);
		}
		// New TX (PC receive) transaction.
		if (vect & (1<<((5*i)+0))) {
			recv = 1;
			// recv_sg_buf_populated is defined elsewhere (not in this excerpt).
			recv_sg_buf_populated = 0; // resets for new transaction
			// Read the offset/last and length.
			offlast = read_reg(sc, CHNL_REG(chnl, TX_OFFLAST_REG_OFF));
			tx_len = read_reg(sc, CHNL_REG(chnl, TX_LEN_REG_OFF));
			item_recv_push_EVENT_TXN_OFFLAST.val2 = offlast;
			// Keep track of this transaction.
			if (ptr_ring_produce_any(sc->recv[chnl]->msgs,
					&item_recv_push_EVENT_TXN_OFFLAST)) {
				printk(KERN_ERR "riffa: fpga:%d chnl:%d, recv txn offlast msg queue full\n",
					sc->id, chnl);
			}
			/*if (push_circ_queue(sc->recv[chnl]->msgs, EVENT_TXN_LEN, len)) {
				printk(KERN_ERR "riffa: fpga:%d chnl:%d, recv txn len msg queue full\n",
					sc->id, chnl);
			}*/
			// offlast packs the offset in bits 31:1 and the 'last' flag in bit 0.
			DEBUG_MSG(KERN_INFO "riffa: fpga:%d chnl:%d, recv txn (len:%d off:%d last:%d)\n",
				sc->id, chnl, tx_len, (offlast>>1), (offlast & 0x1));
		}
		// RX (PC send) scatter gather buffer is read.
		if (vect & (1<<((5*i)+3))) {
			send = 1;
			// Keep track so the thread can handle this.
			if (ptr_ring_produce_any(sc->send[chnl]->msgs,
					&item_send_push_EVENT_SG_BUF_READ)) {
				printk(KERN_ERR "riffa: fpga:%d chnl:%d, send sg buf read msg queue full\n",
					sc->id, chnl);
			}
			DEBUG_MSG(KERN_INFO "riffa: fpga:%d chnl:%d, send sg buf read\n",
				sc->id, chnl);
		}
		// RX (PC send) transaction done.
		if (vect & (1<<((5*i)+4))) {
			send = 1;
			// Read the transferred amount.
			len = read_reg(sc, CHNL_REG(chnl, RX_TNFR_LEN_REG_OFF));
			item_send_push_EVENT_TXN_DONE.val2 = len;
			// Notify the thread.
			if (ptr_ring_produce_any(sc->send[chnl]->msgs,
					&item_send_push_EVENT_TXN_DONE)) {
				printk(KERN_ERR "riffa: fpga:%d chnl:%d, send txn done msg queue full\n",
					sc->id, chnl);
			}
			DEBUG_MSG(KERN_INFO "riffa: fpga:%d chnl:%d, send txn done\n",
				sc->id, chnl);
		}
		// Wake up the thread?
		if (recv)
			wake_up(&sc->recv[chnl]->waitq);
		if (send)
			wake_up(&sc->send[chnl]->waitq);
	}
}
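For completeness, here is roughly how I expect the consumer side to
look. This is only a sketch (the recv thread itself is not in this
excerpt), assuming the same fpga_state layout and the struct item shape
above; ptr_ring_empty_any() and ptr_ring_consume_any() are the real API:

/* Hypothetical per-channel recv thread draining the ring. */
static int recv_thread_sketch(void *data)
{
	struct fpga_state *sc = data;
	int chnl = 0; /* whichever channel this thread services */
	struct item *msg;

	while (!kthread_should_stop()) {
		// Sleep until process_intr_vector() produces a message and
		// wakes us via wake_up(&sc->recv[chnl]->waitq).
		if (wait_event_interruptible(sc->recv[chnl]->waitq,
				!ptr_ring_empty_any(sc->recv[chnl]->msgs)))
			break;

		// consume_any takes the consumer lock with irqsave, so it is
		// safe to run concurrently with the interrupt-side producer,
		// which only ever takes the separate producer lock.
		while ((msg = ptr_ring_consume_any(sc->recv[chnl]->msgs))) {
			switch (msg->val1) {
			case EVENT_SG_BUF_READ:
				/* map/read the next scatter gather buffer */
				break;
			case EVENT_TXN_OFFLAST:
				/* msg->val2 holds the offset/last word */
				break;
			case EVENT_TXN_DONE:
				/* msg->val2 holds the transferred length */
				break;
			}
		}
	}
	return 0;
}

Since ptr_ring keeps its own indices under those two locks, no extra
shared index is needed, which is exactly the state that was racing in
my earlier version.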
Regards,
Cheng Fei