Message-ID: <9cc1e367-bf5a-929b-2abe-e368d2109291@gmail.com>
Date: Thu, 17 Dec 2020 17:57:47 -0700
From: David Ahern <dsahern@...il.com>
To: Boris Pismenny <borisp@...lanox.com>, kuba@...nel.org,
davem@...emloft.net, saeedm@...dia.com, hch@....de,
sagi@...mberg.me, axboe@...com, kbusch@...nel.org,
viro@...iv.linux.org.uk, edumazet@...gle.com
Cc: boris.pismenny@...il.com, linux-nvme@...ts.infradead.org,
netdev@...r.kernel.org, benishay@...dia.com, ogerlitz@...dia.com,
yorayz@...dia.com, Ben Ben-Ishay <benishay@...lanox.com>,
Or Gerlitz <ogerlitz@...lanox.com>,
Yoray Zack <yorayz@...lanox.com>
Subject: Re: [PATCH v1 net-next 13/15] net/mlx5e: NVMEoTCP, data-path for DDP
offload
On 12/7/20 2:06 PM, Boris Pismenny wrote:
> +struct sk_buff*
> +mlx5e_nvmeotcp_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb,
> + struct mlx5_cqe64 *cqe, u32 cqe_bcnt,
> + bool linear)
> +{
> + int ccoff, cclen, hlen, ccid, remaining, fragsz, to_copy = 0;
> + struct mlx5e_priv *priv = netdev_priv(netdev);
> + skb_frag_t org_frags[MAX_SKB_FRAGS];
> + struct mlx5e_nvmeotcp_queue *queue;
> + struct nvmeotcp_queue_entry *nqe;
> + int org_nr_frags, frag_index;
> + struct mlx5e_cqe128 *cqe128;
> + u32 queue_id;
> +
> + queue_id = (be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK);
> + queue = mlx5e_nvmeotcp_get_queue(priv->nvmeotcp, queue_id);
> + if (unlikely(!queue)) {
> + dev_kfree_skb_any(skb);
> + return NULL;
> + }
> +
> + cqe128 = (struct mlx5e_cqe128 *)((char *)cqe - 64);
> + if (cqe_is_nvmeotcp_resync(cqe)) {
> + nvmeotcp_update_resync(queue, cqe128);
> + mlx5e_nvmeotcp_put_queue(queue);
> + return skb;
> + }
> +
> + /* cc ddp from cqe */
> + ccid = be16_to_cpu(cqe128->ccid);
> + ccoff = be32_to_cpu(cqe128->ccoff);
> + cclen = be16_to_cpu(cqe128->cclen);
> + hlen = be16_to_cpu(cqe128->hlen);
> +
> + /* carve a hole in the skb for DDP data */
> + if (linear) {
> + skb_trim(skb, hlen);
> + } else {
> + org_nr_frags = skb_shinfo(skb)->nr_frags;
> + mlx5_nvmeotcp_trim_nonlinear(skb, org_frags, &frag_index,
> + cclen);
> + }
mlx5e_skb_from_cqe_mpwrq_linear and mlx5e_skb_from_cqe_mpwrq_nolinear
create an skb, and then this function comes along behind them, strips any
frags originally added to the skb, ...
> +
> + nqe = &queue->ccid_table[ccid];
> +
> + /* packet starts new ccid? */
> + if (queue->ccid != ccid || queue->ccid_gen != nqe->ccid_gen) {
> + queue->ccid = ccid;
> + queue->ccoff = 0;
> + queue->ccoff_inner = 0;
> + queue->ccsglidx = 0;
> + queue->ccid_gen = nqe->ccid_gen;
> + }
> +
> + /* skip inside cc until the ccoff in the cqe */
> + while (queue->ccoff + queue->ccoff_inner < ccoff) {
> + remaining = nqe->sgl[queue->ccsglidx].length - queue->ccoff_inner;
> + fragsz = min_t(off_t, remaining,
> + ccoff - (queue->ccoff + queue->ccoff_inner));
> +
> + if (fragsz == remaining)
> + mlx5e_nvmeotcp_advance_sgl_iter(queue);
> + else
> + queue->ccoff_inner += fragsz;
> + }
> +
> + /* adjust the skb according to the cqe cc */
> + while (to_copy < cclen) {
> + if (skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS) {
> + dev_kfree_skb_any(skb);
> + mlx5e_nvmeotcp_put_queue(queue);
> + return NULL;
> + }
> +
> + remaining = nqe->sgl[queue->ccsglidx].length - queue->ccoff_inner;
> + fragsz = min_t(int, remaining, cclen - to_copy);
> +
> + mlx5e_nvmeotcp_add_skb_frag(netdev, skb, queue, nqe, fragsz);
> + to_copy += fragsz;
> + if (fragsz == remaining)
> + mlx5e_nvmeotcp_advance_sgl_iter(queue);
> + else
> + queue->ccoff_inner += fragsz;
> + }
... adds the frags for the sgls, ...
> +
> + if (cqe_bcnt > hlen + cclen) {
> + remaining = cqe_bcnt - hlen - cclen;
> + if (linear)
> + skb = mlx5_nvmeotcp_add_tail(queue, skb,
> + offset_in_page(skb->data) +
> + hlen + cclen,
> + remaining);
> + else
> + skb = mlx5_nvmeotcp_add_tail_nonlinear(queue, skb,
> + org_frags,
> + org_nr_frags,
> + frag_index);
... and then re-adds the original frags.
Why is this needed? Why can't the skb be created with all of the frags
in proper order?
It seems like this dance would not be needed if you had a generic
header/payload split with the payload written to less restrictive SGLs.
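
As a rough illustration (purely a sketch, not existing mlx5e or core code;
it assumes the destination pages for the payload are already known when the
skb is built, and the helper below is hypothetical), the receive path could
then assemble the skb once, in order:

#include <linux/skbuff.h>

/* Hypothetical helper: headers go in the linear area, payload frags point
 * straight at the DDP destination pages, appended in proper order - so no
 * trim/re-add pass is needed afterwards.
 */
static struct sk_buff *
build_split_skb(struct napi_struct *napi, const void *hdr, unsigned int hlen,
		struct page **pages, unsigned int *offs, unsigned int *lens,
		unsigned int nr_frags)
{
	struct sk_buff *skb;
	unsigned int i;

	skb = napi_alloc_skb(napi, hlen);
	if (unlikely(!skb))
		return NULL;

	/* protocol headers stay in the linear part */
	skb_put_data(skb, hdr, hlen);

	/* payload frags are appended once, already in the right order */
	for (i = 0; i < nr_frags && i < MAX_SKB_FRAGS; i++)
		skb_add_rx_frag(skb, i, pages[i], offs[i], lens[i], lens[i]);

	return skb;
}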