Message-ID: <202109031928.8Uax34Yf-lkp@intel.com>
Date: Fri, 3 Sep 2021 20:05:55 +0800
From: kernel test robot <lkp@...el.com>
To: Arseny Krasnov <arseny.krasnov@...persky.com>,
Stefan Hajnoczi <stefanha@...hat.com>,
Stefano Garzarella <sgarzare@...hat.com>,
"Michael S. Tsirkin" <mst@...hat.com>,
Jason Wang <jasowang@...hat.com>,
"David S. Miller" <davem@...emloft.net>,
Jakub Kicinski <kuba@...nel.org>,
Norbert Slusarek <nslusarek@....net>,
Colin Ian King <colin.king@...onical.com>
Cc: kbuild-all@...ts.01.org, netdev@...r.kernel.org
Subject: Re: [PATCH net-next v4 3/6] vhost/vsock: support MSG_EOR bit
processing
Hi Arseny,
Thank you for the patch! Perhaps something to improve:
[auto build test WARNING on net-next/master]
url: https://github.com/0day-ci/linux/commits/Arseny-Krasnov/virtio-vsock-introduce-MSG_EOR-flag-for-SEQPACKET/20210903-141720
base: https://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git 29ce8f9701072fc221d9c38ad952de1a9578f95c
config: i386-randconfig-s001-20210903 (attached as .config)
compiler: gcc-9 (Debian 9.3.0-22) 9.3.0
reproduce:
        # apt-get install sparse
        # sparse version: v0.6.4-rc1-dirty
        # https://github.com/0day-ci/linux/commit/18c4eca4204f01fb1f94bf35e760436cb537d9b3
        git remote add linux-review https://github.com/0day-ci/linux
        git fetch --no-tags linux-review Arseny-Krasnov/virtio-vsock-introduce-MSG_EOR-flag-for-SEQPACKET/20210903-141720
        git checkout 18c4eca4204f01fb1f94bf35e760436cb537d9b3
        # save the attached .config to linux build tree
        make W=1 C=1 CF='-fdiagnostic-prefix -D__CHECK_ENDIAN__' O=build_dir ARCH=i386 SHELL=/bin/bash drivers/vhost/
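(For reference: C=1 asks Kbuild to run sparse on the files it recompiles, and the CF= string is passed through to sparse as extra checker flags; that is what produces the warnings below.)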
If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kernel test robot <lkp@...el.com>
sparse warnings: (new ones prefixed by >>)
>> drivers/vhost/vsock.c:192:37: sparse: sparse: restricted __le32 degrades to integer
>> drivers/vhost/vsock.c:192:37: sparse: sparse: cast to restricted __le32
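Both warnings point at the same expression: on line 192 the bitwise AND is applied to the raw __le32 flags field together with the CPU-order constant VIRTIO_VSOCK_SEQ_EOR, so sparse first sees the restricted type degrade to a plain integer, and then sees that plain result handed to le32_to_cpu(), which expects a __le32. Beyond the type noise, such a check would never fire on a big-endian host. A minimal sketch of two sparse-clean ways to test the bit (a hypothetical helper for illustration, not code from the patch):

        /* Hypothetical helper, not part of the patch: both forms keep
         * sparse quiet because the __le32 value is never mixed with a
         * plain CPU-order integer.
         */
        static bool pkt_flag_eor(const struct virtio_vsock_pkt *pkt)
        {
                /* Convert to CPU byte order first, then test the bit... */
                return le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOR;

                /* ...or, equivalently, stay in __le32 space throughout:
                 *
                 *   return (pkt->hdr.flags &
                 *           cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR)) != 0;
                 */
        }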
vim +192 drivers/vhost/vsock.c
89
90 static void
91 vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
92 struct vhost_virtqueue *vq)
93 {
94 struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
95 int pkts = 0, total_len = 0;
96 bool added = false;
97 bool restart_tx = false;
98
99 mutex_lock(&vq->mutex);
100
101 if (!vhost_vq_get_backend(vq))
102 goto out;
103
104 if (!vq_meta_prefetch(vq))
105 goto out;
106
107 /* Avoid further vmexits, we're already processing the virtqueue */
108 vhost_disable_notify(&vsock->dev, vq);
109
110 do {
111 struct virtio_vsock_pkt *pkt;
112 struct iov_iter iov_iter;
113 unsigned out, in;
114 size_t nbytes;
115 size_t iov_len, payload_len;
116 int head;
117 u32 flags_to_restore = 0;
118
119 spin_lock_bh(&vsock->send_pkt_list_lock);
120 if (list_empty(&vsock->send_pkt_list)) {
121 spin_unlock_bh(&vsock->send_pkt_list_lock);
122 vhost_enable_notify(&vsock->dev, vq);
123 break;
124 }
125
126 pkt = list_first_entry(&vsock->send_pkt_list,
127 struct virtio_vsock_pkt, list);
128 list_del_init(&pkt->list);
129 spin_unlock_bh(&vsock->send_pkt_list_lock);
130
131 head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
132 &out, &in, NULL, NULL);
133 if (head < 0) {
134 spin_lock_bh(&vsock->send_pkt_list_lock);
135 list_add(&pkt->list, &vsock->send_pkt_list);
136 spin_unlock_bh(&vsock->send_pkt_list_lock);
137 break;
138 }
139
140 if (head == vq->num) {
141 spin_lock_bh(&vsock->send_pkt_list_lock);
142 list_add(&pkt->list, &vsock->send_pkt_list);
143 spin_unlock_bh(&vsock->send_pkt_list_lock);
144
145 /* We cannot finish yet if more buffers snuck in while
146 * re-enabling notify.
147 */
148 if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
149 vhost_disable_notify(&vsock->dev, vq);
150 continue;
151 }
152 break;
153 }
154
155 if (out) {
156 virtio_transport_free_pkt(pkt);
157 vq_err(vq, "Expected 0 output buffers, got %u\n", out);
158 break;
159 }
160
161 iov_len = iov_length(&vq->iov[out], in);
162 if (iov_len < sizeof(pkt->hdr)) {
163 virtio_transport_free_pkt(pkt);
164 vq_err(vq, "Buffer len [%zu] too small\n", iov_len);
165 break;
166 }
167
168 iov_iter_init(&iov_iter, READ, &vq->iov[out], in, iov_len);
169 payload_len = pkt->len - pkt->off;
170
171 /* If the packet is greater than the space available in the
172 * buffer, we split it using multiple buffers.
173 */
174 if (payload_len > iov_len - sizeof(pkt->hdr)) {
175 payload_len = iov_len - sizeof(pkt->hdr);
176
177 /* As we are copying pieces of a large packet's buffer
178 * into small rx buffers, the headers of the packets in
179 * the rx queue are created dynamically and initialized
180 * from the current packet's header (except the length).
181 * But for SOCK_SEQPACKET we must also clear the message
182 * delimiter bit (VIRTIO_VSOCK_SEQ_EOM) and the MSG_EOR
183 * bit (VIRTIO_VSOCK_SEQ_EOR) if set; otherwise there
184 * would be a sequence of packets with these bits set.
185 * After the initialized header has been copied to the
186 * rx buffer, the required bits are restored.
187 */
188 if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOM) {
189 pkt->hdr.flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOM);
190 flags_to_restore |= VIRTIO_VSOCK_SEQ_EOM;
191
> 192 if (le32_to_cpu(pkt->hdr.flags & VIRTIO_VSOCK_SEQ_EOR)) {
193 pkt->hdr.flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
194 flags_to_restore |= VIRTIO_VSOCK_SEQ_EOR;
195 }
196 }
197 }
198
199 /* Set the correct length in the header */
200 pkt->hdr.len = cpu_to_le32(payload_len);
201
202 nbytes = copy_to_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
203 if (nbytes != sizeof(pkt->hdr)) {
204 virtio_transport_free_pkt(pkt);
205 vq_err(vq, "Faulted on copying pkt hdr\n");
206 break;
207 }
208
209 nbytes = copy_to_iter(pkt->buf + pkt->off, payload_len,
210 &iov_iter);
211 if (nbytes != payload_len) {
212 virtio_transport_free_pkt(pkt);
213 vq_err(vq, "Faulted on copying pkt buf\n");
214 break;
215 }
216
217 /* Deliver to monitoring devices all packets that we
218 * will transmit.
219 */
220 virtio_transport_deliver_tap_pkt(pkt);
221
222 vhost_add_used(vq, head, sizeof(pkt->hdr) + payload_len);
223 added = true;
224
225 pkt->off += payload_len;
226 total_len += payload_len;
227
228 /* If we didn't send all the payload we can requeue the packet
229 * to send it with the next available buffer.
230 */
231 if (pkt->off < pkt->len) {
232 pkt->hdr.flags |= cpu_to_le32(flags_to_restore);
233
234 /* We are queueing the same virtio_vsock_pkt to handle
235 * the remaining bytes, and we want to deliver it
236 * to monitoring devices in the next iteration.
237 */
238 pkt->tap_delivered = false;
239
240 spin_lock_bh(&vsock->send_pkt_list_lock);
241 list_add(&pkt->list, &vsock->send_pkt_list);
242 spin_unlock_bh(&vsock->send_pkt_list_lock);
243 } else {
244 if (pkt->reply) {
245 int val;
246
247 val = atomic_dec_return(&vsock->queued_replies);
248
249 /* Do we have resources to resume tx
250 * processing?
251 */
252 if (val + 1 == tx_vq->num)
253 restart_tx = true;
254 }
255
256 virtio_transport_free_pkt(pkt);
257 }
258 } while(likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
259 if (added)
260 vhost_signal(&vsock->dev, vq);
261
262 out:
263 mutex_unlock(&vq->mutex);
264
265 if (restart_tx)
266 vhost_poll_queue(&tx_vq->poll);
267 }
268
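If the intent is to mirror the EOM check on line 188, the likely fix (my assumption, following the conversion style used elsewhere in this function) is to move the closing parenthesis so the byte-order conversion happens before the mask:

	-			if (le32_to_cpu(pkt->hdr.flags & VIRTIO_VSOCK_SEQ_EOR)) {
	+			if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOR) {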
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org