Message-ID: <201712191431.KGfH0D3q%fengguang.wu@intel.com>
Date: Tue, 19 Dec 2017 15:00:26 +0800
From: kbuild test robot <lkp@...el.com>
To: Ilya Lesokhin <ilyal@...lanox.com>
Cc: kbuild-all@...org, netdev@...r.kernel.org, davem@...emloft.net,
	davejwatson@...com, tom@...bertland.com,
	hannes@...essinduktion.org, borisp@...lanox.com,
	aviadye@...lanox.com, liranl@...lanox.com,
	Ilya Lesokhin <ilyal@...lanox.com>
Subject: Re: [PATCH v3 net-next 6/6] tls: Add generic NIC offload infrastructure.
Hi Ilya,
I love your patch! Perhaps something to improve:
[auto build test WARNING on net-next/master]
url: https://github.com/0day-ci/linux/commits/Ilya-Lesokhin/tls-Add-generic-NIC-offload-infrastructure/20171219-140819
config: xtensa-allmodconfig (attached as .config)
compiler: xtensa-linux-gcc (GCC) 7.2.0
reproduce:
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # save the attached .config to linux build tree
        make.cross ARCH=xtensa
All warnings (new ones prefixed by >>):
   net//tls/tls_device_fallback.c: In function 'tls_sw_fallback':
>> net//tls/tls_device_fallback.c:360:1: warning: the frame size of 1040 bytes is larger than 1024 bytes [-Wframe-larger-than=]
    }
    ^
vim +360 net//tls/tls_device_fallback.c
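The frame-size warning most likely comes from the two on-stack scatterlist
arrays declared at lines 228-229 below: with MAX_SKB_FRAGS at its common
value of 17 (4 KiB pages), sg_in alone holds 2 * (17 + 1) = 36 entries, and
at roughly 16-20 bytes per struct scatterlist on a 32-bit build that is on
the order of 600-700 bytes of stack before the other locals are counted.
One way back under the 1024-byte limit is sketched after the listing.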
   214	
   215	/* This function may be called after the user socket is already
   216	 * closed so make sure we don't use anything freed during
   217	 * tls_sk_proto_close here
   218	 */
   219	struct sk_buff *tls_sw_fallback(struct sock *sk, struct sk_buff *skb)
   220	{
   221		int tcp_header_size = tcp_hdrlen(skb);
   222		int tcp_payload_offset = skb_transport_offset(skb) + tcp_header_size;
   223		int payload_len = skb->len - tcp_payload_offset;
   224		struct tls_context *tls_ctx = tls_get_ctx(sk);
   225		struct tls_offload_context *ctx = tls_offload_ctx(tls_ctx);
   226		int remaining, buf_len, resync_sgs, rc, i = 0;
   227		void *buf, *dummy_buf, *iv, *aad;
   228		struct scatterlist sg_in[2 * (MAX_SKB_FRAGS + 1)];
   229		struct scatterlist sg_out[3];
   230		u32 tcp_seq = ntohl(tcp_hdr(skb)->seq);
   231		struct aead_request *aead_req;
   232		struct sk_buff *nskb = NULL;
   233		struct tls_record_info *record;
   234		unsigned long flags;
   235		s32 sync_size;
   236		u64 rcd_sn;
   237	
   238		if (!payload_len)
   239			return skb;
   240	
   241		sg_init_table(sg_in, ARRAY_SIZE(sg_in));
   242		sg_init_table(sg_out, ARRAY_SIZE(sg_out));
   243	
   244		spin_lock_irqsave(&ctx->lock, flags);
   245		record = tls_get_record(ctx, tcp_seq, &rcd_sn);
   246		if (!record) {
   247			spin_unlock_irqrestore(&ctx->lock, flags);
   248			WARN(1, "Record not found for seq %u\n", tcp_seq);
   249			goto free_orig;
   250		}
   251	
   252		sync_size = tcp_seq - tls_record_start_seq(record);
   253		if (sync_size < 0) {
   254			int is_start_marker = tls_record_is_start_marker(record);
   255	
   256			spin_unlock_irqrestore(&ctx->lock, flags);
   257			if (!is_start_marker)
   258				/* This should only occur if the relevant record was
   259				 * already acked. In that case it should be ok
   260				 * to drop the packet and avoid retransmission.
   261				 *
   262				 * There is a corner case where the packet contains
   263				 * both an acked and a non-acked record.
   264				 * We currently don't handle that case and rely
   265				 * on TCP to retransmit a packet that doesn't contain
   266				 * already acked payload.
   267				 */
   268				goto free_orig;
   269	
   270			if (payload_len > -sync_size) {
   271				WARN(1, "Fallback of partially offloaded packets is not supported\n");
   272				goto free_orig;
   273			} else {
   274				return skb;
   275			}
   276		}
   277	
   278		remaining = sync_size;
   279		while (remaining > 0) {
   280			skb_frag_t *frag = &record->frags[i];
   281	
   282			__skb_frag_ref(frag);
   283			sg_set_page(sg_in + i, skb_frag_page(frag),
   284				    skb_frag_size(frag), frag->page_offset);
   285	
   286			remaining -= skb_frag_size(frag);
   287	
   288			if (remaining < 0)
   289				sg_in[i].length += remaining;
   290	
   291			i++;
   292		}
   293		spin_unlock_irqrestore(&ctx->lock, flags);
   294		resync_sgs = i;
   295	
   296		aead_req = tls_alloc_aead_request(ctx->aead_send, GFP_ATOMIC);
   297		if (!aead_req)
   298			goto put_sg;
   299	
   300		buf_len = TLS_CIPHER_AES_GCM_128_SALT_SIZE +
   301			  TLS_CIPHER_AES_GCM_128_IV_SIZE +
   302			  TLS_AAD_SPACE_SIZE +
   303			  sync_size +
   304			  tls_ctx->tag_size;
   305		buf = kmalloc(buf_len, GFP_ATOMIC);
   306		if (!buf)
   307			goto free_req;
   308	
   309		nskb = alloc_skb(skb_headroom(skb) + skb->len, GFP_ATOMIC);
   310		if (!nskb)
   311			goto free_buf;
   312	
   313		skb_reserve(nskb, skb_headroom(skb));
   314	
   315		iv = buf;
   316	
   317		memcpy(iv, tls_ctx->crypto_send_aes_gcm_128.salt,
   318		       TLS_CIPHER_AES_GCM_128_SALT_SIZE);
   319		aad = buf + TLS_CIPHER_AES_GCM_128_SALT_SIZE +
   320		      TLS_CIPHER_AES_GCM_128_IV_SIZE;
   321		dummy_buf = aad + TLS_AAD_SPACE_SIZE;
   322	
   323		sg_set_buf(&sg_out[0], dummy_buf, sync_size);
   324		sg_set_buf(&sg_out[1], nskb->data + tcp_payload_offset,
   325			   payload_len);
   326		/* Add room for authentication tag produced by crypto */
   327		dummy_buf += sync_size;
   328		sg_set_buf(&sg_out[2], dummy_buf, tls_ctx->tag_size);
   329		rc = skb_to_sgvec(skb, &sg_in[i], tcp_payload_offset,
   330				  payload_len);
   331		if (rc < 0)
   332			goto free_nskb;
   333	
   334		rc = tls_enc_records(aead_req, ctx->aead_send, sg_in, sg_out, aad, iv,
   335				     rcd_sn, sync_size + payload_len);
   336		if (rc < 0)
   337			goto free_nskb;
   338	
   339		complete_skb(nskb, skb, tcp_payload_offset);
   340	
   341		/* validate_xmit_skb_list assumes that if the skb wasn't segmented
   342		 * nskb->prev will point to the skb itself
   343		 */
   344		nskb->prev = nskb;
   345	free_buf:
   346		kfree(buf);
   347	free_req:
   348		kfree(aead_req);
   349	put_sg:
   350		for (i = 0; i < resync_sgs; i++)
   351			put_page(sg_page(&sg_in[i]));
   352	free_orig:
   353		kfree_skb(skb);
   354		return nskb;
   355	
   356	free_nskb:
   357		kfree_skb(nskb);
   358		nskb = NULL;
   359		goto free_buf;
 > 360	}
   361	
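A minimal sketch of the heap-allocation approach, assuming the entry count
stays 2 * (MAX_SKB_FRAGS + 1) and that GFP_ATOMIC is still the right
allocation context for this path; the helper name below is hypothetical and
not part of the patch:

#include <linux/scatterlist.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

/* Allocate the sg_in table from the heap instead of the stack so it
 * no longer counts against tls_sw_fallback()'s frame size.  The table
 * must be kfree()d after the pages it references have been put.
 */
static struct scatterlist *tls_fallback_alloc_sg_in(void)
{
	struct scatterlist *sg_in;

	sg_in = kmalloc_array(2 * (MAX_SKB_FRAGS + 1), sizeof(*sg_in),
			      GFP_ATOMIC);
	if (!sg_in)
		return NULL;

	sg_init_table(sg_in, 2 * (MAX_SKB_FRAGS + 1));
	return sg_in;
}

Alternatively, the table could be folded into the existing buf allocation
at line 305, or kept in struct tls_offload_context so it is allocated once
per socket rather than once per packet.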
---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation