[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <c04b96f2ed41c099d4f43fb1782e1ef84c41daf3.1624239422.git.lucien.xin@gmail.com>
Date: Sun, 20 Jun 2021 21:38:47 -0400
From: Xin Long <lucien.xin@...il.com>
To: network dev <netdev@...r.kernel.org>, davem@...emloft.net,
kuba@...nel.org,
Marcelo Ricardo Leitner <marcelo.leitner@...il.com>,
linux-sctp@...r.kernel.org
Subject: [PATCH net-next 12/14] sctp: extract sctp_v6_err_handle function from sctp_v6_err
This patch extracts sctp_v6_err_handle() from sctp_v6_err() so that it
only handles the ICMP error after the sock lookup; this also makes
the code clearer.
sctp_v6_err_handle() will be used in SCTP-over-UDP's error handling
in the following patch.
Signed-off-by: Xin Long <lucien.xin@...il.com>
---
net/sctp/ipv6.c | 76 ++++++++++++++++++++++++++-----------------------
1 file changed, 40 insertions(+), 36 deletions(-)
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 50ed4de18069..6ad422f2d0d0 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -122,50 +122,28 @@ static struct notifier_block sctp_inet6addr_notifier = {
.notifier_call = sctp_inet6addr_event,
};
-/* ICMP error handler. */
-static int sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
- u8 type, u8 code, int offset, __be32 info)
+static void sctp_v6_err_handle(struct sctp_transport *t, struct sk_buff *skb,
+ __u8 type, __u8 code, __u32 info)
{
- struct sock *sk;
- struct sctp_association *asoc;
- struct sctp_transport *transport;
+ struct sctp_association *asoc = t->asoc;
+ struct sock *sk = asoc->base.sk;
struct ipv6_pinfo *np;
- __u16 saveip, savesctp;
- int err, ret = 0;
- struct net *net = dev_net(skb->dev);
-
- /* Fix up skb to look at the embedded net header. */
- saveip = skb->network_header;
- savesctp = skb->transport_header;
- skb_reset_network_header(skb);
- skb_set_transport_header(skb, offset);
- sk = sctp_err_lookup(net, AF_INET6, skb, sctp_hdr(skb), &asoc, &transport);
- /* Put back, the original pointers. */
- skb->network_header = saveip;
- skb->transport_header = savesctp;
- if (!sk) {
- __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
- return -ENOENT;
- }
-
- /* Warning: The sock lock is held. Remember to call
- * sctp_err_finish!
- */
+ int err = 0;
switch (type) {
case ICMPV6_PKT_TOOBIG:
if (ip6_sk_accept_pmtu(sk))
- sctp_icmp_frag_needed(sk, asoc, transport, ntohl(info));
- goto out_unlock;
+ sctp_icmp_frag_needed(sk, asoc, t, info);
+ return;
case ICMPV6_PARAMPROB:
if (ICMPV6_UNK_NEXTHDR == code) {
- sctp_icmp_proto_unreachable(sk, asoc, transport);
- goto out_unlock;
+ sctp_icmp_proto_unreachable(sk, asoc, t);
+ return;
}
break;
case NDISC_REDIRECT:
- sctp_icmp_redirect(sk, transport, skb);
- goto out_unlock;
+ sctp_icmp_redirect(sk, t, skb);
+ return;
default:
break;
}
@@ -175,13 +153,39 @@ static int sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
if (!sock_owned_by_user(sk) && np->recverr) {
sk->sk_err = err;
sk->sk_error_report(sk);
- } else { /* Only an error on timeout */
+ } else {
sk->sk_err_soft = err;
}
+}
+
+/* ICMP error handler. */
+static int sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ u8 type, u8 code, int offset, __be32 info)
+{
+ struct net *net = dev_net(skb->dev);
+ struct sctp_transport *transport;
+ struct sctp_association *asoc;
+ __u16 saveip, savesctp;
+ struct sock *sk;
+
+ /* Fix up skb to look at the embedded net header. */
+ saveip = skb->network_header;
+ savesctp = skb->transport_header;
+ skb_reset_network_header(skb);
+ skb_set_transport_header(skb, offset);
+ sk = sctp_err_lookup(net, AF_INET6, skb, sctp_hdr(skb), &asoc, &transport);
+ /* Put back, the original pointers. */
+ skb->network_header = saveip;
+ skb->transport_header = savesctp;
+ if (!sk) {
+ __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
+ return -ENOENT;
+ }
-out_unlock:
+ sctp_v6_err_handle(transport, skb, type, code, ntohl(info));
sctp_err_finish(sk, transport);
- return ret;
+
+ return 0;
}
static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *t)
--
2.27.0
Powered by blists - more mailing lists