diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 62c42e9..98a71f1 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -638,7 +638,8 @@ static void nf_ct_acct_merge(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
 /* Resolve race on insertion if this protocol allows this. */
 static int nf_ct_resolve_clash(struct net *net, struct sk_buff *skb,
 			       enum ip_conntrack_info ctinfo,
-			       struct nf_conntrack_tuple_hash *h)
+			       struct nf_conntrack_tuple_hash *h,
+			       struct nf_conn **old_ct)
 {
 	/* This is the conntrack entry already in hashes that won race. */
 	struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
@@ -649,7 +650,7 @@ static int nf_ct_resolve_clash(struct net *net, struct sk_buff *skb,
 	    !nf_ct_is_dying(ct) &&
 	    atomic_inc_not_zero(&ct->ct_general.use)) {
 		nf_ct_acct_merge(ct, ctinfo, (struct nf_conn *)skb->nfct);
-		nf_conntrack_put(skb->nfct);
+		*old_ct = (struct nf_conn *)skb->nfct;
 		/* Assign conntrack already in hashes to this skbuff. Don't
 		 * modify skb->nfctinfo to ensure consistent stateful filtering.
 		 */
@@ -667,7 +668,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
 	const struct nf_conntrack_zone *zone;
 	unsigned int hash, reply_hash;
 	struct nf_conntrack_tuple_hash *h;
-	struct nf_conn *ct;
+	struct nf_conn *ct, *old_ct = NULL;
 	struct nf_conn_help *help;
 	struct nf_conn_tstamp *tstamp;
 	struct hlist_nulls_node *n;
@@ -771,11 +772,14 @@ __nf_conntrack_confirm(struct sk_buff *skb)
 
 out:
 	nf_ct_add_to_dying_list(ct);
-	ret = nf_ct_resolve_clash(net, skb, ctinfo, h);
+	ret = nf_ct_resolve_clash(net, skb, ctinfo, h, &old_ct);
 dying:
 	nf_conntrack_double_unlock(hash, reply_hash);
 	NF_CT_STAT_INC(net, insert_failed);
 	local_bh_enable();
+	if (old_ct)
+		nf_ct_put(old_ct);
+
 	return ret;
 }
 EXPORT_SYMBOL_GPL(__nf_conntrack_confirm);