Date:   Wed, 5 Aug 2020 14:05:55 +1000
From:   Stephen Rothwell <sfr@...b.auug.org.au>
To:     David Miller <davem@...emloft.net>,
        Networking <netdev@...r.kernel.org>
Cc:     Kees Cook <keescook@...omium.org>,
        Linux Next Mailing List <linux-next@...r.kernel.org>,
        Linux Kernel Mailing List <linux-kernel@...r.kernel.org>,
        Christoph Hellwig <hch@....de>
Subject: Re: linux-next: manual merge of the kspp tree with the net-next
 tree

Hi all,

On Mon, 27 Jul 2020 19:27:21 +1000 Stephen Rothwell <sfr@...b.auug.org.au> wrote:
>
> Today's linux-next merge of the kspp tree got a conflict in:
> 
>   net/ipv6/ip6_flowlabel.c
> 
> between commit:
> 
>   ff6a4cf214ef ("net/ipv6: split up ipv6_flowlabel_opt")
> 
> from the net-next tree and commit:
> 
>   3f649ab728cd ("treewide: Remove uninitialized_var() usage")
> 
> from the kspp tree.
> 
> I fixed it up (see below) and can carry the fix as necessary. This
> is now fixed as far as linux-next is concerned, but any non-trivial
> conflicts should be mentioned to your upstream maintainer when your tree
> is submitted for merging.  You may also want to consider cooperating
> with the maintainer of the conflicting tree to minimise any particularly
> complex conflicts.
> 
> diff --cc net/ipv6/ip6_flowlabel.c
> index 215b6f5e733e,73bb047e6037..000000000000
> --- a/net/ipv6/ip6_flowlabel.c
> +++ b/net/ipv6/ip6_flowlabel.c
> @@@ -534,184 -533,181 +534,184 @@@ int ipv6_flowlabel_opt_get(struct sock 
>   	return -ENOENT;
>   }
>   
>  -int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
>  +#define socklist_dereference(__sflp) \
>  +	rcu_dereference_protected(__sflp, lockdep_is_held(&ip6_sk_fl_lock))
>  +
>  +static int ipv6_flowlabel_put(struct sock *sk, struct in6_flowlabel_req *freq)
>   {
>  -	int err;
>  -	struct net *net = sock_net(sk);
>   	struct ipv6_pinfo *np = inet6_sk(sk);
>  -	struct in6_flowlabel_req freq;
>  -	struct ipv6_fl_socklist *sfl1 = NULL;
>  -	struct ipv6_fl_socklist *sfl;
>   	struct ipv6_fl_socklist __rcu **sflp;
>  -	struct ip6_flowlabel *fl, *fl1 = NULL;
>  +	struct ipv6_fl_socklist *sfl;
>   
>  +	if (freq->flr_flags & IPV6_FL_F_REFLECT) {
>  +		if (sk->sk_protocol != IPPROTO_TCP)
>  +			return -ENOPROTOOPT;
>  +		if (!np->repflow)
>  +			return -ESRCH;
>  +		np->flow_label = 0;
>  +		np->repflow = 0;
>  +		return 0;
>  +	}
>   
>  -	if (optlen < sizeof(freq))
>  -		return -EINVAL;
>  +	spin_lock_bh(&ip6_sk_fl_lock);
>  +	for (sflp = &np->ipv6_fl_list;
>  +	     (sfl = socklist_dereference(*sflp)) != NULL;
>  +	     sflp = &sfl->next) {
>  +		if (sfl->fl->label == freq->flr_label)
>  +			goto found;
>  +	}
>  +	spin_unlock_bh(&ip6_sk_fl_lock);
>  +	return -ESRCH;
>  +found:
>  +	if (freq->flr_label == (np->flow_label & IPV6_FLOWLABEL_MASK))
>  +		np->flow_label &= ~IPV6_FLOWLABEL_MASK;
>  +	*sflp = sfl->next;
>  +	spin_unlock_bh(&ip6_sk_fl_lock);
>  +	fl_release(sfl->fl);
>  +	kfree_rcu(sfl, rcu);
>  +	return 0;
>  +}
>   
>  -	if (copy_from_user(&freq, optval, sizeof(freq)))
>  -		return -EFAULT;
>  +static int ipv6_flowlabel_renew(struct sock *sk, struct in6_flowlabel_req *freq)
>  +{
>  +	struct ipv6_pinfo *np = inet6_sk(sk);
>  +	struct net *net = sock_net(sk);
>  +	struct ipv6_fl_socklist *sfl;
>  +	int err;
>   
>  -	switch (freq.flr_action) {
>  -	case IPV6_FL_A_PUT:
>  -		if (freq.flr_flags & IPV6_FL_F_REFLECT) {
>  -			if (sk->sk_protocol != IPPROTO_TCP)
>  -				return -ENOPROTOOPT;
>  -			if (!np->repflow)
>  -				return -ESRCH;
>  -			np->flow_label = 0;
>  -			np->repflow = 0;
>  -			return 0;
>  -		}
>  -		spin_lock_bh(&ip6_sk_fl_lock);
>  -		for (sflp = &np->ipv6_fl_list;
>  -		     (sfl = rcu_dereference_protected(*sflp,
>  -						      lockdep_is_held(&ip6_sk_fl_lock))) != NULL;
>  -		     sflp = &sfl->next) {
>  -			if (sfl->fl->label == freq.flr_label) {
>  -				if (freq.flr_label == (np->flow_label&IPV6_FLOWLABEL_MASK))
>  -					np->flow_label &= ~IPV6_FLOWLABEL_MASK;
>  -				*sflp = sfl->next;
>  -				spin_unlock_bh(&ip6_sk_fl_lock);
>  -				fl_release(sfl->fl);
>  -				kfree_rcu(sfl, rcu);
>  -				return 0;
>  -			}
>  +	rcu_read_lock_bh();
>  +	for_each_sk_fl_rcu(np, sfl) {
>  +		if (sfl->fl->label == freq->flr_label) {
>  +			err = fl6_renew(sfl->fl, freq->flr_linger,
>  +					freq->flr_expires);
>  +			rcu_read_unlock_bh();
>  +			return err;
>   		}
>  -		spin_unlock_bh(&ip6_sk_fl_lock);
>  -		return -ESRCH;
>  +	}
>  +	rcu_read_unlock_bh();
>   
>  -	case IPV6_FL_A_RENEW:
>  -		rcu_read_lock_bh();
>  -		for_each_sk_fl_rcu(np, sfl) {
>  -			if (sfl->fl->label == freq.flr_label) {
>  -				err = fl6_renew(sfl->fl, freq.flr_linger, freq.flr_expires);
>  -				rcu_read_unlock_bh();
>  -				return err;
>  -			}
>  -		}
>  -		rcu_read_unlock_bh();
>  +	if (freq->flr_share == IPV6_FL_S_NONE &&
>  +	    ns_capable(net->user_ns, CAP_NET_ADMIN)) {
>  +		struct ip6_flowlabel *fl = fl_lookup(net, freq->flr_label);
>   
>  -		if (freq.flr_share == IPV6_FL_S_NONE &&
>  -		    ns_capable(net->user_ns, CAP_NET_ADMIN)) {
>  -			fl = fl_lookup(net, freq.flr_label);
>  -			if (fl) {
>  -				err = fl6_renew(fl, freq.flr_linger, freq.flr_expires);
>  -				fl_release(fl);
>  -				return err;
>  -			}
>  +		if (fl) {
>  +			err = fl6_renew(fl, freq->flr_linger,
>  +					freq->flr_expires);
>  +			fl_release(fl);
>  +			return err;
>   		}
>  -		return -ESRCH;
>  -
>  -	case IPV6_FL_A_GET:
>  -		if (freq.flr_flags & IPV6_FL_F_REFLECT) {
>  -			struct net *net = sock_net(sk);
>  -			if (net->ipv6.sysctl.flowlabel_consistency) {
>  -				net_info_ratelimited("Can not set IPV6_FL_F_REFLECT if flowlabel_consistency sysctl is enable\n");
>  -				return -EPERM;
>  -			}
>  +	}
>  +	return -ESRCH;
>  +}
>   
>  -			if (sk->sk_protocol != IPPROTO_TCP)
>  -				return -ENOPROTOOPT;
>  +static int ipv6_flowlabel_get(struct sock *sk, struct in6_flowlabel_req *freq,
>  +		sockptr_t optval, int optlen)
>  +{
>  +	struct ipv6_fl_socklist *sfl, *sfl1 = NULL;
>  +	struct ip6_flowlabel *fl, *fl1 = NULL;
>  +	struct ipv6_pinfo *np = inet6_sk(sk);
>  +	struct net *net = sock_net(sk);
> - 	int uninitialized_var(err);
> ++	int err;
>   
>  -			np->repflow = 1;
>  -			return 0;
>  +	if (freq->flr_flags & IPV6_FL_F_REFLECT) {
>  +		if (net->ipv6.sysctl.flowlabel_consistency) {
>  +			net_info_ratelimited("Can not set IPV6_FL_F_REFLECT if flowlabel_consistency sysctl is enable\n");
>  +			return -EPERM;
>   		}
>   
>  -		if (freq.flr_label & ~IPV6_FLOWLABEL_MASK)
>  -			return -EINVAL;
>  +		if (sk->sk_protocol != IPPROTO_TCP)
>  +			return -ENOPROTOOPT;
>  +		np->repflow = 1;
>  +		return 0;
>  +	}
>   
>  -		if (net->ipv6.sysctl.flowlabel_state_ranges &&
>  -		    (freq.flr_label & IPV6_FLOWLABEL_STATELESS_FLAG))
>  -			return -ERANGE;
>  +	if (freq->flr_label & ~IPV6_FLOWLABEL_MASK)
>  +		return -EINVAL;
>  +	if (net->ipv6.sysctl.flowlabel_state_ranges &&
>  +	    (freq->flr_label & IPV6_FLOWLABEL_STATELESS_FLAG))
>  +		return -ERANGE;
>   
>  -		fl = fl_create(net, sk, &freq, optval, optlen, &err);
>  -		if (!fl)
>  -			return err;
>  -		sfl1 = kmalloc(sizeof(*sfl1), GFP_KERNEL);
>  +	fl = fl_create(net, sk, freq, optval, optlen, &err);
>  +	if (!fl)
>  +		return err;
>   
>  -		if (freq.flr_label) {
>  -			err = -EEXIST;
>  -			rcu_read_lock_bh();
>  -			for_each_sk_fl_rcu(np, sfl) {
>  -				if (sfl->fl->label == freq.flr_label) {
>  -					if (freq.flr_flags&IPV6_FL_F_EXCL) {
>  -						rcu_read_unlock_bh();
>  -						goto done;
>  -					}
>  -					fl1 = sfl->fl;
>  -					if (!atomic_inc_not_zero(&fl1->users))
>  -						fl1 = NULL;
>  -					break;
>  +	sfl1 = kmalloc(sizeof(*sfl1), GFP_KERNEL);
>  +
>  +	if (freq->flr_label) {
>  +		err = -EEXIST;
>  +		rcu_read_lock_bh();
>  +		for_each_sk_fl_rcu(np, sfl) {
>  +			if (sfl->fl->label == freq->flr_label) {
>  +				if (freq->flr_flags & IPV6_FL_F_EXCL) {
>  +					rcu_read_unlock_bh();
>  +					goto done;
>   				}
>  +				fl1 = sfl->fl;
>  +				if (!atomic_inc_not_zero(&fl1->users))
>  +					fl1 = NULL;
>  +				break;
>   			}
>  -			rcu_read_unlock_bh();
>  +		}
>  +		rcu_read_unlock_bh();
>   
>  -			if (!fl1)
>  -				fl1 = fl_lookup(net, freq.flr_label);
>  -			if (fl1) {
>  +		if (!fl1)
>  +			fl1 = fl_lookup(net, freq->flr_label);
>  +		if (fl1) {
>   recheck:
>  -				err = -EEXIST;
>  -				if (freq.flr_flags&IPV6_FL_F_EXCL)
>  -					goto release;
>  -				err = -EPERM;
>  -				if (fl1->share == IPV6_FL_S_EXCL ||
>  -				    fl1->share != fl->share ||
>  -				    ((fl1->share == IPV6_FL_S_PROCESS) &&
>  -				     (fl1->owner.pid != fl->owner.pid)) ||
>  -				    ((fl1->share == IPV6_FL_S_USER) &&
>  -				     !uid_eq(fl1->owner.uid, fl->owner.uid)))
>  -					goto release;
>  -
>  -				err = -ENOMEM;
>  -				if (!sfl1)
>  -					goto release;
>  -				if (fl->linger > fl1->linger)
>  -					fl1->linger = fl->linger;
>  -				if ((long)(fl->expires - fl1->expires) > 0)
>  -					fl1->expires = fl->expires;
>  -				fl_link(np, sfl1, fl1);
>  -				fl_free(fl);
>  -				return 0;
>  +			err = -EEXIST;
>  +			if (freq->flr_flags&IPV6_FL_F_EXCL)
>  +				goto release;
>  +			err = -EPERM;
>  +			if (fl1->share == IPV6_FL_S_EXCL ||
>  +			    fl1->share != fl->share ||
>  +			    ((fl1->share == IPV6_FL_S_PROCESS) &&
>  +			     (fl1->owner.pid != fl->owner.pid)) ||
>  +			    ((fl1->share == IPV6_FL_S_USER) &&
>  +			     !uid_eq(fl1->owner.uid, fl->owner.uid)))
>  +				goto release;
>  +
>  +			err = -ENOMEM;
>  +			if (!sfl1)
>  +				goto release;
>  +			if (fl->linger > fl1->linger)
>  +				fl1->linger = fl->linger;
>  +			if ((long)(fl->expires - fl1->expires) > 0)
>  +				fl1->expires = fl->expires;
>  +			fl_link(np, sfl1, fl1);
>  +			fl_free(fl);
>  +			return 0;
>   
>   release:
>  -				fl_release(fl1);
>  -				goto done;
>  -			}
>  -		}
>  -		err = -ENOENT;
>  -		if (!(freq.flr_flags&IPV6_FL_F_CREATE))
>  +			fl_release(fl1);
>   			goto done;
>  +		}
>  +	}
>  +	err = -ENOENT;
>  +	if (!(freq->flr_flags & IPV6_FL_F_CREATE))
>  +		goto done;
>   
>  -		err = -ENOMEM;
>  -		if (!sfl1)
>  -			goto done;
>  +	err = -ENOMEM;
>  +	if (!sfl1)
>  +		goto done;
>   
>  -		err = mem_check(sk);
>  -		if (err != 0)
>  -			goto done;
>  +	err = mem_check(sk);
>  +	if (err != 0)
>  +		goto done;
>   
>  -		fl1 = fl_intern(net, fl, freq.flr_label);
>  -		if (fl1)
>  -			goto recheck;
>  +	fl1 = fl_intern(net, fl, freq->flr_label);
>  +	if (fl1)
>  +		goto recheck;
>   
>  -		if (!freq.flr_label) {
>  -			if (copy_to_user(&((struct in6_flowlabel_req __user *) optval)->flr_label,
>  -					 &fl->label, sizeof(fl->label))) {
>  -				/* Intentionally ignore fault. */
>  -			}
>  +	if (!freq->flr_label) {
>  +		sockptr_advance(optval,
>  +				offsetof(struct in6_flowlabel_req, flr_label));
>  +		if (copy_to_sockptr(optval, &fl->label, sizeof(fl->label))) {
>  +			/* Intentionally ignore fault. */
>   		}
>  -
>  -		fl_link(np, sfl1, fl);
>  -		return 0;
>  -
>  -	default:
>  -		return -EINVAL;
>   	}
>   
>  +	fl_link(np, sfl1, fl);
>  +	return 0;
>   done:
>   	fl_free(fl);
>   	kfree(sfl1);
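
(A note on the resolution above, in case the context helps: uninitialized_var()
was a kernel macro that silenced "may be used uninitialized" warnings by making
the variable self-initializing; commit 3f649ab728cd from the kspp tree removes
all of its uses, so the resolved hunk keeps the plain "int err;" declaration.
The sketch below shows roughly what the GCC variant of the macro expanded to,
purely as an illustration, not as part of the fix itself.)

	/* Approximate historical definition (illustration only): a
	 * self-assignment quiets -Wmaybe-uninitialized without actually
	 * initializing the value, which is why the macro was removed. */
	#define uninitialized_var(x) x = x

	/* After commit 3f649ab728cd, call sites simply declare the
	 * variable, as the resolved hunk above does: */
	int err;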

This is now a conflict between the net-next tree and Linus' tree.

-- 
Cheers,
Stephen Rothwell

