Message-Id: <1433774798.3152487.289784457.4E5FCE86@webmail.messagingengine.com>
Date:	Mon, 08 Jun 2015 16:46:38 +0200
From:	Hannes Frederic Sowa <hannes@...essinduktion.org>
To:	mleitner@...hat.com, Neil Horman <nhorman@...driver.com>,
	netdev@...r.kernel.org
Cc:	linux-sctp@...r.kernel.org, Daniel Borkmann <daniel@...earbox.net>,
	Vlad Yasevich <vyasevich@...il.com>,
	Michio Honda <micchie@....wide.ad.jp>
Subject: Re: [PATCH v3 1/2] sctp: rcu-ify addr_waitq

Hi Marcelo,

a few hints on the rcuification; sorry for reviewing the code so late:

On Fri, Jun 5, 2015, at 19:08, mleitner@...hat.com wrote:
> From: Marcelo Ricardo Leitner <marcelo.leitner@...il.com>
> 
> That's needed for the next patch, so we break the lock inversion between
> netns_sctp->addr_wq_lock and socket lock on
> sctp_addr_wq_timeout_handler(). With this, we can traverse addr_waitq
> without taking addr_wq_lock, taking it just for the write operations.
> 
> Signed-off-by: Marcelo Ricardo Leitner <marcelo.leitner@...il.com>
> ---
> 
> Notes:
>     v2->v3:
>       placed break statement on sctp_free_addr_wq_entry()
>       removed unnecessary spin_lock noticed by Neil
> 
>  include/net/netns/sctp.h |  2 +-
>  net/sctp/protocol.c      | 80 +++++++++++++++++++++++++++++-------------------
>  2 files changed, 49 insertions(+), 33 deletions(-)
> 
> diff --git a/include/net/netns/sctp.h b/include/net/netns/sctp.h
> index 3573a81815ad9e0efb6ceb721eb066d3726419f0..9e53412c4ed829e8e45777a6d95406d490dbaa75 100644
> --- a/include/net/netns/sctp.h
> +++ b/include/net/netns/sctp.h
> @@ -28,7 +28,7 @@ struct netns_sctp {
>  	 * It is a list of sctp_sockaddr_entry.
>  	 */
>  	struct list_head local_addr_list;
> -       struct list_head addr_waitq;
> +       struct list_head __rcu addr_waitq;
>  	struct timer_list addr_wq_timer;
>  	struct list_head auto_asconf_splist;
>  	spinlock_t addr_wq_lock;
> diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
> index 53b7acde9aa37bf3d4029c459421564d5270f4c0..9954fb8c9a9455d5ad7a627e2d7f9a1fef861fc2 100644
> --- a/net/sctp/protocol.c
> +++ b/net/sctp/protocol.c
> @@ -593,15 +593,47 @@ static void sctp_v4_ecn_capable(struct sock *sk)
>  	INET_ECN_xmit(sk);
>  }
>  
> +static void sctp_free_addr_wq(struct net *net)
> +{
> +       struct sctp_sockaddr_entry *addrw;
> +
> +       spin_lock_bh(&net->sctp.addr_wq_lock);

Instead of holding spin_lock_bh you need to hold rcu_read_lock_bh, so
that kfree_rcu does not call the free function at once (in theory ;) ).
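
To illustrate the rule I am invoking, a minimal read-side sketch
(hypothetical struct entry, not the sctp types):

#include <linux/list.h>
#include <linux/rcupdate.h>

struct entry {
	struct list_head list;
	struct rcu_head rcu;
};

static void walk_entries(struct list_head *head)
{
	struct entry *e;

	rcu_read_lock_bh();
	list_for_each_entry_rcu(e, head, list) {
		/* Use e here: a concurrent writer may unlink it with
		 * list_del_rcu(), but kfree_rcu() only invokes the free
		 * callback after a grace period, i.e. after we have
		 * left this read-side critical section.
		 */
	}
	rcu_read_unlock_bh();
}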

> +       del_timer(&net->sctp.addr_wq_timer);
> +       list_for_each_entry_rcu(addrw, &net->sctp.addr_waitq, list) {
> +               list_del_rcu(&addrw->list);
> +               kfree_rcu(addrw, rcu);
> +       }
> +       spin_unlock_bh(&net->sctp.addr_wq_lock);
> +}
> +
> +/* As there is no refcnt on sctp_sockaddr_entry, we must check inside
> + * the lock if it wasn't removed from addr_waitq already, otherwise we
> + * could double-free it.
> + */
> +static void sctp_free_addr_wq_entry(struct net *net,
> +                                   struct sctp_sockaddr_entry *addrw)
> +{
> +       struct sctp_sockaddr_entry *temp;
> +
> +       spin_lock_bh(&net->sctp.addr_wq_lock);

I don't think this spin_lock operation is needed. The del_timer
functions do synchronize themselves.

> +       list_for_each_entry_rcu(temp, &net->sctp.addr_waitq, list) {
> +               if (temp == addrw) {
> +                       list_del_rcu(&addrw->list);
> +                       kfree_rcu(addrw, rcu);
> +                       break;
> +               }
> +       }
> +       spin_unlock_bh(&net->sctp.addr_wq_lock);
> +}
> +
>  static void sctp_addr_wq_timeout_handler(unsigned long arg)
>  {
>  	struct net *net = (struct net *)arg;
> -       struct sctp_sockaddr_entry *addrw, *temp;
> +       struct sctp_sockaddr_entry *addrw;
>  	struct sctp_sock *sp;
>  
> -       spin_lock_bh(&net->sctp.addr_wq_lock);
> -
> -       list_for_each_entry_safe(addrw, temp, &net->sctp.addr_waitq, list) {
> +       rcu_read_lock_bh();
> +       list_for_each_entry_rcu(addrw, &net->sctp.addr_waitq, list) {
>  		pr_debug("%s: the first ent in wq:%p is addr:%pISc for cmd:%d at "
>  			 "entry:%p\n", __func__, &net->sctp.addr_waitq, &addrw->a.sa,
>  			 addrw->state, addrw);
> @@ -647,35 +679,20 @@ static void sctp_addr_wq_timeout_handler(unsigned long arg)
>  #if IS_ENABLED(CONFIG_IPV6)
>  free_next:
>  #endif
> -               list_del(&addrw->list);
> -               kfree(addrw);
> -       }
> -       spin_unlock_bh(&net->sctp.addr_wq_lock);
> -}
> -
> -static void sctp_free_addr_wq(struct net *net)
> -{
> -       struct sctp_sockaddr_entry *addrw;
> -       struct sctp_sockaddr_entry *temp;
> -
> -       spin_lock_bh(&net->sctp.addr_wq_lock);
> -       del_timer(&net->sctp.addr_wq_timer);
> -       list_for_each_entry_safe(addrw, temp, &net->sctp.addr_waitq, list) {
> -               list_del(&addrw->list);
> -               kfree(addrw);
> +               sctp_free_addr_wq_entry(net, addrw);
>  	}
> -       spin_unlock_bh(&net->sctp.addr_wq_lock);
> +       rcu_read_unlock_bh();
>  }
>  

This code looks strange to me: you take rcu_read_lock_bh and walk the
addr_waitq list just to pass each pointer to the sctp_free_addr_wq_entry
free function, which then walks the list again just to compare the
pointers?
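
Condensed to its skeleton (reusing the hypothetical struct entry from
the sketch above; lock and list head as globals for brevity):

#include <linux/spinlock.h>

static LIST_HEAD(head);			/* the wait queue */
static DEFINE_SPINLOCK(lock);		/* its writer lock */

static void free_entry(struct entry *e)
{
	struct entry *tmp;

	spin_lock_bh(&lock);
	list_for_each_entry_rcu(tmp, &head, list) {	/* walk #2 */
		if (tmp == e) {
			list_del_rcu(&e->list);
			kfree_rcu(e, rcu);
			break;
		}
	}
	spin_unlock_bh(&lock);
}

static void free_all(void)
{
	struct entry *e;

	rcu_read_lock_bh();
	list_for_each_entry_rcu(e, &head, list)		/* walk #1 */
		free_entry(e);
	rcu_read_unlock_bh();
}

So every removal triggers a second scan of the list just to revalidate
the pointer.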

>  /* lookup the entry for the same address in the addr_waitq
> - * sctp_addr_wq MUST be locked
> + * rcu read MUST be locked
>   */
>  static struct sctp_sockaddr_entry *sctp_addr_wq_lookup(struct net *net,
>  					struct sctp_sockaddr_entry *addr)
>  {
>  	struct sctp_sockaddr_entry *addrw;
>  
> -       list_for_each_entry(addrw, &net->sctp.addr_waitq, list) {
> +       list_for_each_entry_rcu(addrw, &net->sctp.addr_waitq, list) {
>  		if (addrw->a.sa.sa_family != addr->a.sa.sa_family)
>  			continue;
>  		if (addrw->a.sa.sa_family == AF_INET) {
> @@ -702,7 +719,7 @@ void sctp_addr_wq_mgmt(struct net *net, struct sctp_sockaddr_entry *addr, int cm
>  	 * new address after a couple of addition and deletion of that address
>  	 */
>  
> -       spin_lock_bh(&net->sctp.addr_wq_lock);
> +       rcu_read_lock_bh();
>  	/* Offsets existing events in addr_wq */
>  	addrw = sctp_addr_wq_lookup(net, addr);
>  	if (addrw) {
> @@ -710,22 +727,21 @@ void sctp_addr_wq_mgmt(struct net *net, struct sctp_sockaddr_entry *addr, int cm
>  			pr_debug("%s: offsets existing entry for %d, addr:%pISc "
>  				 "in wq:%p\n", __func__, addrw->state, &addrw->a.sa,
>  				 &net->sctp.addr_waitq);
> -
> -                       list_del(&addrw->list);
> -                       kfree(addrw);
> +                       sctp_free_addr_wq_entry(net, addrw);
>  		}
> -               spin_unlock_bh(&net->sctp.addr_wq_lock);
> +               rcu_read_unlock_bh();
>  		return;
>  	}
> +       rcu_read_unlock_bh();
>  
>  	/* OK, we have to add the new address to the wait queue */
>  	addrw = kmemdup(addr, sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
> -       if (addrw == NULL) {
> -               spin_unlock_bh(&net->sctp.addr_wq_lock);
> +       if (!addrw)
>  		return;
> -       }
>  	addrw->state = cmd;
> -       list_add_tail(&addrw->list, &net->sctp.addr_waitq);
> +
> +       spin_lock_bh(&net->sctp.addr_wq_lock);
> +       list_add_tail_rcu(&addrw->list, &net->sctp.addr_waitq);

The list_rcu functions can in general run concurrently without
spin_locks taken; is this one necessary?
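
For reference, the usual split (see Documentation/RCU/listRCU.txt) is
that RCU only eliminates the lock on the read side; writers still have
to serialize against each other. So whether the spin_lock is needed
depends on whether two sctp_addr_wq_mgmt() calls can race on the same
netns. As a sketch, with the hypothetical names from above:

	/* read side: no lock, just the RCU critical section */
	rcu_read_lock_bh();
	list_for_each_entry_rcu(e, &head, list) {
		/* inspect e */
	}
	rcu_read_unlock_bh();

	/* write side: the spin_lock is writer-vs-writer exclusion */
	spin_lock_bh(&lock);
	list_add_tail_rcu(&new->list, &head);
	spin_unlock_bh(&lock);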

Bye,
Hannes