Date:   Fri, 30 Mar 2018 15:18:14 +0800
From:   Herbert Xu <herbert@...dor.apana.org.au>
To:     Eric Dumazet <edumazet@...gle.com>
Cc:     "David S . Miller" <davem@...emloft.net>,
        netdev <netdev@...r.kernel.org>, Florian Westphal <fw@...len.de>,
        Thomas Graf <tgraf@...g.ch>,
        Jesper Dangaard Brouer <brouer@...hat.com>,
        Alexander Aring <alex.aring@...il.com>,
        Stefan Schmidt <stefan@....samsung.com>,
        Eric Dumazet <eric.dumazet@...il.com>,
        Nikolay Aleksandrov <nikolay@...hat.com>
Subject: Re: [PATCH net-next 4/6] inet: frags: use rhashtables for reassembly units

On Thu, Mar 29, 2018 at 10:22:39PM -0700, Eric Dumazet wrote:
>
>  void inet_frags_exit_net(struct netns_frags *nf)
>  {
> -	struct inet_frags *f =nf->f;
> -	unsigned int seq;
> -	int i;
> -
> -	nf->low_thresh = 0;
> +	struct rhashtable_iter hti;
> +	struct inet_frag_queue *fq;
>  
> -evict_again:
> -	local_bh_disable();
> -	seq = read_seqbegin(&f->rnd_seqlock);
> +	nf->low_thresh = 0; /* prevent creation of new frags */
>  
> -	for (i = 0; i < INETFRAGS_HASHSZ ; i++)
> -		inet_evict_bucket(f, &f->hash[i]);
> +	rhashtable_walk_enter(&nf->rhashtable, &hti);
> +	do {
> +		rhashtable_walk_start(&hti);
>  
> -	local_bh_enable();
> -	cond_resched();
> +		while ((fq = rhashtable_walk_next(&hti)) && !IS_ERR(fq)) {
> +			if (refcount_inc_not_zero(&fq->refcnt)) {
> +				spin_lock_bh(&fq->lock);
> +				inet_frag_kill(fq);
> +				spin_unlock_bh(&fq->lock);
> +				inet_frag_put(fq);
> +			}
> +		}
>  
> -	if (read_seqretry(&f->rnd_seqlock, seq) ||
> -	    sum_frag_mem_limit(nf))
> -		goto evict_again;
> +		rhashtable_walk_stop(&hti);
> +	} while (cond_resched(), fq == ERR_PTR(-EAGAIN));
> +	rhashtable_walk_exit(&hti);
> +	rhashtable_destroy(&nf->rhashtable);
>  }
>  EXPORT_SYMBOL(inet_frags_exit_net);

Instead of using the walk interface, how about
rhashtable_free_and_destroy?
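
For reference, a minimal sketch of that approach (inet_frags_free_cb is just an
illustrative name; the exact per-queue teardown would have to mirror what your
inet_frag_kill()/inet_frag_put() do, including the reference the timer holds):

static void inet_frags_free_cb(void *ptr, void *arg)
{
	struct inet_frag_queue *fq = ptr;

	/* If the timer cannot be cancelled it is already running and
	 * will release the queue itself; otherwise we now own the
	 * timer's reference.
	 */
	if (!del_timer(&fq->timer))
		return;

	spin_lock_bh(&fq->lock);
	if (!(fq->flags & INET_FRAG_COMPLETE)) {
		fq->flags |= INET_FRAG_COMPLETE;
		refcount_dec(&fq->refcnt);	/* hash table's reference */
	}
	spin_unlock_bh(&fq->lock);

	inet_frag_put(fq);			/* timer's reference */
}

void inet_frags_exit_net(struct netns_frags *nf)
{
	nf->low_thresh = 0;	/* prevent creation of new frags */

	rhashtable_free_and_destroy(&nf->rhashtable,
				    inet_frags_free_cb, NULL);
}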

>  void inet_frag_kill(struct inet_frag_queue *fq)
>  {
>  	if (del_timer(&fq->timer))
>  		refcount_dec(&fq->refcnt);
>  
>  	if (!(fq->flags & INET_FRAG_COMPLETE)) {
> -		fq_unlink(fq);
> +		struct netns_frags *nf = fq->net;
> +
> +		fq->flags |= INET_FRAG_COMPLETE;
> +		rhashtable_remove_fast(&nf->rhashtable, &fq->node, nf->f->rhash_params);
>  		refcount_dec(&fq->refcnt);
>  	}
>  }

This means that the hash won't be inlined properly.  I don't know how
big of an issue that is for you.  But you could fix it by doing the
same hack as rhashtable: make inet_frag_kill an inline function and
take the rhash_params as an explicit argument, as sketched below.
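
Roughly the same trick rhashtable uses for its _fast helpers, i.e.
something like this in the header (a sketch only; where the per-protocol
const rhash_params lives and how it reaches the call sites is up to you):

static inline void inet_frag_kill(struct inet_frag_queue *fq,
				  const struct rhashtable_params params)
{
	if (del_timer(&fq->timer))
		refcount_dec(&fq->refcnt);

	if (!(fq->flags & INET_FRAG_COMPLETE)) {
		struct netns_frags *nf = fq->net;

		fq->flags |= INET_FRAG_COMPLETE;
		/* params is a compile-time constant at each call site,
		 * so the hashfn/obj_cmpfn can be inlined here.
		 */
		rhashtable_remove_fast(&nf->rhashtable, &fq->node, params);
		refcount_dec(&fq->refcnt);
	}
}

Each protocol would then call it with its own constant params, e.g. (name
purely illustrative):

	inet_frag_kill(&qp->q, ip4_rhash_params);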

> -static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
> -						struct inet_frags *f,
> -						void *arg)
> -{
> -	struct inet_frag_queue *q;
> +	timer_setup(&fq->timer, f->frag_expire, 0);
> +	spin_lock_init(&fq->lock);
> +	refcount_set(&fq->refcnt, 3);
> +	mod_timer(&fq->timer, jiffies + nf->timeout);
>  
> -	q = inet_frag_alloc(nf, f, arg);
> -	if (!q)
> +	err = rhashtable_insert_fast(&nf->rhashtable, &fq->node,
> +				     f->rhash_params);

Ditto.

> -struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
> -				       struct inet_frags *f, void *key,
> -				       unsigned int hash)
> +/* TODO : call from rcu_read_lock() and no longer use refcount_inc_not_zero() */
> +struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key)
>  {
> -	struct inet_frag_bucket *hb;
> -	struct inet_frag_queue *q;
> -	int depth = 0;
> -
> -	if (frag_mem_limit(nf) > nf->low_thresh)
> -		inet_frag_schedule_worker(f);
> -
> -	hash &= (INETFRAGS_HASHSZ - 1);
> -	hb = &f->hash[hash];
> -
> -	spin_lock(&hb->chain_lock);
> -	hlist_for_each_entry(q, &hb->chain, list) {
> -		if (q->net == nf && f->match(q, key)) {
> -			refcount_inc(&q->refcnt);
> -			spin_unlock(&hb->chain_lock);
> -			return q;
> -		}
> -		depth++;
> -	}
> -	spin_unlock(&hb->chain_lock);
> +	struct inet_frag_queue *fq;
>  
> -	if (depth <= INETFRAGS_MAXDEPTH)
> -		return inet_frag_create(nf, f, key);
> +	rcu_read_lock();
>  
> -	if (inet_frag_may_rebuild(f)) {
> -		if (!f->rebuild)
> -			f->rebuild = true;
> -		inet_frag_schedule_worker(f);
> +	fq = rhashtable_lookup(&nf->rhashtable, key, nf->f->rhash_params);

Ditto.

Thanks,
-- 
Email: Herbert Xu <herbert@...dor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
