Message-ID: <7654c76a-bc47-e0f7-7b94-90e36b337ee0@ucloud.cn>
Date:   Wed, 7 Aug 2019 07:36:55 +0800
From:   wenxu <wenxu@...oud.cn>
To:     Pablo Neira Ayuso <pablo@...filter.org>
Cc:     jakub.kicinski@...ronome.com, jiri@...nulli.us,
        netfilter-devel@...r.kernel.org, netdev@...r.kernel.org
Subject: Re: [PATCH net-next v6 5/6] flow_offload: support get multi-subsystem block


On 2019/8/7 0:10, Pablo Neira Ayuso wrote:
> On Sun, Aug 04, 2019 at 09:24:00PM +0800, wenxu@...oud.cn wrote:
>> diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h
>> index 8f1a7b8..6022dd0 100644
>> --- a/include/net/flow_offload.h
>> +++ b/include/net/flow_offload.h
> [...]
>> @@ -282,6 +282,8 @@ int flow_block_cb_setup_simple(struct flow_block_offload *f,
>>  }
>>  EXPORT_SYMBOL(flow_block_cb_setup_simple);
>>  
>> +static LIST_HEAD(block_ing_cb_list);
>> +
>>  static struct rhashtable indr_setup_block_ht;
>>  
>>  struct flow_indr_block_cb {
>> @@ -295,7 +297,6 @@ struct flow_indr_block_dev {
>>  	struct rhash_head ht_node;
>>  	struct net_device *dev;
>>  	unsigned int refcnt;
>> -	flow_indr_block_ing_cmd_t  *block_ing_cmd_cb;
>>  	struct list_head cb_list;
>>  };
>>  
>> @@ -389,6 +390,22 @@ static void flow_indr_block_cb_del(struct flow_indr_block_cb *indr_block_cb)
>>  	kfree(indr_block_cb);
>>  }
>>  
>> +static void flow_block_ing_cmd(struct net_device *dev,
>> +			       flow_indr_block_bind_cb_t *cb,
>> +			       void *cb_priv,
>> +			       enum flow_block_command command)
>> +{
>> +	struct flow_indr_block_ing_entry *entry;
>> +
>> +	rcu_read_lock();
>> +
> unnecessary empty line.
>
>> +	list_for_each_entry_rcu(entry, &block_ing_cb_list, list) {
>> +		entry->cb(dev, cb, cb_priv, command);
>> +	}
>> +
>> +	rcu_read_unlock();
> OK, there's rcu_read_lock here...
>
>> +}
>> +
>>  int __flow_indr_block_cb_register(struct net_device *dev, void *cb_priv,
>>  				  flow_indr_block_bind_cb_t *cb,
>>  				  void *cb_ident)
>> @@ -406,10 +423,8 @@ int __flow_indr_block_cb_register(struct net_device *dev, void *cb_priv,
>>  	if (err)
>>  		goto err_dev_put;
>>  
>> -	if (indr_dev->block_ing_cmd_cb)
>> -		indr_dev->block_ing_cmd_cb(dev, indr_block_cb->cb,
>> -					   indr_block_cb->cb_priv,
>> -					   FLOW_BLOCK_BIND);
>> +	flow_block_ing_cmd(dev, indr_block_cb->cb, indr_block_cb->cb_priv,
>> +			   FLOW_BLOCK_BIND);
>>  
>>  	return 0;
>>  
>> @@ -448,10 +463,8 @@ void __flow_indr_block_cb_unregister(struct net_device *dev,
>>  	if (!indr_block_cb)
>>  		return;
>>  
>> -	if (indr_dev->block_ing_cmd_cb)
>> -		indr_dev->block_ing_cmd_cb(dev, indr_block_cb->cb,
>> -					   indr_block_cb->cb_priv,
>> -					   FLOW_BLOCK_UNBIND);
>> +	flow_block_ing_cmd(dev, indr_block_cb->cb, indr_block_cb->cb_priv,
>> +			   FLOW_BLOCK_UNBIND);
>>  
>>  	flow_indr_block_cb_del(indr_block_cb);
>>  	flow_indr_block_dev_put(indr_dev);
>> @@ -469,7 +482,6 @@ void flow_indr_block_cb_unregister(struct net_device *dev,
>>  EXPORT_SYMBOL_GPL(flow_indr_block_cb_unregister);
>>  
>>  void flow_indr_block_call(struct net_device *dev,
>> -			  flow_indr_block_ing_cmd_t cb,
>>  			  struct flow_block_offload *bo,
>>  			  enum flow_block_command command)
>>  {
>> @@ -480,15 +492,24 @@ void flow_indr_block_call(struct net_device *dev,
>>  	if (!indr_dev)
>>  		return;
>>  
>> -	indr_dev->block_ing_cmd_cb = command == FLOW_BLOCK_BIND
>> -				     ? cb : NULL;
>> -
>>  	list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list)
>>  		indr_block_cb->cb(dev, indr_block_cb->cb_priv, TC_SETUP_BLOCK,
>>  				  bo);
>>  }
>>  EXPORT_SYMBOL_GPL(flow_indr_block_call);
>>  
>> +void flow_indr_add_block_ing_cb(struct flow_indr_block_ing_entry *entry)
>> +{
> ... but registration does not protect the list with a mutex.
>
>> +	list_add_tail_rcu(&entry->list, &block_ing_cb_list);
>> +}
>> +EXPORT_SYMBOL_GPL(flow_indr_add_block_ing_cb);

flow_indr_add_block_ing_cb() is called from tc and nft in a different order,
from subsys_initcall(tc_filter_init) and from nf_tables_module_init().
Could they be called at the same time?

And nft will also need a flow_indr_del_block_ing_cb(). Does it need the lock as well?
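
Something like the sketch below is what I have in mind -- just an
illustration on top of this patch, not part of it: the mutex name
block_ing_cb_lock and the flow_indr_del_block_ing_cb() helper are my
assumption here. Writers take the mutex, while readers in
flow_block_ing_cmd() stay under rcu_read_lock():

/* Illustration only: block_ing_cb_list and struct
 * flow_indr_block_ing_entry are the ones added by this patch; the
 * mutex and flow_indr_del_block_ing_cb() are assumed for the example.
 */
static DEFINE_MUTEX(block_ing_cb_lock);

void flow_indr_add_block_ing_cb(struct flow_indr_block_ing_entry *entry)
{
	mutex_lock(&block_ing_cb_lock);
	list_add_tail_rcu(&entry->list, &block_ing_cb_list);
	mutex_unlock(&block_ing_cb_lock);
}
EXPORT_SYMBOL_GPL(flow_indr_add_block_ing_cb);

void flow_indr_del_block_ing_cb(struct flow_indr_block_ing_entry *entry)
{
	mutex_lock(&block_ing_cb_lock);
	list_del_rcu(&entry->list);
	mutex_unlock(&block_ing_cb_lock);
	/* flow_block_ing_cmd() walks the list under rcu_read_lock(),
	 * so the caller must wait a grace period (synchronize_rcu())
	 * before freeing or reusing @entry.
	 */
}
EXPORT_SYMBOL_GPL(flow_indr_del_block_ing_cb);

That would serialize registration from tc and nft on the writer side
while keeping the read side in flow_block_ing_cmd() lock-free.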
