Message-ID: <5102145D.7030607@redhat.com>
Date:	Fri, 25 Jan 2013 13:13:01 +0800
From:	Jason Wang <jasowang@...hat.com>
To:	gaowanlong@...fujitsu.com
CC:	linux-kernel@...r.kernel.org,
	Rusty Russell <rusty@...tcorp.com.au>,
	"Michael S. Tsirkin" <mst@...hat.com>,
	Eric Dumazet <erdnetdev@...il.com>,
	virtualization@...ts.linux-foundation.org, netdev@...r.kernel.org
Subject: Re: [PATCH V6 2/3] virtio-net: split out clean affinity function

On 01/25/2013 12:20 PM, Wanlong Gao wrote:
> On 01/25/2013 11:28 AM, Jason Wang wrote:
>> On 01/21/2013 07:25 PM, Wanlong Gao wrote:
>>> Split out the clean affinity function to virtnet_clean_affinity().
>>>
>>> Cc: Rusty Russell <rusty@...tcorp.com.au>
>>> Cc: "Michael S. Tsirkin" <mst@...hat.com>
>>> Cc: Jason Wang <jasowang@...hat.com>
>>> Cc: Eric Dumazet <erdnetdev@...il.com>
>>> Cc: virtualization@...ts.linux-foundation.org
>>> Cc: netdev@...r.kernel.org
>>> Signed-off-by: Wanlong Gao <gaowanlong@...fujitsu.com>
>>> ---
>>> V5->V6: NEW
>>>
>>>  drivers/net/virtio_net.c | 67 +++++++++++++++++++++++++++---------------------
>>>  1 file changed, 38 insertions(+), 29 deletions(-)
>>>
>>> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
>>> index 70cd957..1a35a8c 100644
>>> --- a/drivers/net/virtio_net.c
>>> +++ b/drivers/net/virtio_net.c
>>> @@ -1016,48 +1016,57 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
>>>  	return 0;
>>>  }
>>>  
>>> -static void virtnet_set_affinity(struct virtnet_info *vi, bool set)
>>> +static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
>>>  {
>>>  	int i;
>>>  	int cpu;
>>>  
>>> -	/* In multiqueue mode, when the number of cpu is equal to the number of
>>> -	 * queue pairs, we let the queue pairs to be private to one cpu by
>>> -	 * setting the affinity hint to eliminate the contention.
>>> -	 */
>>> -	if ((vi->curr_queue_pairs == 1 ||
>>> -	     vi->max_queue_pairs != num_online_cpus()) && set) {
>>> -		if (vi->affinity_hint_set)
>>> -			set = false;
>>> -		else
>>> -			return;
>>> -	}
>>> -
>>> -	if (set) {
>>> -		i = 0;
>>> -		for_each_online_cpu(cpu) {
>>> -			virtqueue_set_affinity(vi->rq[i].vq, cpu);
>>> -			virtqueue_set_affinity(vi->sq[i].vq, cpu);
>>> -			*per_cpu_ptr(vi->vq_index, cpu) = i;
>>> -			i++;
>>> -		}
>>> -
>>> -		vi->affinity_hint_set = true;
>>> -	} else {
>>> -		for(i = 0; i < vi->max_queue_pairs; i++) {
>>> +	if (vi->affinity_hint_set) {
>>> +		for (i = 0; i < vi->max_queue_pairs; i++) {
>>>  			virtqueue_set_affinity(vi->rq[i].vq, -1);
>>>  			virtqueue_set_affinity(vi->sq[i].vq, -1);
>>>  		}
>>>  
>>>  		i = 0;
>>> -		for_each_online_cpu(cpu)
>>> +		for_each_online_cpu(cpu) {
>>> +			if (cpu == hcpu)
>>> +				continue;
>>>  			*per_cpu_ptr(vi->vq_index, cpu) =
>>>  				++i % vi->curr_queue_pairs;
>>> +		}
>>>  
>> Some questions here:
>>
>> - Do we need to reset the affinity of hcpu's queues here, like this?
>>
>> virtqueue_set_affinity(vi->sq[*per_cpu_ptr(vi->vq_index, hcpu)].vq, -1);
>> virtqueue_set_affinity(vi->rq[*per_cpu_ptr(vi->vq_index, hcpu)].vq, -1);
> I think not; we are going to unset the affinity of all the queues whose
> hint was set, including hcpu's.
>
>> - Looks like we also need to reset the percpu index when
>> vi->affinity_hint_set is false.
> Yes, I will follow this and the comment on [1/3].
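
Just to be sure we mean the same change, here is a rough sketch for
illustration only (not the code I expect you to post verbatim): move the
per-cpu index rewrite out of the affinity_hint_set check so it always
runs, and keep only the virtqueue_set_affinity() calls behind it:

/* Sketch only: reset the preferable-queue index unconditionally. */
static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
{
	int i;
	int cpu;

	if (vi->affinity_hint_set) {
		for (i = 0; i < vi->max_queue_pairs; i++) {
			virtqueue_set_affinity(vi->rq[i].vq, -1);
			virtqueue_set_affinity(vi->sq[i].vq, -1);
		}
		vi->affinity_hint_set = false;
	}

	/* Spread the remaining online CPUs over the current queue pairs,
	 * skipping the CPU that is going away (hcpu == -1 means none).
	 */
	i = 0;
	for_each_online_cpu(cpu) {
		if (cpu == hcpu)
			continue;
		*per_cpu_ptr(vi->vq_index, cpu) = ++i % vi->curr_queue_pairs;
	}
}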
>
>> - Do we really need this reset here, considering we're going to reset
>> the percpu index in CPU_DEAD?
> I think resetting at CPU_DOWN_PREPARE can avoid selecting the wrong queue
> on the dying CPU.

I don't understand this. What does 'wrong queue' mean here? It looks like
you didn't change the preferable queue of the dying CPU, only all the
others.
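
To make my question concrete, here is a hypothetical callback shape for
illustration (the nb member, the function name, and the exact action list
are my assumptions, not necessarily what your 3/3 patch does):

/* Hypothetical sketch of the hotplug callback, illustration only. */
static int virtnet_cpu_callback(struct notifier_block *nfb,
				unsigned long action, void *hcpu)
{
	struct virtnet_info *vi = container_of(nfb, struct virtnet_info, nb);

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
	case CPU_DEAD:
		virtnet_set_affinity(vi);
		break;
	case CPU_DOWN_PREPARE:
		/* This rewrites the index of every online CPU except the one
		 * going down; the dying CPU keeps its old preferable queue
		 * until CPU_DEAD.
		 */
		virtnet_clean_affinity(vi, (long)hcpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

With that shape, *per_cpu_ptr(vi->vq_index, hcpu) is untouched at
CPU_DOWN_PREPARE, so the dying CPU's own queue selection does not change;
that is the part I do not follow.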
>
> Thanks,
> Wanlong Gao
>
>> Thanks
>>>  		vi->affinity_hint_set = false;
>>>  	}
>>>  }
>>>  
>>> +static void virtnet_set_affinity(struct virtnet_info *vi)
>>> +{
>>> +	int i;
>>> +	int cpu;
>>> +
>>> +	/* In multiqueue mode, when the number of cpu is equal to the number of
>>> +	 * queue pairs, we let the queue pairs to be private to one cpu by
>>> +	 * setting the affinity hint to eliminate the contention.
>>> +	 */
>>> +	if (vi->curr_queue_pairs == 1 ||
>>> +	    vi->max_queue_pairs != num_online_cpus()) {
>>> +		if (vi->affinity_hint_set)
>>> +			virtnet_clean_affinity(vi, -1);
>>> +		else
>>> +			return;
>>> +	}
>>> +
>>> +	i = 0;
>>> +	for_each_online_cpu(cpu) {
>>> +		virtqueue_set_affinity(vi->rq[i].vq, cpu);
>>> +		virtqueue_set_affinity(vi->sq[i].vq, cpu);
>>> +		*per_cpu_ptr(vi->vq_index, cpu) = i;
>>> +		i++;
>>> +	}
>>> +
>>> +	vi->affinity_hint_set = true;
>>> +}
>>> +
>>>  static void virtnet_get_ringparam(struct net_device *dev,
>>>  				struct ethtool_ringparam *ring)
>>>  {
>>> @@ -1105,7 +1114,7 @@ static int virtnet_set_channels(struct net_device *dev,
>>>  		netif_set_real_num_rx_queues(dev, queue_pairs);
>>>  
>>>  		get_online_cpus();
>>> -		virtnet_set_affinity(vi, true);
>>> +		virtnet_set_affinity(vi);
>>>  		put_online_cpus();
>>>  	}
>>>  
>>> @@ -1274,7 +1283,7 @@ static void virtnet_del_vqs(struct virtnet_info *vi)
>>>  {
>>>  	struct virtio_device *vdev = vi->vdev;
>>>  
>>> -	virtnet_set_affinity(vi, false);
>>> +	virtnet_clean_affinity(vi, -1);
>>>  
>>>  	vdev->config->del_vqs(vdev);
>>>  
>>> @@ -1398,7 +1407,7 @@ static int init_vqs(struct virtnet_info *vi)
>>>  		goto err_free;
>>>  
>>>  	get_online_cpus();
>>> -	virtnet_set_affinity(vi, true);
>>> +	virtnet_set_affinity(vi);
>>>  	put_online_cpus();
>>>  
>>>  	return 0;
>>
