lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date:   Wed, 09 Dec 2020 20:50:21 -0800
From:   Saeed Mahameed <saeed@...nel.org>
To:     Huazhong Tan <tanhuazhong@...wei.com>, davem@...emloft.net
Cc:     netdev@...r.kernel.org, linux-kernel@...r.kernel.org,
        kuba@...nel.org, huangdaode@...wei.com,
        Jian Shen <shenjian15@...wei.com>
Subject: Re: [PATCH net-next 2/7] net: hns3: add support for tc mqprio
 offload

On Thu, 2020-12-10 at 11:42 +0800, Huazhong Tan wrote:
> From: Jian Shen <shenjian15@...wei.com>
> 
> Currently, the HNS3 driver only supports offload for tc number
> and prio_tc. This patch adds support for other qopts, including
> queues count and offset for each tc.
> 
> When enable tc mqprio offload, it's not allowed to change
> queue numbers by ethtool. For hardware limitation, the queue
> number of each tc should be power of 2.
> 
> For the queues is not assigned to each tc by average, so it's
> should return vport->alloc_tqps for hclge_get_max_channels().
> 

The commit message needs some improvement; it is not really clear what
the last two sentences are about.

> Signed-off-by: Jian Shen <shenjian15@...wei.com>
> Signed-off-by: Huazhong Tan <tanhuazhong@...wei.com>
> ---
> 
... 

>  
> +	if (kinfo->tc_info.mqprio_active) {
> +		dev_err(&netdev->dev,

Why not use netdev_err() and friends?
Anyway, I see your driver is using dev_err(&netdev->dev, ...)
extensively;
maybe submit a follow-up patch to fix all your prints?

[...]
>  
> +static int hclge_mqprio_qopt_check(struct hclge_dev *hdev,
> +				   struct tc_mqprio_qopt_offload
> *mqprio_qopt)
> +{
> +	u16 queue_sum = 0;
> +	int ret;
> +	int i;
> +
> +	if (!mqprio_qopt->qopt.num_tc) {
> +		mqprio_qopt->qopt.num_tc = 1;
> +		return 0;
> +	}
> +
> +	ret = hclge_dcb_common_validate(hdev, mqprio_qopt->qopt.num_tc,
> +					mqprio_qopt->qopt.prio_tc_map);
> +	if (ret)
> +		return ret;
> +
> +	for (i = 0; i < mqprio_qopt->qopt.num_tc; i++) {
> +		if (!is_power_of_2(mqprio_qopt->qopt.count[i])) {
> +			dev_err(&hdev->pdev->dev,
> +				"qopt queue count must be power of
> 2\n");
> +			return -EINVAL;
> +		}
> +
> +		if (mqprio_qopt->qopt.count[i] > hdev->rss_size_max) {
> +			dev_err(&hdev->pdev->dev,
> +				"qopt queue count should be no more
> than %u\n",
> +				hdev->rss_size_max);
> +			return -EINVAL;
> +		}
> +
> +		if (mqprio_qopt->qopt.offset[i] != queue_sum) {
> +			dev_err(&hdev->pdev->dev,
> +				"qopt queue offset must start from 0,
> and being continuous\n");
> +			return -EINVAL;
> +		}
> +
> +		if (mqprio_qopt->min_rate[i] || mqprio_qopt-
> >max_rate[i]) {
> +			dev_err(&hdev->pdev->dev,
> +				"qopt tx_rate is not supported\n");
> +			return -EOPNOTSUPP;
> +		}
> +
> +		queue_sum = mqprio_qopt->qopt.offset[i];
> +		queue_sum += mqprio_qopt->qopt.count[i];

It would make more sense if you moved this queue summing outside of the
loop.

> +	}
> +	if (hdev->vport[0].alloc_tqps < queue_sum) {

Can't you just allocate new tqps according to the new mqprio input like
other drivers do? How does the user allocate those tqps?

> +		dev_err(&hdev->pdev->dev,
> +			"qopt queue count sum should be less than
> %u\n",
> +			hdev->vport[0].alloc_tqps);
> +		return -EINVAL;
> +	}
> +
> +	return 0;
> +}
> +
> +static void hclge_sync_mqprio_qopt(struct hnae3_tc_info *tc_info,
> +				   struct tc_mqprio_qopt_offload
> *mqprio_qopt)
> +{
> +	int i;
> +
> +	memset(tc_info, 0, sizeof(*tc_info));
> +	tc_info->num_tc = mqprio_qopt->qopt.num_tc;
> +	memcpy(tc_info->prio_tc, mqprio_qopt->qopt.prio_tc_map,
> +	       sizeof_field(struct hnae3_tc_info, prio_tc));
> +	memcpy(tc_info->tqp_count, mqprio_qopt->qopt.count,
> +	       sizeof_field(struct hnae3_tc_info, tqp_count));
> +	memcpy(tc_info->tqp_offset, mqprio_qopt->qopt.offset,
> +	       sizeof_field(struct hnae3_tc_info, tqp_offset));
> +

Isn't it much easier to just store a copy of tc_mqprio_qopt in your
tc_info and then just do:
tc_info->qopt = mqprio->qopt;

[...] 
> -	hclge_tm_schd_info_update(hdev, tc);
> -	hclge_tm_prio_tc_info_update(hdev, prio_tc);
> -
> -	ret = hclge_tm_init_hw(hdev, false);
> -	if (ret)
> -		goto err_out;
> +	kinfo = &vport->nic.kinfo;
> +	memcpy(&old_tc_info, &kinfo->tc_info, sizeof(old_tc_info));

If those are of the same type, a plain struct assignment would be much
cleaner than memcpy().
> +	hclge_sync_mqprio_qopt(&kinfo->tc_info, mqprio_qopt);
> +	kinfo->tc_info.mqprio_active = tc > 0;
>  
> -	ret = hclge_client_setup_tc(hdev);
> +	ret = hclge_config_tc(hdev, &kinfo->tc_info);
>  	if (ret)
>  		goto err_out;
>  
> @@ -436,6 +534,12 @@ static int hclge_setup_tc(struct hnae3_handle
> *h, u8 tc, u8 *prio_tc)
>  	return hclge_notify_init_up(hdev);
>  
>  err_out:
> +	/* roll-back */
> +	memcpy(&kinfo->tc_info, &old_tc_info, sizeof(old_tc_info));
Same comment as above: a plain struct assignment would be cleaner here too.


Powered by blists - more mailing lists