Message-ID: <c1f55917-c7b9-140e-29ed-937f1c2b656e@nvidia.com>
Date:   Tue, 22 Mar 2022 11:07:32 -0700
From:   Dipen Patel <dipenp@...dia.com>
To:     Hillf Danton <hdanton@...a.com>
Cc:     linux-kernel@...r.kernel.org, robh+dt@...nel.org
Subject: Re: [PATCH v4 02/11] drivers: Add hardware timestamp engine (HTE)

Hi Hillf,

The reason I used a kernel thread is that it is not shared with anyone else and can deliver the payload as early as possible, i.e. as soon as the scheduler decides to run it. The system_unbound_wq workqueue is shared by multiple subsystems, so delivery depends on the work items already queued ahead of it. Is it OK if I retain the thread approach?
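
To illustrate the trade-off, here is a minimal sketch of the two delivery paths being compared (field names follow the patch, but the struct is trimmed and the callback signature simplified for the example; this is not part of the patch itself):

#include <linux/sched.h>
#include <linux/workqueue.h>

struct hte_ts_info_sketch {
	struct task_struct *thread;	/* dedicated kthread, current approach */
	struct work_struct cb_work;	/* work item, suggested alternative */
	void (*tcb)(void *cl_data);	/* threaded callback (simplified) */
	void *cl_data;
};

/* Current approach: a per-line kthread owned by this consumer alone. */
static void deliver_via_kthread(struct hte_ts_info_sketch *ei)
{
	/*
	 * Only this consumer's callback runs on the thread, so the
	 * payload is handled as soon as the scheduler runs the thread.
	 */
	wake_up_process(ei->thread);
}

/* Suggested alternative: a work item on the shared unbound workqueue. */
static void deliver_via_workqueue(struct hte_ts_info_sketch *ei)
{
	/*
	 * system_unbound_wq is shared; cb_work runs after whatever work
	 * items other subsystems have already queued on it.
	 */
	queue_work(system_unbound_wq, &ei->cb_work);
}
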

On 2/1/22 7:52 PM, Hillf Danton wrote:
> On Tue, 1 Feb 2022 14:26:21 -0800 Dipen Patel wrote:
>> +/**
>> + * hte_release_ts() - Release and disable timestamp for the given desc.
>> + *
>> + * @desc: timestamp descriptor.
>> + *
>> + * Context: debugfs_remove_recursive() function call may use sleeping locks,
>> + *	    not suitable from atomic context.
>> + * Returns: 0 on success or a negative error code on failure.
>> + */
>> +int hte_release_ts(struct hte_ts_desc *desc)
>> +{
>> +	int ret = 0;
>> +	unsigned long flag;
>> +	struct hte_device *gdev;
>> +	struct hte_ts_info *ei;
>> +
>> +	if (!desc)
>> +		return -EINVAL;
>> +
>> +	ei = desc->hte_data;
>> +
>> +	if (!ei || !ei->gdev)
>> +		return -EINVAL;
>> +
>> +	gdev = ei->gdev;
>> +
>> +	mutex_lock(&ei->req_mlock);
>> +
>> +	if (!test_bit(HTE_TS_REGISTERED, &ei->flags)) {
>> +		dev_info(gdev->sdev, "id:%d is not registered",
>> +			 desc->attr.line_id);
>> +		ret = -EUSERS;
>> +		goto unlock;
>> +	}
>> +
>> +	ret = gdev->chip->ops->release(gdev->chip, desc, ei->xlated_id);
>> +	if (ret) {
>> +		dev_err(gdev->sdev, "id: %d free failed\n",
>> +			desc->attr.line_id);
>> +		goto unlock;
>> +	}
>> +
>> +	kfree(ei->line_name);
>> +
>> +	debugfs_remove_recursive(ei->ts_dbg_root);
>> +
>> +	spin_lock_irqsave(&ei->slock, flag);
>> +
>> +	atomic_dec(&gdev->ts_req);
>> +	atomic_set(&ei->dropped_ts, 0);
>> +
>> +	ei->seq = 1;
>> +	desc->hte_data = NULL;
>> +
>> +	clear_bit(HTE_TS_REGISTERED, &ei->flags);
>> +
>> +	spin_unlock_irqrestore(&ei->slock, flag);
>> +
>> +	if (ei->tcb) {
>> +		kthread_stop(ei->thread);
>> +		put_task_struct(ei->thread);
> The code becomes simpler if the thread is replaced with a workqueue work
> alternatively.
>
> 		cancel_work_sync(&ei->cb_work);
>> +	}
>> +
>> +	ei->cb = NULL;
>> +	ei->tcb = NULL;
>> +	ei->thread = NULL;
>> +	ei->cl_data = NULL;
>> +
>> +	module_put(gdev->owner);
>> +unlock:
>> +	mutex_unlock(&ei->req_mlock);
>> +	dev_dbg(gdev->sdev, "release id: %d\n", desc->attr.line_id);
>> +
>> +	return ret;
>> +}
>> +EXPORT_SYMBOL_GPL(hte_release_ts);
> ...
>> +
>> +static int _hte_threadfn(void *data)
>> +{
>> +	struct hte_ts_info *ei = data;
>> +
>> +	while (!_hte_wait_for_ts_data(ei))
>> +		ei->tcb(ei->cl_data);
>> +
>> +	return 0;
>> +}
>> +
> static void hte_do_cb_work(struct work_struct *w)
> {
> 	struct hte_ts_info *ei = container_of(w, struct hte_ts_info, cb_work);
>
> 	ei->tcb(ei->cl_data);
> }
>
>> +static int _hte_setup_thread(struct hte_ts_info *ei, u32 id)
>> +{
>> +	struct task_struct *t;
>> +
>> +	t = kthread_create(_hte_threadfn, ei, "hte-%u", id);
>> +	if (IS_ERR(t))
>> +		return PTR_ERR(t);
>> +
>> +	ei->thread = get_task_struct(t);
>> +
>> +	return 0;
>> +}
>> +
>> +static int ___hte_req_ts(struct hte_device *gdev, struct hte_ts_desc *desc,
>> +			 u32 xlated_id, hte_ts_cb_t cb,
>> +			 hte_ts_threaded_cb_t tcb, void *data)
>> +{
>> +	struct hte_ts_info *ei;
>> +	int ret;
>> +
>> +	if (!try_module_get(gdev->owner))
>> +		return -ENODEV;
>> +
>> +	ei = &gdev->ei[xlated_id];
>> +	ei->xlated_id = xlated_id;
>> +
>> +	/*
>> +	 * There is a chance that multiple consumers requesting same entity,
>> +	 * lock here.
>> +	 */
>> +	mutex_lock(&ei->req_mlock);
>> +
>> +	if (test_bit(HTE_TS_REGISTERED, &ei->flags)) {
>> +		dev_dbg(gdev->chip->dev, "id:%u is already registered",
>> +			xlated_id);
>> +		ret = -EUSERS;
>> +		goto unlock;
>> +	}
>> +
>> +	ei->cb = cb;
>> +	ei->tcb = tcb;
>> +	if (tcb) {
> 		INIT_WORK(&ei->cb_work, hte_do_cb_work);
>
>> +		ret = _hte_setup_thread(ei, xlated_id);
>> +		if (ret < 0) {
>> +			dev_err(gdev->chip->dev, "setting thread failed\n");
>> +			goto unlock;
>> +		}
>> +	}
>> +
>> +	ret = gdev->chip->ops->request(gdev->chip, desc, xlated_id);
>> +	if (ret < 0) {
>> +		dev_err(gdev->chip->dev, "ts request failed\n");
>> +		goto unlock;
>> +	}
>> +
>> +	desc->hte_data = ei;
>> +	ei->cl_data = data;
>> +	ei->seq = 1;
>> +
>> +	atomic_inc(&gdev->ts_req);
>> +
>> +	ei->line_name = NULL;
>> +	if (!desc->attr.name) {
>> +		ei->line_name = kzalloc(HTE_TS_NAME_LEN, GFP_KERNEL);
>> +		if (ei->line_name)
>> +			scnprintf(ei->line_name, HTE_TS_NAME_LEN, "ts_%u",
>> +				  desc->attr.line_id);
>> +	}
>> +
>> +	hte_ts_dbgfs_init(desc->attr.name == NULL ?
>> +			  ei->line_name : desc->attr.name, ei);
>> +	set_bit(HTE_TS_REGISTERED, &ei->flags);
>> +
>> +	mutex_unlock(&ei->req_mlock);
>> +
>> +	dev_dbg(gdev->chip->dev, "id: %u, xlated id:%u",
>> +		desc->attr.line_id, xlated_id);
>> +
>> +	return 0;
>> +
>> +unlock:
>> +	module_put(gdev->owner);
>> +	mutex_unlock(&ei->req_mlock);
>> +
>> +	return ret;
>> +}
> ...
>> +/**
>> + * hte_push_ts_ns() - Push timestamp data in nanoseconds.
>> + *
>> + * It is used by the provider to push timestamp data.
>> + *
>> + * @chip: The HTE chip, used during the registration.
>> + * @xlated_id: entity id understood by both subsystem and provider, this is
>> + * obtained from xlate callback during request API.
>> + * @data: timestamp data.
>> + *
>> + * Returns: 0 on success or a negative error code on failure.
>> + */
>> +int hte_push_ts_ns(const struct hte_chip *chip, u32 xlated_id,
>> +		   struct hte_ts_data *data)
>> +{
>> +	hte_return_t ret;
>> +	int st = 0;
>> +	struct hte_ts_info *ei;
>> +	unsigned long flag;
>> +
>> +	if (!chip || !data || !chip->gdev)
>> +		return -EINVAL;
>> +
>> +	if (xlated_id > chip->nlines)
>> +		return -EINVAL;
>> +
>> +	ei = &chip->gdev->ei[xlated_id];
>> +
>> +	spin_lock_irqsave(&ei->slock, flag);
>> +
>> +	/* timestamp sequence counter */
>> +	data->seq = ei->seq++;
>> +
>> +	if (!test_bit(HTE_TS_REGISTERED, &ei->flags) ||
>> +	    test_bit(HTE_TS_DISABLE, &ei->flags)) {
>> +		dev_dbg(chip->dev, "Unknown timestamp push\n");
>> +		atomic_inc(&ei->dropped_ts);
>> +		st = -EINVAL;
>> +		goto unlock;
>> +	}
>> +
>> +	ret = ei->cb(data, ei->cl_data);
>> +	if (ret == HTE_RUN_THREADED_CB && ei->thread) {
>> +		if (test_and_set_bit(HTE_CB_RUN_THREAD, &ei->hte_cb_flags))
>> +			goto unlock;
>> +		else
>> +			wake_up_process(ei->thread);
> 			queue_work(system_unbound_wq, &ei->cb_work);
>> +	}
>> +
>> +unlock:
>> +	spin_unlock_irqrestore(&ei->slock, flag);
>> +
>> +	return st;
>> +}
>> +EXPORT_SYMBOL_GPL(hte_push_ts_ns);
