Message-Id: <20101217161756.664b8d8b.akpm@linux-foundation.org>
Date: Fri, 17 Dec 2010 16:17:56 -0800
From: Andrew Morton <akpm@...ux-foundation.org>
To: Alexander Gordeev <lasaine@....cs.msu.su>
Cc: linux-kernel@...r.kernel.org,
"Nikita V\. Youshchenko" <yoush@...msu.su>,
linuxpps@...enneenne.com, Rodolfo Giometti <giometti@...eenne.com>
Subject: Re: [PATCHv6 15/16] pps: add parallel port PPS client
On Fri, 17 Dec 2010 22:54:39 +0300
Alexander Gordeev <lasaine@....cs.msu.su> wrote:
> Add a parallel port PPS client. It uses the standard method for
> capturing timestamps on assert edge transitions: getting a timestamp
> soon after an interrupt has happened. This is not a very precise
> source of time information due to interrupt handling delays. However,
> timestamps for clear edge transitions are much more precise because
> the interrupt handler continuously polls the hardware port until the
> transition is done. Hardware port operations require only about 1us,
> so the maximum error should not exceed this value. This was my primary
> goal when developing this client.
> Clear edge capture can be disabled using the clear_wait parameter.
>
> ...
>
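For context, the clear_wait knob described above would typically be
wired up as a module parameter. A sketch of that plumbing follows; the
default value and the permission bits are assumptions, since the quoted
hunks below don't show this part of the patch:

#include <linux/module.h>

/* number of port reads to poll for the clear edge; 0 disables it
 * (the default of 100 is an assumption, not taken from the patch) */
static unsigned int clear_wait = 100;
module_param(clear_wait, uint, 0);
MODULE_PARM_DESC(clear_wait,
	"Maximum number of port reads when polling for the clear edge"
	" (0 disables clear edge capture)");
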
> +/* parport interrupt handler */
> +static void parport_irq(void *handle)
> +{
> + struct pps_event_time ts_assert, ts_clear;
> + struct pps_client_pp *dev = handle;
> + struct parport *port = dev->pardev->port;
> + unsigned int i;
> + unsigned long flags;
> +
> + /* first of all we get the time stamp... */
> + pps_get_ts(&ts_assert);
> +
> + if (dev->cw == 0)
> + /* clear edge capture disabled */
> + goto out_assert;
> +
> + /* try to capture the clear edge */
> + local_irq_save(flags);
> + /* check the signal (no signal means the pulse is lost this time) */
> + if (!signal_is_set(port)) {
> + local_irq_restore(flags);
> + dev_err(dev->pps->dev, "lost the signal\n");
> + goto out_assert;
> + }
> +
> + /* poll the port until the signal is unset */
> + for (i = dev->cw; i; i--)
> + if (!signal_is_set(port)) {
> + pps_get_ts(&ts_clear);
> + local_irq_restore(flags);
> + dev->cw_err = 0;
> + goto out_both;
> + }
> + local_irq_restore(flags);
Why is this function playing around with local_irq_save()? It's unusual
and looks buggy because local_irq_save() doesn't stop other CPUs from
taking an interrupt and coming in and playing with the "protected" data.
I have a feeling I asked this before, but you didn't add a comment
explaining it, so I went and asked it again! Cause, effect.
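
If the intent here is timing rather than locking (keeping other
interrupts on this CPU from stretching the polling loop so the clear
edge timestamp stays within the ~1us window the description promises),
a comment in the handler would answer the question. A sketch of what it
might say; this is an assumption about intent, not something the patch
states:

	/*
	 * Not data protection: this only keeps other interrupts on
	 * *this* CPU from stretching the polling loop below, so the
	 * clear edge timestamp stays within the ~1us error budget.
	 * Shared state must not rely on interrupts being off here.
	 */
	local_irq_save(flags);
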
> +static void parport_attach(struct parport *port)
> +{
> + struct pps_client_pp *device;
> + struct pps_source_info info = {
> + .name = KBUILD_MODNAME,
> + .path = "",
> + .mode = PPS_CAPTUREBOTH | \
> + PPS_OFFSETASSERT | PPS_OFFSETCLEAR | \
> + PPS_ECHOASSERT | PPS_ECHOCLEAR | \
> + PPS_CANWAIT | PPS_TSFMT_TSPEC,
> + .echo = pps_echo,
> + .owner = THIS_MODULE,
> + .dev = NULL
> + };
> +
> + device = kzalloc(sizeof(struct pps_client_pp), GFP_KERNEL);
> + if (!device) {
> + pr_err("memory allocation failed, not attaching\n");
> + return;
And the void-returning parport_driver.attach() still sucks.
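
For illustration, if parport_driver.attach() could return an int (a
hypothetical API change, not something parport offers today), the
allocation failure could be propagated instead of swallowed. A sketch:

/* hypothetical: an attach hook that can report failure to its caller */
static int parport_attach(struct parport *port)
{
	struct pps_client_pp *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return -ENOMEM;	/* the parport core could then log/unwind */
	/* ... registration steps as in the patch ... */
	return 0;
}
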
> + }
> +
> + device->pardev = parport_register_device(port, KBUILD_MODNAME,
> + NULL, NULL, parport_irq, 0, device);
> + if (!device->pardev) {
> + pr_err("couldn't register with %s\n", port->name);
> + goto err_free;
> + }
> +
> + if (parport_claim_or_block(device->pardev) < 0) {
> + pr_err("couldn't claim %s\n", port->name);
> + goto err_unregister_dev;
> + }
> +
> + device->pps = pps_register_source(&info,
> + PPS_CAPTUREBOTH | PPS_OFFSETASSERT | PPS_OFFSETCLEAR);
> + if (device->pps == NULL) {
> + pr_err("couldn't register PPS source\n");
> + goto err_release_dev;
> + }
> +
> + device->cw = clear_wait;
> +
> + port->ops->enable_irq(port);
> +
> + pr_info("attached to %s\n", port->name);
> +
> + return;
> +
> +err_release_dev:
> + parport_release(device->pardev);
> +err_unregister_dev:
> + parport_unregister_device(device->pardev);
> +err_free:
> + kfree(device);
> +}
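
For reference, the matching detach path (not part of the quoted hunks)
would conventionally unwind these steps in reverse order. A sketch,
assuming the same field names; find_client_for() is hypothetical and
stands in for however the elided code locates its per-port state:

static void parport_detach(struct parport *port)
{
	/* find_client_for() is hypothetical, see the note above */
	struct pps_client_pp *device = find_client_for(port);

	if (!device)
		return;

	pps_unregister_source(device->pps);
	parport_release(device->pardev);
	parport_unregister_device(device->pardev);
	kfree(device);
}
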
> +
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/