Message-ID: <20210119155703.7064800d@kicinski-fedora-pc1c0hjn.dhcp.thefacebook.com>
Date:   Tue, 19 Jan 2021 15:57:03 -0800
From:   Jakub Kicinski <kuba@...nel.org>
To:     Kurt Kanzenbach <kurt@...utronix.de>,
        Vladimir Oltean <olteanv@...il.com>
Cc:     Andrew Lunn <andrew@...n.ch>,
        Vivien Didelot <vivien.didelot@...il.com>,
        Florian Fainelli <f.fainelli@...il.com>,
        "David S. Miller" <davem@...emloft.net>,
        Vinicius Costa Gomes <vinicius.gomes@...el.com>,
        netdev@...r.kernel.org
Subject: Re: [PATCH v2 net-next 1/1] net: dsa: hellcreek: Add TAPRIO
 offloading support

On Sat, 16 Jan 2021 13:49:22 +0100 Kurt Kanzenbach wrote:
> The switch supports the 802.1Qbv Time Aware Shaper (TAS). Traffic schedules
> may be configured individually on each front port. Each port has eight
> egress queues, and traffic is mapped to a traffic class via the PCP field
> of a VLAN-tagged frame.
> 
> The TAPRIO Qdisc already implements that. Therefore, this interface can simply
> be reused. Add .port_setup_tc() accordingly.
> 
> The activation of a schedule on a port is split into two parts:
> 
>  * Programming the necessary gate control list (GCL)
>  * Setup delayed work for starting the schedule
> 
> The hardware supports starting a schedule up to eight seconds in the future,
> while the TAPRIO interface provides an absolute base time which may lie
> further out. Therefore, periodic delayed work is leveraged to check whether
> the schedule can be started yet.
> 
> Signed-off-by: Kurt Kanzenbach <kurt@...utronix.de>
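
IIUC the delayed-work half of this then looks roughly like the following
(paraphrasing the description above; helper names and the hellcreek_port
layout are my guesses, not a quote from the patch):

	static void hellcreek_schedule_work(struct work_struct *work)
	{
		struct delayed_work *dw = to_delayed_work(work);
		struct hellcreek_port *hellcreek_port;
		struct hellcreek *hellcreek;

		/* Assumes the port struct carries a back-pointer to the
		 * switch and its own port index.
		 */
		hellcreek_port = container_of(dw, struct hellcreek_port,
					      schedule_work);
		hellcreek = hellcreek_port->hellcreek;

		mutex_lock(&hellcreek->reg_lock);

		if (hellcreek_schedule_startable(hellcreek,
						 hellcreek_port->port)) {
			hellcreek_start_schedule(hellcreek,
						 hellcreek_port->port);
		} else {
			/* Not inside the eight second window yet, try again */
			schedule_delayed_work(&hellcreek_port->schedule_work,
					      HELLCREEK_SCHEDULE_PERIOD);
		}

		mutex_unlock(&hellcreek->reg_lock);
	}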

> +static bool hellcreek_schedule_startable(struct hellcreek *hellcreek, int port)
> +{
> +	struct hellcreek_port *hellcreek_port = &hellcreek->ports[port];
> +	s64 base_time_ns, current_ns;
> +
> +	/* The switch allows a schedule to be started at most eight seconds in
> +	 * the future. Therefore, check against the current PTP time whether
> +	 * the schedule is startable.
> +	 */
> +
> +	/* Use the "cached" time. That should be alright, as it's updated quite
> +	 * frequently in the PTP code.
> +	 */
> +	mutex_lock(&hellcreek->ptp_lock);
> +	current_ns = hellcreek->seconds * NSEC_PER_SEC + hellcreek->last_ts;
> +	mutex_unlock(&hellcreek->ptp_lock);
> +
> +	/* Calculate difference to admin base time */
> +	base_time_ns = ktime_to_ns(hellcreek_port->current_schedule->base_time);
> +
> +	if (base_time_ns - current_ns < (s64)8 * NSEC_PER_SEC)
> +		return true;
> +
> +	return false;

nit:
	return base_time_ns - current_ns < (s64)8 * NSEC_PER_SEC;

> +static int hellcreek_port_set_schedule(struct dsa_switch *ds, int port,
> +				       struct tc_taprio_qopt_offload *taprio)
> +{
> +	struct hellcreek *hellcreek = ds->priv;
> +	struct hellcreek_port *hellcreek_port;
> +	bool startable;
> +	u16 ctrl;
> +
> +	hellcreek_port = &hellcreek->ports[port];
> +
> +	dev_dbg(hellcreek->dev, "Configure traffic schedule on port %d\n",
> +		port);
> +
> +	/* First cancel delayed work */
> +	cancel_delayed_work_sync(&hellcreek_port->schedule_work);
> +
> +	mutex_lock(&hellcreek->reg_lock);
> +
> +	if (hellcreek_port->current_schedule) {
> +		taprio_offload_free(hellcreek_port->current_schedule);
> +		hellcreek_port->current_schedule = NULL;
> +	}
> +	hellcreek_port->current_schedule = taprio_offload_get(taprio);
> +
> +	/* Then select port */
> +	hellcreek_select_tgd(hellcreek, port);
> +
> +	/* Enable gating and keep defaults */
> +	ctrl = (0xff << TR_TGDCTRL_ADMINGATESTATES_SHIFT) | TR_TGDCTRL_GATE_EN;
> +	hellcreek_write(hellcreek, ctrl, TR_TGDCTRL);
> +
> +	/* Cancel pending schedule */
> +	hellcreek_write(hellcreek, 0x00, TR_ESTCMD);
> +
> +	/* Setup a new schedule */
> +	hellcreek_setup_gcl(hellcreek, port, hellcreek_port->current_schedule);
> +
> +	/* Configure cycle time */
> +	hellcreek_set_cycle_time(hellcreek, hellcreek_port->current_schedule);
> +
> +	/* Check starting time */
> +	startable = hellcreek_schedule_startable(hellcreek, port);
> +	if (startable) {
> +		hellcreek_start_schedule(hellcreek, port);
> +		mutex_unlock(&hellcreek->reg_lock);
> +		return 0;
> +	}
> +
> +	mutex_unlock(&hellcreek->reg_lock);
> +
> +	/* Schedule periodic schedule check */
> +	schedule_delayed_work(&hellcreek_port->schedule_work,
> +			      HELLCREEK_SCHEDULE_PERIOD);

Why schedule this work every 2 seconds rather than scheduling it once,
at $start_time - 8 sec + epsilon?
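
i.e. something like this (untested sketch; EPSILON_NS is a made-up fudge
factor for clock granularity):

	s64 delay_ns = base_time_ns - current_ns -
		       (s64)8 * NSEC_PER_SEC + EPSILON_NS;

	if (delay_ns < 0)
		delay_ns = 0;

	/* Fire once, right when the schedule enters the startable window */
	schedule_delayed_work(&hellcreek_port->schedule_work,
			      nsecs_to_jiffies(delay_ns));

Then the worker runs once instead of polling every period.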

> +static bool hellcreek_validate_schedule(struct hellcreek *hellcreek,
> +					struct tc_taprio_qopt_offload *schedule)
> +{
> +	/* Does this hellcreek version support Qbv in hardware? */
> +	if (!hellcreek->pdata->qbv_support)
> +		return false;
> +
> +	/* cycle time can only be 32bit */
> +	if (schedule->cycle_time > (u32)-1)
> +		return false;
> +
> +	/* cycle time extension is not supported */
> +	if (schedule->cycle_time_extension)
> +		return false;

What's the story with entries[i].command? I see most drivers validate
the command is what they expect.
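
Something like this in hellcreek_validate_schedule(), I'd assume (sketch;
TC_TAPRIO_CMD_SET_GATES is the plain gate operation from the taprio UAPI):

	size_t i;

	/* Reject anything but plain gate operations, e.g. if the hardware
	 * cannot do set-and-hold / set-and-release for frame preemption.
	 */
	for (i = 0; i < schedule->num_entries; i++)
		if (schedule->entries[i].command != TC_TAPRIO_CMD_SET_GATES)
			return false;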

> +	return true;
> +}
> +
> +static int hellcreek_port_setup_tc(struct dsa_switch *ds, int port,
> +				   enum tc_setup_type type, void *type_data)
> +{
> +	struct tc_taprio_qopt_offload *taprio = type_data;
> +	struct hellcreek *hellcreek = ds->priv;
> +
> +	if (type != TC_SETUP_QDISC_TAPRIO)
> +		return -EOPNOTSUPP;
> +
> +	if (!hellcreek_validate_schedule(hellcreek, taprio))
> +		return -EOPNOTSUPP;
> +
> +	if (taprio->enable)
> +		return hellcreek_port_set_schedule(ds, port, taprio);
> +
> +	return hellcreek_port_del_schedule(ds, port);
> +}
> +
>  static const struct dsa_switch_ops hellcreek_ds_ops = {
>  	.get_ethtool_stats   = hellcreek_get_ethtool_stats,
>  	.get_sset_count	     = hellcreek_get_sset_count,
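
Not quoted here, but per the commit message ("Add .port_setup_tc()
accordingly") the patch presumably also wires the handler up in this
struct, along the lines of:

	.port_setup_tc	     = hellcreek_port_setup_tc,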
