Message-ID: <9505941e-73a1-249b-08c2-83b86e7ef5c6@linux.intel.com>
Date:   Mon, 13 Feb 2023 12:15:44 -0600
From:   Pierre-Louis Bossart <pierre-louis.bossart@...ux.intel.com>
To:     Vijendar Mukunda <Vijendar.Mukunda@....com>, vkoul@...nel.org
Cc:     amadeuszx.slawinski@...ux.intel.com, Mario.Limonciello@....com,
        Sunil-kumar.Dommati@....com, Basavaraj.Hiregoudar@....com,
        Mastan.Katragadda@....com, Arungopal.kondaveeti@....com,
        Bard Liao <yung-chuan.liao@...ux.intel.com>,
        Sanyog Kale <sanyog.r.kale@...el.com>,
        "moderated list:SOUNDWIRE SUBSYSTEM" <alsa-devel@...a-project.org>,
        open list <linux-kernel@...r.kernel.org>
Subject: Re: [PATCH V2 5/8] soundwire: amd: add soundwire manager interrupt
 handling



On 2/13/23 03:40, Vijendar Mukunda wrote:
> Add support for handling soundwire manager interrupts.

Try using the MIPI spelling: SoundWire

> 
> Signed-off-by: Vijendar Mukunda <Vijendar.Mukunda@....com>
> Signed-off-by: Mastan Katragadda <Mastan.Katragadda@....com>
> ---
>  drivers/soundwire/amd_manager.c   | 132 ++++++++++++++++++++++++++++++
>  drivers/soundwire/amd_manager.h   |   1 +
>  include/linux/soundwire/sdw_amd.h |   7 ++
>  3 files changed, 140 insertions(+)
> 
> diff --git a/drivers/soundwire/amd_manager.c b/drivers/soundwire/amd_manager.c
> index 14c88b80ab6d..87f9a987d93a 100644
> --- a/drivers/soundwire/amd_manager.c
> +++ b/drivers/soundwire/amd_manager.c
> @@ -417,6 +417,47 @@ static enum sdw_command_response amd_sdw_xfer_msg(struct sdw_bus *bus, struct sd
>  	return SDW_CMD_OK;
>  }
>  
> +static void amd_sdw_process_ping_status(u64 response, struct amd_sdw_manager *amd_manager)
> +{
> +	u64 slave_stat = 0;

useless init

> +	u32 val = 0;

useless init
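
i.e. plain declarations would do here, since both variables are
assigned before they are read, e.g.:

	u64 slave_stat;
	u32 val;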

> +	u16 dev_index;
> +
> +	/* slave status response*/

missing space before the closing marker: "response */"

> +	slave_stat = FIELD_GET(AMD_SDW_MCP_SLAVE_STAT_0_3, response);
> +	slave_stat |= FIELD_GET(AMD_SDW_MCP_SLAVE_STAT_4_11, response) << 8;
> +
> +	dev_dbg(amd_manager->dev, "%s: slave_stat:0x%llx\n", __func__, slave_stat);

add a blank line before the loop?

> +	for (dev_index = 0; dev_index <= SDW_MAX_DEVICES; ++dev_index) {
> +		val = (slave_stat >> (dev_index * 2)) & AMD_SDW_MCP_SLAVE_STATUS_MASK;
> +		dev_dbg(amd_manager->dev, "%s val:0x%x\n", __func__, val);

you don't need __func__ in dev_dbg() logs; the function name can be
added at runtime, e.g. with the dynamic debug option dyndbg=+pmf
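
e.g. this would be enough:

	dev_dbg(amd_manager->dev, "val:0x%x\n", val);

and the function/module names can be enabled at runtime with something
like:

	echo "func amd_sdw_process_ping_status +pmf" > /sys/kernel/debug/dynamic_debug/control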

> +		switch (val) {
> +		case SDW_SLAVE_ATTACHED:
> +			amd_manager->status[dev_index] = SDW_SLAVE_ATTACHED;
> +			break;
> +		case SDW_SLAVE_UNATTACHED:
> +			amd_manager->status[dev_index] = SDW_SLAVE_UNATTACHED;
> +			break;
> +		case SDW_SLAVE_ALERT:
> +			amd_manager->status[dev_index] = SDW_SLAVE_ALERT;
> +			break;
> +		default:
> +			amd_manager->status[dev_index] = SDW_SLAVE_RESERVED;
> +			break;
> +		}
> +	}
> +}
> +
> +static void amd_sdw_read_and_process_ping_status(struct amd_sdw_manager *amd_manager)
> +{
> +	u64 response = 0;

useless init

> +
> +	mutex_lock(&amd_manager->bus.msg_lock);
> +	response = amd_sdw_send_cmd_get_resp(amd_manager, 0, 0);
> +	mutex_unlock(&amd_manager->bus.msg_lock);
> +	amd_sdw_process_ping_status(response, amd_manager);
> +}
> +
>  static u32 amd_sdw_read_ping_status(struct sdw_bus *bus)
>  {
>  	struct amd_sdw_manager *amd_manager = to_amd_sdw(bus);
> @@ -817,6 +858,95 @@ static int amd_sdw_register_dais(struct amd_sdw_manager *amd_manager)
>  					       dais, num_dais);
>  }
>  
> +static void amd_sdw_update_slave_status_work(struct work_struct *work)
> +{
> +	struct amd_sdw_manager *amd_manager =
> +		container_of(work, struct amd_sdw_manager, amd_sdw_work);
> +	int retry_count = 0;
> +
> +	if (amd_manager->status[0] == SDW_SLAVE_ATTACHED) {
> +		acp_reg_writel(0, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_MASK_0TO7);
> +		acp_reg_writel(0, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_MASK_8TO11);
> +	}
> +
> +update_status:
> +	sdw_handle_slave_status(&amd_manager->bus, amd_manager->status);
> +	if (amd_manager->status[0] == SDW_SLAVE_ATTACHED) {
> +		if (retry_count++ < SDW_MAX_DEVICES) {
> +			acp_reg_writel(AMD_SDW_IRQ_MASK_0TO7, amd_manager->mmio +
> +				       ACP_SW_STATE_CHANGE_STATUS_MASK_0TO7);
> +			acp_reg_writel(AMD_SDW_IRQ_MASK_8TO11,
> +				       amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_MASK_8TO11);
> +			amd_sdw_read_and_process_ping_status(amd_manager);
> +			goto update_status;
> +		} else {
> +			dev_err_ratelimited(amd_manager->dev,
> +					    "Device0 detected after %d iterations\n",
> +					    retry_count);
> +		}
> +	}

this seems rather inspired by the Cadence code, but is there really a
case where you need to re-check for devices? In the Cadence case, this
was added because we have a logical OR and new devices would not be handled.

> +}
> +
> +static void amd_sdw_update_slave_status(u32 status_change_0to7, u32 status_change_8to11,
> +					struct amd_sdw_manager *amd_manager)
> +{
> +	u64 slave_stat = 0;

useless init

> +	u32 val = 0;

useless init

> +	int dev_index;
> +
> +	if (status_change_0to7 == AMD_SDW_SLAVE_0_ATTACHED)
> +		memset(amd_manager->status, 0, sizeof(amd_manager->status));
> +	slave_stat = status_change_0to7;
> +	slave_stat |= FIELD_GET(AMD_SDW_MCP_SLAVE_STATUS_8TO_11, status_change_8to11) << 32;
> +	dev_dbg(amd_manager->dev, "%s: status_change_0to7:0x%x status_change_8to11:0x%x\n",
> +		__func__, status_change_0to7, status_change_8to11);
> +	if (slave_stat) {
> +		for (dev_index = 0; dev_index <= SDW_MAX_DEVICES; ++dev_index) {
> +			if (slave_stat & AMD_SDW_MCP_SLAVE_STATUS_VALID_MASK(dev_index)) {
> +				val = (slave_stat >> AMD_SDW_MCP_SLAVE_STAT_SHIFT_MASK(dev_index)) &
> +				      AMD_SDW_MCP_SLAVE_STATUS_MASK;
> +				switch (val) {
> +				case SDW_SLAVE_ATTACHED:
> +					amd_manager->status[dev_index] = SDW_SLAVE_ATTACHED;
> +					break;
> +				case SDW_SLAVE_UNATTACHED:
> +					amd_manager->status[dev_index] = SDW_SLAVE_UNATTACHED;
> +					break;
> +				case SDW_SLAVE_ALERT:
> +					amd_manager->status[dev_index] = SDW_SLAVE_ALERT;
> +					break;
> +				default:
> +					amd_manager->status[dev_index] = SDW_SLAVE_RESERVED;
> +					break;
> +				}

the code seems identical to that in amd_sdw_process_ping_status();
shouldn't this be factored out into a helper function?
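
e.g. a minimal sketch (helper name hypothetical), reusing the switch
verbatim:

	static void amd_sdw_set_slave_status(struct amd_sdw_manager *amd_manager,
					     u16 dev_index, u32 val)
	{
		switch (val) {
		case SDW_SLAVE_ATTACHED:
			amd_manager->status[dev_index] = SDW_SLAVE_ATTACHED;
			break;
		case SDW_SLAVE_UNATTACHED:
			amd_manager->status[dev_index] = SDW_SLAVE_UNATTACHED;
			break;
		case SDW_SLAVE_ALERT:
			amd_manager->status[dev_index] = SDW_SLAVE_ALERT;
			break;
		default:
			amd_manager->status[dev_index] = SDW_SLAVE_RESERVED;
			break;
		}
	}

both call sites would then reduce to a single
amd_sdw_set_slave_status(amd_manager, dev_index, val); call.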

> +			}
> +		}
> +	}
> +}
> +
> +static void amd_sdw_irq_thread(struct work_struct *work)
> +{
> +	struct amd_sdw_manager *amd_manager =
> +			container_of(work, struct amd_sdw_manager, amd_sdw_irq_thread);
> +	u32 status_change_8to11;
> +	u32 status_change_0to7;
> +
> +	status_change_8to11 = acp_reg_readl(amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_8TO11);
> +	status_change_0to7 = acp_reg_readl(amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_0TO7);
> +	dev_dbg(amd_manager->dev, "%s [SDW%d] SDW INT: 0to7=0x%x, 8to11=0x%x\n",
> +		__func__, amd_manager->instance, status_change_0to7, status_change_8to11);

remove __func__
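
i.e. just:

	dev_dbg(amd_manager->dev, "[SDW%d] SDW INT: 0to7=0x%x, 8to11=0x%x\n",
		amd_manager->instance, status_change_0to7, status_change_8to11);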

> +	if (status_change_8to11 & AMD_SDW_PREQ_INTR_STAT) {
> +		amd_sdw_read_and_process_ping_status(amd_manager);
> +	} else {
> +		/* Check for the updated status on peripheral device */
> +		amd_sdw_update_slave_status(status_change_0to7, status_change_8to11, amd_manager);
> +	}
> +	if (status_change_8to11 || status_change_0to7)
> +		schedule_work(&amd_manager->amd_sdw_work);
> +	acp_reg_writel(0x00, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_8TO11);
> +	acp_reg_writel(0x00, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_0TO7);
> +}
