Message-ID: <627ef0ef-5e4c-b310-92e1-a9bb57d1aa96@xilinx.com>
Date: Mon, 23 Nov 2020 19:24:28 -0800
From: Wendy Liang <wendy.liang@...inx.com>
To: Hillf Danton <hdanton@...a.com>,
Wendy Liang <wendy.liang@...inx.com>
CC: <robh+dt@...nel.org>, <michal.simek@...inx.com>, <arnd@...db.de>,
<gregkh@...uxfoundation.org>,
<linux-arm-kernel@...ts.infradead.org>,
<linux-kernel@...r.kernel.org>, <linux-media@...r.kernel.org>,
<dri-devel@...ts.freedesktop.org>,
Nishad Saraf <nishad.saraf@...inx.com>
Subject: Re: [PATCH v2 9/9] misc: xilinx-ai-engine: Add support for servicing
error interrupts
On 11/19/20 12:36 AM, Hillf Danton wrote:
> On Wed, 18 Nov 2020 15:48:09 -0800 Wendy Liang wrote:
>> +/**
>> + * aie_interrupt() - interrupt handler for AIE.
>> + * @irq: Interrupt number.
>> + * @data: AI engine device structure.
>> + * @return: IRQ_HANDLED.
>> + *
>> + * This thread function disables level 2 interrupt controllers and schedules a
>> + * task in workqueue to backtrack the source of error interrupt. Disabled
>> + * interrupts are re-enabled after successful completion of bottom half.
>> + */
>> +irqreturn_t aie_interrupt(int irq, void *data)
>> +{
>> + struct aie_device *adev = data;
>> + struct aie_partition *apart;
>> + int ret;
>> + bool sched_work = false;
>> +
>> + ret = mutex_lock_interruptible(&adev->mlock);
>> + if (ret) {
>> + dev_err(&adev->dev,
>> + "Failed to acquire lock. Process was interrupted by fatal signals\n");
>> + return IRQ_NONE;
>> + }
>> +
>> + list_for_each_entry(apart, &adev->partitions, node) {
>> + struct aie_location loc;
>> + u32 ttype, l2_mask, l2_status, l2_bitmap_offset = 0;
>> +
>> + ret = mutex_lock_interruptible(&apart->mlock);
>> + if (ret) {
>> + dev_err(&apart->dev,
>> + "Failed to acquire lock. Process was interrupted by fatal signals\n");
>> + return IRQ_NONE;
>
> Though quite unlikely, you need to release adev->mlock before
> going home.
Thanks for pointing it out. I will change this in the next version.
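Something like this for the inner error path, dropping adev->mlock before
returning (untested sketch against this patch):

	ret = mutex_lock_interruptible(&apart->mlock);
	if (ret) {
		dev_err(&apart->dev,
			"Failed to acquire lock. Process was interrupted by fatal signals\n");
		/* Release the device-level lock taken at entry */
		mutex_unlock(&adev->mlock);
		return IRQ_NONE;
	}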
Thanks,
Wendy
>
>> + }
>> +
>> + for (loc.col = apart->range.start.col, loc.row = 0;
>> + loc.col < apart->range.start.col + apart->range.size.col;
>> + loc.col++) {
>> + ttype = apart->adev->ops->get_tile_type(&loc);
>> + if (ttype != AIE_TILE_TYPE_SHIMNOC)
>> + continue;
>> +
>> + l2_mask = aie_get_l2_mask(apart, &loc);
>> + if (l2_mask) {
>> + aie_resource_cpy_from_arr32(&apart->l2_mask,
>> + l2_bitmap_offset *
>> + 32, &l2_mask, 32);
>> + aie_disable_l2_ctrl(apart, &loc, l2_mask);
>> + }
>> + l2_bitmap_offset++;
>> +
>> + l2_status = aie_get_l2_status(apart, &loc);
>> + if (l2_status) {
>> + aie_clear_l2_intr(apart, &loc, l2_status);
>> + sched_work = true;
>> + } else {
>> + aie_enable_l2_ctrl(apart, &loc, l2_mask);
>> + }
>> + }
>> + mutex_unlock(&apart->mlock);
>> + }
>> +
>> + /* For ES1 silicon, interrupts are latched in NPI */
>> + if (adev->version == VERSAL_ES1_REV_ID) {
>> + ret = zynqmp_pm_clear_aie_npi_isr(adev->pm_node_id,
>> + AIE_NPI_ERROR_ID);
>> + if (ret < 0)
>> + dev_err(&adev->dev, "Failed to clear NPI ISR\n");
>> + }
>> +
>> + mutex_unlock(&adev->mlock);
>> +
>> + if (sched_work)
>> + schedule_work(&adev->backtrack);
>> +
>> + return IRQ_HANDLED;
>> +}
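For context, this handler is the threaded half of the IRQ, which is why it
can take mutexes and call schedule_work(). A minimal sketch of how it might
be registered from probe; the surrounding variables here are assumptions
for illustration, not part of this patch:

	/*
	 * A NULL primary handler with IRQF_ONESHOT makes the core invoke
	 * aie_interrupt() from kernel thread context.
	 */
	ret = request_threaded_irq(irq, NULL, aie_interrupt, IRQF_ONESHOT,
				   dev_name(&adev->dev), adev);
	if (ret)
		dev_err(&adev->dev, "Failed to request AIE interrupt\n");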