Message-ID: <e674c70380ceda953e0e45a77334c5d22e69938f.1712062203.git.petrm@nvidia.com>
Date: Tue, 2 Apr 2024 15:54:20 +0200
From: Petr Machata <petrm@...dia.com>
To: "David S. Miller" <davem@...emloft.net>, Eric Dumazet
<edumazet@...gle.com>, Jakub Kicinski <kuba@...nel.org>, Paolo Abeni
<pabeni@...hat.com>, <netdev@...r.kernel.org>
CC: Ido Schimmel <idosch@...dia.com>, Amit Cohen <amcohen@...dia.com>, "Petr
Machata" <petrm@...dia.com>, <mlxsw@...dia.com>
Subject: [PATCH net-next 07/15] mlxsw: pci: Poll command interface for each cmd_exec()
From: Amit Cohen <amcohen@...dia.com>

The command interface is used to configure and query the FW when EMADs
are not available. While the driver sets up the asynchronous queues, it
polls the command interface for completions. Then there is a short
period when the asynchronous queues already work, but EMADs are not yet
available (marked in the code as nopoll = true). During this time we
send commands via the command interface, but we do not poll it, as we
can get an interrupt for the completion. Command interface completions
are received from the HW in EQ0 (event queue 0).

Using EQ0 instead of polling happens only 4 times during initialization
and once during tear down, but it adds overhead for the whole lifetime
of the driver: for each interrupt, we have to check whether events
arrived in EQ0 or EQ1 and handle them. This is inefficient, especially
since EQ0 is used only as part of driver init/fini.

Instead, we can poll the command interface for each call of cmd_exec().
That means that when we send a command via the command interface
(because EMADs are not available), we poll it regardless of whether the
asynchronous queues are available. This will later allow us to
configure only EQ1 and simplify the flow.

Remove the 'nopoll' indication and change mlxsw_pci_cmd_exec() to poll
until an answer arrives or a timeout expires, regardless of the queues'
state. For now, completions are also handled by EQ0, but that will be
removed in the next patch. Additional cleanups will follow in
subsequent patches.
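
For illustration, a minimal userspace sketch of the poll-until-done-or-
timeout pattern this patch adopts (register layout, bit positions and
helper names below are hypothetical stand-ins, not the driver's actual
definitions):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Hypothetical stand-ins for the CIR control register layout and timeout. */
#define CIR_CTRL_GO_BIT		(1u << 23)
#define CIR_CTRL_STATUS_SHIFT	24
#define CMD_TIMEOUT_MS		1000

/* Stand-in for reading the CIR control register: pretend the device
 * clears the GO bit after a few polls and reports status 0 (OK).
 */
static uint32_t read_cir_ctrl(void)
{
	static int calls;

	return ++calls < 3 ? CIR_CTRL_GO_BIT : 0;
}

static int64_t now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
}

int main(void)
{
	int64_t end = now_ms() + CMD_TIMEOUT_MS;
	bool wait_done = false;
	uint8_t status = 0;

	/* Poll the control register until the GO bit clears or the timeout
	 * expires; this mirrors the loop mlxsw_pci_cmd_exec() now always
	 * runs, instead of waiting for an EQ0 completion event.
	 */
	do {
		uint32_t ctrl = read_cir_ctrl();

		if (!(ctrl & CIR_CTRL_GO_BIT)) {
			wait_done = true;
			status = ctrl >> CIR_CTRL_STATUS_SHIFT;
			break;
		}
		/* in the driver: cond_resched(); */
	} while (now_ms() < end);

	if (wait_done)
		printf("completed, status=%u\n", (unsigned int)status);
	else
		printf("timed out\n");
	return wait_done ? 0 : 1;
}
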
Signed-off-by: Amit Cohen <amcohen@...dia.com>
Reviewed-by: Ido Schimmel <idosch@...dia.com>
Signed-off-by: Petr Machata <petrm@...dia.com>
---
drivers/net/ethernet/mellanox/mlxsw/pci.c | 48 ++++++++---------------
1 file changed, 17 insertions(+), 31 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index c9bd9a98cf1e..b7a83b9ab495 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -111,7 +111,6 @@ struct mlxsw_pci {
struct mlxsw_pci_mem_item out_mbox;
struct mlxsw_pci_mem_item in_mbox;
struct mutex lock; /* Lock access to command registers */
- bool nopoll;
wait_queue_head_t wait;
bool wait_done;
struct {
@@ -1105,8 +1104,6 @@ static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
goto err_rdqs_init;
}
- /* We have to poll in command interface until queues are initialized */
- mlxsw_pci->cmd.nopoll = true;
return 0;
err_rdqs_init:
@@ -1120,7 +1117,6 @@ static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
static void mlxsw_pci_aqs_fini(struct mlxsw_pci *mlxsw_pci)
{
- mlxsw_pci->cmd.nopoll = false;
mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_rdq_ops);
mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
@@ -1846,9 +1842,9 @@ static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
{
struct mlxsw_pci *mlxsw_pci = bus_priv;
dma_addr_t in_mapaddr = 0, out_mapaddr = 0;
- bool evreq = mlxsw_pci->cmd.nopoll;
unsigned long timeout = msecs_to_jiffies(MLXSW_PCI_CIR_TIMEOUT_MSECS);
bool *p_wait_done = &mlxsw_pci->cmd.wait_done;
+ unsigned long end;
int err;
*p_status = MLXSW_CMD_STATUS_OK;
@@ -1877,28 +1873,20 @@ static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
wmb(); /* all needs to be written before we write control register */
mlxsw_pci_write32(mlxsw_pci, CIR_CTRL,
MLXSW_PCI_CIR_CTRL_GO_BIT |
- (evreq ? MLXSW_PCI_CIR_CTRL_EVREQ_BIT : 0) |
(opcode_mod << MLXSW_PCI_CIR_CTRL_OPCODE_MOD_SHIFT) |
opcode);
- if (!evreq) {
- unsigned long end;
+ end = jiffies + timeout;
+ do {
+ u32 ctrl = mlxsw_pci_read32(mlxsw_pci, CIR_CTRL);
- end = jiffies + timeout;
- do {
- u32 ctrl = mlxsw_pci_read32(mlxsw_pci, CIR_CTRL);
-
- if (!(ctrl & MLXSW_PCI_CIR_CTRL_GO_BIT)) {
- *p_wait_done = true;
- *p_status = ctrl >> MLXSW_PCI_CIR_CTRL_STATUS_SHIFT;
- break;
- }
- cond_resched();
- } while (time_before(jiffies, end));
- } else {
- wait_event_timeout(mlxsw_pci->cmd.wait, *p_wait_done, timeout);
- *p_status = mlxsw_pci->cmd.comp.status;
- }
+ if (!(ctrl & MLXSW_PCI_CIR_CTRL_GO_BIT)) {
+ *p_wait_done = true;
+ *p_status = ctrl >> MLXSW_PCI_CIR_CTRL_STATUS_SHIFT;
+ break;
+ }
+ cond_resched();
+ } while (time_before(jiffies, end));
err = 0;
if (*p_wait_done) {
@@ -1915,14 +1903,12 @@ static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
*/
__be32 tmp;
- if (!evreq) {
- tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
- CIR_OUT_PARAM_HI));
- memcpy(out_mbox, &tmp, sizeof(tmp));
- tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
- CIR_OUT_PARAM_LO));
- memcpy(out_mbox + sizeof(tmp), &tmp, sizeof(tmp));
- }
+ tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
+ CIR_OUT_PARAM_HI));
+ memcpy(out_mbox, &tmp, sizeof(tmp));
+ tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
+ CIR_OUT_PARAM_LO));
+ memcpy(out_mbox + sizeof(tmp), &tmp, sizeof(tmp));
} else if (!err && out_mbox) {
memcpy(out_mbox, mlxsw_pci->cmd.out_mbox.buf, out_mbox_size);
}
--
2.43.0