Message-Id: <20190630060500.7882-12-idosch@idosch.org>
Date: Sun, 30 Jun 2019 09:04:55 +0300
From: Ido Schimmel <idosch@...sch.org>
To: netdev@...r.kernel.org
Cc: davem@...emloft.net, richardcochran@...il.com, jiri@...lanox.com,
petrm@...lanox.com, mlxsw@...lanox.com,
Ido Schimmel <idosch@...lanox.com>
Subject: [PATCH net-next v2 11/16] mlxsw: spectrum: PTP: Disable BH when working with PHC
From: Petr Machata <petrm@...lanox.com>
Until now, the PTP hardware clock code has only been invoked in process
context (SYS_clock_adjtime -> do_clock_adjtime -> k_clock::clock_adj ->
pc_clock_adjtime -> posix_clock_operations::clock_adjtime ->
ptp_clock_info::adjtime -> mlxsw_spectrum).
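(For illustration only, not part of the patch: the process-context path
above can be exercised from user space with a sketch along these lines.
The /dev/ptp0 path is an assumption, since the PHC index varies per
system; FD_TO_CLOCKID mirrors the definition in
tools/testing/selftests/ptp/testptp.c, and CAP_SYS_TIME is needed for
the adjustment to be permitted.)

#include <fcntl.h>
#include <stdio.h>
#include <sys/timex.h>
#include <time.h>
#include <unistd.h>

#define CLOCKFD			3
#define FD_TO_CLOCKID(fd)	((~(clockid_t)(fd) << 3) | CLOCKFD)

int main(void)
{
	struct timex tx = {
		.modes = ADJ_SETOFFSET,
		.time = { .tv_sec = 1 },	/* shift the PHC by +1s */
	};
	int fd = open("/dev/ptp0", O_RDWR);	/* assumed PHC device */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Enters the kernel via SYS_clock_adjtime and, for an mlxsw
	 * PHC, ends up in ptp_clock_info::adjtime, i.e.
	 * mlxsw_sp1_ptp_adjtime() from the diff below.
	 */
	if (clock_adjtime(FD_TO_CLOCKID(fd), &tx) < 0)
		perror("clock_adjtime");

	close(fd);
	return 0;
}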
To enable HW timestamping, which is tied into trap handling, it will be
necessary to take the clock lock from the PCI queue handler tasklets as
well.
Therefore, use the _bh variants when handling the clock lock. Incidentally,
Documentation/ptp/ptp.txt recommends the _irqsave variants, but those are
unnecessarily strong for our needs.
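(Again for illustration, not part of the patch: a minimal sketch of the
locking pattern this establishes, with hypothetical example_* names. A
lock shared between process context and a tasklet must be taken with the
_bh variant in process context; otherwise the tasklet could fire on the
same CPU while the lock is held and spin on it forever. The _irqsave
variants would additionally mask hardirqs, which nothing here takes the
lock from.)

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct example_clock {
	spinlock_t lock;
	u64 nsec;
};

static struct example_clock eclock = {
	.lock = __SPIN_LOCK_UNLOCKED(eclock.lock),
};

/* Softirq context, e.g. a PCI queue handler tasklet; wired up
 * elsewhere via tasklet_init(&t, example_tasklet_fn,
 * (unsigned long)&eclock). A plain spin_lock() is sufficient here,
 * since softirqs do not preempt each other on the same CPU.
 */
static void example_tasklet_fn(unsigned long data)
{
	struct example_clock *clock = (struct example_clock *)data;

	spin_lock(&clock->lock);
	clock->nsec++;
	spin_unlock(&clock->lock);
}

/* Process context, e.g. a ptp_clock_info callback: _bh disables
 * softirqs locally, so the tasklet above cannot run on this CPU
 * while we hold the lock and deadlock against us.
 */
static void example_clock_op(struct example_clock *clock)
{
	spin_lock_bh(&clock->lock);
	clock->nsec++;
	spin_unlock_bh(&clock->lock);
}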
Signed-off-by: Petr Machata <petrm@...lanox.com>
Acked-by: Jiri Pirko <jiri@...lanox.com>
Signed-off-by: Ido Schimmel <idosch@...lanox.com>
---
.../ethernet/mellanox/mlxsw/spectrum_ptp.c | 24 +++++++++----------
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
index 6725a4d53f87..1eb6eefa1afc 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
@@ -117,9 +117,9 @@ mlxsw_sp1_ptp_phc_settime(struct mlxsw_sp_ptp_clock *clock, u64 nsec)
 	next_sec = div_u64(nsec, NSEC_PER_SEC) + 1;
 	next_sec_in_nsec = next_sec * NSEC_PER_SEC;
 
-	spin_lock(&clock->lock);
+	spin_lock_bh(&clock->lock);
 	cycles = mlxsw_sp1_ptp_ns2cycles(&clock->tc, next_sec_in_nsec);
-	spin_unlock(&clock->lock);
+	spin_unlock_bh(&clock->lock);
 
 	mlxsw_reg_mtpps_vpin_pack(mtpps_pl, cycles);
 	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtpps), mtpps_pl);
@@ -152,11 +152,11 @@ static int mlxsw_sp1_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
 	adj *= ppb;
 	diff = div_u64(adj, NSEC_PER_SEC);
 
-	spin_lock(&clock->lock);
+	spin_lock_bh(&clock->lock);
 	timecounter_read(&clock->tc);
 	clock->cycles.mult = neg_adj ? clock->nominal_c_mult - diff :
 				       clock->nominal_c_mult + diff;
-	spin_unlock(&clock->lock);
+	spin_unlock_bh(&clock->lock);
 
 	return mlxsw_sp1_ptp_phc_adjfreq(clock, neg_adj ? -ppb : ppb);
 }
@@ -167,10 +167,10 @@ static int mlxsw_sp1_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
 		container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
 	u64 nsec;
 
-	spin_lock(&clock->lock);
+	spin_lock_bh(&clock->lock);
 	timecounter_adjtime(&clock->tc, delta);
 	nsec = timecounter_read(&clock->tc);
-	spin_unlock(&clock->lock);
+	spin_unlock_bh(&clock->lock);
 
 	return mlxsw_sp1_ptp_phc_settime(clock, nsec);
 }
@@ -183,10 +183,10 @@ static int mlxsw_sp1_ptp_gettimex(struct ptp_clock_info *ptp,
 		container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
 	u64 cycles, nsec;
 
-	spin_lock(&clock->lock);
+	spin_lock_bh(&clock->lock);
 	cycles = __mlxsw_sp1_ptp_read_frc(clock, sts);
 	nsec = timecounter_cyc2time(&clock->tc, cycles);
-	spin_unlock(&clock->lock);
+	spin_unlock_bh(&clock->lock);
 
 	*ts = ns_to_timespec64(nsec);
 
@@ -200,10 +200,10 @@ static int mlxsw_sp1_ptp_settime(struct ptp_clock_info *ptp,
 		container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
 	u64 nsec = timespec64_to_ns(ts);
 
-	spin_lock(&clock->lock);
+	spin_lock_bh(&clock->lock);
 	timecounter_init(&clock->tc, &clock->cycles, nsec);
 	nsec = timecounter_read(&clock->tc);
-	spin_unlock(&clock->lock);
+	spin_unlock_bh(&clock->lock);
 
 	return mlxsw_sp1_ptp_phc_settime(clock, nsec);
 }
@@ -225,9 +225,9 @@ static void mlxsw_sp1_ptp_clock_overflow(struct work_struct *work)
 
 	clock = container_of(dwork, struct mlxsw_sp_ptp_clock, overflow_work);
 
-	spin_lock(&clock->lock);
+	spin_lock_bh(&clock->lock);
 	timecounter_read(&clock->tc);
-	spin_unlock(&clock->lock);
+	spin_unlock_bh(&clock->lock);
 
 	mlxsw_core_schedule_dw(&clock->overflow_work, clock->overflow_period);
 }
--
2.20.1