Message-Id: <20200411115509.463222109@linuxfoundation.org>
Date: Sat, 11 Apr 2020 14:08:54 +0200
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
stable@...r.kernel.org, Lucas Stach <l.stach@...gutronix.de>,
Philipp Zabel <p.zabel@...gutronix.de>,
Guido Günther <agx@...xcpu.org>,
Robert Beckett <bob.beckett@...labora.com>
Subject: [PATCH 4.19 12/54] drm/etnaviv: replace MMU flush marker with flush sequence

From: Lucas Stach <l.stach@...gutronix.de>

commit 4900dda90af2cb13bc1d4c12ce94b98acc8fe64e upstream.

If an MMU is shared between multiple GPUs, all of them need to flush their
TLBs, so a single marker that gets reset on the first flush won't do.
Replace the flush marker with a sequence number, so that it's possible to
check if the TLB is in sync with the current page table state for each GPU.

Signed-off-by: Lucas Stach <l.stach@...gutronix.de>
Reviewed-by: Philipp Zabel <p.zabel@...gutronix.de>
Reviewed-by: Guido Günther <agx@...xcpu.org>
Signed-off-by: Robert Beckett <bob.beckett@...labora.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
---
 drivers/gpu/drm/etnaviv/etnaviv_buffer.c |   10 ++++++----
 drivers/gpu/drm/etnaviv/etnaviv_gpu.h    |    1 +
 drivers/gpu/drm/etnaviv/etnaviv_mmu.c    |    6 +++---
 drivers/gpu/drm/etnaviv/etnaviv_mmu.h    |    2 +-
 4 files changed, 11 insertions(+), 8 deletions(-)
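
The idea is easiest to see outside the kernel. Below is a minimal
standalone sketch of the before/after behaviour, using hypothetical
shared_mmu/consumer types rather than the real etnaviv structures: a
single need_flush bool is consumed by whichever GPU flushes first, so
every other GPU sharing the MMU misses the event, while a sequence
number lets each GPU compare its own last-synced value against the
MMU's current one.

#include <stdbool.h>
#include <stdio.h>

struct shared_mmu {
	unsigned int flush_seq;		/* bumped on every page table change */
};

struct consumer {
	const char *name;
	unsigned int flush_seq;		/* last sequence this consumer synced to */
};

/* Writer side: called whenever a mapping is added or removed. */
static void mmu_changed(struct shared_mmu *mmu)
{
	mmu->flush_seq++;
}

/* Reader side: each consumer decides independently whether to flush. */
static void consumer_submit(struct consumer *c, struct shared_mmu *mmu)
{
	unsigned int new_flush_seq = mmu->flush_seq;
	bool need_flush = c->flush_seq != new_flush_seq;

	if (need_flush) {
		printf("%s: flush TLB (seq %u -> %u)\n",
		       c->name, c->flush_seq, new_flush_seq);
		c->flush_seq = new_flush_seq;
	} else {
		printf("%s: TLB already in sync (seq %u)\n",
		       c->name, c->flush_seq);
	}
}

int main(void)
{
	struct shared_mmu mmu = { 0 };
	struct consumer gpu0 = { "gpu0", 0 };
	struct consumer gpu1 = { "gpu1", 0 };

	mmu_changed(&mmu);		/* one page table update... */
	consumer_submit(&gpu0, &mmu);	/* gpu0 flushes */
	consumer_submit(&gpu1, &mmu);	/* gpu1 still flushes; a shared bool
					 * cleared by gpu0 would have lost
					 * this event */
	consumer_submit(&gpu1, &mmu);	/* now in sync, no flush */
	return 0;
}

With the old bool, gpu1's first submit above would skip the flush
because gpu0 had already cleared the marker; with the sequence number,
both GPUs flush exactly once and then stay quiet until the next update.
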
--- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
@@ -311,6 +311,8 @@ void etnaviv_buffer_queue(struct etnaviv
 	u32 return_target, return_dwords;
 	u32 link_target, link_dwords;
 	bool switch_context = gpu->exec_state != exec_state;
+	unsigned int new_flush_seq = READ_ONCE(gpu->mmu->flush_seq);
+	bool need_flush = gpu->flush_seq != new_flush_seq;
 
 	lockdep_assert_held(&gpu->lock);
 
@@ -325,14 +327,14 @@ void etnaviv_buffer_queue(struct etnaviv
 	 * need to append a mmu flush load state, followed by a new
	 * link to this buffer - a total of four additional words.
 	 */
-	if (gpu->mmu->need_flush || switch_context) {
+	if (need_flush || switch_context) {
 		u32 target, extra_dwords;
 
 		/* link command */
 		extra_dwords = 1;
 
 		/* flush command */
-		if (gpu->mmu->need_flush) {
+		if (need_flush) {
 			if (gpu->mmu->version == ETNAVIV_IOMMU_V1)
 				extra_dwords += 1;
 			else
@@ -345,7 +347,7 @@ void etnaviv_buffer_queue(struct etnaviv
 
 	target = etnaviv_buffer_reserve(gpu, buffer, extra_dwords);
 
-	if (gpu->mmu->need_flush) {
+	if (need_flush) {
 		/* Add the MMU flush */
 		if (gpu->mmu->version == ETNAVIV_IOMMU_V1) {
 			CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_MMU,
@@ -365,7 +367,7 @@
 				  SYNC_RECIPIENT_PE);
 		}
 
-		gpu->mmu->need_flush = false;
+		gpu->flush_seq = new_flush_seq;
 	}
 
 	if (switch_context) {
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -139,6 +139,7 @@ struct etnaviv_gpu {
 
 	struct etnaviv_iommu *mmu;
 	struct etnaviv_cmdbuf_suballoc *cmdbuf_suballoc;
+	unsigned int flush_seq;
 
 	/* Power Control: */
 	struct clk *clk_bus;
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -261,7 +261,7 @@ int etnaviv_iommu_map_gem(struct etnaviv
 	}
 
 	list_add_tail(&mapping->mmu_node, &mmu->mappings);
-	mmu->need_flush = true;
+	mmu->flush_seq++;
 
 unlock:
 	mutex_unlock(&mmu->lock);
@@ -280,7 +280,7 @@ void etnaviv_iommu_unmap_gem(struct etna
 
 	etnaviv_iommu_remove_mapping(mmu, mapping);
 	list_del(&mapping->mmu_node);
-	mmu->need_flush = true;
+	mmu->flush_seq++;
 
 	mutex_unlock(&mmu->lock);
 }
@@ -357,7 +357,7 @@ int etnaviv_iommu_get_suballoc_va(struct
 			mutex_unlock(&mmu->lock);
 			return ret;
 		}
-		gpu->mmu->need_flush = true;
+		mmu->flush_seq++;
 		mutex_unlock(&mmu->lock);
 
 		*iova = (u32)vram_node->start;
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
@@ -48,7 +48,7 @@ struct etnaviv_iommu {
 	struct mutex lock;
 	struct list_head mappings;
 	struct drm_mm mm;
-	bool need_flush;
+	unsigned int flush_seq;
 };
 
 struct etnaviv_gem_object;
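
Two details of the new scheme are worth spelling out. Writers bump
flush_seq under mmu->lock, but etnaviv_buffer_queue() samples it without
taking that lock, which is what the READ_ONCE() in the first hunk is
for: it forces a single, untorn load of the counter. And because the
consumer only compares the sampled value for inequality with its own
copy, the unsigned counter may freely wrap around. A small sketch of
the wrap behaviour (plain C, not kernel code, assuming fewer than 2^32
updates happen between two samples):

#include <assert.h>
#include <limits.h>
#include <stdbool.h>

int main(void)
{
	unsigned int mmu_seq = UINT_MAX;	/* about to wrap */
	unsigned int gpu_seq = UINT_MAX;	/* currently in sync */

	mmu_seq++;	/* unsigned overflow is well-defined: wraps to 0 */

	/* The inequality test still detects the change across the wrap... */
	bool need_flush = gpu_seq != mmu_seq;
	assert(need_flush);

	/* ...whereas an ordered comparison would conclude nothing changed. */
	assert(!(mmu_seq > gpu_seq));

	return 0;
}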