Message-Id: <20240325021705.249769-4-baolu.lu@linux.intel.com>
Date: Mon, 25 Mar 2024 10:16:56 +0800
From: Lu Baolu <baolu.lu@...ux.intel.com>
To: Joerg Roedel <joro@...tes.org>,
Will Deacon <will@...nel.org>,
Robin Murphy <robin.murphy@....com>,
Kevin Tian <kevin.tian@...el.com>,
Jason Gunthorpe <jgg@...pe.ca>
Cc: Tina Zhang <tina.zhang@...el.com>,
Yi Liu <yi.l.liu@...el.com>,
iommu@...ts.linux.dev,
linux-kernel@...r.kernel.org,
Lu Baolu <baolu.lu@...ux.intel.com>
Subject: [PATCH 03/12] iommu/vt-d: Add trace events for cache tag interface

Add trace events for the cache tag assign/unassign/flush operations and
emit them from the corresponding cache tag interfaces. These trace events
improve debugging by providing detailed information about cache tag
activity.

A sample of the traced messages is shown below [messages have been
stripped and wrapped to keep the lines short]:

cache_tag_assign: dmar9/0000:00:01.0 type iotlb did 1 pasid 9 ref 1
cache_tag_assign: dmar9/0000:00:01.0 type devtlb did 1 pasid 9 ref 1
cache_tag_flush_all: dmar6/0000:8a:00.0 type iotlb did 7 pasid 0 ref 1
cache_tag_flush_range: dmar1 0000:00:1b.0[0] type iotlb did 9
[0xeab00000-0xeab1afff] addr 0xeab00000 pages 0x20 mask 0x5
cache_tag_flush_range: dmar1 0000:00:1b.0[0] type iotlb did 9
[0xeab20000-0xeab31fff] addr 0xeab20000 pages 0x20 mask 0x5
cache_tag_flush_range: dmar1 0000:00:1b.0[0] type iotlb did 9
[0xeaa40000-0xeaa51fff] addr 0xeaa40000 pages 0x20 mask 0x5
cache_tag_flush_range: dmar1 0000:00:1b.0[0] type iotlb did 9
[0x98de0000-0x98de4fff] addr 0x98de0000 pages 0x8 mask 0x3
cache_tag_flush_range: dmar1 0000:00:1b.0[0] type iotlb did 9
[0xe9828000-0xe9828fff] addr 0xe9828000 pages 0x1 mask 0x0
cache_tag_unassign: dmar9/0000:00:01.0 type iotlb did 1 pasid 9 ref 1
cache_tag_unassign: dmar9/0000:00:01.0 type devtlb did 1 pasid 9 ref 1

Signed-off-by: Lu Baolu <baolu.lu@...ux.intel.com>
---
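
Note for reviewers (not intended for the commit log): the fields printed
by these events come from the cache tag structure introduced earlier in
this series. The following is only a rough sketch inferred from how the
trace events below consume the tag; the authoritative definition lives in
the earlier patch, and names/types here are assumptions for illustration:

enum cache_tag_type {
	CACHE_TAG_TYPE_IOTLB,
	CACHE_TAG_TYPE_DEVTLB,
	CACHE_TAG_TYPE_PARENT_IOTLB,
	CACHE_TAG_TYPE_PARENT_DEVTLB,
};

struct cache_tag {
	struct list_head node;		/* entry on dmar_domain::cache_tags */
	enum cache_tag_type type;	/* printed as "iotlb", "devtlb", ... */
	struct intel_iommu *iommu;	/* ->name gives the "dmarN" prefix */
	struct device *dev;		/* dev_name() gives the device part */
	u16 domain_id;			/* "did" in the trace output */
	ioasid_t pasid;			/* "pasid" in the trace output */
	unsigned int users;		/* reference count, "ref" in the output */
};

In the flush_range/flush_cm_range events, [start-end] is the requested
range while addr/pages/mask describe the aligned region that is handed to
the invalidation hardware.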
drivers/iommu/intel/trace.h | 97 +++++++++++++++++++++++++++++++++++++
drivers/iommu/intel/cache.c | 8 +++
2 files changed, 105 insertions(+)
diff --git a/drivers/iommu/intel/trace.h b/drivers/iommu/intel/trace.h
index 93d96f93a89b..b8c2ebace8af 100644
--- a/drivers/iommu/intel/trace.h
+++ b/drivers/iommu/intel/trace.h
@@ -89,6 +89,103 @@ TRACE_EVENT(prq_report,
__entry->dw1, __entry->dw2, __entry->dw3)
)
);
+
+DECLARE_EVENT_CLASS(cache_tag_log,
+ TP_PROTO(struct cache_tag *tag),
+ TP_ARGS(tag),
+ TP_STRUCT__entry(
+ __string(iommu, tag->iommu->name)
+ __string(dev, dev_name(tag->dev))
+ __field(u16, type)
+ __field(u16, domain_id)
+ __field(u32, pasid)
+ __field(u32, users)
+ ),
+ TP_fast_assign(
+ __assign_str(iommu, tag->iommu->name);
+ __assign_str(dev, dev_name(tag->dev));
+ __entry->type = tag->type;
+ __entry->domain_id = tag->domain_id;
+ __entry->pasid = tag->pasid;
+ __entry->users = tag->users;
+ ),
+ TP_printk("%s/%s type %s did %d pasid %d ref %d",
+ __get_str(iommu), __get_str(dev),
+ __print_symbolic(__entry->type,
+ { CACHE_TAG_TYPE_IOTLB, "iotlb" },
+ { CACHE_TAG_TYPE_DEVTLB, "devtlb" },
+ { CACHE_TAG_TYPE_PARENT_IOTLB, "parent_iotlb" },
+ { CACHE_TAG_TYPE_PARENT_DEVTLB, "parent_devtlb" }),
+ __entry->domain_id, __entry->pasid, __entry->users
+ )
+);
+
+DEFINE_EVENT(cache_tag_log, cache_tag_assign,
+ TP_PROTO(struct cache_tag *tag),
+ TP_ARGS(tag)
+);
+
+DEFINE_EVENT(cache_tag_log, cache_tag_unassign,
+ TP_PROTO(struct cache_tag *tag),
+ TP_ARGS(tag)
+);
+
+DEFINE_EVENT(cache_tag_log, cache_tag_flush_all,
+ TP_PROTO(struct cache_tag *tag),
+ TP_ARGS(tag)
+);
+
+DECLARE_EVENT_CLASS(cache_tag_flush,
+ TP_PROTO(struct cache_tag *tag, unsigned long start, unsigned long end,
+ unsigned long addr, unsigned long pages, unsigned long mask),
+ TP_ARGS(tag, start, end, addr, pages, mask),
+ TP_STRUCT__entry(
+ __string(iommu, tag->iommu->name)
+ __string(dev, dev_name(tag->dev))
+ __field(u16, type)
+ __field(u16, domain_id)
+ __field(u32, pasid)
+ __field(unsigned long, start)
+ __field(unsigned long, end)
+ __field(unsigned long, addr)
+ __field(unsigned long, pages)
+ __field(unsigned long, mask)
+ ),
+ TP_fast_assign(
+ __assign_str(iommu, tag->iommu->name);
+ __assign_str(dev, dev_name(tag->dev));
+ __entry->type = tag->type;
+ __entry->domain_id = tag->domain_id;
+ __entry->pasid = tag->pasid;
+ __entry->start = start;
+ __entry->end = end;
+ __entry->addr = addr;
+ __entry->pages = pages;
+ __entry->mask = mask;
+ ),
+ TP_printk("%s %s[%d] type %s did %d [0x%lx-0x%lx] addr 0x%lx pages 0x%lx mask 0x%lx",
+ __get_str(iommu), __get_str(dev), __entry->pasid,
+ __print_symbolic(__entry->type,
+ { CACHE_TAG_TYPE_IOTLB, "iotlb" },
+ { CACHE_TAG_TYPE_DEVTLB, "devtlb" },
+ { CACHE_TAG_TYPE_PARENT_IOTLB, "parent_iotlb" },
+ { CACHE_TAG_TYPE_PARENT_DEVTLB, "parent_devtlb" }),
+ __entry->domain_id, __entry->start, __entry->end,
+ __entry->addr, __entry->pages, __entry->mask
+ )
+);
+
+DEFINE_EVENT(cache_tag_flush, cache_tag_flush_range,
+ TP_PROTO(struct cache_tag *tag, unsigned long start, unsigned long end,
+ unsigned long addr, unsigned long pages, unsigned long mask),
+ TP_ARGS(tag, start, end, addr, pages, mask)
+);
+
+DEFINE_EVENT(cache_tag_flush, cache_tag_flush_cm_range,
+ TP_PROTO(struct cache_tag *tag, unsigned long start, unsigned long end,
+ unsigned long addr, unsigned long pages, unsigned long mask),
+ TP_ARGS(tag, start, end, addr, pages, mask)
+);
#endif /* _TRACE_INTEL_IOMMU_H */
/* This part must be outside protection */
diff --git a/drivers/iommu/intel/cache.c b/drivers/iommu/intel/cache.c
index 4c245d39faf2..b35ef2ee2aca 100644
--- a/drivers/iommu/intel/cache.c
+++ b/drivers/iommu/intel/cache.c
@@ -17,6 +17,7 @@
#include "iommu.h"
#include "pasid.h"
+#include "trace.h"
/* Checks if an existing cache tag can be reused for a new association. */
static bool cache_tag_reusable(struct cache_tag *tag, u16 domain_id,
@@ -65,11 +66,13 @@ static int cache_tag_assign(struct dmar_domain *domain, u16 did,
temp->users++;
spin_unlock_irqrestore(&domain->cache_lock, flags);
kfree(tag);
+ trace_cache_tag_assign(temp);
return 0;
}
}
list_add_tail(&tag->node, &domain->cache_tags);
spin_unlock_irqrestore(&domain->cache_lock, flags);
+ trace_cache_tag_assign(tag);
return 0;
}
@@ -87,6 +90,7 @@ static void cache_tag_unassign(struct dmar_domain *domain, u16 did,
spin_lock_irqsave(&domain->cache_lock, flags);
list_for_each_entry(tag, &domain->cache_tags, node) {
if (cache_tag_reusable(tag, did, iommu, dev, pasid, type)) {
+ trace_cache_tag_unassign(tag);
if (--tag->users == 0) {
list_del(&tag->node);
kfree(tag);
@@ -293,6 +297,8 @@ void cache_tag_flush_range(struct dmar_domain *domain, unsigned long start,
IOMMU_NO_PASID, info->ats_qdep);
break;
}
+
+ trace_cache_tag_flush_range(tag, start, end, addr, pages, mask);
}
spin_unlock_irqrestore(&domain->cache_lock, flags);
}
@@ -330,6 +336,7 @@ void cache_tag_flush_all(struct dmar_domain *domain)
IOMMU_NO_PASID, info->ats_qdep);
break;
}
+ trace_cache_tag_flush_all(tag);
}
spin_unlock_irqrestore(&domain->cache_lock, flags);
}
@@ -376,6 +383,7 @@ void cache_tag_flush_cm_range(struct dmar_domain *domain, unsigned long start,
addr, mask,
DMA_TLB_PSI_FLUSH);
}
+ trace_cache_tag_flush_cm_range(tag, start, end, addr, pages, mask);
}
spin_unlock_irqrestore(&domain->cache_lock, flags);
}
--
2.34.1