Message-ID: <alpine.DEB.2.22.394.2004141704050.68516@chino.kir.corp.google.com>
Date: Tue, 14 Apr 2020 17:04:59 -0700 (PDT)
From: David Rientjes <rientjes@...gle.com>
To: Christoph Hellwig <hch@....de>,
Tom Lendacky <thomas.lendacky@....com>
cc: Brijesh Singh <brijesh.singh@....com>,
Jon Grimm <jon.grimm@....com>, Joerg Roedel <joro@...tes.org>,
linux-kernel@...r.kernel.org, iommu@...ts.linux-foundation.org,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
x86@...nel.org
Subject: [patch 5/7] dma-pool: add pool sizes to debugfs
The atomic DMA pools can dynamically expand based on non-blocking
allocations that need to use them.

Export the size of each of these pools, in bytes, through debugfs for
measurement.
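
As a rough illustration (not part of the patch), the new entries can be
read from userspace once debugfs is mounted, conventionally at
/sys/kernel/debug; the sketch below assumes that mount point,
CONFIG_DEBUG_FS=y, and root privileges (the files are created 0400):

/* Illustrative userspace reader for the new dma_pools debugfs entries.
 * The /sys/kernel/debug mount point is an assumption; adjust as needed.
 */
#include <stdio.h>

int main(void)
{
	static const char * const files[] = {
		"/sys/kernel/debug/dma_pools/pool_size_dma",
		"/sys/kernel/debug/dma_pools/pool_size_dma32",
		"/sys/kernel/debug/dma_pools/pool_size_kernel",
	};
	unsigned long size;
	size_t i;

	for (i = 0; i < sizeof(files) / sizeof(files[0]); i++) {
		FILE *f = fopen(files[i], "r");

		if (!f)
			continue;	/* CONFIG_DEBUG_FS off or pool absent */
		if (fscanf(f, "%lu", &size) == 1)
			printf("%s: %lu bytes\n", files[i], size);
		fclose(f);
	}
	return 0;
}

Each file reports the current total size of the corresponding atomic
pool in bytes, growing as the pool expands.
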
Suggested-by: Christoph Hellwig <hch@....de>
Signed-off-by: David Rientjes <rientjes@...gle.com>
---
kernel/dma/pool.c | 41 +++++++++++++++++++++++++++++++++++++++++
1 file changed, 41 insertions(+)
diff --git a/kernel/dma/pool.c b/kernel/dma/pool.c
index cf052314d9e4..3e22022c933b 100644
--- a/kernel/dma/pool.c
+++ b/kernel/dma/pool.c
@@ -2,6 +2,7 @@
 /*
  * Copyright (C) 2020 Google LLC
  */
+#include <linux/debugfs.h>
 #include <linux/dma-direct.h>
 #include <linux/dma-noncoherent.h>
 #include <linux/dma-contiguous.h>
@@ -15,6 +16,11 @@
 static struct gen_pool *atomic_pool_dma __ro_after_init;
 static struct gen_pool *atomic_pool_dma32 __ro_after_init;
 static struct gen_pool *atomic_pool_kernel __ro_after_init;
+#ifdef CONFIG_DEBUG_FS
+static unsigned long pool_size_dma;
+static unsigned long pool_size_dma32;
+static unsigned long pool_size_kernel;
+#endif
 
 #define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K
 static size_t atomic_pool_size = DEFAULT_DMA_COHERENT_POOL_SIZE;
@@ -29,6 +35,38 @@ static int __init early_coherent_pool(char *p)
 }
 early_param("coherent_pool", early_coherent_pool);
 
+#ifdef CONFIG_DEBUG_FS
+static void __init dma_atomic_pool_debugfs_init(void)
+{
+	struct dentry *root;
+
+	root = debugfs_create_dir("dma_pools", NULL);
+	if (IS_ERR_OR_NULL(root))
+		return;
+
+	debugfs_create_ulong("pool_size_dma", 0400, root, &pool_size_dma);
+	debugfs_create_ulong("pool_size_dma32", 0400, root, &pool_size_dma32);
+	debugfs_create_ulong("pool_size_kernel", 0400, root, &pool_size_kernel);
+}
+
+static void dma_atomic_pool_size_add(gfp_t gfp, size_t size)
+{
+	if (gfp & __GFP_DMA)
+		pool_size_dma += size;
+	else if (gfp & __GFP_DMA32)
+		pool_size_dma32 += size;
+	else
+		pool_size_kernel += size;
+}
+#else
+static inline void dma_atomic_pool_debugfs_init(void)
+{
+}
+static inline void dma_atomic_pool_size_add(gfp_t gfp, size_t size)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
 static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size,
			      gfp_t gfp)
 {
@@ -76,6 +114,7 @@ static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size,
 	if (ret)
 		goto encrypt_mapping;
 
+	dma_atomic_pool_size_add(gfp, pool_size);
 	return 0;
 
 encrypt_mapping:
@@ -160,6 +199,8 @@ static int __init dma_atomic_pool_init(void)
 		if (!atomic_pool_dma32)
 			ret = -ENOMEM;
 	}
+
+	dma_atomic_pool_debugfs_init();
 	return ret;
 }
 postcore_initcall(dma_atomic_pool_init);