Date:	Fri, 11 Jun 2010 19:35:14 -0700
From:	Divyesh Shah <dpshah@...gle.com>
To:	jaxboe@...ionio.com
Cc:	peterz@...radead.org, mingo@...e.hu, piotr@...owicz.com,
	linux-kernel@...r.kernel.org, vgoyal@...hat.com
Subject: [PATCH 2/2] Use ktime_get() instead of sched_clock() for blkio cgroup
	stats.

This takes care of the preemptible-kernel issue and the unbounded TSC
drift problem: sched_clock() is per-CPU and its values are not
comparable across CPUs, while ktime_get() reads the system-wide
monotonic clock. The trade-off is that we may lose resolution in some
cases, since ktime_get() is bounded by the resolution of the underlying
clocksource. (A short sketch of the new pattern follows the diffstat
below.)

Signed-off-by: Divyesh Shah <dpshah@...gle.com>
---
 block/blk-cgroup.c     |   22 +++++++++++-----------
 include/linux/blkdev.h |    4 ++--
 2 files changed, 13 insertions(+), 13 deletions(-)
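
(Not part of the patch, a review aid: every hunk below applies the same
one-line substitution, so the pattern is worth stating once. A minimal
sketch, assuming only that ktime_get() and ktime_to_ns() are visible via
the usual time headers:)

	/*
	 * Sketch of the pattern this patch switches to: read the
	 * system-wide monotonic clock and convert it to nanoseconds.
	 * Unlike sched_clock(), the result is comparable across CPUs.
	 */
	u64 now = ktime_to_ns(ktime_get());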

diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index a680964..711766d 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -135,19 +135,19 @@ static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
 		return;
 	if (blkg == curr_blkg)
 		return;
-	blkg->stats.start_group_wait_time = sched_clock();
+	blkg->stats.start_group_wait_time = ktime_to_ns(ktime_get());
 	blkio_mark_blkg_waiting(&blkg->stats);
 }
 
 /* This should be called with the blkg->stats_lock held. */
 static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
 {
-	unsigned long long now;
+	u64 now;
 
 	if (!blkio_blkg_waiting(stats))
 		return;
 
-	now = sched_clock();
+	now = ktime_to_ns(ktime_get());
 	if (time_after64(now, stats->start_group_wait_time))
 		stats->group_wait_time += now - stats->start_group_wait_time;
 	blkio_clear_blkg_waiting(stats);
@@ -156,12 +156,12 @@ static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
 /* This should be called with the blkg->stats_lock held. */
 static void blkio_end_empty_time(struct blkio_group_stats *stats)
 {
-	unsigned long long now;
+	u64 now;
 
 	if (!blkio_blkg_empty(stats))
 		return;
 
-	now = sched_clock();
+	now = ktime_to_ns(ktime_get());
 	if (time_after64(now, stats->start_empty_time))
 		stats->empty_time += now - stats->start_empty_time;
 	blkio_clear_blkg_empty(stats);
@@ -173,7 +173,7 @@ void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
 
 	spin_lock_irqsave(&blkg->stats_lock, flags);
 	BUG_ON(blkio_blkg_idling(&blkg->stats));
-	blkg->stats.start_idle_time = sched_clock();
+	blkg->stats.start_idle_time = ktime_to_ns(ktime_get());
 	blkio_mark_blkg_idling(&blkg->stats);
 	spin_unlock_irqrestore(&blkg->stats_lock, flags);
 }
@@ -182,13 +182,13 @@ EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);
 void blkiocg_update_idle_time_stats(struct blkio_group *blkg)
 {
 	unsigned long flags;
-	unsigned long long now;
+	u64 now;
 	struct blkio_group_stats *stats;
 
 	spin_lock_irqsave(&blkg->stats_lock, flags);
 	stats = &blkg->stats;
 	if (blkio_blkg_idling(stats)) {
-		now = sched_clock();
+		now = ktime_to_ns(ktime_get());
 		if (time_after64(now, stats->start_idle_time))
 			stats->idle_time += now - stats->start_idle_time;
 		blkio_clear_blkg_idling(stats);
@@ -237,7 +237,7 @@ void blkiocg_set_start_empty_time(struct blkio_group *blkg)
 		return;
 	}
 
-	stats->start_empty_time = sched_clock();
+	stats->start_empty_time = ktime_to_ns(ktime_get());
 	blkio_mark_blkg_empty(stats);
 	spin_unlock_irqrestore(&blkg->stats_lock, flags);
 }
@@ -314,7 +314,7 @@ void blkiocg_update_completion_stats(struct blkio_group *blkg,
 {
 	struct blkio_group_stats *stats;
 	unsigned long flags;
-	unsigned long long now = sched_clock();
+	u64 now = ktime_to_ns(ktime_get());
 
 	spin_lock_irqsave(&blkg->stats_lock, flags);
 	stats = &blkg->stats;
@@ -464,7 +464,7 @@ blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
 	int i;
 #ifdef CONFIG_DEBUG_BLK_CGROUP
 	bool idling, waiting, empty;
-	unsigned long long now = sched_clock();
+	u64 now = ktime_to_ns(ktime_get());
 #endif
 
 	blkcg = cgroup_to_blkio_cgroup(cgroup);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index ebe788e..f174b34 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1218,12 +1218,12 @@ int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
  */
 static inline void set_start_time_ns(struct request *req)
 {
-	req->start_time_ns = sched_clock();
+	req->start_time_ns = ktime_to_ns(ktime_get());
 }
 
 static inline void set_io_start_time_ns(struct request *req)
 {
-	req->io_start_time_ns = sched_clock();
+	req->io_start_time_ns = ktime_to_ns(ktime_get());
 }
 
 static inline uint64_t rq_start_time_ns(struct request *req)
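
(Reviewer note, not part of the patch: with set_start_time_ns() and the
rq_start_time_ns() accessor above sharing the same monotonic-nanosecond
base, elapsed time is a plain subtraction. A hedged sketch;
rq_elapsed_ns() is a hypothetical name, not something this patch adds:)

	/*
	 * Sketch only: measuring how long a request has been outstanding.
	 * rq_elapsed_ns() is illustrative and not part of this patch.
	 */
	static inline u64 rq_elapsed_ns(struct request *req)
	{
		return ktime_to_ns(ktime_get()) - rq_start_time_ns(req);
	}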
