Message-Id: <1239557566-3411-2-git-send-email-tytso@mit.edu>
Date:	Sun, 12 Apr 2009 13:32:46 -0400
From:	Theodore Ts'o <tytso@....edu>
To:	Ingo Molnar <mingo@...e.hu>
Cc:	Linux Kernel Developers List <linux-kernel@...r.kernel.org>,
	Theodore Ts'o <tytso@....edu>
Subject: [PATCH 2/2] ext4: Convert instrumentation from markers to tracepoints

Signed-off-by: "Theodore Ts'o" <tytso@....edu>
---
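Note for reviewers unfamiliar with the TRACE_EVENT() infrastructure: the
conversion follows the same pattern throughout.  Each trace_mark() call,
whose format string was parsed at trace time, is replaced by a typed
tracepoint that is declared once in include/trace/ext4_event_types.h,
instantiated with DEFINE_TRACE() in the file that fires it, and invoked
as trace_<name>() with typed arguments.  A condensed sketch of the
ext4_sync_fs conversion (illustrative only, not part of the patch to be
applied):

	/* before: dynamic marker, format string parsed at runtime */
	trace_mark(ext4_sync_fs, "dev %s wait %d", sb->s_id, wait);

	/* after: event layout declared once in ext4_event_types.h */
	TRACE_EVENT(ext4_sync_fs,
		TP_PROTO(struct super_block *sb, int wait),
		TP_ARGS(sb, wait),
		TP_STRUCT__entry(
			__array(char, devname, 32)  /* s_id copied into the event */
			__field(int, wait)
		),
		TP_fast_assign(
			memcpy(__entry->devname, sb->s_id, 32);
			__entry->wait = wait;
		),
		TP_printk("dev %s wait %d", __entry->devname, __entry->wait)
	);

	/* instantiated once in fs/ext4/super.c ... */
	DEFINE_TRACE(ext4_sync_fs);

	/* ... and fired from ext4_sync_fs() with typed arguments */
	trace_ext4_sync_fs(sb, wait);

Once merged, the new events should be selectable through the ftrace
event tracer in the usual way (e.g. "ext4:ext4_sync_fs" via
/sys/kernel/debug/tracing/set_event), assuming the event tracer is
enabled in the kernel config.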
 fs/ext4/ext4.h                    |   22 +--
 fs/ext4/ext4_i.h                  |   12 -
 fs/ext4/fsync.c                   |    8 +-
 fs/ext4/ialloc.c                  |   17 +-
 fs/ext4/inode.c                   |   79 ++---
 fs/ext4/mballoc.c                 |   84 ++---
 fs/ext4/mballoc.h                 |   68 +----
 fs/ext4/super.c                   |    6 +-
 include/linux/ext4_trace_types.h  |  108 ++++++
 include/trace/ext4.h              |    8 +
 include/trace/ext4_event_types.h  |  690 +++++++++++++++++++++++++++++++++++++
 include/trace/trace_event_types.h |    1 +
 include/trace/trace_events.h      |    1 +
 13 files changed, 886 insertions(+), 218 deletions(-)
 create mode 100644 include/linux/ext4_trace_types.h
 create mode 100644 include/trace/ext4.h
 create mode 100644 include/trace/ext4_event_types.h

diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index d0f15ef..791da89 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -21,6 +21,7 @@
 #include <linux/magic.h>
 #include <linux/jbd2.h>
 #include <linux/quota.h>
+#include <linux/ext4_trace_types.h>
 #include "ext4_i.h"
 
 /*
@@ -70,27 +71,6 @@
 #define EXT4_MB_DELALLOC_RESERVED      1024
 
 
-struct ext4_allocation_request {
-	/* target inode for block we're allocating */
-	struct inode *inode;
-	/* logical block in target inode */
-	ext4_lblk_t logical;
-	/* phys. target (a hint) */
-	ext4_fsblk_t goal;
-	/* the closest logical allocated block to the left */
-	ext4_lblk_t lleft;
-	/* phys. block for ^^^ */
-	ext4_fsblk_t pleft;
-	/* the closest logical allocated block to the right */
-	ext4_lblk_t lright;
-	/* phys. block for ^^^ */
-	ext4_fsblk_t pright;
-	/* how many blocks we want to allocate */
-	unsigned int len;
-	/* flags. see above EXT4_MB_HINT_* */
-	unsigned int flags;
-};
-
 /*
  * Special inodes numbers
  */
diff --git a/fs/ext4/ext4_i.h b/fs/ext4/ext4_i.h
index 4ce2187..48bf4bd 100644
--- a/fs/ext4/ext4_i.h
+++ b/fs/ext4/ext4_i.h
@@ -21,18 +21,6 @@
 #include <linux/seqlock.h>
 #include <linux/mutex.h>
 
-/* data type for block offset of block group */
-typedef int ext4_grpblk_t;
-
-/* data type for filesystem-wide blocks number */
-typedef unsigned long long ext4_fsblk_t;
-
-/* data type for file logical block number */
-typedef __u32 ext4_lblk_t;
-
-/* data type for block group number */
-typedef unsigned int ext4_group_t;
-
 /*
  * storage for cached extent
  */
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
index 5afe437..783efcb 100644
--- a/fs/ext4/fsync.c
+++ b/fs/ext4/fsync.c
@@ -28,10 +28,12 @@
 #include <linux/writeback.h>
 #include <linux/jbd2.h>
 #include <linux/blkdev.h>
-#include <linux/marker.h>
+#include <trace/ext4.h>
 #include "ext4.h"
 #include "ext4_jbd2.h"
 
+DEFINE_TRACE(ext4_sync_file);
+
 /*
  * akpm: A new design for ext4_sync_file().
  *
@@ -52,9 +54,7 @@ int ext4_sync_file(struct file *file, struct dentry *dentry, int datasync)
 
 	J_ASSERT(ext4_journal_current_handle() == NULL);
 
-	trace_mark(ext4_sync_file, "dev %s datasync %d ino %ld parent %ld",
-		   inode->i_sb->s_id, datasync, inode->i_ino,
-		   dentry->d_parent->d_inode->i_ino);
+	trace_ext4_sync_file(file, dentry, datasync);
 
 	/*
 	 * data=writeback:
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 47b84e8..80288de 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -22,6 +22,7 @@
 #include <linux/random.h>
 #include <linux/bitops.h>
 #include <linux/blkdev.h>
+#include <trace/ext4.h>
 #include <asm/byteorder.h>
 #include "ext4.h"
 #include "ext4_jbd2.h"
@@ -29,6 +30,10 @@
 #include "acl.h"
 #include "group.h"
 
+DEFINE_TRACE(ext4_free_inode);
+DEFINE_TRACE(ext4_request_inode);
+DEFINE_TRACE(ext4_allocate_inode);
+
 /*
  * ialloc.c contains the inodes allocation and deallocation routines
  */
@@ -209,11 +214,7 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
 
 	ino = inode->i_ino;
 	ext4_debug("freeing inode %lu\n", ino);
-	trace_mark(ext4_free_inode,
-		   "dev %s ino %lu mode %d uid %lu gid %lu bocks %llu",
-		   sb->s_id, inode->i_ino, inode->i_mode,
-		   (unsigned long) inode->i_uid, (unsigned long) inode->i_gid,
-		   (unsigned long long) inode->i_blocks);
+	trace_ext4_free_inode(inode);
 
 	/*
 	 * Note: we must free any quota before locking the superblock,
@@ -817,8 +818,7 @@ struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, int mode)
 		return ERR_PTR(-EPERM);
 
 	sb = dir->i_sb;
-	trace_mark(ext4_request_inode, "dev %s dir %lu mode %d", sb->s_id,
-		   dir->i_ino, mode);
+	trace_ext4_request_inode(dir, mode);
 	inode = new_inode(sb);
 	if (!inode)
 		return ERR_PTR(-ENOMEM);
@@ -1050,8 +1050,7 @@ got:
 	}
 
 	ext4_debug("allocating inode %lu\n", inode->i_ino);
-	trace_mark(ext4_allocate_inode, "dev %s ino %lu dir %lu mode %d",
-		   sb->s_id, inode->i_ino, dir->i_ino, mode);
+	trace_ext4_allocate_inode(inode, dir, mode);
 	goto really_out;
 fail:
 	ext4_std_error(sb, err);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index c6bd6ce..3866f44 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -37,11 +37,24 @@
 #include <linux/namei.h>
 #include <linux/uio.h>
 #include <linux/bio.h>
+#include <trace/ext4.h>
 #include "ext4_jbd2.h"
 #include "xattr.h"
 #include "acl.h"
 #include "ext4_extents.h"
 
+DEFINE_TRACE(ext4_write_begin);
+DEFINE_TRACE(ext4_ordered_write_end);
+DEFINE_TRACE(ext4_writeback_write_end);
+DEFINE_TRACE(ext4_journalled_write_end);
+DEFINE_TRACE(ext4_da_writepage);
+DEFINE_TRACE(ext4_da_writepages);
+DEFINE_TRACE(ext4_da_writepages_result);
+DEFINE_TRACE(ext4_da_write_begin);
+DEFINE_TRACE(ext4_da_write_end);
+DEFINE_TRACE(ext4_normal_writepage);
+DEFINE_TRACE(ext4_journalled_writepage);
+
 #define MPAGE_DA_EXTENT_TAIL 0x01
 
 static inline int ext4_begin_ordered_truncate(struct inode *inode,
@@ -1433,10 +1446,7 @@ static int ext4_write_begin(struct file *file, struct address_space *mapping,
  	pgoff_t index;
 	unsigned from, to;
 
-	trace_mark(ext4_write_begin,
-		   "dev %s ino %lu pos %llu len %u flags %u",
-		   inode->i_sb->s_id, inode->i_ino,
-		   (unsigned long long) pos, len, flags);
+	trace_ext4_write_begin(inode, pos, len, flags);
  	index = pos >> PAGE_CACHE_SHIFT;
 	from = pos & (PAGE_CACHE_SIZE - 1);
 	to = from + len;
@@ -1512,10 +1522,7 @@ static int ext4_ordered_write_end(struct file *file,
 	struct inode *inode = mapping->host;
 	int ret = 0, ret2;
 
-	trace_mark(ext4_ordered_write_end,
-		   "dev %s ino %lu pos %llu len %u copied %u",
-		   inode->i_sb->s_id, inode->i_ino,
-		   (unsigned long long) pos, len, copied);
+	trace_ext4_ordered_write_end(inode, pos, len, copied);
 	ret = ext4_jbd2_file_inode(handle, inode);
 
 	if (ret == 0) {
@@ -1554,10 +1561,7 @@ static int ext4_writeback_write_end(struct file *file,
 	int ret = 0, ret2;
 	loff_t new_i_size;
 
-	trace_mark(ext4_writeback_write_end,
-		   "dev %s ino %lu pos %llu len %u copied %u",
-		   inode->i_sb->s_id, inode->i_ino,
-		   (unsigned long long) pos, len, copied);
+	trace_ext4_writeback_write_end(inode, pos, len, copied);
 	new_i_size = pos + copied;
 	if (new_i_size > EXT4_I(inode)->i_disksize) {
 		ext4_update_i_disksize(inode, new_i_size);
@@ -1593,10 +1597,7 @@ static int ext4_journalled_write_end(struct file *file,
 	unsigned from, to;
 	loff_t new_i_size;
 
-	trace_mark(ext4_journalled_write_end,
-		   "dev %s ino %lu pos %llu len %u copied %u",
-		   inode->i_sb->s_id, inode->i_ino,
-		   (unsigned long long) pos, len, copied);
+	trace_ext4_journalled_write_end(inode, pos, len, copied);
 	from = pos & (PAGE_CACHE_SIZE - 1);
 	to = from + len;
 
@@ -2372,9 +2373,7 @@ static int ext4_da_writepage(struct page *page,
 	struct buffer_head *page_bufs;
 	struct inode *inode = page->mapping->host;
 
-	trace_mark(ext4_da_writepage,
-		   "dev %s ino %lu page_index %lu",
-		   inode->i_sb->s_id, inode->i_ino, page->index);
+	trace_ext4_da_writepage(inode, page);
 	size = i_size_read(inode);
 	if (page->index == size >> PAGE_CACHE_SHIFT)
 		len = size & ~PAGE_CACHE_MASK;
@@ -2486,19 +2485,7 @@ static int ext4_da_writepages(struct address_space *mapping,
 	int needed_blocks, ret = 0, nr_to_writebump = 0;
 	struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
 
-	trace_mark(ext4_da_writepages,
-		   "dev %s ino %lu nr_t_write %ld "
-		   "pages_skipped %ld range_start %llu "
-		   "range_end %llu nonblocking %d "
-		   "for_kupdate %d for_reclaim %d "
-		   "for_writepages %d range_cyclic %d",
-		   inode->i_sb->s_id, inode->i_ino,
-		   wbc->nr_to_write, wbc->pages_skipped,
-		   (unsigned long long) wbc->range_start,
-		   (unsigned long long) wbc->range_end,
-		   wbc->nonblocking, wbc->for_kupdate,
-		   wbc->for_reclaim, wbc->for_writepages,
-		   wbc->range_cyclic);
+	trace_ext4_da_writepages(inode, wbc);
 
 	/*
 	 * No pages to write? This is mainly a kludge to avoid starting
@@ -2664,14 +2651,7 @@ out_writepages:
 	if (!no_nrwrite_index_update)
 		wbc->no_nrwrite_index_update = 0;
 	wbc->nr_to_write -= nr_to_writebump;
-	trace_mark(ext4_da_writepage_result,
-		   "dev %s ino %lu ret %d pages_written %d "
-		   "pages_skipped %ld congestion %d "
-		   "more_io %d no_nrwrite_index_update %d",
-		   inode->i_sb->s_id, inode->i_ino, ret,
-		   pages_written, wbc->pages_skipped,
-		   wbc->encountered_congestion, wbc->more_io,
-		   wbc->no_nrwrite_index_update);
+	trace_ext4_da_writepages_result(inode, wbc, ret, pages_written);
 	return ret;
 }
 
@@ -2723,11 +2703,7 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
 					len, flags, pagep, fsdata);
 	}
 	*fsdata = (void *)0;
-
-	trace_mark(ext4_da_write_begin,
-		   "dev %s ino %lu pos %llu len %u flags %u",
-		   inode->i_sb->s_id, inode->i_ino,
-		   (unsigned long long) pos, len, flags);
+	trace_ext4_da_write_begin(inode, pos, len, flags);
 retry:
 	/*
 	 * With delayed allocation, we don't log the i_disksize update
@@ -2820,10 +2796,7 @@ static int ext4_da_write_end(struct file *file,
 		}
 	}
 
-	trace_mark(ext4_da_write_end,
-		   "dev %s ino %lu pos %llu len %u copied %u",
-		   inode->i_sb->s_id, inode->i_ino,
-		   (unsigned long long) pos, len, copied);
+	trace_ext4_da_write_end(inode, pos, len, copied);
 	start = pos & (PAGE_CACHE_SIZE - 1);
 	end = start + copied - 1;
 
@@ -3076,9 +3049,7 @@ static int ext4_normal_writepage(struct page *page,
 	loff_t size = i_size_read(inode);
 	loff_t len;
 
-	trace_mark(ext4_normal_writepage,
-		   "dev %s ino %lu page_index %lu",
-		   inode->i_sb->s_id, inode->i_ino, page->index);
+	trace_ext4_normal_writepage(inode, page);
 	J_ASSERT(PageLocked(page));
 	if (page->index == size >> PAGE_CACHE_SHIFT)
 		len = size & ~PAGE_CACHE_MASK;
@@ -3164,9 +3135,7 @@ static int ext4_journalled_writepage(struct page *page,
 	loff_t size = i_size_read(inode);
 	loff_t len;
 
-	trace_mark(ext4_journalled_writepage,
-		   "dev %s ino %lu page_index %lu",
-		   inode->i_sb->s_id, inode->i_ino, page->index);
+	trace_ext4_journalled_writepage(inode, page);
 	J_ASSERT(PageLocked(page));
 	if (page->index == size >> PAGE_CACHE_SHIFT)
 		len = size & ~PAGE_CACHE_MASK;
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index f871677..73a4866 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -342,6 +342,17 @@ static void release_blocks_on_commit(journal_t *journal, transaction_t *txn);
 
 
 
+DEFINE_TRACE(ext4_discard_blocks);
+DEFINE_TRACE(ext4_mb_new_inode_pa);
+DEFINE_TRACE(ext4_mb_new_group_pa);
+DEFINE_TRACE(ext4_mb_release_inode_pa);
+DEFINE_TRACE(ext4_mb_release_group_pa);
+DEFINE_TRACE(ext4_discard_preallocations);
+DEFINE_TRACE(ext4_mb_discard_preallocations);
+DEFINE_TRACE(ext4_request_blocks);
+DEFINE_TRACE(ext4_allocate_blocks);
+DEFINE_TRACE(ext4_free_blocks);
+
 static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
 {
 #if BITS_PER_LONG == 64
@@ -2882,9 +2893,8 @@ static void release_blocks_on_commit(journal_t *journal, transaction_t *txn)
 		discard_block = (ext4_fsblk_t) entry->group * EXT4_BLOCKS_PER_GROUP(sb)
 			+ entry->start_blk
 			+ le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
-		trace_mark(ext4_discard_blocks, "dev %s blk %llu count %u",
-			   sb->s_id, (unsigned long long) discard_block,
-			   entry->count);
+		trace_ext4_discard_blocks(sb, (unsigned long long)discard_block,
+					  entry->count);
 		sb_issue_discard(sb, discard_block, entry->count);
 
 		kmem_cache_free(ext4_free_ext_cachep, entry);
@@ -3658,10 +3668,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
 
 	mb_debug("new inode pa %p: %llu/%u for %u\n", pa,
 			pa->pa_pstart, pa->pa_len, pa->pa_lstart);
-	trace_mark(ext4_mb_new_inode_pa,
-		   "dev %s ino %lu pstart %llu len %u lstart %u",
-		   sb->s_id, ac->ac_inode->i_ino,
-		   pa->pa_pstart, pa->pa_len, pa->pa_lstart);
+	trace_ext4_mb_new_inode_pa(ac, pa);
 
 	ext4_mb_use_inode_pa(ac, pa);
 	atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
@@ -3720,9 +3727,8 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
 	pa->pa_type = MB_GROUP_PA;
 
 	mb_debug("new group pa %p: %llu/%u for %u\n", pa,
-		 pa->pa_pstart, pa->pa_len, pa->pa_lstart);
-	trace_mark(ext4_mb_new_group_pa, "dev %s pstart %llu len %u lstart %u",
-		   sb->s_id, pa->pa_pstart, pa->pa_len, pa->pa_lstart);
+			pa->pa_pstart, pa->pa_len, pa->pa_lstart);
+	trace_ext4_mb_new_group_pa(ac, pa);
 
 	ext4_mb_use_group_pa(ac, pa);
 	atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
@@ -3812,10 +3818,8 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
 			ext4_mb_store_history(ac);
 		}
 
-		trace_mark(ext4_mb_release_inode_pa,
-			   "dev %s ino %lu block %llu count %u",
-			   sb->s_id, pa->pa_inode->i_ino, grp_blk_start + bit,
-			   next - bit);
+		trace_ext4_mb_release_inode_pa(ac, pa, grp_blk_start + bit,
+					       next - bit);
 		mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
 		bit = next + 1;
 	}
@@ -3849,8 +3853,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
 	if (ac)
 		ac->ac_op = EXT4_MB_HISTORY_DISCARD;
 
-	trace_mark(ext4_mb_release_group_pa, "dev %s pstart %llu len %d",
-		   sb->s_id, pa->pa_pstart, pa->pa_len);
+	trace_ext4_mb_release_group_pa(ac, pa);
 	BUG_ON(pa->pa_deleted == 0);
 	ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
 	BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
@@ -3918,6 +3921,8 @@ ext4_mb_discard_group_preallocations(struct super_block *sb,
 
 	INIT_LIST_HEAD(&list);
 	ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
+	if (ac)
+		ac->ac_sb = sb;
 repeat:
 	ext4_lock_group(sb, group);
 	list_for_each_entry_safe(pa, tmp,
@@ -4016,12 +4021,15 @@ void ext4_discard_preallocations(struct inode *inode)
 	}
 
 	mb_debug("discard preallocation for inode %lu\n", inode->i_ino);
-	trace_mark(ext4_discard_preallocations, "dev %s ino %lu", sb->s_id,
-		   inode->i_ino);
+	trace_ext4_discard_preallocations(inode);
 
 	INIT_LIST_HEAD(&list);
 
 	ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
+	if (ac) {
+		ac->ac_sb = sb;
+		ac->ac_inode = inode;
+	}
 repeat:
 	/* first, collect all pa's in the inode */
 	spin_lock(&ei->i_prealloc_lock);
@@ -4304,6 +4312,8 @@ ext4_mb_discard_lg_preallocations(struct super_block *sb,
 
 	INIT_LIST_HEAD(&discard_list);
 	ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
+	if (ac)
+		ac->ac_sb = sb;
 
 	spin_lock(&lg->lg_prealloc_lock);
 	list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
@@ -4473,8 +4483,7 @@ static int ext4_mb_discard_preallocations(struct super_block *sb, int needed)
 	int ret;
 	int freed = 0;
 
-	trace_mark(ext4_mb_discard_preallocations, "dev %s needed %d",
-		   sb->s_id, needed);
+	trace_ext4_mb_discard_preallocations(sb, needed);
 	for (i = 0; i < EXT4_SB(sb)->s_groups_count && needed > 0; i++) {
 		ret = ext4_mb_discard_group_preallocations(sb, i, needed);
 		freed += ret;
@@ -4503,17 +4512,7 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
 	sb = ar->inode->i_sb;
 	sbi = EXT4_SB(sb);
 
-	trace_mark(ext4_request_blocks, "dev %s flags %u len %u ino %lu "
-		   "lblk %llu goal %llu lleft %llu lright %llu "
-		   "pleft %llu pright %llu ",
-		   sb->s_id, ar->flags, ar->len,
-		   ar->inode ? ar->inode->i_ino : 0,
-		   (unsigned long long) ar->logical,
-		   (unsigned long long) ar->goal,
-		   (unsigned long long) ar->lleft,
-		   (unsigned long long) ar->lright,
-		   (unsigned long long) ar->pleft,
-		   (unsigned long long) ar->pright);
+	trace_ext4_request_blocks(ar);
 
 	/*
 	 * For delayed allocation, we could skip the ENOSPC and
@@ -4549,7 +4548,10 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
 	}
 
 	ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
-	if (!ac) {
+	if (ac) {
+		ac->ac_sb = sb;
+		ac->ac_inode = ar->inode;
+	} else {
 		ar->len = 0;
 		*errp = -ENOMEM;
 		goto out1;
@@ -4622,18 +4624,7 @@ out3:
 						reserv_blks);
 	}
 
-	trace_mark(ext4_allocate_blocks,
-		   "dev %s block %llu flags %u len %u ino %lu "
-		   "logical %llu goal %llu lleft %llu lright %llu "
-		   "pleft %llu pright %llu ",
-		   sb->s_id, (unsigned long long) block,
-		   ar->flags, ar->len, ar->inode ? ar->inode->i_ino : 0,
-		   (unsigned long long) ar->logical,
-		   (unsigned long long) ar->goal,
-		   (unsigned long long) ar->lleft,
-		   (unsigned long long) ar->lright,
-		   (unsigned long long) ar->pleft,
-		   (unsigned long long) ar->pright);
+	trace_ext4_allocate_blocks(ar, (unsigned long long)block);
 
 	return block;
 }
@@ -4768,10 +4759,7 @@ void ext4_mb_free_blocks(handle_t *handle, struct inode *inode,
 	}
 
 	ext4_debug("freeing block %lu\n", block);
-	trace_mark(ext4_free_blocks,
-		   "dev %s block %llu count %lu metadata %d ino %lu",
-		   sb->s_id, (unsigned long long) block, count, metadata,
-		   inode ? inode->i_ino : 0);
+	trace_ext4_free_blocks(inode, block, count, metadata);
 
 	ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
 	if (ac) {
diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h
index dd9e6cd..61829a2 100644
--- a/fs/ext4/mballoc.h
+++ b/fs/ext4/mballoc.h
@@ -19,8 +19,8 @@
 #include <linux/seq_file.h>
 #include <linux/version.h>
 #include <linux/blkdev.h>
-#include <linux/marker.h>
 #include <linux/mutex.h>
+#include <trace/ext4.h>
 #include "ext4_jbd2.h"
 #include "ext4.h"
 #include "group.h"
@@ -118,37 +118,11 @@ struct ext4_free_data {
 	tid_t	t_tid;
 };
 
-struct ext4_prealloc_space {
-	struct list_head	pa_inode_list;
-	struct list_head	pa_group_list;
-	union {
-		struct list_head pa_tmp_list;
-		struct rcu_head	pa_rcu;
-	} u;
-	spinlock_t		pa_lock;
-	atomic_t		pa_count;
-	unsigned		pa_deleted;
-	ext4_fsblk_t		pa_pstart;	/* phys. block */
-	ext4_lblk_t		pa_lstart;	/* log. block */
-	unsigned short		pa_len;		/* len of preallocated chunk */
-	unsigned short		pa_free;	/* how many blocks are free */
-	unsigned short		pa_type;	/* pa type. inode or group */
-	spinlock_t		*pa_obj_lock;
-	struct inode		*pa_inode;	/* hack, for history only */
-};
-
 enum {
 	MB_INODE_PA = 0,
 	MB_GROUP_PA = 1
 };
 
-struct ext4_free_extent {
-	ext4_lblk_t fe_logical;
-	ext4_grpblk_t fe_start;
-	ext4_group_t fe_group;
-	int fe_len;
-};
-
 /*
  * Locality group:
  *   we try to group all related changes together
@@ -167,46 +141,6 @@ struct ext4_locality_group {
 	spinlock_t		lg_prealloc_lock;
 };
 
-struct ext4_allocation_context {
-	struct inode *ac_inode;
-	struct super_block *ac_sb;
-
-	/* original request */
-	struct ext4_free_extent ac_o_ex;
-
-	/* goal request (after normalization) */
-	struct ext4_free_extent ac_g_ex;
-
-	/* the best found extent */
-	struct ext4_free_extent ac_b_ex;
-
-	/* copy of the bext found extent taken before preallocation efforts */
-	struct ext4_free_extent ac_f_ex;
-
-	/* number of iterations done. we have to track to limit searching */
-	unsigned long ac_ex_scanned;
-	__u16 ac_groups_scanned;
-	__u16 ac_found;
-	__u16 ac_tail;
-	__u16 ac_buddy;
-	__u16 ac_flags;		/* allocation hints */
-	__u8 ac_status;
-	__u8 ac_criteria;
-	__u8 ac_repeats;
-	__u8 ac_2order;		/* if request is to allocate 2^N blocks and
-				 * N > 0, the field stores N, otherwise 0 */
-	__u8 ac_op;		/* operation, for history only */
-	struct page *ac_bitmap_page;
-	struct page *ac_buddy_page;
-	/*
-	 * pointer to the held semaphore upon successful
-	 * block allocation
-	 */
-	struct rw_semaphore *alloc_semp;
-	struct ext4_prealloc_space *ac_pa;
-	struct ext4_locality_group *ac_lg;
-};
-
 #define AC_STATUS_CONTINUE	1
 #define AC_STATUS_FOUND		2
 #define AC_STATUS_BREAK		3
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 2958f4e..9b77dd1 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -36,9 +36,9 @@
 #include <linux/seq_file.h>
 #include <linux/proc_fs.h>
 #include <linux/ctype.h>
-#include <linux/marker.h>
 #include <linux/log2.h>
 #include <linux/crc16.h>
+#include <trace/ext4.h>
 #include <asm/uaccess.h>
 
 #include "ext4.h"
@@ -48,6 +48,8 @@
 #include "namei.h"
 #include "group.h"
 
+DEFINE_TRACE(ext4_sync_fs);
+
 struct proc_dir_entry *ext4_proc_root;
 static struct kset *ext4_kset;
 
@@ -3287,7 +3289,7 @@ static int ext4_sync_fs(struct super_block *sb, int wait)
 	int ret = 0;
 	tid_t target;
 
-	trace_mark(ext4_sync_fs, "dev %s wait %d", sb->s_id, wait);
+	trace_ext4_sync_fs(sb, wait);
 	sb->s_dirt = 0;
 	if (EXT4_SB(sb)->s_journal) {
 		if (jbd2_journal_start_commit(EXT4_SB(sb)->s_journal,
diff --git a/include/linux/ext4_trace_types.h b/include/linux/ext4_trace_types.h
new file mode 100644
index 0000000..229a2dd
--- /dev/null
+++ b/include/linux/ext4_trace_types.h
@@ -0,0 +1,108 @@
+/*
+ * ext4_trace_types.h -- these data structures are needed by the
+ *	tracing infrastructure
+ */
+
+#ifndef _EXT4_TRACE_TYPES_H
+#define _EXT4_TRACE_TYPES_H
+
+/* data type for block offset of block group */
+typedef int ext4_grpblk_t;
+
+/* data type for filesystem-wide blocks number */
+typedef unsigned long long ext4_fsblk_t;
+
+/* data type for file logical block number */
+typedef __u32 ext4_lblk_t;
+
+/* data type for block group number */
+typedef unsigned int ext4_group_t;
+
+struct ext4_allocation_request {
+	/* target inode for block we're allocating */
+	struct inode *inode;
+	/* logical block in target inode */
+	ext4_lblk_t logical;
+	/* phys. target (a hint) */
+	ext4_fsblk_t goal;
+	/* the closest logical allocated block to the left */
+	ext4_lblk_t lleft;
+	/* phys. block for ^^^ */
+	ext4_fsblk_t pleft;
+	/* the closest logical allocated block to the right */
+	ext4_lblk_t lright;
+	/* phys. block for ^^^ */
+	ext4_fsblk_t pright;
+	/* how many blocks we want to allocate */
+	unsigned int len;
+	/* flags. see above EXT4_MB_HINT_* */
+	unsigned int flags;
+};
+
+struct ext4_free_extent {
+	ext4_lblk_t fe_logical;
+	ext4_grpblk_t fe_start;
+	ext4_group_t fe_group;
+	int fe_len;
+};
+
+struct ext4_allocation_context {
+	struct inode *ac_inode;
+	struct super_block *ac_sb;
+
+	/* original request */
+	struct ext4_free_extent ac_o_ex;
+
+	/* goal request (after normalization) */
+	struct ext4_free_extent ac_g_ex;
+
+	/* the best found extent */
+	struct ext4_free_extent ac_b_ex;
+
+	/* copy of the bext found extent taken before preallocation efforts */
+	struct ext4_free_extent ac_f_ex;
+
+	/* number of iterations done. we have to track to limit searching */
+	unsigned long ac_ex_scanned;
+	__u16 ac_groups_scanned;
+	__u16 ac_found;
+	__u16 ac_tail;
+	__u16 ac_buddy;
+	__u16 ac_flags;		/* allocation hints */
+	__u8 ac_status;
+	__u8 ac_criteria;
+	__u8 ac_repeats;
+	__u8 ac_2order;		/* if request is to allocate 2^N blocks and
+				 * N > 0, the field stores N, otherwise 0 */
+	__u8 ac_op;		/* operation, for history only */
+	struct page *ac_bitmap_page;
+	struct page *ac_buddy_page;
+	/*
+	 * pointer to the held semaphore upon successful
+	 * block allocation
+	 */
+	struct rw_semaphore *alloc_semp;
+	struct ext4_prealloc_space *ac_pa;
+	struct ext4_locality_group *ac_lg;
+};
+
+struct ext4_prealloc_space {
+	struct list_head	pa_inode_list;
+	struct list_head	pa_group_list;
+	union {
+		struct list_head pa_tmp_list;
+		struct rcu_head	pa_rcu;
+	} u;
+	spinlock_t		pa_lock;
+	atomic_t		pa_count;
+	unsigned		pa_deleted;
+	ext4_fsblk_t		pa_pstart;	/* phys. block */
+	ext4_lblk_t		pa_lstart;	/* log. block */
+	unsigned short		pa_len;		/* len of preallocated chunk */
+	unsigned short		pa_free;	/* how many blocks are free */
+	unsigned short		pa_type;	/* pa type. inode or group */
+	spinlock_t		*pa_obj_lock;
+	struct inode		*pa_inode;	/* hack, for history only */
+};
+
+#endif /* _EXT4_TRACE_TYPES_H */
diff --git a/include/trace/ext4.h b/include/trace/ext4.h
new file mode 100644
index 0000000..e8d647a
--- /dev/null
+++ b/include/trace/ext4.h
@@ -0,0 +1,8 @@
+#ifndef _TRACE_EXT4_H
+#define _TRACE_EXT4_H
+
+#include <linux/tracepoint.h>
+
+#include <trace/ext4_event_types.h>
+
+#endif
diff --git a/include/trace/ext4_event_types.h b/include/trace/ext4_event_types.h
new file mode 100644
index 0000000..da9e5f4
--- /dev/null
+++ b/include/trace/ext4_event_types.h
@@ -0,0 +1,690 @@
+
+/* use <trace/ext4.h> instead */
+#ifndef TRACE_EVENT
+# error Do not include this file directly.
+# error Unless you know what you are doing.
+#endif
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ext4
+
+#include <linux/writeback.h>
+#include <linux/ext4_trace_types.h>
+
+TRACE_EVENT(ext4_free_inode,
+	TP_PROTO(struct inode *inode),
+
+	TP_ARGS(inode),
+
+	TP_STRUCT__entry(
+		__array(	char,	devname, 32		)
+		__field(	ino_t,	ino			)
+		__field(	umode_t, mode			)
+		__field(	uid_t,	uid			)
+		__field(	gid_t,	gid			)
+		__field(	blkcnt_t, blocks		)
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->devname, inode->i_sb->s_id, 32);
+		__entry->ino	= inode->i_ino;
+		__entry->mode	= inode->i_mode;
+		__entry->uid	= inode->i_uid;
+		__entry->gid	= inode->i_gid;
+		__entry->blocks	= inode->i_blocks;
+	),
+
+	TP_printk("dev %s ino %lu mode %d uid %u gid %u blocks %llu",
+		  __entry->devname, __entry->ino, __entry->mode,
+		  __entry->uid, __entry->gid, __entry->blocks)
+);
+
+TRACE_EVENT(ext4_request_inode,
+	TP_PROTO(struct inode *dir, int mode),
+
+	TP_ARGS(dir, mode),
+
+	TP_STRUCT__entry(
+		__array(	char,	devname, 32		)
+		__field(	ino_t,	dir			)
+		__field(	umode_t, mode			)
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->devname, dir->i_sb->s_id, 32);
+		__entry->dir	= dir->i_ino;
+		__entry->mode	= mode;
+	),
+
+	TP_printk("dev %s dir %lu mode %d",
+		  __entry->devname, __entry->dir, __entry->mode)
+);
+
+TRACE_EVENT(ext4_allocate_inode,
+	TP_PROTO(struct inode *inode, struct inode *dir, int mode),
+
+	TP_ARGS(inode, dir, mode),
+
+	TP_STRUCT__entry(
+		__array(	char,	devname, 32		)
+		__field(	ino_t,	ino			)
+		__field(	ino_t,	dir			)
+		__field(	umode_t, mode			)
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->devname, dir->i_sb->s_id, 32);
+		__entry->ino	= inode->i_ino;
+		__entry->dir	= dir->i_ino;
+		__entry->mode	= mode;
+	),
+
+	TP_printk("dev %s ino %lu dir %lu mode %d",
+		  __entry->devname, __entry->ino, __entry->dir, __entry->mode)
+);
+
+TRACE_EVENT(ext4_write_begin,
+
+	TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
+		 unsigned int flags),
+
+	TP_ARGS(inode, pos, len, flags),
+
+	TP_STRUCT__entry(
+		__array(	char,	devname, 32		)
+		__field(	ino_t,	ino			)
+		__field(	loff_t,	pos			)
+		__field(	unsigned int, len		)
+		__field(	unsigned int, flags		)
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->devname, inode->i_sb->s_id, 32);
+		__entry->ino	= inode->i_ino;
+		__entry->pos	= pos;
+		__entry->len	= len;
+		__entry->flags	= flags;
+	),
+
+	TP_printk("dev %s ino %lu pos %llu len %u flags %u",
+		  __entry->devname, __entry->ino, __entry->pos, __entry->len,
+		  __entry->flags)
+);
+
+TRACE_EVENT(ext4_ordered_write_end,
+	TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
+			unsigned int copied),
+
+	TP_ARGS(inode, pos, len, copied),
+
+	TP_STRUCT__entry(
+		__array(	char,	devname, 32		)
+		__field(	ino_t,	ino			)
+		__field(	loff_t,	pos			)
+		__field(	unsigned int, len		)
+		__field(	unsigned int, copied		)
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->devname, inode->i_sb->s_id, 32);
+		__entry->ino	= inode->i_ino;
+		__entry->pos	= pos;
+		__entry->len	= len;
+		__entry->copied	= copied;
+	),
+
+	TP_printk("dev %s ino %lu pos %llu len %u copied %u",
+		  __entry->devname, __entry->ino, __entry->pos, __entry->len,
+		  __entry->copied)
+);
+
+TRACE_EVENT(ext4_writeback_write_end,
+	TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
+		 unsigned int copied),
+
+	TP_ARGS(inode, pos, len, copied),
+
+	TP_STRUCT__entry(
+		__array(	char,	devname, 32		)
+		__field(	ino_t,	ino			)
+		__field(	loff_t,	pos			)
+		__field(	unsigned int, len		)
+		__field(	unsigned int, copied		)
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->devname, inode->i_sb->s_id, 32);
+		__entry->ino	= inode->i_ino;
+		__entry->pos	= pos;
+		__entry->len	= len;
+		__entry->copied	= copied;
+	),
+
+	TP_printk("dev %s ino %lu pos %llu len %u copied %u",
+		  __entry->devname, __entry->ino, __entry->pos, __entry->len,
+		  __entry->copied)
+);
+
+TRACE_EVENT(ext4_journalled_write_end,
+	TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
+		 unsigned int copied),
+	TP_ARGS(inode, pos, len, copied),
+
+	TP_STRUCT__entry(
+		__array(	char,	devname, 32		)
+		__field(	ino_t,	ino			)
+		__field(	loff_t,	pos			)
+		__field(	unsigned int, len		)
+		__field(	unsigned int, copied		)
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->devname, inode->i_sb->s_id, 32);
+		__entry->ino	= inode->i_ino;
+		__entry->pos	= pos;
+		__entry->len	= len;
+		__entry->copied	= copied;
+	),
+
+	TP_printk("dev %s ino %lu pos %llu len %u copied %u",
+		  __entry->devname, __entry->ino, __entry->pos, __entry->len,
+		  __entry->copied)
+);
+
+TRACE_EVENT(ext4_da_writepage,
+	TP_PROTO(struct inode *inode, struct page *page),
+
+	TP_ARGS(inode, page),
+
+	TP_STRUCT__entry(
+		__array(	char,	devname, 32		)
+		__field(	ino_t,	ino			)
+		__field(	pgoff_t, index			)
+
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->devname, inode->i_sb->s_id, 32);
+		__entry->ino	= inode->i_ino;
+		__entry->index	= page->index;
+	),
+
+	TP_printk("dev %s ino %lu page_index %lu",
+		  __entry->devname, __entry->ino, __entry->index)
+);
+
+TRACE_EVENT(ext4_da_writepages,
+	TP_PROTO(struct inode *inode, struct writeback_control *wbc),
+
+	TP_ARGS(inode, wbc),
+
+	TP_STRUCT__entry(
+		__array(	char,	devname, 32		)
+		__field(	ino_t,	ino			)
+		__field(	long,	nr_to_write		)
+		__field(	long,	pages_skipped		)
+		__field(	loff_t,	range_start		)
+		__field(	loff_t,	range_end		)
+		__field(	char,	nonblocking		)
+		__field(	char,	for_kupdate		)
+		__field(	char,	for_reclaim		)
+		__field(	char,	for_writepages		)
+		__field(	char,	range_cyclic		)
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->devname, inode->i_sb->s_id, 32);
+		__entry->ino	= inode->i_ino;
+		__entry->nr_to_write	= wbc->nr_to_write;
+		__entry->pages_skipped	= wbc->pages_skipped;
+		__entry->range_start	= wbc->range_start;
+		__entry->range_end	= wbc->range_end;
+		__entry->nonblocking	= wbc->nonblocking;
+		__entry->for_kupdate	= wbc->for_kupdate;
+		__entry->for_reclaim	= wbc->for_reclaim;
+		__entry->for_writepages	= wbc->for_writepages;
+		__entry->range_cyclic	= wbc->range_cyclic;
+	),
+
+	TP_printk("dev %s ino %lu nr_t_write %ld pages_skipped %ld range_start %llu range_end %llu nonblocking %d for_kupdate %d for_reclaim %d for_writepages %d range_cyclic %d",
+		  __entry->devname, __entry->ino, __entry->nr_to_write,
+		  __entry->pages_skipped, __entry->range_start,
+		  __entry->range_end, __entry->nonblocking,
+		  __entry->for_kupdate, __entry->for_reclaim,
+		  __entry->for_writepages, __entry->range_cyclic)
+);
+
+TRACE_EVENT(ext4_da_writepages_result,
+	TP_PROTO(struct inode *inode, struct writeback_control *wbc,
+			int ret, int pages_written),
+
+	TP_ARGS(inode, wbc, ret, pages_written),
+
+	TP_STRUCT__entry(
+		__array(	char,	devname, 32		)
+		__field(	ino_t,	ino			)
+		__field(	int,	ret			)
+		__field(	int,	pages_written		)
+		__field(	long,	pages_skipped		)
+		__field(	char,	encountered_congestion	)
+		__field(	char,	more_io			)	
+		__field(	char,	no_nrwrite_index_update )
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->devname, inode->i_sb->s_id, 32);
+		__entry->ino		= inode->i_ino;
+		__entry->ret		= ret;
+		__entry->pages_written	= pages_written;
+		__entry->pages_skipped	= wbc->pages_skipped;
+		__entry->encountered_congestion	= wbc->encountered_congestion;
+		__entry->more_io	= wbc->more_io;
+		__entry->no_nrwrite_index_update = wbc->no_nrwrite_index_update;
+	),
+
+	TP_printk("dev %s ino %lu ret %d pages_written %d pages_skipped %ld congestion %d more_io %d no_nrwrite_index_update %d",
+		  __entry->devname, __entry->ino, __entry->ret,
+		  __entry->pages_written, __entry->pages_skipped,
+		  __entry->encountered_congestion, __entry->more_io,
+		  __entry->no_nrwrite_index_update)
+);
+
+TRACE_EVENT(ext4_da_write_begin,
+	TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
+			unsigned int flags),
+
+	TP_ARGS(inode, pos, len, flags),
+
+	TP_STRUCT__entry(
+		__array(	char,	devname, 32		)
+		__field(	ino_t,	ino			)
+		__field(	loff_t,	pos			)
+		__field(	unsigned int, len		)
+		__field(	unsigned int, flags		)
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->devname, inode->i_sb->s_id, 32);
+		__entry->ino	= inode->i_ino;
+		__entry->pos	= pos;
+		__entry->len	= len;
+		__entry->flags	= flags;
+	),
+
+	TP_printk("dev %s ino %lu pos %llu len %u flags %u",
+		  __entry->devname, __entry->ino, __entry->pos, __entry->len,
+		  __entry->flags)
+);
+
+TRACE_EVENT(ext4_da_write_end,
+	TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
+			unsigned int copied),
+
+	TP_ARGS(inode, pos, len, copied),
+
+	TP_STRUCT__entry(
+		__array(	char,	devname, 32		)
+		__field(	ino_t,	ino			)
+		__field(	loff_t,	pos			)
+		__field(	unsigned int, len		)
+		__field(	unsigned int, copied		)
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->devname, inode->i_sb->s_id, 32);
+		__entry->ino	= inode->i_ino;
+		__entry->pos	= pos;
+		__entry->len	= len;
+		__entry->copied	= copied;
+	),
+
+	TP_printk("dev %s ino %lu pos %llu len %u copied %u",
+		  __entry->devname, __entry->ino, __entry->pos, __entry->len,
+		  __entry->copied)
+);
+
+TRACE_EVENT(ext4_normal_writepage,
+	TP_PROTO(struct inode *inode, struct page *page),
+
+	TP_ARGS(inode, page),
+
+	TP_STRUCT__entry(
+		__array(	char,	devname, 32		)
+		__field(	ino_t,	ino			)
+		__field(	pgoff_t, index			)
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->devname, inode->i_sb->s_id, 32);
+		__entry->ino	= inode->i_ino;
+		__entry->index	= page->index;
+	),
+
+	TP_printk("dev %s ino %lu page_index %lu",
+		  __entry->devname, __entry->ino, __entry->index)
+);
+
+TRACE_EVENT(ext4_journalled_writepage,
+	TP_PROTO(struct inode *inode, struct page *page),
+
+	TP_ARGS(inode, page),
+
+	TP_STRUCT__entry(
+		__array(	char,	devname, 32		)
+		__field(	ino_t,	ino			)
+		__field(	pgoff_t, index			)
+
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->devname, inode->i_sb->s_id, 32);
+		__entry->ino	= inode->i_ino;
+		__entry->index	= page->index;
+	),
+
+	TP_printk("dev %s ino %lu page_index %lu",
+		  __entry->devname, __entry->ino, __entry->index)
+);
+
+TRACE_EVENT(ext4_discard_blocks,
+	TP_PROTO(struct super_block *sb, unsigned long long blk,
+			unsigned long long count),
+
+	TP_ARGS(sb, blk, count),
+
+	TP_STRUCT__entry(
+		__array(	char,	devname, 32		)
+		__field(	__u64,	blk			)
+		__field(	__u64,	count			)
+
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->devname, sb->s_id, 32);
+		__entry->blk	= blk;
+		__entry->count	= count;
+	),
+
+	TP_printk("dev %s blk %llu count %llu",
+		  __entry->devname, __entry->blk, __entry->count)
+);
+
+TRACE_EVENT(ext4_mb_new_inode_pa,
+	TP_PROTO(struct ext4_allocation_context *ac,
+		 struct ext4_prealloc_space *pa),
+
+	TP_ARGS(ac, pa),
+
+	TP_STRUCT__entry(
+		__array(	char,	devname, 32		)
+		__field(	ino_t,	ino			)
+		__field(	__u64,	pa_pstart		)
+		__field(	__u32,	pa_len			)
+		__field(	__u64,	pa_lstart		)
+
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->devname, ac->ac_sb->s_id, 32);
+		__entry->ino	= ac->ac_inode->i_ino;
+		__entry->pa_pstart = pa->pa_pstart;
+		__entry->pa_len	= pa->pa_len;
+		__entry->pa_lstart = pa->pa_lstart;
+	),
+
+	TP_printk("dev %s ino %lu pstart %llu len %u lstart %llu",
+		  __entry->devname, __entry->ino, __entry->pa_pstart,
+		  __entry->pa_len, __entry->pa_lstart)
+);
+
+TRACE_EVENT(ext4_mb_new_group_pa,
+	TP_PROTO(struct ext4_allocation_context *ac,
+		 struct ext4_prealloc_space *pa),
+
+	TP_ARGS(ac, pa),
+
+	TP_STRUCT__entry(
+		__array(	char,	devname, 32		)
+		__field(	ino_t,	ino			)
+		__field(	__u64,	pa_pstart		)
+		__field(	__u32,	pa_len			)
+		__field(	__u64,	pa_lstart		)
+
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->devname, ac->ac_sb->s_id, 32);
+		__entry->ino	= ac->ac_inode->i_ino;
+		__entry->pa_pstart = pa->pa_pstart;
+		__entry->pa_len	= pa->pa_len;
+		__entry->pa_lstart = pa->pa_lstart;
+	),
+
+	TP_printk("dev %s ino %lu pstart %llu len %u lstart %llu",
+		  __entry->devname, __entry->ino, __entry->pa_pstart,
+		  __entry->pa_len, __entry->pa_lstart)
+);
+
+TRACE_EVENT(ext4_mb_release_inode_pa,
+	TP_PROTO(struct ext4_allocation_context *ac,
+		 struct ext4_prealloc_space *pa,
+		 unsigned long long block, unsigned int count),
+
+	TP_ARGS(ac, pa, block, count),
+
+	TP_STRUCT__entry(
+		__array(	char,	devname, 32		)
+		__field(	ino_t,	ino			)
+		__field(	__u64,	block			)
+		__field(	__u32,	count			)
+
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->devname, ac->ac_sb->s_id, 32);
+		__entry->ino	= ac->ac_inode->i_ino;
+		__entry->block = block;
+		__entry->count	= count;
+	),
+
+	TP_printk("dev %s ino %lu block %llu count %u",
+		  __entry->devname, __entry->ino, __entry->block,
+		  __entry->count)
+);
+
+TRACE_EVENT(ext4_mb_release_group_pa,
+	TP_PROTO(struct ext4_allocation_context *ac,
+		 struct ext4_prealloc_space *pa),
+
+	TP_ARGS(ac, pa),
+
+	TP_STRUCT__entry(
+		__array(	char,	devname, 32		)
+		__field(	ino_t,	ino			)
+		__field(	__u64,	pa_pstart		)
+		__field(	__u32,	pa_len			)
+
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->devname, ac->ac_sb->s_id, 32);
+		__entry->ino	= ac->ac_inode->i_ino;
+		__entry->pa_pstart = pa->pa_pstart;
+		__entry->pa_len	= pa->pa_len;
+	),
+
+	TP_printk("dev %s pstart %llu len %u",
+		  __entry->devname, __entry->pa_pstart, __entry->pa_len)
+);
+
+TRACE_EVENT(ext4_discard_preallocations,
+	TP_PROTO(struct inode *inode),
+
+	TP_ARGS(inode),
+
+	TP_STRUCT__entry(
+		__array(	char,	devname, 32		)
+		__field(	ino_t,	ino			)
+
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->devname, inode->i_sb->s_id, 32);
+		__entry->ino	= inode->i_ino;
+	),
+
+	TP_printk("dev %s ino %lu",
+		  __entry->devname, __entry->ino)
+);
+
+TRACE_EVENT(ext4_mb_discard_preallocations,
+	TP_PROTO(struct super_block *sb, int needed),
+
+	TP_ARGS(sb, needed),
+
+	TP_STRUCT__entry(
+		__array(	char,	devname, 32		)
+		__field(	int,	needed			)
+
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->devname, sb->s_id, 32);
+		__entry->needed	= needed;
+	),
+
+	TP_printk("dev %s needed %d",
+		  __entry->devname, __entry->needed)
+);
+
+TRACE_EVENT(ext4_request_blocks,
+	TP_PROTO(struct ext4_allocation_request *ar),
+
+	TP_ARGS(ar),
+
+	TP_STRUCT__entry(
+		__array(	char,	devname, 32		)
+		__field(	ino_t,	ino			)
+		__field(	struct ext4_allocation_request, ar )
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->devname, ar->inode->i_sb->s_id, 32);
+		__entry->ino = ar->inode->i_ino;
+		__entry->ar = *ar;
+	),
+
+	TP_printk("dev %s ino %lu flags %u len %u lblk %llu goal %llu lleft %llu lright %llu pleft %llu pright %llu ",
+		  __entry->devname, __entry->ino, __entry->ar.flags,
+		  __entry->ar.len,
+		  (unsigned long long) __entry->ar.logical,
+		  (unsigned long long) __entry->ar.goal,
+		  (unsigned long long) __entry->ar.lleft,
+		  (unsigned long long) __entry->ar.lright,
+		  (unsigned long long) __entry->ar.pleft,
+		  (unsigned long long) __entry->ar.pright)
+);
+
+TRACE_EVENT(ext4_allocate_blocks,
+	TP_PROTO(struct ext4_allocation_request *ar, unsigned long long block),
+
+	TP_ARGS(ar, block),
+
+	TP_STRUCT__entry(
+		__array(	char,	devname, 32		)
+		__field(	ino_t,	ino			)
+		__field(	__u64,	block			)
+		__field(	struct ext4_allocation_request, ar )
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->devname, ar->inode->i_sb->s_id, 32);
+		__entry->ino = ar->inode->i_ino;
+		__entry->block = block;
+		__entry->ar = *ar;
+	),
+
+	TP_printk("dev %s ino %lu flags %u len %u block %llu lblk %llu goal %llu lleft %llu lright %llu pleft %llu pright %llu ",
+		  __entry->devname, __entry->ino, __entry->ar.flags,
+		  __entry->ar.len, __entry->block,
+		  (unsigned long long) __entry->ar.logical,
+		  (unsigned long long) __entry->ar.goal,
+		  (unsigned long long) __entry->ar.lleft,
+		  (unsigned long long) __entry->ar.lright,
+		  (unsigned long long) __entry->ar.pleft,
+		  (unsigned long long) __entry->ar.pright)
+);
+
+TRACE_EVENT(ext4_free_blocks,
+	TP_PROTO(struct inode *inode, __u64 block, unsigned long count,
+			int metadata),
+
+	TP_ARGS(inode, block, count, metadata),
+
+	TP_STRUCT__entry(
+		__array(	char,	devname, 32		)
+		__field(	ino_t,	ino			)
+		__field(	__u64,	block			)
+		__field(	unsigned long,	count		)
+		__field(	int,	metadata		)
+
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->devname, inode->i_sb->s_id, 32);
+		__entry->ino = inode->i_ino;
+		__entry->block	= block;
+		__entry->count	= count;
+		__entry->metadata	= metadata;
+	),
+
+	TP_printk("dev %s ino %lu block %llu count %lu metadata %d",
+		  __entry->devname, __entry->ino, __entry->block,
+		  __entry->count, __entry->metadata)
+);
+
+TRACE_EVENT(ext4_sync_file,
+	TP_PROTO(struct file *file, struct dentry *dentry, int datasync),
+
+	TP_ARGS(file, dentry, datasync),
+
+	TP_STRUCT__entry(
+		__array(	char,	devname, 32		)
+		__field(	ino_t,	ino			)
+		__field(	ino_t,	parent			)
+		__field(	int,	datasync		)
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->devname, dentry->d_inode->i_sb->s_id, 32);
+		__entry->ino	= dentry->d_inode->i_ino;
+		__entry->datasync = datasync;
+		__entry->parent	= dentry->d_parent->d_inode->i_ino;
+	),
+
+	TP_printk("dev %s ino %ld parent %ld datasync %d ",
+		  __entry->devname, __entry->ino, __entry->parent,
+		  __entry->datasync)
+);
+
+TRACE_EVENT(ext4_sync_fs,
+	TP_PROTO(struct super_block *sb, int wait),
+
+	TP_ARGS(sb, wait),
+
+	TP_STRUCT__entry(
+		__array(	char,	devname, 32		)
+		__field(	int,	wait			)
+
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->devname, sb->s_id, 32);
+		__entry->wait	= wait;
+	),
+
+	TP_printk("dev %s wait %d", __entry->devname, __entry->wait)
+);
+
+
+#undef TRACE_SYSTEM
diff --git a/include/trace/trace_event_types.h b/include/trace/trace_event_types.h
index bd26f4d..26d7aee 100644
--- a/include/trace/trace_event_types.h
+++ b/include/trace/trace_event_types.h
@@ -4,3 +4,4 @@
 #include <trace/irq_event_types.h>
 #include <trace/lockdep_event_types.h>
 #include <trace/jbd2_event_types.h>
+#include <trace/ext4_event_types.h>
diff --git a/include/trace/trace_events.h b/include/trace/trace_events.h
index 2a9b970..ad8f6eb 100644
--- a/include/trace/trace_events.h
+++ b/include/trace/trace_events.h
@@ -4,3 +4,4 @@
 #include <trace/irq.h>
 #include <trace/lockdep.h>
 #include <trace/jbd2.h>
+#include <trace/ext4.h>
-- 
1.5.6.3

