Convert the uses of PAGE_CACHE_xxx to use page_cache_xxx instead.

Signed-off-by: Christoph Lameter

---
 mm/filemap.c |   56 ++++++++++++++++++++++++++++----------------------------
 1 files changed, 28 insertions(+), 28 deletions(-)

diff --git a/mm/filemap.c b/mm/filemap.c
index 90b657b..1f3d136 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -303,8 +303,8 @@ int wait_on_page_writeback_range(struct address_space *mapping,
 int sync_page_range(struct inode *inode, struct address_space *mapping,
                        loff_t pos, loff_t count)
 {
-       pgoff_t start = pos >> PAGE_CACHE_SHIFT;
-       pgoff_t end = (pos + count - 1) >> PAGE_CACHE_SHIFT;
+       pgoff_t start = page_cache_index(mapping, pos);
+       pgoff_t end = page_cache_index(mapping, pos + count - 1);
        int ret;
 
        if (!mapping_cap_writeback_dirty(mapping) || !count)
@@ -335,8 +335,8 @@ EXPORT_SYMBOL(sync_page_range);
 int sync_page_range_nolock(struct inode *inode, struct address_space *mapping,
                           loff_t pos, loff_t count)
 {
-       pgoff_t start = pos >> PAGE_CACHE_SHIFT;
-       pgoff_t end = (pos + count - 1) >> PAGE_CACHE_SHIFT;
+       pgoff_t start = page_cache_index(mapping, pos);
+       pgoff_t end = page_cache_index(mapping, pos + count - 1);
        int ret;
 
        if (!mapping_cap_writeback_dirty(mapping) || !count)
@@ -365,7 +365,7 @@ int filemap_fdatawait(struct address_space *mapping)
                return 0;
 
        return wait_on_page_writeback_range(mapping, 0,
-                               (i_size - 1) >> PAGE_CACHE_SHIFT);
+                               page_cache_index(mapping, i_size - 1));
 }
 EXPORT_SYMBOL(filemap_fdatawait);
 
@@ -413,8 +413,8 @@ int filemap_write_and_wait_range(struct address_space *mapping,
                /* See comment of filemap_write_and_wait() */
                if (err != -EIO) {
                        int err2 = wait_on_page_writeback_range(mapping,
-                                               lstart >> PAGE_CACHE_SHIFT,
-                                               lend >> PAGE_CACHE_SHIFT);
+                                               page_cache_index(mapping, lstart),
+                                               page_cache_index(mapping, lend));
                        if (!err)
                                err = err2;
                }
@@ -877,12 +877,12 @@ void do_generic_mapping_read(struct address_space *mapping,
        struct file_ra_state ra = *_ra;
 
        cached_page = NULL;
-       index = *ppos >> PAGE_CACHE_SHIFT;
+       index = page_cache_index(mapping, *ppos);
        next_index = index;
        prev_index = ra.prev_index;
        prev_offset = ra.prev_offset;
-       last_index = (*ppos + desc->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
-       offset = *ppos & ~PAGE_CACHE_MASK;
+       last_index = page_cache_next(mapping, *ppos + desc->count);
+       offset = page_cache_offset(mapping, *ppos);
 
        for (;;) {
                struct page *page;
@@ -919,16 +919,16 @@ page_ok:
                 */
 
                isize = i_size_read(inode);
-               end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
+               end_index = page_cache_index(mapping, isize - 1);
                if (unlikely(!isize || index > end_index)) {
                        page_cache_release(page);
                        goto out;
                }
 
                /* nr is the maximum number of bytes to copy from this page */
-               nr = PAGE_CACHE_SIZE;
+               nr = page_cache_size(mapping);
                if (index == end_index) {
-                       nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
+                       nr = page_cache_offset(mapping, isize - 1) + 1;
                        if (nr <= offset) {
                                page_cache_release(page);
                                goto out;
@@ -963,8 +963,8 @@ page_ok:
                 */
                ret = actor(desc, page, offset, nr);
                offset += ret;
-               index += offset >> PAGE_CACHE_SHIFT;
-               offset &= ~PAGE_CACHE_MASK;
+               index += page_cache_index(mapping, offset);
+               offset = page_cache_offset(mapping, offset);
                prev_offset = offset;
                ra.prev_offset = offset;
 
@@ -1058,7 +1058,7 @@ out:
        *_ra = ra;
        _ra->prev_index = prev_index;
 
-       *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
+       *ppos = page_cache_pos(mapping, index, offset);
        if (cached_page)
                page_cache_release(cached_page);
        if (filp)
@@ -1240,8 +1240,8 @@ asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count)
        if (file) {
                if (file->f_mode & FMODE_READ) {
                        struct address_space *mapping = file->f_mapping;
-                       unsigned long start = offset >> PAGE_CACHE_SHIFT;
-                       unsigned long end = (offset + count - 1) >> PAGE_CACHE_SHIFT;
+                       unsigned long start = page_cache_index(mapping, offset);
+                       unsigned long end = page_cache_index(mapping, offset + count - 1);
                        unsigned long len = end - start + 1;
                        ret = do_readahead(mapping, file, start, len);
                }
@@ -1310,7 +1310,7 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        int did_readaround = 0;
        int ret = 0;
 
-       size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+       size = page_cache_next(mapping, i_size_read(inode));
        if (vmf->pgoff >= size)
                goto outside_data_content;
 
@@ -1385,7 +1385,7 @@ retry_find:
                goto page_not_uptodate;
 
        /* Must recheck i_size under page lock */
-       size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+       size = page_cache_next(mapping, i_size_read(inode));
        if (unlikely(vmf->pgoff >= size)) {
                unlock_page(page);
                goto outside_data_content;
@@ -1869,9 +1869,9 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
                unsigned long offset;
                size_t copied;
 
-               offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
-               index = pos >> PAGE_CACHE_SHIFT;
-               bytes = PAGE_CACHE_SIZE - offset;
+               offset = page_cache_offset(mapping, pos); /* Within page */
+               index = page_cache_index(mapping, pos);
+               bytes = page_cache_size(mapping) - offset;
 
                /* Limit the size of the copy to the caller's write size */
                bytes = min(bytes, count);
@@ -2082,8 +2082,8 @@ __generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov,
                if (err == 0) {
                        written = written_buffered;
                        invalidate_mapping_pages(mapping,
-                                                pos >> PAGE_CACHE_SHIFT,
-                                                endbyte >> PAGE_CACHE_SHIFT);
+                                                page_cache_index(mapping, pos),
+                                                page_cache_index(mapping, endbyte));
                } else {
                        /*
                         * We don't know how much we wrote, so just return
@@ -2170,7 +2170,7 @@ generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
         */
        if (rw == WRITE) {
                write_len = iov_length(iov, nr_segs);
-               end = (offset + write_len - 1) >> PAGE_CACHE_SHIFT;
+               end = page_cache_index(mapping, offset + write_len - 1);
                if (mapping_mapped(mapping))
                        unmap_mapping_range(mapping, offset, write_len, 0);
        }
@@ -2187,7 +2187,7 @@ generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
         */
        if (rw == WRITE && mapping->nrpages) {
                retval = invalidate_inode_pages2_range(mapping,
-                                       offset >> PAGE_CACHE_SHIFT, end);
+                                       page_cache_index(mapping, offset), end);
                if (retval)
                        goto out;
        }
@@ -2205,7 +2205,7 @@ generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
         */
        if (rw == WRITE && mapping->nrpages) {
                int err = invalidate_inode_pages2_range(mapping,
-                                       offset >> PAGE_CACHE_SHIFT, end);
+                                       page_cache_index(mapping, offset), end);
                if (err && retval >= 0)
                        retval = err;
        }
-- 
1.5.2.5
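
For readers not following the rest of the series: the helper names above
(page_cache_index, page_cache_offset, page_cache_next, page_cache_size,
page_cache_pos) are the ones this patch calls, but their actual definitions
live elsewhere in the patchset and are not shown here. The sketch below is
only an illustration of the shape such helpers could take, assuming every
mapping still uses the default page-cache size, in which case each one
collapses back to the PAGE_CACHE_* arithmetic it replaces.

/*
 * Illustrative sketch only -- NOT the patchset's definitions.  Assumes the
 * page cache still uses a single fixed order, so each helper reduces to the
 * PAGE_CACHE_* expression it replaces.  Passing the struct address_space *
 * is what allows the shift to later become per-mapping state instead of a
 * global constant.
 */
#include <linux/fs.h>
#include <linux/pagemap.h>

static inline unsigned int page_cache_shift(struct address_space *mapping)
{
	return PAGE_CACHE_SHIFT;	/* could become per-mapping state */
}

static inline unsigned int page_cache_size(struct address_space *mapping)
{
	return PAGE_CACHE_SIZE;
}

/* Byte position -> page cache index (was "pos >> PAGE_CACHE_SHIFT"). */
static inline pgoff_t page_cache_index(struct address_space *mapping,
					loff_t pos)
{
	return pos >> page_cache_shift(mapping);
}

/*
 * Round a byte position up to the next page cache index
 * (was "(pos + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT").
 */
static inline pgoff_t page_cache_next(struct address_space *mapping,
					loff_t pos)
{
	return page_cache_index(mapping, pos + page_cache_size(mapping) - 1);
}

/* Byte offset within a page cache page (was "pos & ~PAGE_CACHE_MASK"). */
static inline unsigned long page_cache_offset(struct address_space *mapping,
					loff_t pos)
{
	return pos & (page_cache_size(mapping) - 1);
}

/*
 * Recombine an index and an in-page offset into a byte position
 * (was "((loff_t)index << PAGE_CACHE_SHIFT) + offset").
 */
static inline loff_t page_cache_pos(struct address_space *mapping,
					pgoff_t index, unsigned long offset)
{
	return ((loff_t)index << page_cache_shift(mapping)) + offset;
}

The conversion in mm/filemap.c is therefore a no-op for the default case;
its value is that every place that used to hard-code the global constants
now asks the mapping, which is what makes a variable page cache size
possible later in the series.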