Message-ID: <CY4PR2101MB0722AE09A8742721EFDFE5F9CE4A0@CY4PR2101MB0722.namprd21.prod.outlook.com>
Date: Mon, 25 Jun 2018 20:27:55 +0000
From: Long Li <longli@...rosoft.com>
To: Tom Talpey <tom@...pey.com>, Steve French <sfrench@...ba.org>,
"linux-cifs@...r.kernel.org" <linux-cifs@...r.kernel.org>,
"samba-technical@...ts.samba.org" <samba-technical@...ts.samba.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
"linux-rdma@...r.kernel.org" <linux-rdma@...r.kernel.org>
Subject: RE: [Patch v2 03/15] CIFS: Use offset when reading pages
> From: Tom Talpey <tom@...pey.com>
> Sent: Saturday, June 23, 2018 6:59 PM
> To: Long Li <longli@...rosoft.com>; Steve French <sfrench@...ba.org>;
> linux-cifs@...r.kernel.org; samba-technical@...ts.samba.org;
> linux-kernel@...r.kernel.org; linux-rdma@...r.kernel.org
> Subject: Re: [Patch v2 03/15] CIFS: Use offset when reading pages
>
> On 5/30/2018 3:47 PM, Long Li wrote:
> > From: Long Li <longli@...rosoft.com>
> >
> > With offset defined in rdata, transport functions need to look at this
> > offset when reading data into the correct places in pages.
> >
> > Signed-off-by: Long Li <longli@...rosoft.com>
> > ---
> > fs/cifs/cifsproto.h | 4 +++-
> > fs/cifs/connect.c | 5 +++--
> > fs/cifs/file.c | 52 +++++++++++++++++++++++++++++++++++++---------------
> > fs/cifs/smb2ops.c | 2 +-
> > fs/cifs/smb2pdu.c | 1 +
> > 5 files changed, 45 insertions(+), 19 deletions(-)
> >
> > diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
> > index dc80f84..1f27d8e 100644
> > --- a/fs/cifs/cifsproto.h
> > +++ b/fs/cifs/cifsproto.h
> > @@ -203,7 +203,9 @@ extern void dequeue_mid(struct mid_q_entry *mid, bool malformed);
> > extern int cifs_read_from_socket(struct TCP_Server_Info *server, char *buf,
> > unsigned int to_read);
> > extern int cifs_read_page_from_socket(struct TCP_Server_Info *server,
> > - struct page *page, unsigned int to_read);
> > + struct page *page,
> > + unsigned int page_offset,
> > + unsigned int to_read);
> > extern int cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
> > struct cifs_sb_info *cifs_sb);
> > extern int cifs_match_super(struct super_block *, void *);
> > diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
> > index 83b0234..8501da0 100644
> > --- a/fs/cifs/connect.c
> > +++ b/fs/cifs/connect.c
> > @@ -594,10 +594,11 @@ cifs_read_from_socket(struct TCP_Server_Info *server, char *buf,
> >
> > int
> > cifs_read_page_from_socket(struct TCP_Server_Info *server, struct page *page,
> > - unsigned int to_read)
> > + unsigned int page_offset, unsigned int to_read)
> > {
> > struct msghdr smb_msg;
> > - struct bio_vec bv = {.bv_page = page, .bv_len = to_read};
> > + struct bio_vec bv = {
> > + .bv_page = page, .bv_len = to_read, .bv_offset = page_offset};
> > iov_iter_bvec(&smb_msg.msg_iter, READ | ITER_BVEC, &bv, 1, to_read);
> > return cifs_readv_from_socket(server, &smb_msg);
> > }
> > diff --git a/fs/cifs/file.c b/fs/cifs/file.c
> > index 1c98293..87eece6 100644
> > --- a/fs/cifs/file.c
> > +++ b/fs/cifs/file.c
> > @@ -3026,12 +3026,20 @@ uncached_fill_pages(struct TCP_Server_Info *server,
> > int result = 0;
> > unsigned int i;
> > unsigned int nr_pages = rdata->nr_pages;
> > + unsigned int page_offset = rdata->page_offset;
> >
> > rdata->got_bytes = 0;
> > rdata->tailsz = PAGE_SIZE;
> > for (i = 0; i < nr_pages; i++) {
> > struct page *page = rdata->pages[i];
> > size_t n;
> > + unsigned int segment_size = rdata->pagesz;
> > +
> > + if (i == 0)
> > + segment_size -= page_offset;
> > + else
> > + page_offset = 0;
> > +
> >
> > if (len <= 0) {
> > /* no need to hold page hostage */
> > @@ -3040,24 +3048,25 @@ uncached_fill_pages(struct TCP_Server_Info *server,
> > put_page(page);
> > continue;
> > }
> > +
> > n = len;
> > - if (len >= PAGE_SIZE) {
> > + if (len >= segment_size)
> > /* enough data to fill the page */
> > - n = PAGE_SIZE;
> > - len -= n;
> > - } else {
> > - zero_user(page, len, PAGE_SIZE - len);
> > + n = segment_size;
> > + else
> > rdata->tailsz = len;
> > - len = 0;
> > - }
> > + len -= n;
> > +
> > if (iter)
> > - result = copy_page_from_iter(page, 0, n, iter);
> > + result = copy_page_from_iter(
> > + page, page_offset, n, iter);
> > #ifdef CONFIG_CIFS_SMB_DIRECT
> > else if (rdata->mr)
> > result = n;
> > #endif
>
> I hadn't noticed this type of conditional code before. It's kind of ugly to
> modify the data handling code like this. Do you plan to break this out into an
> smbdirect-specific handler, like the cases above and below?
This is a good point. I will look for ways to improve this.
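One direction I'm considering is moving the per-page transport choice into a small helper so the fill-pages loops don't carry the #ifdef inline. A rough sketch only (the helper name is made up, not part of this patch):

static int
cifs_read_rdata_page(struct TCP_Server_Info *server,
		     struct cifs_readdata *rdata, struct iov_iter *iter,
		     struct page *page, unsigned int page_offset,
		     unsigned int n)
{
	/* copy from an existing iterator when one is supplied */
	if (iter)
		return copy_page_from_iter(page, page_offset, n, iter);
#ifdef CONFIG_CIFS_SMB_DIRECT
	/* SMB Direct: the RDMA read has already placed the data */
	if (rdata->mr)
		return n;
#endif
	/* regular socket transport */
	return cifs_read_page_from_socket(server, page, page_offset, n);
}

Both uncached_fill_pages() and readpages_fill_pages() could then call this once per page and keep the #ifdef out of the data handling loop.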
>
> > else
> > - result = cifs_read_page_from_socket(server, page, n);
> > + result = cifs_read_page_from_socket(
> > + server, page, page_offset, n);
> > if (result < 0)
> > break;
> >
> > @@ -3130,6 +3139,7 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
> > rdata->bytes = cur_len;
> > rdata->pid = pid;
> > rdata->pagesz = PAGE_SIZE;
> > + rdata->tailsz = PAGE_SIZE;
> > rdata->read_into_pages = cifs_uncached_read_into_pages;
> > rdata->copy_into_pages = cifs_uncached_copy_into_pages;
> > rdata->credits = credits;
> > @@ -3574,6 +3584,7 @@ readpages_fill_pages(struct TCP_Server_Info *server,
> > u64 eof;
> > pgoff_t eof_index;
> > unsigned int nr_pages = rdata->nr_pages;
> > + unsigned int page_offset = rdata->page_offset;
> >
> > /* determine the eof that the server (probably) has */
> > eof = CIFS_I(rdata->mapping->host)->server_eof;
> > @@ -3584,13 +3595,21 @@ readpages_fill_pages(struct TCP_Server_Info *server,
> > rdata->tailsz = PAGE_SIZE;
> > for (i = 0; i < nr_pages; i++) {
> > struct page *page = rdata->pages[i];
> > - size_t n = PAGE_SIZE;
> > + unsigned int to_read = rdata->pagesz;
> > + size_t n;
> > +
> > + if (i == 0)
> > + to_read -= page_offset;
> > + else
> > + page_offset = 0;
> > +
> > + n = to_read;
> >
> > - if (len >= PAGE_SIZE) {
> > - len -= PAGE_SIZE;
> > + if (len >= to_read) {
> > + len -= to_read;
> > } else if (len > 0) {
> > /* enough for partial page, fill and zero the rest */
> > - zero_user(page, len, PAGE_SIZE - len);
> > + zero_user(page, len + page_offset, to_read - len);
> > n = rdata->tailsz = len;
> > len = 0;
> > } else if (page->index > eof_index) {
> > @@ -3622,13 +3641,15 @@ readpages_fill_pages(struct TCP_Server_Info *server,
> > }
> >
> > if (iter)
> > - result = copy_page_from_iter(page, 0, n, iter);
> > + result = copy_page_from_iter(
> > + page, page_offset, n, iter);
> > #ifdef CONFIG_CIFS_SMB_DIRECT
> > else if (rdata->mr)
> > result = n;
> > #endif
>
> Ditto previous comment/question.
>
> > else
> > - result = cifs_read_page_from_socket(server, page, n);
> > + result = cifs_read_page_from_socket(
> > + server, page, page_offset, n);
> > if (result < 0)
> > break;
> >
> > @@ -3807,6 +3828,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
> > rdata->bytes = bytes;
> > rdata->pid = pid;
> > rdata->pagesz = PAGE_SIZE;
> > + rdata->tailsz = PAGE_SIZE;
> > rdata->read_into_pages = cifs_readpages_read_into_pages;
> > rdata->copy_into_pages = cifs_readpages_copy_into_pages;
> > rdata->credits = credits;
> > diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
> > index 7c0edd2..1fa1c29 100644
> > --- a/fs/cifs/smb2ops.c
> > +++ b/fs/cifs/smb2ops.c
> > @@ -2467,7 +2467,7 @@ read_data_into_pages(struct TCP_Server_Info *server, struct page **pages,
> > zero_user(page, len, PAGE_SIZE - len);
> > len = 0;
> > }
> > - length = cifs_read_page_from_socket(server, page, n);
> > + length = cifs_read_page_from_socket(server, page, 0, n);
> > if (length < 0)
> > return length;
> > server->total_read += length;
> > diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
> > index a02f6b6..f603fbe 100644
> > --- a/fs/cifs/smb2pdu.c
> > +++ b/fs/cifs/smb2pdu.c
> > @@ -2683,6 +2683,7 @@ smb2_readv_callback(struct mid_q_entry *mid)
> > struct smb_rqst rqst = { .rq_iov = rdata->iov,
> > .rq_nvec = 2,
> > .rq_pages = rdata->pages,
> > + .rq_offset = rdata->page_offset,
> > .rq_npages = rdata->nr_pages,
> > .rq_pagesz = rdata->pagesz,
> > .rq_tailsz = rdata->tailsz };
> >
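To spell out the intent of the page_offset handling in uncached_fill_pages() and readpages_fill_pages() above: only the first page in rdata->pages[] is partially filled, starting at rdata->page_offset; every later page starts at offset 0. A simplified sketch of the loop logic (not the literal patch code):

	unsigned int page_offset = rdata->page_offset;
	unsigned int i;

	for (i = 0; i < rdata->nr_pages; i++) {
		unsigned int segment_size = rdata->pagesz;
		size_t n;

		if (i == 0)
			segment_size -= page_offset;	/* partial first page */
		else
			page_offset = 0;		/* later pages start at 0 */

		n = min_t(size_t, len, segment_size);
		if (n < segment_size)
			rdata->tailsz = n;		/* short final segment */
		len -= n;

		/* read n bytes into rdata->pages[i] at page_offset */
	}

The rq_offset added in smb2_readv_callback() carries the same rdata->page_offset down to the transport layer.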