Message-Id: <20190724191743.617931993@linuxfoundation.org>
Date: Wed, 24 Jul 2019 21:20:04 +0200
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
stable@...r.kernel.org, Ronnie Sahlberg <lsahlber@...hat.com>,
Steve French <stfrench@...rosoft.com>
Subject: [PATCH 5.1 252/371] cifs: fix crash in smb2_compound_op()/smb2_set_next_command()

From: Ronnie Sahlberg <lsahlber@...hat.com>

commit 88a92c913cef09e70b1744a8877d177aa6cb2189 upstream.

RHBZ: 1722704

In low-memory situations, the various SMB2_*_init() functions can fail
to allocate a request PDU and thus leave the request iovec NULL.
If we don't check the return code for failure, we end up calling
smb2_set_next_command() with a NULL iovec, causing a crash when it tries
to dereference it.
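
To make the failure mode concrete, here is a minimal userspace sketch.
It is not the cifs code itself: kvec_stub, rqst_stub, fake_init() and
fake_set_next_command() are invented, simplified stand-ins for struct
kvec, struct smb_rqst, the SMB2_*_init() helpers and
smb2_set_next_command(). It only illustrates why both the return-code
check and the defensive NULL test matter:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct kvec_stub {
	void   *iov_base;
	size_t  iov_len;
};

struct rqst_stub {
	struct kvec_stub *rq_iov;	/* [0] holds the request PDU */
};

/* Stand-in for an SMB2_*_init() helper: under memory pressure the PDU
 * allocation fails, the iovec stays NULL and an error is returned. */
static int fake_init(struct rqst_stub *rqst, int simulate_enomem)
{
	if (simulate_enomem) {
		rqst->rq_iov[0].iov_base = NULL;
		rqst->rq_iov[0].iov_len = 0;
		return -ENOMEM;
	}
	rqst->rq_iov[0].iov_base = malloc(64);
	rqst->rq_iov[0].iov_len = 64;
	return rqst->rq_iov[0].iov_base ? 0 : -ENOMEM;
}

/* Stand-in for smb2_set_next_command(): dereferences iov_base of the
 * first iovec.  The NULL test mirrors the defensive check added by the
 * patch; without it a failed init leads straight to a NULL deref. */
static void fake_set_next_command(struct rqst_stub *rqst)
{
	unsigned char *hdr = rqst->rq_iov[0].iov_base;

	if (hdr == NULL) {
		fprintf(stderr, "hdr NULL, skipping\n");
		return;
	}
	hdr[0] = 0xfe;			/* pretend to patch the header */
}

int main(void)
{
	struct kvec_stub iov[1] = { { NULL, 0 } };
	struct rqst_stub rqst = { iov };
	int rc;

	/* The primary fix: check the init helper's return code and bail
	 * out (the driver's "goto finished") instead of chaining the
	 * compound request with a NULL iovec. */
	rc = fake_init(&rqst, 1);
	if (rc)
		return 1;

	fake_set_next_command(&rqst);
	free(iov[0].iov_base);
	return 0;
}
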
CC: Stable <stable@...r.kernel.org>
Signed-off-by: Ronnie Sahlberg <lsahlber@...hat.com>
Signed-off-by: Steve French <stfrench@...rosoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
---
 fs/cifs/smb2inode.c |   12 ++++++++++++
 fs/cifs/smb2ops.c   |   11 ++++++++++-
 2 files changed, 22 insertions(+), 1 deletion(-)

--- a/fs/cifs/smb2inode.c
+++ b/fs/cifs/smb2inode.c
@@ -120,6 +120,8 @@ smb2_compound_op(const unsigned int xid,
SMB2_O_INFO_FILE, 0,
sizeof(struct smb2_file_all_info) +
PATH_MAX * 2, 0, NULL);
+ if (rc)
+ goto finished;
smb2_set_next_command(tcon, &rqst[num_rqst]);
smb2_set_related(&rqst[num_rqst++]);
trace_smb3_query_info_compound_enter(xid, ses->Suid, tcon->tid,
@@ -147,6 +149,8 @@ smb2_compound_op(const unsigned int xid,
COMPOUND_FID, current->tgid,
FILE_DISPOSITION_INFORMATION,
SMB2_O_INFO_FILE, 0, data, size);
+ if (rc)
+ goto finished;
smb2_set_next_command(tcon, &rqst[num_rqst]);
smb2_set_related(&rqst[num_rqst++]);
trace_smb3_rmdir_enter(xid, ses->Suid, tcon->tid, full_path);
@@ -163,6 +167,8 @@ smb2_compound_op(const unsigned int xid,
COMPOUND_FID, current->tgid,
FILE_END_OF_FILE_INFORMATION,
SMB2_O_INFO_FILE, 0, data, size);
+ if (rc)
+ goto finished;
smb2_set_next_command(tcon, &rqst[num_rqst]);
smb2_set_related(&rqst[num_rqst++]);
trace_smb3_set_eof_enter(xid, ses->Suid, tcon->tid, full_path);
@@ -180,6 +186,8 @@ smb2_compound_op(const unsigned int xid,
COMPOUND_FID, current->tgid,
FILE_BASIC_INFORMATION,
SMB2_O_INFO_FILE, 0, data, size);
+ if (rc)
+ goto finished;
smb2_set_next_command(tcon, &rqst[num_rqst]);
smb2_set_related(&rqst[num_rqst++]);
trace_smb3_set_info_compound_enter(xid, ses->Suid, tcon->tid,
@@ -206,6 +214,8 @@ smb2_compound_op(const unsigned int xid,
COMPOUND_FID, current->tgid,
FILE_RENAME_INFORMATION,
SMB2_O_INFO_FILE, 0, data, size);
+ if (rc)
+ goto finished;
smb2_set_next_command(tcon, &rqst[num_rqst]);
smb2_set_related(&rqst[num_rqst++]);
trace_smb3_rename_enter(xid, ses->Suid, tcon->tid, full_path);
@@ -231,6 +241,8 @@ smb2_compound_op(const unsigned int xid,
COMPOUND_FID, current->tgid,
FILE_LINK_INFORMATION,
SMB2_O_INFO_FILE, 0, data, size);
+ if (rc)
+ goto finished;
smb2_set_next_command(tcon, &rqst[num_rqst]);
smb2_set_related(&rqst[num_rqst++]);
trace_smb3_hardlink_enter(xid, ses->Suid, tcon->tid, full_path);
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -2004,6 +2004,10 @@ smb2_set_related(struct smb_rqst *rqst)
struct smb2_sync_hdr *shdr;
shdr = (struct smb2_sync_hdr *)(rqst->rq_iov[0].iov_base);
+ if (shdr == NULL) {
+ cifs_dbg(FYI, "shdr NULL in smb2_set_related\n");
+ return;
+ }
shdr->Flags |= SMB2_FLAGS_RELATED_OPERATIONS;
}
@@ -2018,6 +2022,12 @@ smb2_set_next_command(struct cifs_tcon *
unsigned long len = smb_rqst_len(server, rqst);
int i, num_padding;
+ shdr = (struct smb2_sync_hdr *)(rqst->rq_iov[0].iov_base);
+ if (shdr == NULL) {
+ cifs_dbg(FYI, "shdr NULL in smb2_set_next_command\n");
+ return;
+ }
+
/* SMB headers in a compound are 8 byte aligned. */
/* No padding needed */
@@ -2057,7 +2067,6 @@ smb2_set_next_command(struct cifs_tcon *
}
finished:
- shdr = (struct smb2_sync_hdr *)(rqst->rq_iov[0].iov_base);
shdr->NextCommand = cpu_to_le32(len);
}