Message-Id: <20211206145548.814595649@linuxfoundation.org>
Date: Mon, 6 Dec 2021 15:56:11 +0100
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
stable@...r.kernel.org, Juergen Gross <jgross@...e.com>,
Jan Beulich <jbeulich@...e.com>,
Roger Pau Monné <roger.pau@...rix.com>
Subject: [PATCH 4.4 27/52] xen/blkfront: don't take local copy of a request from the ring page

From: Juergen Gross <jgross@...e.com>

commit 8f5a695d99000fc3aa73934d7ced33cfc64dcdab upstream.

In order to avoid a malicious backend being able to influence the local
copy of a request, build the request locally first and then copy it to
the ring page, instead of doing it the other way round as is done today.
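
For illustration only (not part of the upstream patch), here is a minimal
stand-alone sketch of the pattern the change adopts: build the request in
frontend-private memory first and publish it to the shared ring with a
single final copy. All names below (demo_req, shared_ring, shadow,
queue_demo_req) are hypothetical and do not appear in xen-blkfront.

#include <stdint.h>

/* Hypothetical request layout, a stand-in for struct blkif_request. */
struct demo_req {
	uint64_t id;
	uint8_t  operation;
	uint64_t sector;
};

/* Shared ring slots: the backend can read and write this memory. */
static struct demo_req shared_ring[32];
/* Private shadow copies: only the frontend touches these. */
static struct demo_req shadow[32];

static void queue_demo_req(unsigned int slot, uint64_t id, uint64_t sector)
{
	struct demo_req *req = &shadow[id];	/* build in private memory */

	req->id = id;
	req->operation = 1;			/* e.g. a read request */
	req->sector = sector;

	/*
	 * Copy to the shared page only as the last step, so the backend
	 * can never influence the private copy that is later used for
	 * bookkeeping and for reissuing the request after recovery.
	 */
	shared_ring[slot] = *req;
}
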
Signed-off-by: Juergen Gross <jgross@...e.com>
Reviewed-by: Jan Beulich <jbeulich@...e.com>
Acked-by: Roger Pau Monné <roger.pau@...rix.com>
Link: https://lore.kernel.org/r/20210730103854.12681-3-jgross@suse.com
Signed-off-by: Juergen Gross <jgross@...e.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
---
 drivers/block/xen-blkfront.c |   38 ++++++++++++++++++++++++++------------
 1 file changed, 26 insertions(+), 12 deletions(-)

--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -456,16 +456,31 @@ static int blkif_ioctl(struct block_devi
 	return 0;
 }
 
+static unsigned long blkif_ring_get_request(struct blkfront_info *info,
+					    struct request *req,
+					    struct blkif_request **ring_req)
+{
+	unsigned long id;
+
+	*ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
+	info->ring.req_prod_pvt++;
+
+	id = get_id_from_freelist(info);
+	info->shadow[id].request = req;
+	info->shadow[id].req.u.rw.id = id;
+
+	return id;
+}
+
 static int blkif_queue_discard_req(struct request *req)
 {
 	struct blkfront_info *info = req->rq_disk->private_data;
-	struct blkif_request *ring_req;
+	struct blkif_request *ring_req, *final_ring_req;
 	unsigned long id;
 
 	/* Fill out a communications ring structure. */
-	ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
-	id = get_id_from_freelist(info);
-	info->shadow[id].request = req;
+	id = blkif_ring_get_request(info, req, &final_ring_req);
+	ring_req = &info->shadow[id].req;
 
 	ring_req->operation = BLKIF_OP_DISCARD;
 	ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
@@ -478,8 +493,8 @@ static int blkif_queue_discard_req(struc
 
 	info->ring.req_prod_pvt++;
 
-	/* Keep a private copy so we can reissue requests when recovering. */
-	info->shadow[id].req = *ring_req;
+	/* Copy the request to the ring page. */
+	*final_ring_req = *ring_req;
 
 	return 0;
 }
@@ -569,7 +584,7 @@ static void blkif_setup_rw_req_grant(uns
 static int blkif_queue_rw_req(struct request *req)
 {
 	struct blkfront_info *info = req->rq_disk->private_data;
-	struct blkif_request *ring_req;
+	struct blkif_request *ring_req, *final_ring_req;
 	unsigned long id;
 	int i;
 	struct setup_rw_req setup = {
@@ -613,9 +628,8 @@ static int blkif_queue_rw_req(struct req
 		new_persistent_gnts = 0;
 
 	/* Fill out a communications ring structure. */
-	ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
-	id = get_id_from_freelist(info);
-	info->shadow[id].request = req;
+	id = blkif_ring_get_request(info, req, &final_ring_req);
+	ring_req = &info->shadow[id].req;
 
 	BUG_ON(info->max_indirect_segments == 0 &&
 	       GREFS(req->nr_phys_segments) > BLKIF_MAX_SEGMENTS_PER_REQUEST);
@@ -696,8 +710,8 @@ static int blkif_queue_rw_req(struct req
 
 	info->ring.req_prod_pvt++;
 
-	/* Keep a private copy so we can reissue requests when recovering. */
-	info->shadow[id].req = *ring_req;
+	/* Copy request(s) to the ring page. */
+	*final_ring_req = *ring_req;
 
 	if (new_persistent_gnts)
 		gnttab_free_grant_references(setup.gref_head);
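
As a condensed view of the resulting caller pattern in both queueing paths
(a paraphrase of the hunks above, not additional upstream code; it relies
on the struct blkfront_info and helper definitions already present in
xen-blkfront.c):

	struct blkif_request *ring_req, *final_ring_req;
	unsigned long id;

	/*
	 * Reserve a ring slot and a shadow entry: final_ring_req points
	 * into the backend-visible ring page, ring_req at the private
	 * shadow copy.
	 */
	id = blkif_ring_get_request(info, req, &final_ring_req);
	ring_req = &info->shadow[id].req;

	/* ... fill *ring_req: operation, sectors, segments, ... */

	/* Expose the request to the backend only now, in one copy. */
	*final_ring_req = *ring_req;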