Message-ID: <20170704054057.3b0de003@canb.auug.org.au>
Date:   Tue, 4 Jul 2017 05:40:57 +1000
From:   Stephen Rothwell <sfr@...b.auug.org.au>
To:     Martin Schwidefsky <schwidefsky@...ibm.com>
Cc:     Linus Torvalds <torvalds@...ux-foundation.org>,
        linux-kernel <linux-kernel@...r.kernel.org>,
        linux-s390 <linux-s390@...r.kernel.org>,
        Heiko Carstens <heiko.carstens@...ibm.com>
Subject: Re: [GIT PULL] s390 patches for 4.13 merge window

Hi all,

On Mon, 3 Jul 2017 11:01:34 +0200 Martin Schwidefsky <schwidefsky@...ibm.com> wrote:
>
> please pull from the 'for-linus' branch of
> 
> 	git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux.git for-linus
> 
> to receive the following updates:
> 
> The bulk of the s390 patches for 4.13. Some new things but mostly
> bug fixes and cleanups. Noteworthy changes:
> 
> * The SCM block driver is converted to blk-mq

It might have been nice to mention the conflict between this and the block
tree, which I resolved like this:

diff --cc drivers/s390/block/scm_blk.c
index 725f912fab41,3c2c84b72877..0071febac9e6
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@@ -228,12 -231,22 +228,12 @@@ static inline void scm_request_init(str
  	aob->request.data = (u64) aobrq;
  	scmrq->bdev = bdev;
  	scmrq->retries = 4;
- 	scmrq->error = 0;
+ 	scmrq->error = BLK_STS_OK;
  	/* We don't use all msbs - place aidaws at the end of the aob page. */
  	scmrq->next_aidaw = (void *) &aob->msb[nr_requests_per_io];
 -	scm_request_cluster_init(scmrq);
  }
  
 -static void scm_ensure_queue_restart(struct scm_blk_dev *bdev)
 -{
 -	if (atomic_read(&bdev->queued_reqs)) {
 -		/* Queue restart is triggered by the next interrupt. */
 -		return;
 -	}
 -	blk_delay_queue(bdev->rq, SCM_QUEUE_DELAY);
 -}
 -
 -void scm_request_requeue(struct scm_request *scmrq)
 +static void scm_request_requeue(struct scm_request *scmrq)
  {
  	struct scm_blk_dev *bdev = scmrq->bdev;
  	int i;
@@@ -271,83 -289,75 +271,83 @@@ static void scm_request_start(struct sc
  		SCM_LOG(5, "no subchannel");
  		scm_request_requeue(scmrq);
  	}
 -	return ret;
  }
  
 -static void scm_blk_request(struct request_queue *rq)
 +struct scm_queue {
 +	struct scm_request *scmrq;
 +	spinlock_t lock;
 +};
 +
- static int scm_blk_request(struct blk_mq_hw_ctx *hctx,
++static blk_status_t scm_blk_request(struct blk_mq_hw_ctx *hctx,
 +			   const struct blk_mq_queue_data *qd)
  {
 -	struct scm_device *scmdev = rq->queuedata;
 +	struct scm_device *scmdev = hctx->queue->queuedata;
  	struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
 -	struct scm_request *scmrq = NULL;
 -	struct request *req;
 +	struct scm_queue *sq = hctx->driver_data;
 +	struct request *req = qd->rq;
 +	struct scm_request *scmrq;
  
 -	while ((req = blk_peek_request(rq))) {
 -		if (!scm_permit_request(bdev, req))
 -			goto out;
 +	spin_lock(&sq->lock);
 +	if (!scm_permit_request(bdev, req)) {
 +		spin_unlock(&sq->lock);
- 		return BLK_MQ_RQ_QUEUE_BUSY;
++		return BLK_STS_RESOURCE;
 +	}
  
 +	scmrq = sq->scmrq;
 +	if (!scmrq) {
 +		scmrq = scm_request_fetch();
  		if (!scmrq) {
 -			scmrq = scm_request_fetch();
 -			if (!scmrq) {
 -				SCM_LOG(5, "no request");
 -				goto out;
 -			}
 -			scm_request_init(bdev, scmrq);
 +			SCM_LOG(5, "no request");
 +			spin_unlock(&sq->lock);
- 			return BLK_MQ_RQ_QUEUE_BUSY;
++			return BLK_STS_RESOURCE;
  		}
 -		scm_request_set(scmrq, req);
 +		scm_request_init(bdev, scmrq);
 +		sq->scmrq = scmrq;
 +	}
 +	scm_request_set(scmrq, req);
  
 -		if (!scm_reserve_cluster(scmrq)) {
 -			SCM_LOG(5, "cluster busy");
 -			scm_request_set(scmrq, NULL);
 -			if (scmrq->aob->request.msb_count)
 -				goto out;
 +	if (scm_request_prepare(scmrq)) {
 +		SCM_LOG(5, "aidaw alloc failed");
 +		scm_request_set(scmrq, NULL);
  
 -			scm_request_done(scmrq);
 -			return;
 -		}
 +		if (scmrq->aob->request.msb_count)
 +			scm_request_start(scmrq);
  
 -		if (scm_need_cluster_request(scmrq)) {
 -			if (scmrq->aob->request.msb_count) {
 -				/* Start cluster requests separately. */
 -				scm_request_set(scmrq, NULL);
 -				if (scm_request_start(scmrq))
 -					return;
 -			} else {
 -				atomic_inc(&bdev->queued_reqs);
 -				blk_start_request(req);
 -				scm_initiate_cluster_request(scmrq);
 -			}
 -			scmrq = NULL;
 -			continue;
 -		}
 +		sq->scmrq = NULL;
 +		spin_unlock(&sq->lock);
- 		return BLK_MQ_RQ_QUEUE_BUSY;
++		return BLK_STS_RESOURCE;
 +	}
 +	blk_mq_start_request(req);
  
 -		if (scm_request_prepare(scmrq)) {
 -			SCM_LOG(5, "aidaw alloc failed");
 -			scm_request_set(scmrq, NULL);
 -			goto out;
 -		}
 -		blk_start_request(req);
 +	if (qd->last || scmrq->aob->request.msb_count == nr_requests_per_io) {
 +		scm_request_start(scmrq);
 +		sq->scmrq = NULL;
 +	}
 +	spin_unlock(&sq->lock);
- 	return BLK_MQ_RQ_QUEUE_OK;
++	return BLK_STS_OK;
 +}
  
 -		if (scmrq->aob->request.msb_count < nr_requests_per_io)
 -			continue;
 +static int scm_blk_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 +			     unsigned int idx)
 +{
 +	struct scm_queue *qd = kzalloc(sizeof(*qd), GFP_KERNEL);
  
 -		if (scm_request_start(scmrq))
 -			return;
 +	if (!qd)
 +		return -ENOMEM;
  
 -		scmrq = NULL;
 -	}
 -out:
 -	if (scmrq)
 -		scm_request_start(scmrq);
 -	else
 -		scm_ensure_queue_restart(bdev);
 +	spin_lock_init(&qd->lock);
 +	hctx->driver_data = qd;
 +
 +	return 0;
 +}
 +
 +static void scm_blk_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx)
 +{
 +	struct scm_queue *qd = hctx->driver_data;
 +
 +	WARN_ON(qd->scmrq);
 +	kfree(hctx->driver_data);
 +	hctx->driver_data = NULL;
  }
  
  static void __scmrq_log_error(struct scm_request *scmrq)
@@@ -394,28 -419,43 +394,28 @@@ restart
  		return;
  
  requeue:
 -	spin_lock_irqsave(&bdev->rq_lock, flags);
  	scm_request_requeue(scmrq);
 -	spin_unlock_irqrestore(&bdev->rq_lock, flags);
  }
  
- void scm_blk_irq(struct scm_device *scmdev, void *data, int error)
 -static void scm_blk_tasklet(struct scm_blk_dev *bdev)
++void scm_blk_irq(struct scm_device *scmdev, void *data, blk_status_t error)
  {
 -	struct scm_request *scmrq;
 -	unsigned long flags;
 -
 -	spin_lock_irqsave(&bdev->lock, flags);
 -	while (!list_empty(&bdev->finished_requests)) {
 -		scmrq = list_first_entry(&bdev->finished_requests,
 -					 struct scm_request, list);
 -		list_del(&scmrq->list);
 -		spin_unlock_irqrestore(&bdev->lock, flags);
 +	struct scm_request *scmrq = data;
  
 -		if (scmrq->error && scmrq->retries-- > 0) {
 +	scmrq->error = error;
 +	if (error) {
 +		__scmrq_log_error(scmrq);
 +		if (scmrq->retries-- > 0) {
  			scm_blk_handle_error(scmrq);
 -
 -			/* Request restarted or requeued, handle next. */
 -			spin_lock_irqsave(&bdev->lock, flags);
 -			continue;
 +			return;
  		}
 +	}
  
 -		if (scm_test_cluster_request(scmrq)) {
 -			scm_cluster_request_irq(scmrq);
 -			spin_lock_irqsave(&bdev->lock, flags);
 -			continue;
 -		}
 +	scm_request_finish(scmrq);
 +}
  
 -		scm_request_finish(scmrq);
 -		spin_lock_irqsave(&bdev->lock, flags);
 -	}
 -	spin_unlock_irqrestore(&bdev->lock, flags);
 -	/* Look out for more requests. */
 -	blk_run_queue(bdev->rq);
 +static void scm_blk_request_done(struct request *req)
 +{
 +	blk_mq_end_request(req, 0);
  }
  
  static const struct block_device_operations scm_blk_devops = {
diff --cc drivers/s390/block/scm_blk.h
index 242d17a91920,cd598d1a4eae..71288dd9dd7f
--- a/drivers/s390/block/scm_blk.h
+++ b/drivers/s390/block/scm_blk.h
@@@ -32,7 -35,14 +32,7 @@@ struct scm_request 
  	struct aob *aob;
  	struct list_head list;
  	u8 retries;
- 	int error;
+ 	blk_status_t error;
 -#ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
 -	struct {
 -		enum {CLUSTER_NONE, CLUSTER_READ, CLUSTER_WRITE} state;
 -		struct list_head list;
 -		void **buf;
 -	} cluster;
 -#endif
  };
  
  #define to_aobrq(rq) container_of((void *) rq, struct aob_rq_header, data)
@@@ -40,8 -50,11 +40,8 @@@
  int scm_blk_dev_setup(struct scm_blk_dev *, struct scm_device *);
  void scm_blk_dev_cleanup(struct scm_blk_dev *);
  void scm_blk_set_available(struct scm_blk_dev *);
- void scm_blk_irq(struct scm_device *, void *, int);
+ void scm_blk_irq(struct scm_device *, void *, blk_status_t);
  
 -void scm_request_finish(struct scm_request *);
 -void scm_request_requeue(struct scm_request *);
 -
  struct aidaw *scm_aidaw_fetch(struct scm_request *scmrq, unsigned int bytes);
  
  int scm_drv_init(void);
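
For anyone else hitting this conflict: the block tree changed the blk-mq
->queue_rq() return convention for 4.13 from the old int BLK_MQ_RQ_QUEUE_*
codes to blk_status_t (BLK_STS_OK, BLK_STS_RESOURCE, ...), and request
completion now carries a blk_status_t as well, which is why the resolution
above returns BLK_STS_RESOURCE/BLK_STS_OK and passes a blk_status_t into
scm_blk_irq().  A minimal sketch of the new convention, for illustration
only (the example_* names are placeholders and not part of the s390 driver):

#include <linux/blk-mq.h>
#include <linux/spinlock.h>

/* Hypothetical per-hctx state, standing in for struct scm_queue. */
struct example_queue {
	spinlock_t lock;
	bool busy;
};

static blk_status_t example_queue_rq(struct blk_mq_hw_ctx *hctx,
				     const struct blk_mq_queue_data *qd)
{
	struct example_queue *eq = hctx->driver_data;
	struct request *req = qd->rq;
	blk_status_t ret = BLK_STS_OK;

	spin_lock(&eq->lock);
	if (eq->busy) {
		/* old convention: return BLK_MQ_RQ_QUEUE_BUSY; */
		ret = BLK_STS_RESOURCE;
		goto out;
	}

	blk_mq_start_request(req);
	/*
	 * A real driver would hand the request to the hardware here; this
	 * sketch completes it immediately.  Completion also takes a
	 * blk_status_t now.
	 */
	blk_mq_end_request(req, BLK_STS_OK);
out:
	spin_unlock(&eq->lock);
	return ret;	/* old convention: BLK_MQ_RQ_QUEUE_OK */
}

/* Wired into the driver's tag set via .queue_rq, as in the resolution above. */
static const struct blk_mq_ops example_mq_ops = {
	.queue_rq = example_queue_rq,
};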

-- 
Cheers,
Stephen Rothwell
