block: pass 'op' to blk_queue_enter()

We need to check if the request to be allocated is PREEMPT_ONLY,
and have to pass the REQ_PREEMPT flag to blk_queue_enter(), so pass
'op' to blk_queue_enter() directly.
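
To illustrate the new calling convention (a minimal sketch, not part
of the diff below): callers that must not sleep now fold REQ_NOWAIT
into the op word instead of passing a separate bool:

	/* fail with -EBUSY instead of waiting on a frozen queue */
	ret = blk_queue_enter(q, op | REQ_NOWAIT);

	/* default: may block until the queue becomes usable again */
	ret = blk_queue_enter(q, op);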

Change-Id: I53bafb80d59917f65b5855571489638d9fe507c3
Cc: Bart Van Assche <Bart.VanAssche@wdc.com>
Suggested-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Patch-mainline: linux-block@vger.kernel.org @ 03/10/2017, 22:04
Signed-off-by: Pradeep P V K <ppvk@codeaurora.org>
parent 3e48d1e840
commit 599917ee4f
 block/blk-core.c       | 13
 block/blk-mq.c         |  3
 fs/block_dev.c         |  4
 include/linux/blkdev.h |  2
 4 files changed

--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -816,14 +816,14 @@ struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
 }
 EXPORT_SYMBOL(blk_alloc_queue);
 
-int blk_queue_enter(struct request_queue *q, bool nowait)
+int blk_queue_enter(struct request_queue *q, unsigned int op)
 {
 	while (true) {
 		if (percpu_ref_tryget_live(&q->q_usage_counter))
 			return 0;
 
-		if (nowait)
+		if (op & REQ_NOWAIT)
 			return -EBUSY;
 
 		/*
@@ -1457,8 +1457,8 @@ static struct request *blk_old_get_request(struct request_queue *q,
 	/* create ioc upfront */
 	create_io_context(gfp_mask, q->node);
 
-	ret = blk_queue_enter(q, !(gfp_mask & __GFP_DIRECT_RECLAIM) ||
-			      (op & REQ_NOWAIT));
+	ret = blk_queue_enter(q, (gfp_mask & __GFP_DIRECT_RECLAIM) ? op :
+			      op | REQ_NOWAIT);
 	if (ret)
 		return ERR_PTR(ret);
 	spin_lock_irq(q->queue_lock);
@@ -1485,6 +1485,7 @@ struct request *blk_get_request(struct request_queue *q, unsigned int op,
 		req = blk_mq_alloc_request(q, op,
 				(gfp_mask & __GFP_DIRECT_RECLAIM) ?
 				0 : BLK_MQ_REQ_NOWAIT);
+
 		if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)
 			q->mq_ops->initialize_rq_fn(req);
 	} else {
@@ -2234,7 +2235,7 @@ blk_qc_t generic_make_request(struct bio *bio)
 			flags = BLK_MQ_REQ_NOWAIT;
 		if (bio_flagged(bio, BIO_QUEUE_ENTERED))
 			blk_queue_enter_live(q);
-		else if (blk_queue_enter(q, flags) < 0) {
+		else if (blk_queue_enter(q, bio->bi_opf) < 0) {
 			if (!blk_queue_dying(q) && (bio->bi_opf & REQ_NOWAIT))
 				bio_wouldblock_error(bio);
 			else
@@ -2287,7 +2288,7 @@ blk_qc_t generic_make_request(struct bio *bio)
 				flags = 0;
 				if (bio->bi_opf & REQ_NOWAIT)
 					flags = BLK_MQ_REQ_NOWAIT;
-				if (blk_queue_enter(q, flags) < 0)
+				if (blk_queue_enter(q, bio->bi_opf) < 0)
 					enter_succeeded = false;
 			}

--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -405,7 +405,8 @@ struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
 	struct request *rq;
 	int ret;
 
-	ret = blk_queue_enter(q, flags & BLK_MQ_REQ_NOWAIT);
+	ret = blk_queue_enter(q, !(flags & BLK_MQ_REQ_NOWAIT) ? op :
+			      op | REQ_NOWAIT);
 	if (ret)
 		return ERR_PTR(ret);

--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -689,7 +689,7 @@ int bdev_read_page(struct block_device *bdev, sector_t sector,
 	if (!ops->rw_page || bdev_get_integrity(bdev))
 		return result;
 
-	result = blk_queue_enter(bdev->bd_queue, false);
+	result = blk_queue_enter(bdev->bd_queue, 0);
 	if (result)
 		return result;
 	result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, false);
@@ -725,7 +725,7 @@ int bdev_write_page(struct block_device *bdev, sector_t sector,
 	if (!ops->rw_page || bdev_get_integrity(bdev))
 		return -EOPNOTSUPP;
 
-	result = blk_queue_enter(bdev->bd_queue, false);
+	result = blk_queue_enter(bdev->bd_queue, 0);
 	if (result)
 		return result;

--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -987,7 +987,7 @@ extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
 extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
 			 struct scsi_ioctl_command __user *);
-extern int blk_queue_enter(struct request_queue *q, bool nowait);
+extern int blk_queue_enter(struct request_queue *q, unsigned int op);
 extern void blk_queue_exit(struct request_queue *q);
 extern void blk_start_queue(struct request_queue *q);
 extern void blk_start_queue_async(struct request_queue *q);
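
Note on where this leads (illustrative only, not part of this patch):
once blk_queue_enter() sees the full op, a follow-up change can reject
non-preempt requests while a queue is in PREEMPT_ONLY mode. A rough
sketch, assuming a blk_queue_preempt_only() predicate and the
REQ_PREEMPT flag mentioned in the commit message:

	int blk_queue_enter(struct request_queue *q, unsigned int op)
	{
		while (true) {
			if (percpu_ref_tryget_live(&q->q_usage_counter)) {
				/* PREEMPT_ONLY: only REQ_PREEMPT requests may enter */
				if (!blk_queue_preempt_only(q) || (op & REQ_PREEMPT))
					return 0;
				percpu_ref_put(&q->q_usage_counter);
			}

			if (op & REQ_NOWAIT)
				return -EBUSY;

			/* ... otherwise wait for the queue to be unfrozen ... */
		}
	}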
