All the mail mirrored from lore.kernel.org
 help / color / mirror / Atom feed
* clean up blk_mq_submit_bio
@ 2024-01-24  9:26 Christoph Hellwig
  2024-01-24  9:26 ` [PATCH 1/3] blk-mq: move blk_mq_attempt_bio_merge out blk_mq_get_new_requests Christoph Hellwig
                   ` (5 more replies)
  0 siblings, 6 replies; 7+ messages in thread
From: Christoph Hellwig @ 2024-01-24  9:26 UTC (permalink / raw
  To: Jens Axboe; +Cc: linux-block

Hi Jens,

this series tries to make the cached rq handling in blk_mq_submit_bio
a little less messy and better documented.  Let me know what you think.

Diffstat:
 blk-mq.c |  105 +++++++++++++++++++++++++++++++++------------------------------
 1 file changed, 56 insertions(+), 49 deletions(-)

^ permalink raw reply	[flat|nested] 7+ messages in thread

* [PATCH 1/3] blk-mq: move blk_mq_attempt_bio_merge out blk_mq_get_new_requests
  2024-01-24  9:26 clean up blk_mq_submit_bio Christoph Hellwig
@ 2024-01-24  9:26 ` Christoph Hellwig
  2024-01-24  9:26 ` [PATCH 2/3] blk-mq: introduce a blk_mq_peek_cached_request helper Christoph Hellwig
                   ` (4 subsequent siblings)
  5 siblings, 0 replies; 7+ messages in thread
From: Christoph Hellwig @ 2024-01-24  9:26 UTC (permalink / raw
  To: Jens Axboe; +Cc: linux-block

blk_mq_attempt_bio_merge has nothing to do with allocating a new
request, it avoids allocating a new request.  Move the call out of
blk_mq_get_new_requests and into the only caller.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 block/blk-mq.c | 21 +++++++++++----------
 1 file changed, 11 insertions(+), 10 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index aa87fcfda1ecfc..fbd1ec56acea4d 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2892,9 +2892,6 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
 	};
 	struct request *rq;
 
-	if (blk_mq_attempt_bio_merge(q, bio, nsegs))
-		return NULL;
-
 	rq_qos_throttle(q, bio);
 
 	if (plug) {
@@ -3002,18 +2999,18 @@ void blk_mq_submit_bio(struct bio *bio)
 		if (unlikely(bio_may_exceed_limits(bio, &q->limits))) {
 			bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
 			if (!bio)
-				goto fail;
+				goto queue_exit;
 		}
 		if (!bio_integrity_prep(bio))
-			goto fail;
+			goto queue_exit;
 	}
 
+	if (blk_mq_attempt_bio_merge(q, bio, nr_segs))
+		goto queue_exit;
+
 	rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
-	if (unlikely(!rq)) {
-fail:
-		blk_queue_exit(q);
-		return;
-	}
+	if (unlikely(!rq))
+		goto queue_exit;
 
 done:
 	trace_block_getrq(bio);
@@ -3046,6 +3043,10 @@ void blk_mq_submit_bio(struct bio *bio)
 	} else {
 		blk_mq_run_dispatch_ops(q, blk_mq_try_issue_directly(hctx, rq));
 	}
+	return;
+
+queue_exit:
+	blk_queue_exit(q);
 }
 
 #ifdef CONFIG_BLK_MQ_STACKING
-- 
2.39.2


^ permalink raw reply related	[flat|nested] 7+ messages in thread

* [PATCH 2/3] blk-mq: introduce a blk_mq_peek_cached_request helper
  2024-01-24  9:26 clean up blk_mq_submit_bio Christoph Hellwig
  2024-01-24  9:26 ` [PATCH 1/3] blk-mq: move blk_mq_attempt_bio_merge out blk_mq_get_new_requests Christoph Hellwig
@ 2024-01-24  9:26 ` Christoph Hellwig
  2024-01-24  9:26 ` [PATCH 3/3] blk-mq: special case cached requests less Christoph Hellwig
                   ` (3 subsequent siblings)
  5 siblings, 0 replies; 7+ messages in thread
From: Christoph Hellwig @ 2024-01-24  9:26 UTC (permalink / raw
  To: Jens Axboe; +Cc: linux-block

Add a new helper to check if there is a suitable cached request in
blk_mq_submit_bio.  This removes open coded logic in blk_mq_submit_bio
and moves some checks that were so far performed in blk_mq_use_cached_rq
to be performed earlier.  This avoids the case where we first do the
check with the cached request but then later end up allocating a new
one anyway and need to grab a queue reference.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 block/blk-mq.c | 63 ++++++++++++++++++++++++++------------------------
 1 file changed, 33 insertions(+), 30 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index fbd1ec56acea4d..66df323c2b9489 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2910,22 +2910,31 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
 }
 
 /*
- * Check if we can use the passed on request for submitting the passed in bio,
- * and remove it from the request list if it can be used.
+ * Check if there is a suitable cached request and return it.
  */
-static bool blk_mq_use_cached_rq(struct request *rq, struct blk_plug *plug,
-		struct bio *bio)
+static struct request *blk_mq_peek_cached_request(struct blk_plug *plug,
+		struct request_queue *q, blk_opf_t opf)
 {
-	enum hctx_type type = blk_mq_get_hctx_type(bio->bi_opf);
-	enum hctx_type hctx_type = rq->mq_hctx->type;
+	enum hctx_type type = blk_mq_get_hctx_type(opf);
+	struct request *rq;
 
-	WARN_ON_ONCE(rq_list_peek(&plug->cached_rq) != rq);
+	if (!plug)
+		return NULL;
+	rq = rq_list_peek(&plug->cached_rq);
+	if (!rq || rq->q != q)
+		return NULL;
+	if (type != rq->mq_hctx->type &&
+	    (type != HCTX_TYPE_READ || rq->mq_hctx->type != HCTX_TYPE_DEFAULT))
+		return NULL;
+	if (op_is_flush(rq->cmd_flags) != op_is_flush(opf))
+		return NULL;
+	return rq;
+}
 
-	if (type != hctx_type &&
-	    !(type == HCTX_TYPE_READ && hctx_type == HCTX_TYPE_DEFAULT))
-		return false;
-	if (op_is_flush(rq->cmd_flags) != op_is_flush(bio->bi_opf))
-		return false;
+static void blk_mq_use_cached_rq(struct request *rq, struct blk_plug *plug,
+		struct bio *bio)
+{
+	WARN_ON_ONCE(rq_list_peek(&plug->cached_rq) != rq);
 
 	/*
 	 * If any qos ->throttle() end up blocking, we will have flushed the
@@ -2938,7 +2947,6 @@ static bool blk_mq_use_cached_rq(struct request *rq, struct blk_plug *plug,
 	blk_mq_rq_time_init(rq, 0);
 	rq->cmd_flags = bio->bi_opf;
 	INIT_LIST_HEAD(&rq->queuelist);
-	return true;
 }
 
 static void bio_set_ioprio(struct bio *bio)
@@ -2975,11 +2983,7 @@ void blk_mq_submit_bio(struct bio *bio)
 	bio = blk_queue_bounce(bio, q);
 	bio_set_ioprio(bio);
 
-	if (plug) {
-		rq = rq_list_peek(&plug->cached_rq);
-		if (rq && rq->q != q)
-			rq = NULL;
-	}
+	rq = blk_mq_peek_cached_request(plug, q, bio->bi_opf);
 	if (rq) {
 		if (unlikely(bio_may_exceed_limits(bio, &q->limits))) {
 			bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
@@ -2990,20 +2994,19 @@ void blk_mq_submit_bio(struct bio *bio)
 			return;
 		if (blk_mq_attempt_bio_merge(q, bio, nr_segs))
 			return;
-		if (blk_mq_use_cached_rq(rq, plug, bio))
-			goto done;
-		percpu_ref_get(&q->q_usage_counter);
-	} else {
-		if (unlikely(bio_queue_enter(bio)))
-			return;
-		if (unlikely(bio_may_exceed_limits(bio, &q->limits))) {
-			bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
-			if (!bio)
-				goto queue_exit;
-		}
-		if (!bio_integrity_prep(bio))
+		blk_mq_use_cached_rq(rq, plug, bio);
+		goto done;
+	}
+
+	if (unlikely(bio_queue_enter(bio)))
+		return;
+	if (unlikely(bio_may_exceed_limits(bio, &q->limits))) {
+		bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
+		if (!bio)
 			goto queue_exit;
 	}
+	if (!bio_integrity_prep(bio))
+		goto queue_exit;
 
 	if (blk_mq_attempt_bio_merge(q, bio, nr_segs))
 		goto queue_exit;
-- 
2.39.2


^ permalink raw reply related	[flat|nested] 7+ messages in thread

* [PATCH 3/3] blk-mq: special case cached requests less
  2024-01-24  9:26 clean up blk_mq_submit_bio Christoph Hellwig
  2024-01-24  9:26 ` [PATCH 1/3] blk-mq: move blk_mq_attempt_bio_merge out blk_mq_get_new_requests Christoph Hellwig
  2024-01-24  9:26 ` [PATCH 2/3] blk-mq: introduce a blk_mq_peek_cached_request helper Christoph Hellwig
@ 2024-01-24  9:26 ` Christoph Hellwig
  2024-01-24 17:27 ` clean up blk_mq_submit_bio Jens Axboe
                   ` (2 subsequent siblings)
  5 siblings, 0 replies; 7+ messages in thread
From: Christoph Hellwig @ 2024-01-24  9:26 UTC (permalink / raw
  To: Jens Axboe; +Cc: linux-block

Share the main merge / split / integrity preparation code between the
cached request vs newly allocated request cases, and add comments
explaining the cached request handling.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 block/blk-mq.c | 41 ++++++++++++++++++++++-------------------
 1 file changed, 22 insertions(+), 19 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 66df323c2b9489..edb9ee463bd0f0 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2976,30 +2976,25 @@ void blk_mq_submit_bio(struct bio *bio)
 	struct blk_plug *plug = blk_mq_plug(bio);
 	const int is_sync = op_is_sync(bio->bi_opf);
 	struct blk_mq_hw_ctx *hctx;
-	struct request *rq = NULL;
 	unsigned int nr_segs = 1;
+	struct request *rq;
 	blk_status_t ret;
 
 	bio = blk_queue_bounce(bio, q);
 	bio_set_ioprio(bio);
 
+	/*
+	 * If the plug has a cached request for this queue, try use it.
+	 *
+	 * The cached request already holds a q_usage_counter reference and we
+	 * don't have to acquire a new one if we use it.
+	 */
 	rq = blk_mq_peek_cached_request(plug, q, bio->bi_opf);
-	if (rq) {
-		if (unlikely(bio_may_exceed_limits(bio, &q->limits))) {
-			bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
-			if (!bio)
-				return;
-		}
-		if (!bio_integrity_prep(bio))
-			return;
-		if (blk_mq_attempt_bio_merge(q, bio, nr_segs))
+	if (!rq) {
+		if (unlikely(bio_queue_enter(bio)))
 			return;
-		blk_mq_use_cached_rq(rq, plug, bio);
-		goto done;
 	}
 
-	if (unlikely(bio_queue_enter(bio)))
-		return;
 	if (unlikely(bio_may_exceed_limits(bio, &q->limits))) {
 		bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
 		if (!bio)
@@ -3011,11 +3006,14 @@ void blk_mq_submit_bio(struct bio *bio)
 	if (blk_mq_attempt_bio_merge(q, bio, nr_segs))
 		goto queue_exit;
 
-	rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
-	if (unlikely(!rq))
-		goto queue_exit;
+	if (!rq) {
+		rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
+		if (unlikely(!rq))
+			goto queue_exit;
+	} else {
+		blk_mq_use_cached_rq(rq, plug, bio);
+	}
 
-done:
 	trace_block_getrq(bio);
 
 	rq_qos_track(q, rq, bio);
@@ -3049,7 +3047,12 @@ void blk_mq_submit_bio(struct bio *bio)
 	return;
 
 queue_exit:
-	blk_queue_exit(q);
+	/*
+	 * Don't drop the queue reference if we were trying to use a cached
+	 * request and thus didn't acquire one.
+	 */
+	if (!rq)
+		blk_queue_exit(q);
 }
 
 #ifdef CONFIG_BLK_MQ_STACKING
-- 
2.39.2


^ permalink raw reply related	[flat|nested] 7+ messages in thread

* Re: clean up blk_mq_submit_bio
  2024-01-24  9:26 clean up blk_mq_submit_bio Christoph Hellwig
                   ` (2 preceding siblings ...)
  2024-01-24  9:26 ` [PATCH 3/3] blk-mq: special case cached requests less Christoph Hellwig
@ 2024-01-24 17:27 ` Jens Axboe
  2024-01-28 23:52 ` Damien Le Moal
  2024-02-05 17:08 ` Jens Axboe
  5 siblings, 0 replies; 7+ messages in thread
From: Jens Axboe @ 2024-01-24 17:27 UTC (permalink / raw
  To: Christoph Hellwig; +Cc: linux-block

On 1/24/24 2:26 AM, Christoph Hellwig wrote:
> Hi Jens,
> 
> this series tries to make the cached rq handling in blk_mq_submit_bio
> a little less messy and better documented.  Let me know what you think.

Looks good to me!

-- 
Jens Axboe


^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: clean up blk_mq_submit_bio
  2024-01-24  9:26 clean up blk_mq_submit_bio Christoph Hellwig
                   ` (3 preceding siblings ...)
  2024-01-24 17:27 ` clean up blk_mq_submit_bio Jens Axboe
@ 2024-01-28 23:52 ` Damien Le Moal
  2024-02-05 17:08 ` Jens Axboe
  5 siblings, 0 replies; 7+ messages in thread
From: Damien Le Moal @ 2024-01-28 23:52 UTC (permalink / raw
  To: Christoph Hellwig, Jens Axboe; +Cc: linux-block

On 1/24/24 18:26, Christoph Hellwig wrote:
> Hi Jens,
> 
> this series tries to make the cached rq handling in blk_mq_submit_bio
> a little less messy and better documented.  Let me know what you think.
> 
> Diffstat:
>  blk-mq.c |  105 +++++++++++++++++++++++++++++++++------------------------------
>  1 file changed, 56 insertions(+), 49 deletions(-)
> 

For the series:

Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Tested-by: Damien Le Moal <dlemoal@kernel.org>

I actually need this one as it simplifies the zone write plugging I am working
on (as a better replacement for zone write locking).

-- 
Damien Le Moal
Western Digital Research


^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: clean up blk_mq_submit_bio
  2024-01-24  9:26 clean up blk_mq_submit_bio Christoph Hellwig
                   ` (4 preceding siblings ...)
  2024-01-28 23:52 ` Damien Le Moal
@ 2024-02-05 17:08 ` Jens Axboe
  5 siblings, 0 replies; 7+ messages in thread
From: Jens Axboe @ 2024-02-05 17:08 UTC (permalink / raw
  To: Christoph Hellwig; +Cc: linux-block


On Wed, 24 Jan 2024 10:26:55 +0100, Christoph Hellwig wrote:
> this series tries to make the cached rq handling in blk_mq_submit_bio
> a little less messy and better documented.  Let me know what you think.
> 
> Diffstat:
>  blk-mq.c |  105 +++++++++++++++++++++++++++++++++------------------------------
>  1 file changed, 56 insertions(+), 49 deletions(-)
> 
> [...]

Applied, thanks!

[1/3] blk-mq: move blk_mq_attempt_bio_merge out blk_mq_get_new_requests
      commit: 0f299da55ac3d28bc9de23a84fae01a85c4e253a
[2/3] blk-mq: introduce a blk_mq_peek_cached_request helper
      commit: 337e89feb7c29043dacd851b6ac28542a9a8aacf
[3/3] blk-mq: special case cached requests less
      commit: 72e84e909eb5354e1e405c968dfdc4dcc23d41cc

Best regards,
-- 
Jens Axboe




^ permalink raw reply	[flat|nested] 7+ messages in thread

end of thread, other threads:[~2024-02-05 17:08 UTC | newest]

Thread overview: 7+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2024-01-24  9:26 clean up blk_mq_submit_bio Christoph Hellwig
2024-01-24  9:26 ` [PATCH 1/3] blk-mq: move blk_mq_attempt_bio_merge out blk_mq_get_new_requests Christoph Hellwig
2024-01-24  9:26 ` [PATCH 2/3] blk-mq: introduce a blk_mq_peek_cached_request helper Christoph Hellwig
2024-01-24  9:26 ` [PATCH 3/3] blk-mq: special case cached requests less Christoph Hellwig
2024-01-24 17:27 ` clean up blk_mq_submit_bio Jens Axboe
2024-01-28 23:52 ` Damien Le Moal
2024-02-05 17:08 ` Jens Axboe

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.