From 71b8642e79f277459555629f2bea1a8d1fed307e Mon Sep 17 00:00:00 2001
From: Yu Kuai <yukuai3@huawei.com>
Date: Sat, 27 May 2023 09:06:41 +0800
Subject: [PATCH] blk-wbt: remove dead code to handle wbt enable/disable with
 io inflight

wbt is always enabled or disabled with the queue frozen, so wbt can
never be enabled or disabled while IO is still inflight, and this
behaviour must always hold to avoid IO hangs (which have been reported
several times). Therefore, the code that handles wbt enable/disable
with IO inflight is not, and never will be, used; remove this dead
code.

Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20230527010644.647900-3-yukuai1@huaweicloud.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 block/blk-wbt.c | 16 ----------------
 1 file changed, 16 deletions(-)

diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index 53bf5aa6f9ad6..21bbeb31a4449 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -200,15 +200,6 @@ static void wbt_rqw_done(struct rq_wb *rwb, struct rq_wait *rqw,
 
 	inflight = atomic_dec_return(&rqw->inflight);
 
-	/*
-	 * wbt got disabled with IO in flight. Wake up any potential
-	 * waiters, we don't have to do more than that.
-	 */
-	if (unlikely(!rwb_enabled(rwb))) {
-		rwb_wake_all(rwb);
-		return;
-	}
-
 	/*
	 * For discards, our limit is always the background. For writes, if
	 * the device does write back caching, drop further down before we
@@ -545,13 +536,6 @@ static inline unsigned int get_limit(struct rq_wb *rwb, blk_opf_t opf)
 {
 	unsigned int limit;
 
-	/*
-	 * If we got disabled, just return UINT_MAX. This ensures that
-	 * we'll properly inc a new IO, and dec+wakeup at the end.
-	 */
-	if (!rwb_enabled(rwb))
-		return UINT_MAX;
-
 	if ((opf & REQ_OP_MASK) == REQ_OP_DISCARD)
 		return rwb->wb_background;
 
-- 
GitLab
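
For context (not part of the patch above): the claim that wbt is only enabled or disabled with the queue frozen comes from the callers. Below is a minimal sketch modeled on the sysfs write path in block/blk-sysfs.c; the latency-unit conversion, the wbt_init() call on first enable, and most error handling are trimmed, so treat it as an illustration of the freeze/quiesce pattern around wbt_set_min_lat() rather than the exact upstream function.

static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
				  size_t count)
{
	s64 val;

	/* Value parsing and validation simplified for this sketch. */
	if (kstrtos64(page, 10, &val) < 0)
		return -EINVAL;

	/*
	 * Idle the queue first: writing the latency may end up enabling
	 * or disabling wbt completely, and that must never happen with
	 * IO still inflight.
	 */
	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue(q);

	wbt_set_min_lat(q, val);

	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);

	return count;
}

Because every path that can flip wbt on or off is expected to follow this freeze-before-toggle pattern, the rwb_enabled() rechecks removed by the patch could never observe a state change with IO in flight, which is what makes them dead code.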