block: remove the queue_lock indirection

With the legacy request path gone there is no good reason to keep
queue_lock as a pointer; we can always use the embedded lock now.

Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>

Fixed missing conversions in floppy and blk-cgroup, and some half-done edits.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
Christoph Hellwig 2018-11-15 12:17:28 -07:00 committed by Jens Axboe
parent 6d46964230
commit 0d945c1f96
16 changed files with 92 additions and 106 deletions
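
The conversion in the diffs below is mechanical: struct request_queue now embeds the spinlock instead of carrying a pointer to it (a pointer that, with the legacy path gone, always aimed at the queue's own __queue_lock), so every locking call switches from passing q->queue_lock to taking &q->queue_lock. A minimal before/after sketch of that pattern, assuming only the standard kernel spinlock API; the type and function names used here (request_queue_before, request_queue_after, example_locking) are illustrative, not the real definitions:

#include <linux/spinlock.h>

/* Before: a queue-private lock plus a pointer that callers dereferenced. */
struct request_queue_before {
        spinlock_t  __queue_lock;   /* queue private, never used directly */
        spinlock_t  *queue_lock;    /* normally pointed at __queue_lock */
};

/* After: the lock is embedded and callers take its address. */
struct request_queue_after {
        spinlock_t  queue_lock;
};

static void example_locking(struct request_queue_after *q)
{
        spin_lock_irq(&q->queue_lock);      /* was: spin_lock_irq(q->queue_lock) */
        /* ... manipulate queue state under the lock ... */
        spin_unlock_irq(&q->queue_lock);    /* was: spin_unlock_irq(q->queue_lock) */
}

The same substitution also covers the lockdep_assert_held() calls and the sparse __acquires()/__releases() annotations, which is most of what the per-file hunks below change, plus dropping the __queue_lock/queue_lock pair from struct request_queue and the pointer re-aiming in blk_cleanup_queue().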

@@ -334,7 +334,7 @@ static void bfqg_stats_xfer_dead(struct bfq_group *bfqg)
 parent = bfqg_parent(bfqg);
-lockdep_assert_held(bfqg_to_blkg(bfqg)->q->queue_lock);
+lockdep_assert_held(&bfqg_to_blkg(bfqg)->q->queue_lock);
 if (unlikely(!parent))
 return;

@@ -399,9 +399,9 @@ static struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd,
 unsigned long flags;
 struct bfq_io_cq *icq;
-spin_lock_irqsave(q->queue_lock, flags);
+spin_lock_irqsave(&q->queue_lock, flags);
 icq = icq_to_bic(ioc_lookup_icq(ioc, q));
-spin_unlock_irqrestore(q->queue_lock, flags);
+spin_unlock_irqrestore(&q->queue_lock, flags);
 return icq;
 }
@@ -4034,7 +4034,7 @@ static void bfq_update_dispatch_stats(struct request_queue *q,
 * In addition, the following queue lock guarantees that
 * bfqq_group(bfqq) exists as well.
 */
-spin_lock_irq(q->queue_lock);
+spin_lock_irq(&q->queue_lock);
 if (idle_timer_disabled)
 /*
 * Since the idle timer has been disabled,
@@ -4053,7 +4053,7 @@ static void bfq_update_dispatch_stats(struct request_queue *q,
 bfqg_stats_set_start_empty_time(bfqg);
 bfqg_stats_update_io_remove(bfqg, rq->cmd_flags);
 }
-spin_unlock_irq(q->queue_lock);
+spin_unlock_irq(&q->queue_lock);
 }
 #else
 static inline void bfq_update_dispatch_stats(struct request_queue *q,
@@ -4637,11 +4637,11 @@ static void bfq_update_insert_stats(struct request_queue *q,
 * In addition, the following queue lock guarantees that
 * bfqq_group(bfqq) exists as well.
 */
-spin_lock_irq(q->queue_lock);
+spin_lock_irq(&q->queue_lock);
 bfqg_stats_update_io_add(bfqq_group(bfqq), bfqq, cmd_flags);
 if (idle_timer_disabled)
 bfqg_stats_update_idle_time(bfqq_group(bfqq));
-spin_unlock_irq(q->queue_lock);
+spin_unlock_irq(&q->queue_lock);
 }
 #else
 static inline void bfq_update_insert_stats(struct request_queue *q,
@@ -5382,9 +5382,9 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
 }
 eq->elevator_data = bfqd;
-spin_lock_irq(q->queue_lock);
+spin_lock_irq(&q->queue_lock);
 q->elevator = eq;
-spin_unlock_irq(q->queue_lock);
+spin_unlock_irq(&q->queue_lock);
 /*
 * Our fallback bfqq if bfq_find_alloc_queue() runs into OOM issues.

@@ -147,7 +147,7 @@ struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
 blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
 if (blkg && blkg->q == q) {
 if (update_hint) {
-lockdep_assert_held(q->queue_lock);
+lockdep_assert_held(&q->queue_lock);
 rcu_assign_pointer(blkcg->blkg_hint, blkg);
 }
 return blkg;
@@ -170,7 +170,7 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
 int i, ret;
 WARN_ON_ONCE(!rcu_read_lock_held());
-lockdep_assert_held(q->queue_lock);
+lockdep_assert_held(&q->queue_lock);
 /* blkg holds a reference to blkcg */
 if (!css_tryget_online(&blkcg->css)) {
@@ -268,7 +268,7 @@ struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
 struct blkcg_gq *blkg;
 WARN_ON_ONCE(!rcu_read_lock_held());
-lockdep_assert_held(q->queue_lock);
+lockdep_assert_held(&q->queue_lock);
 blkg = __blkg_lookup(blkcg, q, true);
 if (blkg)
@@ -299,7 +299,7 @@ static void blkg_destroy(struct blkcg_gq *blkg)
 struct blkcg_gq *parent = blkg->parent;
 int i;
-lockdep_assert_held(blkg->q->queue_lock);
+lockdep_assert_held(&blkg->q->queue_lock);
 lockdep_assert_held(&blkcg->lock);
 /* Something wrong if we are trying to remove same group twice */
@@ -349,7 +349,7 @@ static void blkg_destroy_all(struct request_queue *q)
 {
 struct blkcg_gq *blkg, *n;
-spin_lock_irq(q->queue_lock);
+spin_lock_irq(&q->queue_lock);
 list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
 struct blkcg *blkcg = blkg->blkcg;
@@ -359,7 +359,7 @@ static void blkg_destroy_all(struct request_queue *q)
 }
 q->root_blkg = NULL;
-spin_unlock_irq(q->queue_lock);
+spin_unlock_irq(&q->queue_lock);
 }
 /*
@@ -454,10 +454,10 @@ void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
 rcu_read_lock();
 hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
-spin_lock_irq(blkg->q->queue_lock);
+spin_lock_irq(&blkg->q->queue_lock);
 if (blkcg_policy_enabled(blkg->q, pol))
 total += prfill(sf, blkg->pd[pol->plid], data);
-spin_unlock_irq(blkg->q->queue_lock);
+spin_unlock_irq(&blkg->q->queue_lock);
 }
 rcu_read_unlock();
@@ -655,7 +655,7 @@ u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
 struct cgroup_subsys_state *pos_css;
 u64 sum = 0;
-lockdep_assert_held(blkg->q->queue_lock);
+lockdep_assert_held(&blkg->q->queue_lock);
 rcu_read_lock();
 blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
@@ -698,7 +698,7 @@ struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
 struct blkg_rwstat sum = { };
 int i;
-lockdep_assert_held(blkg->q->queue_lock);
+lockdep_assert_held(&blkg->q->queue_lock);
 rcu_read_lock();
 blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
@@ -729,7 +729,7 @@ static struct blkcg_gq *blkg_lookup_check(struct blkcg *blkcg,
 struct request_queue *q)
 {
 WARN_ON_ONCE(!rcu_read_lock_held());
-lockdep_assert_held(q->queue_lock);
+lockdep_assert_held(&q->queue_lock);
 if (!blkcg_policy_enabled(q, pol))
 return ERR_PTR(-EOPNOTSUPP);
@@ -750,7 +750,7 @@ static struct blkcg_gq *blkg_lookup_check(struct blkcg *blkcg,
 */
 int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 char *input, struct blkg_conf_ctx *ctx)
-__acquires(rcu) __acquires(disk->queue->queue_lock)
+__acquires(rcu) __acquires(&disk->queue->queue_lock)
 {
 struct gendisk *disk;
 struct request_queue *q;
@@ -778,7 +778,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 q = disk->queue;
 rcu_read_lock();
-spin_lock_irq(q->queue_lock);
+spin_lock_irq(&q->queue_lock);
 blkg = blkg_lookup_check(blkcg, pol, q);
 if (IS_ERR(blkg)) {
@@ -805,7 +805,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 }
 /* Drop locks to do new blkg allocation with GFP_KERNEL. */
-spin_unlock_irq(q->queue_lock);
+spin_unlock_irq(&q->queue_lock);
 rcu_read_unlock();
 new_blkg = blkg_alloc(pos, q, GFP_KERNEL);
@@ -815,7 +815,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 }
 rcu_read_lock();
-spin_lock_irq(q->queue_lock);
+spin_lock_irq(&q->queue_lock);
 blkg = blkg_lookup_check(pos, pol, q);
 if (IS_ERR(blkg)) {
@@ -843,7 +843,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 return 0;
 fail_unlock:
-spin_unlock_irq(q->queue_lock);
+spin_unlock_irq(&q->queue_lock);
 rcu_read_unlock();
 fail:
 put_disk_and_module(disk);
@@ -868,9 +868,9 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 * with blkg_conf_prep().
 */
 void blkg_conf_finish(struct blkg_conf_ctx *ctx)
-__releases(ctx->disk->queue->queue_lock) __releases(rcu)
+__releases(&ctx->disk->queue->queue_lock) __releases(rcu)
 {
-spin_unlock_irq(ctx->disk->queue->queue_lock);
+spin_unlock_irq(&ctx->disk->queue->queue_lock);
 rcu_read_unlock();
 put_disk_and_module(ctx->disk);
 }
@@ -903,7 +903,7 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
 */
 off += scnprintf(buf+off, size-off, "%s ", dname);
-spin_lock_irq(blkg->q->queue_lock);
+spin_lock_irq(&blkg->q->queue_lock);
 rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
 offsetof(struct blkcg_gq, stat_bytes));
@@ -917,7 +917,7 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
 wios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);
 dios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_DISCARD]);
-spin_unlock_irq(blkg->q->queue_lock);
+spin_unlock_irq(&blkg->q->queue_lock);
 if (rbytes || wbytes || rios || wios) {
 has_stats = true;
@@ -1038,9 +1038,9 @@ void blkcg_destroy_blkgs(struct blkcg *blkcg)
 struct blkcg_gq, blkcg_node);
 struct request_queue *q = blkg->q;
-if (spin_trylock(q->queue_lock)) {
+if (spin_trylock(&q->queue_lock)) {
 blkg_destroy(blkg);
-spin_unlock(q->queue_lock);
+spin_unlock(&q->queue_lock);
 } else {
 spin_unlock_irq(&blkcg->lock);
 cpu_relax();
@@ -1161,12 +1161,12 @@ int blkcg_init_queue(struct request_queue *q)
 /* Make sure the root blkg exists. */
 rcu_read_lock();
-spin_lock_irq(q->queue_lock);
+spin_lock_irq(&q->queue_lock);
 blkg = blkg_create(&blkcg_root, q, new_blkg);
 if (IS_ERR(blkg))
 goto err_unlock;
 q->root_blkg = blkg;
-spin_unlock_irq(q->queue_lock);
+spin_unlock_irq(&q->queue_lock);
 rcu_read_unlock();
 if (preloaded)
@@ -1185,7 +1185,7 @@ int blkcg_init_queue(struct request_queue *q)
 blkg_destroy_all(q);
 return ret;
 err_unlock:
-spin_unlock_irq(q->queue_lock);
+spin_unlock_irq(&q->queue_lock);
 rcu_read_unlock();
 if (preloaded)
 radix_tree_preload_end();
@@ -1200,7 +1200,7 @@ int blkcg_init_queue(struct request_queue *q)
 */
 void blkcg_drain_queue(struct request_queue *q)
 {
-lockdep_assert_held(q->queue_lock);
+lockdep_assert_held(&q->queue_lock);
 /*
 * @q could be exiting and already have destroyed all blkgs as
@@ -1335,7 +1335,7 @@ int blkcg_activate_policy(struct request_queue *q,
 }
 }
-spin_lock_irq(q->queue_lock);
+spin_lock_irq(&q->queue_lock);
 list_for_each_entry(blkg, &q->blkg_list, q_node) {
 struct blkg_policy_data *pd;
@@ -1347,7 +1347,7 @@ int blkcg_activate_policy(struct request_queue *q,
 if (!pd)
 swap(pd, pd_prealloc);
 if (!pd) {
-spin_unlock_irq(q->queue_lock);
+spin_unlock_irq(&q->queue_lock);
 goto pd_prealloc;
 }
@@ -1361,7 +1361,7 @@ int blkcg_activate_policy(struct request_queue *q,
 __set_bit(pol->plid, q->blkcg_pols);
 ret = 0;
-spin_unlock_irq(q->queue_lock);
+spin_unlock_irq(&q->queue_lock);
 out_bypass_end:
 if (q->mq_ops)
 blk_mq_unfreeze_queue(q);
@@ -1390,7 +1390,7 @@ void blkcg_deactivate_policy(struct request_queue *q,
 if (q->mq_ops)
 blk_mq_freeze_queue(q);
-spin_lock_irq(q->queue_lock);
+spin_lock_irq(&q->queue_lock);
 __clear_bit(pol->plid, q->blkcg_pols);
@@ -1403,7 +1403,7 @@ void blkcg_deactivate_policy(struct request_queue *q,
 }
 }
-spin_unlock_irq(q->queue_lock);
+spin_unlock_irq(&q->queue_lock);
 if (q->mq_ops)
 blk_mq_unfreeze_queue(q);

@@ -327,8 +327,6 @@ void blk_exit_queue(struct request_queue *q)
 */
 void blk_cleanup_queue(struct request_queue *q)
 {
-spinlock_t *lock = q->queue_lock;
 /* mark @q DYING, no new request or merges will be allowed afterwards */
 mutex_lock(&q->sysfs_lock);
 blk_set_queue_dying(q);
@@ -381,11 +379,6 @@ void blk_cleanup_queue(struct request_queue *q)
 percpu_ref_exit(&q->q_usage_counter);
-spin_lock_irq(lock);
-if (q->queue_lock != &q->__queue_lock)
-q->queue_lock = &q->__queue_lock;
-spin_unlock_irq(lock);
 /* @q is and will stay empty, shutdown and put */
 blk_put_queue(q);
 }
@@ -524,8 +517,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 mutex_init(&q->blk_trace_mutex);
 #endif
 mutex_init(&q->sysfs_lock);
-spin_lock_init(&q->__queue_lock);
-q->queue_lock = &q->__queue_lock;
+spin_lock_init(&q->queue_lock);
 init_waitqueue_head(&q->mq_freeze_wq);

@@ -110,9 +110,9 @@ static void ioc_release_fn(struct work_struct *work)
 struct io_cq, ioc_node);
 struct request_queue *q = icq->q;
-if (spin_trylock(q->queue_lock)) {
+if (spin_trylock(&q->queue_lock)) {
 ioc_destroy_icq(icq);
-spin_unlock(q->queue_lock);
+spin_unlock(&q->queue_lock);
 } else {
 spin_unlock_irqrestore(&ioc->lock, flags);
 cpu_relax();
@@ -233,9 +233,9 @@ void ioc_clear_queue(struct request_queue *q)
 {
 LIST_HEAD(icq_list);
-spin_lock_irq(q->queue_lock);
+spin_lock_irq(&q->queue_lock);
 list_splice_init(&q->icq_list, &icq_list);
-spin_unlock_irq(q->queue_lock);
+spin_unlock_irq(&q->queue_lock);
 __ioc_clear_queue(&icq_list);
 }
@@ -326,7 +326,7 @@ struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
 {
 struct io_cq *icq;
-lockdep_assert_held(q->queue_lock);
+lockdep_assert_held(&q->queue_lock);
 /*
 * icq's are indexed from @ioc using radix tree and hint pointer,
@@ -385,7 +385,7 @@ struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
 INIT_HLIST_NODE(&icq->ioc_node);
 /* lock both q and ioc and try to link @icq */
-spin_lock_irq(q->queue_lock);
+spin_lock_irq(&q->queue_lock);
 spin_lock(&ioc->lock);
 if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
@@ -401,7 +401,7 @@ struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
 }
 spin_unlock(&ioc->lock);
-spin_unlock_irq(q->queue_lock);
+spin_unlock_irq(&q->queue_lock);
 radix_tree_preload_end();
 return icq;
 }

@@ -485,11 +485,11 @@ static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio)
 bio_associate_blkcg(bio, &blkcg->css);
 blkg = blkg_lookup(blkcg, q);
 if (unlikely(!blkg)) {
-spin_lock_irq(q->queue_lock);
+spin_lock_irq(&q->queue_lock);
 blkg = blkg_lookup_create(blkcg, q);
 if (IS_ERR(blkg))
 blkg = NULL;
-spin_unlock_irq(q->queue_lock);
+spin_unlock_irq(&q->queue_lock);
 }
 if (!blkg)
 goto out;

@@ -37,9 +37,9 @@ void blk_mq_sched_assign_ioc(struct request *rq, struct bio *bio)
 struct io_context *ioc = rq_ioc(bio);
 struct io_cq *icq;
-spin_lock_irq(q->queue_lock);
+spin_lock_irq(&q->queue_lock);
 icq = ioc_lookup_icq(ioc, q);
-spin_unlock_irq(q->queue_lock);
+spin_unlock_irq(&q->queue_lock);
 if (!icq) {
 icq = ioc_create_icq(ioc, q, GFP_ATOMIC);

@@ -89,12 +89,12 @@ int blk_pre_runtime_suspend(struct request_queue *q)
 /* Switch q_usage_counter back to per-cpu mode. */
 blk_mq_unfreeze_queue(q);
-spin_lock_irq(q->queue_lock);
+spin_lock_irq(&q->queue_lock);
 if (ret < 0)
 pm_runtime_mark_last_busy(q->dev);
 else
 q->rpm_status = RPM_SUSPENDING;
-spin_unlock_irq(q->queue_lock);
+spin_unlock_irq(&q->queue_lock);
 if (ret)
 blk_clear_pm_only(q);
@@ -121,14 +121,14 @@ void blk_post_runtime_suspend(struct request_queue *q, int err)
 if (!q->dev)
 return;
-spin_lock_irq(q->queue_lock);
+spin_lock_irq(&q->queue_lock);
 if (!err) {
 q->rpm_status = RPM_SUSPENDED;
 } else {
 q->rpm_status = RPM_ACTIVE;
 pm_runtime_mark_last_busy(q->dev);
 }
-spin_unlock_irq(q->queue_lock);
+spin_unlock_irq(&q->queue_lock);
 if (err)
 blk_clear_pm_only(q);
@@ -151,9 +151,9 @@ void blk_pre_runtime_resume(struct request_queue *q)
 if (!q->dev)
 return;
-spin_lock_irq(q->queue_lock);
+spin_lock_irq(&q->queue_lock);
 q->rpm_status = RPM_RESUMING;
-spin_unlock_irq(q->queue_lock);
+spin_unlock_irq(&q->queue_lock);
 }
 EXPORT_SYMBOL(blk_pre_runtime_resume);
@@ -176,7 +176,7 @@ void blk_post_runtime_resume(struct request_queue *q, int err)
 if (!q->dev)
 return;
-spin_lock_irq(q->queue_lock);
+spin_lock_irq(&q->queue_lock);
 if (!err) {
 q->rpm_status = RPM_ACTIVE;
 pm_runtime_mark_last_busy(q->dev);
@@ -184,7 +184,7 @@ void blk_post_runtime_resume(struct request_queue *q, int err)
 } else {
 q->rpm_status = RPM_SUSPENDED;
 }
-spin_unlock_irq(q->queue_lock);
+spin_unlock_irq(&q->queue_lock);
 if (!err)
 blk_clear_pm_only(q);
@@ -207,10 +207,10 @@ EXPORT_SYMBOL(blk_post_runtime_resume);
 */
 void blk_set_runtime_active(struct request_queue *q)
 {
-spin_lock_irq(q->queue_lock);
+spin_lock_irq(&q->queue_lock);
 q->rpm_status = RPM_ACTIVE;
 pm_runtime_mark_last_busy(q->dev);
 pm_request_autosuspend(q->dev);
-spin_unlock_irq(q->queue_lock);
+spin_unlock_irq(&q->queue_lock);
 }
 EXPORT_SYMBOL(blk_set_runtime_active);

@@ -21,7 +21,7 @@ static inline void blk_pm_mark_last_busy(struct request *rq)
 static inline void blk_pm_requeue_request(struct request *rq)
 {
-lockdep_assert_held(rq->q->queue_lock);
+lockdep_assert_held(&rq->q->queue_lock);
 if (rq->q->dev && !(rq->rq_flags & RQF_PM))
 rq->q->nr_pending--;
@@ -30,7 +30,7 @@ static inline void blk_pm_requeue_request(struct request *rq)
 static inline void blk_pm_add_request(struct request_queue *q,
 struct request *rq)
 {
-lockdep_assert_held(q->queue_lock);
+lockdep_assert_held(&q->queue_lock);
 if (q->dev && !(rq->rq_flags & RQF_PM))
 q->nr_pending++;
@@ -38,7 +38,7 @@ static inline void blk_pm_add_request(struct request_queue *q,
 static inline void blk_pm_put_request(struct request *rq)
 {
-lockdep_assert_held(rq->q->queue_lock);
+lockdep_assert_held(&rq->q->queue_lock);
 if (rq->q->dev && !(rq->rq_flags & RQF_PM))
 --rq->q->nr_pending;

@@ -238,10 +238,10 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
 if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
 return -EINVAL;
-spin_lock_irq(q->queue_lock);
+spin_lock_irq(&q->queue_lock);
 q->limits.max_sectors = max_sectors_kb << 1;
 q->backing_dev_info->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
-spin_unlock_irq(q->queue_lock);
+spin_unlock_irq(&q->queue_lock);
 return ret;
 }

@@ -1243,7 +1243,7 @@ static void throtl_pending_timer_fn(struct timer_list *t)
 bool dispatched;
 int ret;
-spin_lock_irq(q->queue_lock);
+spin_lock_irq(&q->queue_lock);
 if (throtl_can_upgrade(td, NULL))
 throtl_upgrade_state(td);
@@ -1266,9 +1266,9 @@ static void throtl_pending_timer_fn(struct timer_list *t)
 break;
 /* this dispatch windows is still open, relax and repeat */
-spin_unlock_irq(q->queue_lock);
+spin_unlock_irq(&q->queue_lock);
 cpu_relax();
-spin_lock_irq(q->queue_lock);
+spin_lock_irq(&q->queue_lock);
 }
 if (!dispatched)
@@ -1290,7 +1290,7 @@ static void throtl_pending_timer_fn(struct timer_list *t)
 queue_work(kthrotld_workqueue, &td->dispatch_work);
 }
 out_unlock:
-spin_unlock_irq(q->queue_lock);
+spin_unlock_irq(&q->queue_lock);
 }
 /**
@@ -1314,11 +1314,11 @@ static void blk_throtl_dispatch_work_fn(struct work_struct *work)
 bio_list_init(&bio_list_on_stack);
-spin_lock_irq(q->queue_lock);
+spin_lock_irq(&q->queue_lock);
 for (rw = READ; rw <= WRITE; rw++)
 while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
 bio_list_add(&bio_list_on_stack, bio);
-spin_unlock_irq(q->queue_lock);
+spin_unlock_irq(&q->queue_lock);
 if (!bio_list_empty(&bio_list_on_stack)) {
 blk_start_plug(&plug);
@@ -2141,7 +2141,7 @@ bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
 if (bio_flagged(bio, BIO_THROTTLED) || !tg->has_rules[rw])
 goto out;
-spin_lock_irq(q->queue_lock);
+spin_lock_irq(&q->queue_lock);
 throtl_update_latency_buckets(td);
@@ -2224,7 +2224,7 @@ bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
 }
 out_unlock:
-spin_unlock_irq(q->queue_lock);
+spin_unlock_irq(&q->queue_lock);
 out:
 bio_set_flag(bio, BIO_THROTTLED);
@@ -2345,7 +2345,7 @@ static void tg_drain_bios(struct throtl_service_queue *parent_sq)
 * Dispatch all currently throttled bios on @q through ->make_request_fn().
 */
 void blk_throtl_drain(struct request_queue *q)
-__releases(q->queue_lock) __acquires(q->queue_lock)
+__releases(&q->queue_lock) __acquires(&q->queue_lock)
 {
 struct throtl_data *td = q->td;
 struct blkcg_gq *blkg;
@@ -2368,7 +2368,7 @@ void blk_throtl_drain(struct request_queue *q)
 tg_drain_bios(&td->service_queue);
 rcu_read_unlock();
-spin_unlock_irq(q->queue_lock);
+spin_unlock_irq(&q->queue_lock);
 /* all bios now should be in td->service_queue, issue them */
 for (rw = READ; rw <= WRITE; rw++)
@@ -2376,7 +2376,7 @@ void blk_throtl_drain(struct request_queue *q)
 NULL)))
 generic_make_request(bio);
-spin_lock_irq(q->queue_lock);
+spin_lock_irq(&q->queue_lock);
 }
 int blk_throtl_init(struct request_queue *q)

@@ -2255,9 +2255,9 @@ static void request_done(int uptodate)
 DRS->maxtrack = 1;
 /* unlock chained buffers */
-spin_lock_irqsave(q->queue_lock, flags);
+spin_lock_irqsave(&q->queue_lock, flags);
 floppy_end_request(req, 0);
-spin_unlock_irqrestore(q->queue_lock, flags);
+spin_unlock_irqrestore(&q->queue_lock, flags);
 } else {
 if (rq_data_dir(req) == WRITE) {
 /* record write error information */
@@ -2269,9 +2269,9 @@ static void request_done(int uptodate)
 DRWE->last_error_sector = blk_rq_pos(req);
 DRWE->last_error_generation = DRS->generation;
 }
-spin_lock_irqsave(q->queue_lock, flags);
+spin_lock_irqsave(&q->queue_lock, flags);
 floppy_end_request(req, BLK_STS_IOERR);
-spin_unlock_irqrestore(q->queue_lock, flags);
+spin_unlock_irqrestore(&q->queue_lock, flags);
 }
 }

@@ -2203,9 +2203,9 @@ static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
 * Some CDRW drives can not handle writes larger than one packet,
 * even if the size is a multiple of the packet size.
 */
-spin_lock_irq(q->queue_lock);
+spin_lock_irq(&q->queue_lock);
 blk_queue_max_hw_sectors(q, pd->settings.size);
-spin_unlock_irq(q->queue_lock);
+spin_unlock_irq(&q->queue_lock);
 set_bit(PACKET_WRITABLE, &pd->flags);
 } else {
 pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);

@@ -44,15 +44,15 @@ static int ide_pm_execute_rq(struct request *rq)
 {
 struct request_queue *q = rq->q;
-spin_lock_irq(q->queue_lock);
+spin_lock_irq(&q->queue_lock);
 if (unlikely(blk_queue_dying(q))) {
 rq->rq_flags |= RQF_QUIET;
 scsi_req(rq)->result = -ENXIO;
-spin_unlock_irq(q->queue_lock);
+spin_unlock_irq(&q->queue_lock);
 blk_mq_end_request(rq, BLK_STS_OK);
 return -ENXIO;
 }
-spin_unlock_irq(q->queue_lock);
+spin_unlock_irq(&q->queue_lock);
 blk_execute_rq(q, NULL, rq, true);
 return scsi_req(rq)->result ? -EIO : 0;
@@ -214,12 +214,12 @@ void ide_complete_pm_rq(ide_drive_t *drive, struct request *rq)
 printk("%s: completing PM request, %s\n", drive->name,
 (ide_req(rq)->type == ATA_PRIV_PM_SUSPEND) ? "suspend" : "resume");
 #endif
-spin_lock_irqsave(q->queue_lock, flags);
+spin_lock_irqsave(&q->queue_lock, flags);
 if (ide_req(rq)->type == ATA_PRIV_PM_SUSPEND)
 blk_mq_stop_hw_queues(q);
 else
 drive->dev_flags &= ~IDE_DFLAG_BLOCKED;
-spin_unlock_irqrestore(q->queue_lock, flags);
+spin_unlock_irqrestore(&q->queue_lock, flags);
 drive->hwif->rq = NULL;

@@ -717,11 +717,11 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q,
 blkg = blkg_lookup(blkcg, q);
 if (unlikely(!blkg)) {
-spin_lock_irq(q->queue_lock);
+spin_lock_irq(&q->queue_lock);
 blkg = blkg_lookup_create(blkcg, q);
 if (IS_ERR(blkg))
 blkg = NULL;
-spin_unlock_irq(q->queue_lock);
+spin_unlock_irq(&q->queue_lock);
 }
 throtl = blk_throtl_bio(q, blkg, bio);

@@ -446,13 +446,7 @@ struct request_queue {
 */
 gfp_t bounce_gfp;
-/*
-* protects queue structures from reentrancy. ->__queue_lock should
-* _never_ be used directly, it is queue private. always use
-* ->queue_lock.
-*/
-spinlock_t __queue_lock;
-spinlock_t *queue_lock;
+spinlock_t queue_lock;
 /*
 * queue kobject