block-6.10-20240523
-----BEGIN PGP SIGNATURE-----

iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAmZPaegQHGF4Ym9lQGtl
cm5lbC5kawAKCRD301j7KXHgplkkD/4h1vxr2a6jg44TEUJ9f59rIOELuYHXJdpt
5m7r8UWcy7LF6HfmMgSeHV/7Gr1bBw6jh1eMubZRt9pZJ1sSGnc6vQdrOU+RnG9k
F9i0qogAD2WXClQPAxvHGC1KD1quSdeiKME0hNJdGA6SsV4cYnDVeR8O6SQbaomD
KPeGGBdjvrygRFhyDBFDACWK3GuD5POlbswUOwASYNrAb4OrQsj+bX/QXkuOXir9
n/NW/RfiQqAvI4m51yzaMqfFWw+s0irhXNfchl3i8RBMvDFBRNEkgtDN4y2rUynK
+FaDeAwGXR51/qL9gr0ZScXAY6Q7f/B9FkrTUZR7S1lD3JsLXiS+uOefXEljKsDd
RpNUc0sX3RjaSu1uNiUD/H4v+umvR+r3uuAyH6OXstCQt+98SJUbQvZuzphVGC60
iM8W+NRsaYZUhjN4LBj0NBGgCiidHanm22GCPADWN1fxZbjRWUoA886sZXTqmmMj
+GGqpPU3pbGtj09ysaJpLKxu1TbD3QmcCUVPWQ8+DKt8PGGDDa+vIRXV8xswwQDg
DyZoq0s/s00DzCXiPsbvVyKwXCJ1XSB0sEq0gvjDfGXb+5h6T+lH2irbcjBxUlwq
qbofAmk6PVjxeWMUP4NXE04oK5Itc/l20LT9ECFPWzMdc1ht31TsqmxldHLIpDqp
KUeacOh94A==
=Btam
-----END PGP SIGNATURE-----

Merge tag 'block-6.10-20240523' of git://git.kernel.dk/linux

Pull more block updates from Jens Axboe:
 "Followup block updates, mostly due to NVMe being a bit late to the
  party. But nothing major in there, so not a big deal.

  In detail, this contains:

   - NVMe pull request via Keith:
       - Fabrics connection retries (Daniel, Hannes)
       - Fabrics logging enhancements (Tokunori)
       - RDMA delete optimization (Sagi)

   - ublk DMA alignment fix (me)

   - null_blk sparse warning fixes (Bart)

   - Discard support for brd (Keith)

   - blk-cgroup list corruption fixes (Ming)

   - blk-cgroup stat propagation fix (Waiman)

   - Regression fix for plugging stall with md (Yu)

   - Misc fixes or cleanups (David, Jeff, Justin)"

* tag 'block-6.10-20240523' of git://git.kernel.dk/linux: (24 commits)
  null_blk: fix null-ptr-dereference while configuring 'power' and 'submit_queues'
  blk-throttle: remove unused struct 'avg_latency_bucket'
  block: fix lost bio for plug enabled bio based device
  block: t10-pi: add MODULE_DESCRIPTION()
  blk-mq: add helper for checking if one CPU is mapped to specified hctx
  blk-cgroup: Properly propagate the iostat update up the hierarchy
  blk-cgroup: fix list corruption from reorder of WRITE ->lqueued
  blk-cgroup: fix list corruption from resetting io stat
  cdrom: rearrange last_media_change check to avoid unintentional overflow
  nbd: Fix signal handling
  nbd: Remove a local variable from nbd_send_cmd()
  nbd: Improve the documentation of the locking assumptions
  nbd: Remove superfluous casts
  nbd: Use NULL to represent a pointer
  brd: implement discard support
  null_blk: Fix two sparse warnings
  ublk_drv: set DMA alignment mask to 3
  nvme-rdma, nvme-tcp: include max reconnects for reconnect logging
  nvmet-rdma: Avoid o(n^2) loop in delete_ctrl
  nvme: do not retry authentication failures
  ...
commit b4d88a60fe
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
@@ -322,6 +322,7 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct gendisk *disk,
 	blkg->q = disk->queue;
 	INIT_LIST_HEAD(&blkg->q_node);
 	blkg->blkcg = blkcg;
+	blkg->iostat.blkg = blkg;
 #ifdef CONFIG_BLK_CGROUP_PUNT_BIO
 	spin_lock_init(&blkg->async_bio_lock);
 	bio_list_init(&blkg->async_bios);
@@ -618,12 +619,45 @@ static void blkg_destroy_all(struct gendisk *disk)
 	spin_unlock_irq(&q->queue_lock);
 }
 
+static void blkg_iostat_set(struct blkg_iostat *dst, struct blkg_iostat *src)
+{
+	int i;
+
+	for (i = 0; i < BLKG_IOSTAT_NR; i++) {
+		dst->bytes[i] = src->bytes[i];
+		dst->ios[i] = src->ios[i];
+	}
+}
+
+static void __blkg_clear_stat(struct blkg_iostat_set *bis)
+{
+	struct blkg_iostat cur = {0};
+	unsigned long flags;
+
+	flags = u64_stats_update_begin_irqsave(&bis->sync);
+	blkg_iostat_set(&bis->cur, &cur);
+	blkg_iostat_set(&bis->last, &cur);
+	u64_stats_update_end_irqrestore(&bis->sync, flags);
+}
+
+static void blkg_clear_stat(struct blkcg_gq *blkg)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		struct blkg_iostat_set *s = per_cpu_ptr(blkg->iostat_cpu, cpu);
+
+		__blkg_clear_stat(s);
+	}
+	__blkg_clear_stat(&blkg->iostat);
+}
+
 static int blkcg_reset_stats(struct cgroup_subsys_state *css,
 			     struct cftype *cftype, u64 val)
 {
 	struct blkcg *blkcg = css_to_blkcg(css);
 	struct blkcg_gq *blkg;
-	int i, cpu;
+	int i;
 
 	mutex_lock(&blkcg_pol_mutex);
 	spin_lock_irq(&blkcg->lock);
@@ -634,18 +668,7 @@ static int blkcg_reset_stats(struct cgroup_subsys_state *css,
 	 * anyway. If you get hit by a race, retry.
 	 */
 	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
-		for_each_possible_cpu(cpu) {
-			struct blkg_iostat_set *bis =
-				per_cpu_ptr(blkg->iostat_cpu, cpu);
-			memset(bis, 0, sizeof(*bis));
-
-			/* Re-initialize the cleared blkg_iostat_set */
-			u64_stats_init(&bis->sync);
-			bis->blkg = blkg;
-		}
-		memset(&blkg->iostat, 0, sizeof(blkg->iostat));
-		u64_stats_init(&blkg->iostat.sync);
-
+		blkg_clear_stat(blkg);
 		for (i = 0; i < BLKCG_MAX_POLS; i++) {
 			struct blkcg_policy *pol = blkcg_policy[i];
 
@@ -948,16 +971,6 @@ void blkg_conf_exit(struct blkg_conf_ctx *ctx)
 }
 EXPORT_SYMBOL_GPL(blkg_conf_exit);
 
-static void blkg_iostat_set(struct blkg_iostat *dst, struct blkg_iostat *src)
-{
-	int i;
-
-	for (i = 0; i < BLKG_IOSTAT_NR; i++) {
-		dst->bytes[i] = src->bytes[i];
-		dst->ios[i] = src->ios[i];
-	}
-}
-
 static void blkg_iostat_add(struct blkg_iostat *dst, struct blkg_iostat *src)
 {
 	int i;
@@ -1023,7 +1036,19 @@ static void __blkcg_rstat_flush(struct blkcg *blkcg, int cpu)
 		struct blkg_iostat cur;
 		unsigned int seq;
 
+		/*
+		 * Order assignment of `next_bisc` from `bisc->lnode.next` in
+		 * llist_for_each_entry_safe and clearing `bisc->lqueued` for
+		 * avoiding to assign `next_bisc` with new next pointer added
+		 * in blk_cgroup_bio_start() in case of re-ordering.
+		 *
+		 * The pair barrier is implied in llist_add() in blk_cgroup_bio_start().
+		 */
+		smp_mb();
+
 		WRITE_ONCE(bisc->lqueued, false);
+		if (bisc == &blkg->iostat)
+			goto propagate_up; /* propagate up to parent only */
 
 		/* fetch the current per-cpu values */
 		do {
@@ -1033,10 +1058,24 @@ static void __blkcg_rstat_flush(struct blkcg *blkcg, int cpu)
 
 		blkcg_iostat_update(blkg, &cur, &bisc->last);
 
+propagate_up:
 		/* propagate global delta to parent (unless that's root) */
-		if (parent && parent->parent)
+		if (parent && parent->parent) {
 			blkcg_iostat_update(parent, &blkg->iostat.cur,
 					    &blkg->iostat.last);
+			/*
+			 * Queue parent->iostat to its blkcg's lockless
+			 * list to propagate up to the grandparent if the
+			 * iostat hasn't been queued yet.
+			 */
+			if (!parent->iostat.lqueued) {
+				struct llist_head *plhead;
+
+				plhead = per_cpu_ptr(parent->blkcg->lhead, cpu);
+				llist_add(&parent->iostat.lnode, plhead);
+				parent->iostat.lqueued = true;
+			}
+		}
 	}
 	raw_spin_unlock_irqrestore(&blkg_stat_lock, flags);
 out:

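Editor's note: the smp_mb() added above closes a reorder window between reading a node's next pointer and clearing its lqueued flag. A rough userspace model of that ordering, using C11 atomics in place of the kernel's llist/WRITE_ONCE primitives (names such as iostat_node, queue_once and flush_one are illustrative, not kernel APIs):

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct iostat_node {
	struct iostat_node *next;
	atomic_bool lqueued;		/* "already on the lockless list?" */
};

/* Producer, modelling blk_cgroup_bio_start(): queue a node only once. */
static void queue_once(struct iostat_node **head, struct iostat_node *n)
{
	if (atomic_load_explicit(&n->lqueued, memory_order_acquire))
		return;
	n->next = *head;
	*head = n;
	/* pairs with the full fence in flush_one(), like llist_add() */
	atomic_store_explicit(&n->lqueued, true, memory_order_release);
}

/*
 * Consumer, modelling __blkcg_rstat_flush(): capture ->next BEFORE
 * clearing lqueued; the fence keeps the two from being reordered, so
 * a concurrent re-queue cannot redirect the saved next pointer.
 */
static struct iostat_node *flush_one(struct iostat_node *n)
{
	struct iostat_node *next = n->next;

	atomic_thread_fence(memory_order_seq_cst);	/* the smp_mb() */
	atomic_store_explicit(&n->lqueued, false, memory_order_relaxed);
	return next;
}
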
diff --git a/block/blk-core.c b/block/blk-core.c
@@ -615,9 +615,14 @@ static inline blk_status_t blk_check_zone_append(struct request_queue *q,
 
 static void __submit_bio(struct bio *bio)
 {
+	/* If plug is not used, add new plug here to cache nsecs time. */
+	struct blk_plug plug;
+
 	if (unlikely(!blk_crypto_bio_prep(&bio)))
 		return;
 
+	blk_start_plug(&plug);
+
 	if (!bdev_test_flag(bio->bi_bdev, BD_HAS_SUBMIT_BIO)) {
 		blk_mq_submit_bio(bio);
 	} else if (likely(bio_queue_enter(bio) == 0)) {
@@ -626,6 +631,8 @@ static void __submit_bio(struct bio *bio)
 		disk->fops->submit_bio(bio);
 		blk_queue_exit(disk->queue);
 	}
+
+	blk_finish_plug(&plug);
 }
 
 /*
@@ -650,13 +657,11 @@ static void __submit_bio(struct bio *bio)
 static void __submit_bio_noacct(struct bio *bio)
 {
 	struct bio_list bio_list_on_stack[2];
-	struct blk_plug plug;
 
 	BUG_ON(bio->bi_next);
 
 	bio_list_init(&bio_list_on_stack[0]);
 	current->bio_list = bio_list_on_stack;
-	blk_start_plug(&plug);
 
 	do {
 		struct request_queue *q = bdev_get_queue(bio->bi_bdev);
@@ -690,23 +695,19 @@ static void __submit_bio_noacct(struct bio *bio)
 		bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
 	} while ((bio = bio_list_pop(&bio_list_on_stack[0])));
 
-	blk_finish_plug(&plug);
 	current->bio_list = NULL;
 }
 
 static void __submit_bio_noacct_mq(struct bio *bio)
 {
 	struct bio_list bio_list[2] = { };
-	struct blk_plug plug;
 
 	current->bio_list = bio_list;
-	blk_start_plug(&plug);
 
 	do {
 		__submit_bio(bio);
 	} while ((bio = bio_list_pop(&bio_list[0])));
 
-	blk_finish_plug(&plug);
 	current->bio_list = NULL;
 }
 

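Editor's note: blk_start_plug()/blk_finish_plug() bracket a batch of submissions on the current task; the fix above moves that bracket into __submit_bio() so bio-based drivers are covered on the non-plugged path too. A minimal kernel-style sketch of the pattern (my_submit_all() is a hypothetical caller, not a kernel function):

#include <linux/blkdev.h>
#include <linux/bio.h>

static void my_submit_all(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);		/* begin batching for this task */
	for (i = 0; i < nr; i++)
		submit_bio(bios[i]);	/* queued under the plug */
	blk_finish_plug(&plug);		/* flush the whole batch at once */
}
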
diff --git a/block/blk-mq.c b/block/blk-mq.c
@@ -3545,12 +3545,28 @@ static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node)
 	return 0;
 }
 
+/*
+ * Check if one CPU is mapped to the specified hctx
+ *
+ * Isolated CPUs have been ruled out from hctx->cpumask, which is supposed
+ * to be used for scheduling kworker only. For other usage, please call this
+ * helper for checking if one CPU belongs to the specified hctx
+ */
+static bool blk_mq_cpu_mapped_to_hctx(unsigned int cpu,
+		const struct blk_mq_hw_ctx *hctx)
+{
+	struct blk_mq_hw_ctx *mapped_hctx = blk_mq_map_queue_type(hctx->queue,
+			hctx->type, cpu);
+
+	return mapped_hctx == hctx;
+}
+
 static int blk_mq_hctx_notify_online(unsigned int cpu, struct hlist_node *node)
 {
 	struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
 			struct blk_mq_hw_ctx, cpuhp_online);
 
-	if (cpumask_test_cpu(cpu, hctx->cpumask))
+	if (blk_mq_cpu_mapped_to_hctx(cpu, hctx))
 		clear_bit(BLK_MQ_S_INACTIVE, &hctx->state);
 	return 0;
 }
@@ -3568,7 +3584,7 @@ static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
 	enum hctx_type type;
 
 	hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
-	if (!cpumask_test_cpu(cpu, hctx->cpumask))
+	if (!blk_mq_cpu_mapped_to_hctx(cpu, hctx))
 		return 0;
 
 	ctx = __blk_mq_get_ctx(hctx->queue, cpu);

diff --git a/block/blk-throttle.c b/block/blk-throttle.c
@@ -39,11 +39,6 @@ struct latency_bucket {
 	int samples;
 };
 
-struct avg_latency_bucket {
-	unsigned long latency; /* ns / 1024 */
-	bool valid;
-};
-
 struct throtl_data
 {
 	/* service tree for active throtl groups */

diff --git a/block/t10-pi.c b/block/t10-pi.c
@@ -495,5 +495,5 @@ const struct blk_integrity_profile ext_pi_type3_crc64 = {
 };
 EXPORT_SYMBOL_GPL(ext_pi_type3_crc64);
 
-MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("T10 Protection Information module");
+MODULE_LICENSE("GPL");

diff --git a/drivers/block/brd.c b/drivers/block/brd.c
@@ -222,6 +222,23 @@ static int brd_do_bvec(struct brd_device *brd, struct page *page,
 	return err;
 }
 
+static void brd_do_discard(struct brd_device *brd, sector_t sector, u32 size)
+{
+	sector_t aligned_sector = (sector + PAGE_SECTORS) & ~PAGE_SECTORS;
+	struct page *page;
+
+	size -= (aligned_sector - sector) * SECTOR_SIZE;
+	xa_lock(&brd->brd_pages);
+	while (size >= PAGE_SIZE && aligned_sector < rd_size * 2) {
+		page = __xa_erase(&brd->brd_pages, aligned_sector >> PAGE_SECTORS_SHIFT);
+		if (page)
+			__free_page(page);
+		aligned_sector += PAGE_SECTORS;
+		size -= PAGE_SIZE;
+	}
+	xa_unlock(&brd->brd_pages);
+}
+
 static void brd_submit_bio(struct bio *bio)
 {
 	struct brd_device *brd = bio->bi_bdev->bd_disk->private_data;
@@ -229,6 +246,12 @@ static void brd_submit_bio(struct bio *bio)
 	struct bio_vec bvec;
 	struct bvec_iter iter;
 
+	if (unlikely(op_is_discard(bio->bi_opf))) {
+		brd_do_discard(brd, sector, bio->bi_iter.bi_size);
+		bio_endio(bio);
+		return;
+	}
+
 	bio_for_each_segment(bvec, bio, iter) {
 		unsigned int len = bvec.bv_len;
 		int err;
@@ -309,6 +332,9 @@ static int brd_alloc(int i)
 		 * is harmless)
 		 */
 		.physical_block_size	= PAGE_SIZE,
+		.max_hw_discard_sectors	= UINT_MAX,
+		.max_discard_segments	= 1,
+		.discard_granularity	= PAGE_SIZE,
 	};
 
 	list_for_each_entry(brd, &brd_devices, brd_list)

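Editor's note: brd_do_discard() frees only pages fully covered by the discard: it skips the partial head up to the next page boundary and stops before a partial tail. A hedged userspace rendering of that whole-page arithmetic, written with the conventional round-up idiom (constants assume 512-byte sectors and 4 KiB pages):

#include <stdint.h>
#include <stdio.h>

#define SECTOR_SIZE  512u
#define PAGE_BYTES   4096u
#define PAGE_SECTORS (PAGE_BYTES / SECTOR_SIZE)		/* 8 */

int main(void)
{
	uint64_t sector = 3;			/* unaligned start */
	uint64_t size = 3 * PAGE_BYTES;		/* discard length */
	/* first sector of the first fully covered page */
	uint64_t first = (sector + PAGE_SECTORS - 1) &
			 ~(uint64_t)(PAGE_SECTORS - 1);

	size -= (first - sector) * SECTOR_SIZE;	/* drop the partial head */
	printf("start at sector %llu, free %llu whole page(s)\n",
	       (unsigned long long)first,
	       (unsigned long long)(size / PAGE_BYTES));
	return 0;
}
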
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
@@ -222,7 +222,7 @@ static ssize_t pid_show(struct device *dev,
 			struct device_attribute *attr, char *buf)
 {
 	struct gendisk *disk = dev_to_disk(dev);
-	struct nbd_device *nbd = (struct nbd_device *)disk->private_data;
+	struct nbd_device *nbd = disk->private_data;
 
 	return sprintf(buf, "%d\n", nbd->pid);
 }
@@ -236,7 +236,7 @@ static ssize_t backend_show(struct device *dev,
 			struct device_attribute *attr, char *buf)
 {
 	struct gendisk *disk = dev_to_disk(dev);
-	struct nbd_device *nbd = (struct nbd_device *)disk->private_data;
+	struct nbd_device *nbd = disk->private_data;
 
 	return sprintf(buf, "%s\n", nbd->backend ?: "");
 }
@@ -588,7 +588,10 @@ static inline int was_interrupted(int result)
 	return result == -ERESTARTSYS || result == -EINTR;
 }
 
-/* always call with the tx_lock held */
+/*
+ * Returns BLK_STS_RESOURCE if the caller should retry after a delay. Returns
+ * -EAGAIN if the caller should requeue @cmd. Returns -EIO if sending failed.
+ */
 static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
 {
 	struct request *req = blk_mq_rq_from_pdu(cmd);
@@ -598,13 +601,15 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
 	struct nbd_request request = {.magic = htonl(NBD_REQUEST_MAGIC)};
 	struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
 	struct iov_iter from;
-	unsigned long size = blk_rq_bytes(req);
 	struct bio *bio;
 	u64 handle;
 	u32 type;
 	u32 nbd_cmd_flags = 0;
 	int sent = nsock->sent, skip = 0;
 
+	lockdep_assert_held(&cmd->lock);
+	lockdep_assert_held(&nsock->tx_lock);
+
 	iov_iter_kvec(&from, ITER_SOURCE, &iov, 1, sizeof(request));
 
 	type = req_to_nbd_cmd_type(req);
@@ -644,7 +649,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
 	request.type = htonl(type | nbd_cmd_flags);
 	if (type != NBD_CMD_FLUSH) {
 		request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
-		request.len = htonl(size);
+		request.len = htonl(blk_rq_bytes(req));
 	}
 	handle = nbd_cmd_handle(cmd);
 	request.cookie = cpu_to_be64(handle);
@@ -669,7 +674,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
 			nsock->sent = sent;
 		}
 		set_bit(NBD_CMD_REQUEUED, &cmd->flags);
-		return BLK_STS_RESOURCE;
+		return (__force int)BLK_STS_RESOURCE;
 	}
 	dev_err_ratelimited(disk_to_dev(nbd->disk),
 		"Send control failed (result %d)\n", result);
@@ -710,7 +715,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
 				nsock->pending = req;
 				nsock->sent = sent;
 				set_bit(NBD_CMD_REQUEUED, &cmd->flags);
-				return BLK_STS_RESOURCE;
+				return (__force int)BLK_STS_RESOURCE;
 			}
 			dev_err(disk_to_dev(nbd->disk),
 				"Send data failed (result %d)\n",
@@ -1007,7 +1012,7 @@ static int wait_for_reconnect(struct nbd_device *nbd)
 	return !test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags);
 }
 
-static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
+static blk_status_t nbd_handle_cmd(struct nbd_cmd *cmd, int index)
 {
 	struct request *req = blk_mq_rq_from_pdu(cmd);
 	struct nbd_device *nbd = cmd->nbd;
@@ -1015,18 +1020,20 @@ static blk_status_t nbd_handle_cmd(struct nbd_cmd *cmd, int index)
 	struct nbd_sock *nsock;
 	int ret;
 
+	lockdep_assert_held(&cmd->lock);
+
 	config = nbd_get_config_unlocked(nbd);
 	if (!config) {
 		dev_err_ratelimited(disk_to_dev(nbd->disk),
 				    "Socks array is empty\n");
-		return -EINVAL;
+		return BLK_STS_IOERR;
 	}
 
 	if (index >= config->num_connections) {
 		dev_err_ratelimited(disk_to_dev(nbd->disk),
 				    "Attempted send on invalid socket\n");
 		nbd_config_put(nbd);
-		return -EINVAL;
+		return BLK_STS_IOERR;
 	}
 	cmd->status = BLK_STS_OK;
 again:
@@ -1049,7 +1056,7 @@ static blk_status_t nbd_handle_cmd(struct nbd_cmd *cmd, int index)
 			 */
 			sock_shutdown(nbd);
 			nbd_config_put(nbd);
-			return -EIO;
+			return BLK_STS_IOERR;
 		}
 		goto again;
 	}
@@ -1062,7 +1069,7 @@ static blk_status_t nbd_handle_cmd(struct nbd_cmd *cmd, int index)
 	blk_mq_start_request(req);
 	if (unlikely(nsock->pending && nsock->pending != req)) {
 		nbd_requeue_cmd(cmd);
-		ret = 0;
+		ret = BLK_STS_OK;
 		goto out;
 	}
 	/*
@@ -1081,19 +1088,19 @@ static blk_status_t nbd_handle_cmd(struct nbd_cmd *cmd, int index)
 			 "Request send failed, requeueing\n");
 		nbd_mark_nsock_dead(nbd, nsock, 1);
 		nbd_requeue_cmd(cmd);
-		ret = 0;
+		ret = BLK_STS_OK;
 	}
 out:
 	mutex_unlock(&nsock->tx_lock);
 	nbd_config_put(nbd);
-	return ret;
+	return ret < 0 ? BLK_STS_IOERR : (__force blk_status_t)ret;
 }
 
 static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
 			const struct blk_mq_queue_data *bd)
 {
 	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
-	int ret;
+	blk_status_t ret;
 
 	/*
 	 * Since we look at the bio's to send the request over the network we
@@ -1113,10 +1120,6 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
 	 * appropriate.
 	 */
 	ret = nbd_handle_cmd(cmd, hctx->queue_num);
-	if (ret < 0)
-		ret = BLK_STS_IOERR;
-	else if (!ret)
-		ret = BLK_STS_OK;
 	mutex_unlock(&cmd->lock);
 
 	return ret;

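Editor's note: the nbd rework above makes nbd_handle_cmd() speak blk_status_t natively instead of returning errnos that nbd_queue_rq() must translate. A stripped-down sketch of that convention (the my_* names are illustrative, not nbd symbols):

#include <linux/blk-mq.h>

static blk_status_t my_handle_cmd(struct request *req, bool busy)
{
	if (!req)
		return BLK_STS_IOERR;		/* hard failure */
	if (busy)
		return BLK_STS_RESOURCE;	/* back off, retry later */
	return BLK_STS_OK;			/* submitted */
}

static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
				const struct blk_mq_queue_data *bd)
{
	/* no errno-to-blk_status_t translation layer needed any more */
	return my_handle_cmd(bd->rq, false);
}
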
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
@@ -413,13 +413,25 @@ static int nullb_update_nr_hw_queues(struct nullb_device *dev,
 static int nullb_apply_submit_queues(struct nullb_device *dev,
 				     unsigned int submit_queues)
 {
-	return nullb_update_nr_hw_queues(dev, submit_queues, dev->poll_queues);
+	int ret;
+
+	mutex_lock(&lock);
+	ret = nullb_update_nr_hw_queues(dev, submit_queues, dev->poll_queues);
+	mutex_unlock(&lock);
+
+	return ret;
 }
 
 static int nullb_apply_poll_queues(struct nullb_device *dev,
 				   unsigned int poll_queues)
 {
-	return nullb_update_nr_hw_queues(dev, dev->submit_queues, poll_queues);
+	int ret;
+
+	mutex_lock(&lock);
+	ret = nullb_update_nr_hw_queues(dev, dev->submit_queues, poll_queues);
+	mutex_unlock(&lock);
+
+	return ret;
 }
 
 NULLB_DEVICE_ATTR(size, ulong, NULL);
@@ -468,28 +480,31 @@ static ssize_t nullb_device_power_store(struct config_item *item,
 	if (ret < 0)
 		return ret;
 
+	ret = count;
 	mutex_lock(&lock);
 	if (!dev->power && newp) {
 		if (test_and_set_bit(NULLB_DEV_FL_UP, &dev->flags))
-			return count;
+			goto out;
+
 		ret = null_add_dev(dev);
 		if (ret) {
 			clear_bit(NULLB_DEV_FL_UP, &dev->flags);
-			return ret;
+			goto out;
 		}
 
 		set_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
 		dev->power = newp;
 	} else if (dev->power && !newp) {
 		if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) {
-			mutex_lock(&lock);
 			dev->power = newp;
 			null_del_dev(dev->nullb);
-			mutex_unlock(&lock);
 		}
 		clear_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
 	}
 
-	return count;
+out:
+	mutex_unlock(&lock);
+	return ret;
 }
 
 CONFIGFS_ATTR(nullb_device_, power);
@@ -1218,7 +1233,7 @@ static int null_transfer(struct nullb *nullb, struct page *page,
 	return err;
 }
 
-static int null_handle_rq(struct nullb_cmd *cmd)
+static blk_status_t null_handle_rq(struct nullb_cmd *cmd)
 {
 	struct request *rq = blk_mq_rq_from_pdu(cmd);
 	struct nullb *nullb = cmd->nq->dev->nullb;
@@ -1932,15 +1947,12 @@ static int null_add_dev(struct nullb_device *dev)
 	nullb->q->queuedata = nullb;
 	blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q);
 
-	mutex_lock(&lock);
 	rv = ida_alloc(&nullb_indexes, GFP_KERNEL);
-	if (rv < 0) {
-		mutex_unlock(&lock);
+	if (rv < 0)
 		goto out_cleanup_disk;
-	}
+
 	nullb->index = rv;
 	dev->index = rv;
-	mutex_unlock(&lock);
 
 	if (config_item_name(&dev->group.cg_item)) {
 		/* Use configfs dir name as the device name */
@@ -1969,9 +1981,7 @@ static int null_add_dev(struct nullb_device *dev)
 	if (rv)
 		goto out_ida_free;
 
-	mutex_lock(&lock);
 	list_add_tail(&nullb->list, &nullb_list);
-	mutex_unlock(&lock);
 
 	pr_info("disk %s created\n", nullb->disk_name);
 
@@ -2020,7 +2030,9 @@ static int null_create_dev(void)
 	if (!dev)
 		return -ENOMEM;
 
+	mutex_lock(&lock);
 	ret = null_add_dev(dev);
+	mutex_unlock(&lock);
 	if (ret) {
 		null_free_dev(dev);
 		return ret;

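Editor's note: the power_store() fix illustrates the usual cure for lock/return mismatches in configfs stores: take the mutex once, and route every exit through a single unlock label. A compact sketch of the shape (struct my_dev, my_lock and my_add_dev() are stand-ins, not null_blk symbols):

#include <linux/mutex.h>
#include <linux/types.h>

struct my_dev { bool power; };
static DEFINE_MUTEX(my_lock);

static int my_add_dev(struct my_dev *dev) { return 0; }	/* stub */

static ssize_t my_power_store(struct my_dev *dev, bool newp, size_t count)
{
	ssize_t ret = count;		/* success: bytes consumed */

	mutex_lock(&my_lock);
	if (newp && !dev->power) {
		int err = my_add_dev(dev);
		if (err) {
			ret = err;
			goto out;	/* still drops the lock below */
		}
	}
	dev->power = newp;
out:
	mutex_unlock(&my_lock);
	return ret;
}
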
diff --git a/drivers/block/null_blk/trace.h b/drivers/block/null_blk/trace.h
@@ -36,7 +36,12 @@ TRACE_EVENT(nullb_zone_op,
 	    TP_ARGS(cmd, zone_no, zone_cond),
 	    TP_STRUCT__entry(
 		__array(char, disk, DISK_NAME_LEN)
-		__field(enum req_op, op)
+		/*
+		 * __field() uses is_signed_type(). is_signed_type() does not
+		 * support bitwise types. Use __field_struct() instead because
+		 * it does not use is_signed_type().
+		 */
+		__field_struct(enum req_op, op)
 		__field(unsigned int, zone_no)
 		__field(unsigned int, zone_cond)
 	    ),

diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
@@ -2178,6 +2178,7 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
 		.virt_boundary_mask	= p->virt_boundary_mask,
 		.max_segments		= USHRT_MAX,
 		.max_segment_size	= UINT_MAX,
+		.dma_alignment		= 3,
 	};
 	struct gendisk *disk;
 	int ret = -EINVAL;

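Editor's note: dma_alignment is a mask, so the value 3 above requires buffers aligned to 4 bytes (addr & 3 == 0). A tiny hedged check mirroring what the block layer enforces against such a queue limit:

#include <stdbool.h>
#include <stdint.h>

/* illustrative: true if buf satisfies the queue's DMA alignment mask */
static bool dma_aligned(const void *buf, uint32_t dma_alignment)
{
	return ((uintptr_t)buf & dma_alignment) == 0;
}
/* dma_aligned(p, 3) matches .dma_alignment = 3: p must be 4-byte aligned */
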
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
@@ -2358,7 +2358,7 @@ static int cdrom_ioctl_timed_media_change(struct cdrom_device_info *cdi,
 		return -EFAULT;
 
 	tmp_info.media_flags = 0;
-	if (tmp_info.last_media_change - cdi->last_media_change_ms < 0)
+	if (cdi->last_media_change_ms > tmp_info.last_media_change)
 		tmp_info.media_flags |= MEDIA_CHANGED_FLAG;
 
 	tmp_info.last_media_change = cdi->last_media_change_ms;

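Editor's note: the cdrom change replaces a subtract-then-test-sign check with a direct comparison, because the subtraction can overflow on hostile ioctl input. A small userspace demonstration (the wrap is made explicit via unsigned arithmetic, since signed overflow is undefined in C; the values are illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t user_ts = INT64_MIN + 10;	/* attacker-chosen value */
	int64_t drv_ts  = 20;			/* driver's timestamp */

	/* old style: the difference wraps and comes out positive, so
	 * "diff < 0" wrongly reports that the media did not change */
	int64_t diff = (int64_t)((uint64_t)user_ts - (uint64_t)drv_ts);
	printf("old: diff=%lld -> %s\n", (long long)diff,
	       diff < 0 ? "changed" : "not changed");

	/* new style: compare directly, nothing can wrap */
	printf("new: %s\n", drv_ts > user_ts ? "changed" : "not changed");
	return 0;
}
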
diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c
@@ -730,7 +730,7 @@ static void nvme_queue_auth_work(struct work_struct *work)
 					 NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE);
 	if (ret) {
 		chap->status = ret;
-		chap->error = -ECONNREFUSED;
+		chap->error = -EKEYREJECTED;
 		return;
 	}
 
@@ -797,7 +797,7 @@ static void nvme_queue_auth_work(struct work_struct *work)
 					 NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1);
 	if (ret) {
 		chap->status = ret;
-		chap->error = -ECONNREFUSED;
+		chap->error = -EKEYREJECTED;
 		return;
 	}
 
@@ -818,7 +818,7 @@ static void nvme_queue_auth_work(struct work_struct *work)
 	ret = nvme_auth_process_dhchap_success1(ctrl, chap);
 	if (ret) {
 		/* Controller authentication failed */
-		chap->error = -ECONNREFUSED;
+		chap->error = -EKEYREJECTED;
 		goto fail2;
 	}
 

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
@@ -383,14 +383,14 @@ static inline enum nvme_disposition nvme_decide_disposition(struct request *req)
 	if (likely(nvme_req(req)->status == 0))
 		return COMPLETE;
 
+	if ((nvme_req(req)->status & 0x7ff) == NVME_SC_AUTH_REQUIRED)
+		return AUTHENTICATE;
+
 	if (blk_noretry_request(req) ||
 	    (nvme_req(req)->status & NVME_SC_DNR) ||
 	    nvme_req(req)->retries >= nvme_max_retries)
 		return COMPLETE;
 
-	if ((nvme_req(req)->status & 0x7ff) == NVME_SC_AUTH_REQUIRED)
-		return AUTHENTICATE;
-
 	if (req->cmd_flags & REQ_NVME_MPATH) {
 		if (nvme_is_path_error(nvme_req(req)->status) ||
 		    blk_queue_dying(req->q))

diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
@@ -428,12 +428,6 @@ static void nvmf_connect_cmd_prep(struct nvme_ctrl *ctrl, u16 qid,
  * fabrics-protocol connection of the NVMe Admin queue between the
  * host system device and the allocated NVMe controller on the
  * target system via a NVMe Fabrics "Connect" command.
- *
- * Return:
- *	0: success
- *	> 0: NVMe error status code
- *	< 0: Linux errno error code
- *
  */
 int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
 {
@@ -467,7 +461,7 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
 		if (result & NVME_CONNECT_AUTHREQ_ASCR) {
 			dev_warn(ctrl->device,
 				 "qid 0: secure concatenation is not supported\n");
-			ret = NVME_SC_AUTH_REQUIRED;
+			ret = -EOPNOTSUPP;
 			goto out_free_data;
 		}
 		/* Authentication required */
@@ -475,14 +469,14 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
 		if (ret) {
 			dev_warn(ctrl->device,
 				 "qid 0: authentication setup failed\n");
-			ret = NVME_SC_AUTH_REQUIRED;
 			goto out_free_data;
 		}
 		ret = nvme_auth_wait(ctrl, 0);
-		if (ret)
+		if (ret) {
 			dev_warn(ctrl->device,
-				 "qid 0: authentication failed\n");
-		else
+				 "qid 0: authentication failed, error %d\n",
+				 ret);
+		} else
 			dev_info(ctrl->device,
 				 "qid 0: authenticated\n");
 	}
@@ -542,7 +536,7 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
 		if (result & NVME_CONNECT_AUTHREQ_ASCR) {
 			dev_warn(ctrl->device,
 				 "qid 0: secure concatenation is not supported\n");
-			ret = NVME_SC_AUTH_REQUIRED;
+			ret = -EOPNOTSUPP;
 			goto out_free_data;
 		}
 		/* Authentication required */
@@ -550,12 +544,13 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
 		if (ret) {
 			dev_warn(ctrl->device,
 				 "qid %d: authentication setup failed\n", qid);
-			ret = NVME_SC_AUTH_REQUIRED;
-		} else {
-			ret = nvme_auth_wait(ctrl, qid);
-			if (ret)
-				dev_warn(ctrl->device,
-					 "qid %u: authentication failed\n", qid);
+			goto out_free_data;
 		}
+		ret = nvme_auth_wait(ctrl, qid);
+		if (ret) {
+			dev_warn(ctrl->device,
+				 "qid %u: authentication failed, error %d\n",
+				 qid, ret);
+		}
 	}
 out_free_data:
@@ -564,8 +559,26 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
 }
 EXPORT_SYMBOL_GPL(nvmf_connect_io_queue);
 
-bool nvmf_should_reconnect(struct nvme_ctrl *ctrl)
+/*
+ * Evaluate the status information returned by the transport in order to decided
+ * if a reconnect attempt should be scheduled.
+ *
+ * Do not retry when:
+ *
+ * - the DNR bit is set and the specification states no further connect
+ *   attempts with the same set of paramenters should be attempted.
+ *
+ * - when the authentication attempt fails, because the key was invalid.
+ *   This error code is set on the host side.
+ */
+bool nvmf_should_reconnect(struct nvme_ctrl *ctrl, int status)
 {
+	if (status > 0 && (status & NVME_SC_DNR))
+		return false;
+
+	if (status == -EKEYREJECTED)
+		return false;
+
 	if (ctrl->opts->max_reconnects == -1 ||
 	    ctrl->nr_reconnects < ctrl->opts->max_reconnects)
 		return true;

diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
@@ -223,7 +223,7 @@ int nvmf_register_transport(struct nvmf_transport_ops *ops);
 void nvmf_unregister_transport(struct nvmf_transport_ops *ops);
 void nvmf_free_options(struct nvmf_ctrl_options *opts);
 int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size);
-bool nvmf_should_reconnect(struct nvme_ctrl *ctrl);
+bool nvmf_should_reconnect(struct nvme_ctrl *ctrl, int status);
 bool nvmf_ip_options_match(struct nvme_ctrl *ctrl,
 		struct nvmf_ctrl_options *opts);
 void nvmf_set_io_queues(struct nvmf_ctrl_options *opts, u32 nr_io_queues,

diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
@@ -3310,12 +3310,10 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
 		dev_info(ctrl->ctrl.device,
 			"NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n",
 			ctrl->cnum, status);
-		if (status > 0 && (status & NVME_SC_DNR))
-			recon = false;
 	} else if (time_after_eq(jiffies, rport->dev_loss_end))
 		recon = false;
 
-	if (recon && nvmf_should_reconnect(&ctrl->ctrl)) {
+	if (recon && nvmf_should_reconnect(&ctrl->ctrl, status)) {
 		if (portptr->port_state == FC_OBJSTATE_ONLINE)
 			dev_info(ctrl->ctrl.device,
 				"NVME-FC{%d}: Reconnect attempt in %ld "

diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
@@ -1148,7 +1148,7 @@ static inline int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid)
 }
 static inline int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid)
 {
-	return NVME_SC_AUTH_REQUIRED;
+	return -EPROTONOSUPPORT;
 }
 static inline void nvme_auth_free(struct nvme_ctrl *ctrl) {};
 #endif

diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
@@ -982,7 +982,8 @@ static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl)
 	kfree(ctrl);
 }
 
-static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl)
+static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl,
+		int status)
 {
 	enum nvme_ctrl_state state = nvme_ctrl_state(&ctrl->ctrl);
 
@@ -992,7 +993,7 @@ static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl,
 		return;
 	}
 
-	if (nvmf_should_reconnect(&ctrl->ctrl)) {
+	if (nvmf_should_reconnect(&ctrl->ctrl, status)) {
 		dev_info(ctrl->ctrl.device, "Reconnecting in %d seconds...\n",
 			ctrl->ctrl.opts->reconnect_delay);
 		queue_delayed_work(nvme_wq, &ctrl->reconnect_work,
@@ -1104,10 +1105,12 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
 {
 	struct nvme_rdma_ctrl *ctrl = container_of(to_delayed_work(work),
 			struct nvme_rdma_ctrl, reconnect_work);
+	int ret;
 
 	++ctrl->ctrl.nr_reconnects;
 
-	if (nvme_rdma_setup_ctrl(ctrl, false))
+	ret = nvme_rdma_setup_ctrl(ctrl, false);
+	if (ret)
 		goto requeue;
 
 	dev_info(ctrl->ctrl.device, "Successfully reconnected (%d attempts)\n",
@@ -1118,9 +1121,9 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
 	return;
 
 requeue:
-	dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d\n",
-			ctrl->ctrl.nr_reconnects);
-	nvme_rdma_reconnect_or_remove(ctrl);
+	dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d/%d\n",
+		 ctrl->ctrl.nr_reconnects, ctrl->ctrl.opts->max_reconnects);
+	nvme_rdma_reconnect_or_remove(ctrl, ret);
 }
 
 static void nvme_rdma_error_recovery_work(struct work_struct *work)
@@ -1145,7 +1148,7 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
 		return;
 	}
 
-	nvme_rdma_reconnect_or_remove(ctrl);
+	nvme_rdma_reconnect_or_remove(ctrl, 0);
 }
 
 static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl)
@@ -2169,6 +2172,7 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
 {
 	struct nvme_rdma_ctrl *ctrl =
 		container_of(work, struct nvme_rdma_ctrl, ctrl.reset_work);
+	int ret;
 
 	nvme_stop_ctrl(&ctrl->ctrl);
 	nvme_rdma_shutdown_ctrl(ctrl, false);
@@ -2179,14 +2183,15 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
 		return;
 	}
 
-	if (nvme_rdma_setup_ctrl(ctrl, false))
+	ret = nvme_rdma_setup_ctrl(ctrl, false);
+	if (ret)
 		goto out_fail;
 
 	return;
 
 out_fail:
 	++ctrl->ctrl.nr_reconnects;
-	nvme_rdma_reconnect_or_remove(ctrl);
+	nvme_rdma_reconnect_or_remove(ctrl, ret);
 }
 
 static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {

diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
@@ -2161,7 +2161,8 @@ static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
 	nvme_tcp_destroy_io_queues(ctrl, remove);
 }
 
-static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
+static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl,
+		int status)
 {
 	enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
 
@@ -2171,13 +2172,14 @@ static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl,
 		return;
 	}
 
-	if (nvmf_should_reconnect(ctrl)) {
+	if (nvmf_should_reconnect(ctrl, status)) {
 		dev_info(ctrl->device, "Reconnecting in %d seconds...\n",
 			ctrl->opts->reconnect_delay);
 		queue_delayed_work(nvme_wq, &to_tcp_ctrl(ctrl)->connect_work,
 				ctrl->opts->reconnect_delay * HZ);
 	} else {
-		dev_info(ctrl->device, "Removing controller...\n");
+		dev_info(ctrl->device, "Removing controller (%d)...\n",
+			 status);
 		nvme_delete_ctrl(ctrl);
 	}
 }
@@ -2258,23 +2260,25 @@ static void nvme_tcp_reconnect_ctrl_work(struct work_struct *work)
 	struct nvme_tcp_ctrl *tcp_ctrl = container_of(to_delayed_work(work),
 			struct nvme_tcp_ctrl, connect_work);
 	struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
+	int ret;
 
 	++ctrl->nr_reconnects;
 
-	if (nvme_tcp_setup_ctrl(ctrl, false))
+	ret = nvme_tcp_setup_ctrl(ctrl, false);
+	if (ret)
 		goto requeue;
 
-	dev_info(ctrl->device, "Successfully reconnected (%d attempt)\n",
-			ctrl->nr_reconnects);
+	dev_info(ctrl->device, "Successfully reconnected (attempt %d/%d)\n",
+		 ctrl->nr_reconnects, ctrl->opts->max_reconnects);
 
 	ctrl->nr_reconnects = 0;
 
 	return;
 
 requeue:
-	dev_info(ctrl->device, "Failed reconnect attempt %d\n",
-			ctrl->nr_reconnects);
-	nvme_tcp_reconnect_or_remove(ctrl);
+	dev_info(ctrl->device, "Failed reconnect attempt %d/%d\n",
+		 ctrl->nr_reconnects, ctrl->opts->max_reconnects);
+	nvme_tcp_reconnect_or_remove(ctrl, ret);
 }
 
 static void nvme_tcp_error_recovery_work(struct work_struct *work)
@@ -2301,7 +2305,7 @@ static void nvme_tcp_error_recovery_work(struct work_struct *work)
 		return;
 	}
 
-	nvme_tcp_reconnect_or_remove(ctrl);
+	nvme_tcp_reconnect_or_remove(ctrl, 0);
 }
 
 static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
@@ -2321,6 +2325,7 @@ static void nvme_reset_ctrl_work(struct work_struct *work)
 {
 	struct nvme_ctrl *ctrl =
 		container_of(work, struct nvme_ctrl, reset_work);
+	int ret;
 
 	nvme_stop_ctrl(ctrl);
 	nvme_tcp_teardown_ctrl(ctrl, false);
@@ -2334,14 +2339,15 @@ static void nvme_reset_ctrl_work(struct work_struct *work)
 		return;
 	}
 
-	if (nvme_tcp_setup_ctrl(ctrl, false))
+	ret = nvme_tcp_setup_ctrl(ctrl, false);
+	if (ret)
 		goto out_fail;
 
 	return;
 
 out_fail:
 	++ctrl->nr_reconnects;
-	nvme_tcp_reconnect_or_remove(ctrl);
+	nvme_tcp_reconnect_or_remove(ctrl, ret);
 }
 
 static void nvme_tcp_stop_ctrl(struct nvme_ctrl *ctrl)

diff --git a/drivers/nvme/target/auth.c b/drivers/nvme/target/auth.c
@@ -44,6 +44,7 @@ int nvmet_auth_set_key(struct nvmet_host *host, const char *secret,
 	dhchap_secret = kstrdup(secret, GFP_KERNEL);
 	if (!dhchap_secret)
 		return -ENOMEM;
+	down_write(&nvmet_config_sem);
 	if (set_ctrl) {
 		kfree(host->dhchap_ctrl_secret);
 		host->dhchap_ctrl_secret = strim(dhchap_secret);
@@ -53,6 +54,7 @@ int nvmet_auth_set_key(struct nvmet_host *host, const char *secret,
 		host->dhchap_secret = strim(dhchap_secret);
 		host->dhchap_key_hash = key_hash;
 	}
+	up_write(&nvmet_config_sem);
 	return 0;
 }
 
@@ -124,12 +126,11 @@ int nvmet_setup_dhgroup(struct nvmet_ctrl *ctrl, u8 dhgroup_id)
 	return ret;
 }
 
-int nvmet_setup_auth(struct nvmet_ctrl *ctrl)
+u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl)
 {
 	int ret = 0;
 	struct nvmet_host_link *p;
 	struct nvmet_host *host = NULL;
-	const char *hash_name;
 
 	down_read(&nvmet_config_sem);
 	if (nvmet_is_disc_subsys(ctrl->subsys))
@@ -147,13 +148,16 @@ u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl)
 	}
 	if (!host) {
 		pr_debug("host %s not found\n", ctrl->hostnqn);
-		ret = -EPERM;
+		ret = NVME_AUTH_DHCHAP_FAILURE_FAILED;
 		goto out_unlock;
 	}
 
 	ret = nvmet_setup_dhgroup(ctrl, host->dhchap_dhgroup_id);
-	if (ret < 0)
+	if (ret < 0) {
+		pr_warn("Failed to setup DH group");
+		ret = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
 		goto out_unlock;
+	}
 
 	if (!host->dhchap_secret) {
 		pr_debug("No authentication provided\n");
@@ -164,12 +168,6 @@ u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl)
 		pr_debug("Re-use existing hash ID %d\n",
 			 ctrl->shash_id);
 	} else {
-		hash_name = nvme_auth_hmac_name(host->dhchap_hash_id);
-		if (!hash_name) {
-			pr_warn("Hash ID %d invalid\n", host->dhchap_hash_id);
-			ret = -EINVAL;
-			goto out_unlock;
-		}
 		ctrl->shash_id = host->dhchap_hash_id;
 	}
 
@@ -178,7 +176,7 @@ u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl)
 	ctrl->host_key = nvme_auth_extract_key(host->dhchap_secret + 10,
 					       host->dhchap_key_hash);
 	if (IS_ERR(ctrl->host_key)) {
-		ret = PTR_ERR(ctrl->host_key);
+		ret = NVME_AUTH_DHCHAP_FAILURE_NOT_USABLE;
 		ctrl->host_key = NULL;
 		goto out_free_hash;
 	}
@@ -196,7 +194,7 @@ u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl)
 	ctrl->ctrl_key = nvme_auth_extract_key(host->dhchap_ctrl_secret + 10,
 					       host->dhchap_ctrl_key_hash);
 	if (IS_ERR(ctrl->ctrl_key)) {
-		ret = PTR_ERR(ctrl->ctrl_key);
+		ret = NVME_AUTH_DHCHAP_FAILURE_NOT_USABLE;
 		ctrl->ctrl_key = NULL;
 		goto out_free_hash;
 	}

diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
@@ -2007,11 +2007,17 @@ static struct config_group nvmet_ports_group;
 static ssize_t nvmet_host_dhchap_key_show(struct config_item *item,
 		char *page)
 {
-	u8 *dhchap_secret = to_host(item)->dhchap_secret;
+	u8 *dhchap_secret;
+	ssize_t ret;
 
+	down_read(&nvmet_config_sem);
+	dhchap_secret = to_host(item)->dhchap_secret;
 	if (!dhchap_secret)
-		return sprintf(page, "\n");
-	return sprintf(page, "%s\n", dhchap_secret);
+		ret = sprintf(page, "\n");
+	else
+		ret = sprintf(page, "%s\n", dhchap_secret);
+	up_read(&nvmet_config_sem);
+	return ret;
 }
 
 static ssize_t nvmet_host_dhchap_key_store(struct config_item *item,
@@ -2035,10 +2041,16 @@ static ssize_t nvmet_host_dhchap_ctrl_key_show(struct config_item *item,
 		char *page)
 {
-	u8 *dhchap_secret = to_host(item)->dhchap_ctrl_secret;
+	u8 *dhchap_secret;
+	ssize_t ret;
 
+	down_read(&nvmet_config_sem);
+	dhchap_secret = to_host(item)->dhchap_ctrl_secret;
 	if (!dhchap_secret)
-		return sprintf(page, "\n");
-	return sprintf(page, "%s\n", dhchap_secret);
+		ret = sprintf(page, "\n");
+	else
+		ret = sprintf(page, "%s\n", dhchap_secret);
+	up_read(&nvmet_config_sem);
+	return ret;
 }
 
 static ssize_t nvmet_host_dhchap_ctrl_key_store(struct config_item *item,

diff --git a/drivers/nvme/target/fabrics-cmd-auth.c b/drivers/nvme/target/fabrics-cmd-auth.c
@@ -31,7 +31,7 @@ void nvmet_auth_sq_init(struct nvmet_sq *sq)
 	sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
 }
 
-static u16 nvmet_auth_negotiate(struct nvmet_req *req, void *d)
+static u8 nvmet_auth_negotiate(struct nvmet_req *req, void *d)
 {
 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
 	struct nvmf_auth_dhchap_negotiate_data *data = d;
@@ -109,7 +109,7 @@ static u8 nvmet_auth_negotiate(struct nvmet_req *req, void *d)
 	return 0;
 }
 
-static u16 nvmet_auth_reply(struct nvmet_req *req, void *d)
+static u8 nvmet_auth_reply(struct nvmet_req *req, void *d)
 {
 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
 	struct nvmf_auth_dhchap_reply_data *data = d;
@@ -172,7 +172,7 @@ static u8 nvmet_auth_reply(struct nvmet_req *req, void *d)
 	return 0;
 }
 
-static u16 nvmet_auth_failure2(void *d)
+static u8 nvmet_auth_failure2(void *d)
 {
 	struct nvmf_auth_dhchap_failure_data *data = d;
 
@@ -186,6 +186,7 @@ void nvmet_execute_auth_send(struct nvmet_req *req)
 	void *d;
 	u32 tl;
 	u16 status = 0;
+	u8 dhchap_status;
 
 	if (req->cmd->auth_send.secp != NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) {
 		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
@@ -237,30 +238,32 @@ void nvmet_execute_auth_send(struct nvmet_req *req)
 	if (data->auth_type == NVME_AUTH_COMMON_MESSAGES) {
 		if (data->auth_id == NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE) {
 			/* Restart negotiation */
-			pr_debug("%s: ctrl %d qid %d reset negotiation\n", __func__,
-				 ctrl->cntlid, req->sq->qid);
+			pr_debug("%s: ctrl %d qid %d reset negotiation\n",
+				 __func__, ctrl->cntlid, req->sq->qid);
 			if (!req->sq->qid) {
-				if (nvmet_setup_auth(ctrl) < 0) {
-					status = NVME_SC_INTERNAL;
-					pr_err("ctrl %d qid 0 failed to setup"
-					       "re-authentication",
+				dhchap_status = nvmet_setup_auth(ctrl);
+				if (dhchap_status) {
+					pr_err("ctrl %d qid 0 failed to setup re-authentication\n",
 					       ctrl->cntlid);
-					goto done_failure1;
+					req->sq->dhchap_status = dhchap_status;
+					req->sq->dhchap_step =
+						NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
+					goto done_kfree;
 				}
 			}
-			req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
+			req->sq->dhchap_step =
+				NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
 		} else if (data->auth_id != req->sq->dhchap_step)
 			goto done_failure1;
 		/* Validate negotiation parameters */
-		status = nvmet_auth_negotiate(req, d);
-		if (status == 0)
+		dhchap_status = nvmet_auth_negotiate(req, d);
+		if (dhchap_status == 0)
 			req->sq->dhchap_step =
 				NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE;
 		else {
 			req->sq->dhchap_step =
 				NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
-			req->sq->dhchap_status = status;
-			status = 0;
+			req->sq->dhchap_status = dhchap_status;
 		}
 		goto done_kfree;
 	}
@@ -284,15 +287,14 @@ void nvmet_execute_auth_send(struct nvmet_req *req)
 
 	switch (data->auth_id) {
 	case NVME_AUTH_DHCHAP_MESSAGE_REPLY:
-		status = nvmet_auth_reply(req, d);
-		if (status == 0)
+		dhchap_status = nvmet_auth_reply(req, d);
+		if (dhchap_status == 0)
 			req->sq->dhchap_step =
 				NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1;
 		else {
 			req->sq->dhchap_step =
 				NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
-			req->sq->dhchap_status = status;
-			status = 0;
+			req->sq->dhchap_status = dhchap_status;
 		}
 		goto done_kfree;
 	case NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2:
@@ -301,13 +303,12 @@ void nvmet_execute_auth_send(struct nvmet_req *req)
 			 __func__, ctrl->cntlid, req->sq->qid);
 		goto done_kfree;
 	case NVME_AUTH_DHCHAP_MESSAGE_FAILURE2:
-		status = nvmet_auth_failure2(d);
-		if (status) {
+		dhchap_status = nvmet_auth_failure2(d);
+		if (dhchap_status) {
 			pr_warn("ctrl %d qid %d: authentication failed (%d)\n",
-				ctrl->cntlid, req->sq->qid, status);
-			req->sq->dhchap_status = status;
+				ctrl->cntlid, req->sq->qid, dhchap_status);
+			req->sq->dhchap_status = dhchap_status;
 			req->sq->authenticated = false;
-			status = 0;
 		}
 		goto done_kfree;
 	default:

diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
@@ -211,7 +211,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
 	struct nvmf_connect_data *d;
 	struct nvmet_ctrl *ctrl = NULL;
 	u16 status;
-	int ret;
+	u8 dhchap_status;
 
 	if (!nvmet_check_transfer_len(req, sizeof(struct nvmf_connect_data)))
 		return;
@@ -254,11 +254,12 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
 
 	uuid_copy(&ctrl->hostid, &d->hostid);
 
-	ret = nvmet_setup_auth(ctrl);
-	if (ret < 0) {
-		pr_err("Failed to setup authentication, error %d\n", ret);
+	dhchap_status = nvmet_setup_auth(ctrl);
+	if (dhchap_status) {
+		pr_err("Failed to setup authentication, dhchap status %u\n",
+		       dhchap_status);
 		nvmet_ctrl_put(ctrl);
-		if (ret == -EPERM)
+		if (dhchap_status == NVME_AUTH_DHCHAP_FAILURE_FAILED)
 			status = (NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR);
 		else
 			status = NVME_SC_INTERNAL;

diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
@@ -113,8 +113,8 @@ struct nvmet_sq {
 	bool			authenticated;
 	struct delayed_work	auth_expired_work;
 	u16			dhchap_tid;
-	u16			dhchap_status;
-	int			dhchap_step;
+	u8			dhchap_status;
+	u8			dhchap_step;
 	u8			*dhchap_c1;
 	u8			*dhchap_c2;
 	u32			dhchap_s1;
@@ -714,7 +714,7 @@ void nvmet_execute_auth_receive(struct nvmet_req *req);
 int nvmet_auth_set_key(struct nvmet_host *host, const char *secret,
 		       bool set_ctrl);
 int nvmet_auth_set_host_hash(struct nvmet_host *host, const char *hash);
-int nvmet_setup_auth(struct nvmet_ctrl *ctrl);
+u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl);
 void nvmet_auth_sq_init(struct nvmet_sq *sq);
 void nvmet_destroy_auth(struct nvmet_ctrl *ctrl);
 void nvmet_auth_sq_free(struct nvmet_sq *sq);
@@ -733,7 +733,7 @@ int nvmet_auth_ctrl_exponential(struct nvmet_req *req,
 int nvmet_auth_ctrl_sesskey(struct nvmet_req *req,
 			    u8 *buf, int buf_size);
 #else
-static inline int nvmet_setup_auth(struct nvmet_ctrl *ctrl)
+static inline u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl)
 {
 	return 0;
 }

diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
@@ -1806,18 +1806,14 @@ static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
 
 static void nvmet_rdma_delete_ctrl(struct nvmet_ctrl *ctrl)
 {
-	struct nvmet_rdma_queue *queue;
+	struct nvmet_rdma_queue *queue, *n;
 
-restart:
 	mutex_lock(&nvmet_rdma_queue_mutex);
-	list_for_each_entry(queue, &nvmet_rdma_queue_list, queue_list) {
-		if (queue->nvme_sq.ctrl == ctrl) {
-			list_del_init(&queue->queue_list);
-			mutex_unlock(&nvmet_rdma_queue_mutex);
-
-			__nvmet_rdma_queue_disconnect(queue);
-			goto restart;
-		}
+	list_for_each_entry_safe(queue, n, &nvmet_rdma_queue_list, queue_list) {
+		if (queue->nvme_sq.ctrl != ctrl)
+			continue;
+		list_del_init(&queue->queue_list);
+		__nvmet_rdma_queue_disconnect(queue);
 	}
 	mutex_unlock(&nvmet_rdma_queue_mutex);
 }

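Editor's note: the delete_ctrl rework above replaces a restart-from-scratch walk (O(n^2) over n queues, dropping and retaking the mutex each round) with one pass using the _safe iterator, which caches the next node before the current one is unlinked. The same idea in plain C over a singly linked list (illustrative types, not nvmet structures):

#include <stdlib.h>

struct queue { int ctrl_id; struct queue *next; };

/* one pass: unlink and free matching nodes without restarting */
static struct queue *delete_ctrl(struct queue *head, int ctrl_id)
{
	struct queue **pp = &head;

	while (*pp) {
		struct queue *cur = *pp;

		if (cur->ctrl_id == ctrl_id) {
			*pp = cur->next;	/* unlink, keep our place */
			free(cur);
		} else {
			pp = &cur->next;
		}
	}
	return head;
}
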
diff --git a/include/trace/events/nbd.h b/include/trace/events/nbd.h
@@ -72,7 +72,7 @@ DECLARE_EVENT_CLASS(nbd_send_request,
 	),
 
 	TP_fast_assign(
-		__entry->nbd_request = 0;
+		__entry->nbd_request = NULL;
 		__entry->dev_index = index;
 		__entry->request = rq;
 	),