for-linus-2019-10-03

-----BEGIN PGP SIGNATURE-----
 
 iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAl2WrkYQHGF4Ym9lQGtl
 cm5lbC5kawAKCRD301j7KXHgpjf1D/9wy2L/yAXA/KLQkwDZ2kn3tMtzMrsv6bOJ
 4UTdlLWKH2ihGg69iAE5f19iQSeNmqqXxBz0VvIBpFR7TqcRbGe0d9iVtoOsDhpZ
 c1OvQ2Ey35Xx1T2w6uMh0llgeWY2J/gJkY64unUxwUBUZwNOPA8ZjxqeXcMQmyAt
 sYmXpNLCT/f4YTYOYDgzoh5960TsCB/H+m/bLEVRvr0MaonvlaBUKRcysQikDFCQ
 yobzDmlSJqGKqlFJ2fnRVSkJC0BmBE5p8Ric9HHiUOT8BO31079IHUGbkbSh/csH
 0yPipNaYNMv+Hr0t9pgfcNbAt2weMK5HFgtpQwv8Frl4xjvBSWDS5fQesCVDjkZt
 +ROeOvQtjfeKtLy5PCu6BJwYpu8iYG9eGF8zxBQ4FBHM3tghcVhqssaNbfrVOW+u
 YXYbAuLMkLwKlmJ+6WBiVIMefyF59ue3+UJGECiCrj/BrgxUyw8HcGKwpKEAZSok
 VFGDukL0Y3flnoO/gyOf0GFaD5Uovr1sx82DCz05B/XEMfkqFMJRGkbyZBarJL69
 9QrnyGpF4rwtfg+usR1PmJ+9/oY/ypSk8N9MAIkoK9e1YIexxvBiXAf0k8AxuDyC
 uPuOiQgKcqUr3aF+ivao8dQB9NiK1bJGc4pqBPPN4ZYRSjMSfBT/cms4IeUyj0K6
 sokcB1p+CQ==
 =vKVl
 -----END PGP SIGNATURE-----

Merge tag 'for-linus-2019-10-03' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:

 - Mandate timespec64 for the io_uring timeout ABI (Arnd)

 - Set of NVMe changes via Sagi:
     - controller removal race fix from Balbir
     - quirk additions from Gabriel and Jian-Hong
     - nvme-pci power state save fix from Mario
     - Add 64bit user commands (for 64bit registers) from Marta
     - nvme-rdma/nvme-tcp fixes from Max, Mark and me
     - Minor cleanups and nits from James, Dan and John

 - Two s390 dasd fixes (Jan, Stefan)

 - Have loop change block size in DIO mode (Martijn)

 - paride pg header ifdef guard (Masahiro)

 - Two blk-mq queue scheduler tweaks, fixing an ordering issue on zoned
   devices and suboptimal performance on others (Ming)

* tag 'for-linus-2019-10-03' of git://git.kernel.dk/linux-block: (22 commits)
  block: sed-opal: fix sparse warning: convert __be64 data
  block: sed-opal: fix sparse warning: obsolete array init.
  block: pg: add header include guard
  Revert "s390/dasd: Add discard support for ESE volumes"
  s390/dasd: Fix error handling during online processing
  io_uring: use __kernel_timespec in timeout ABI
  loop: change queue block size to match when using DIO
  blk-mq: apply normal plugging for HDD
  blk-mq: honor IO scheduler for multiqueue devices
  nvme-rdma: fix possible use-after-free in connect timeout
  nvme: Move ctrl sqsize to generic space
  nvme: Add ctrl attributes for queue_count and sqsize
  nvme: allow 64-bit results in passthru commands
  nvme: Add quirk for Kingston NVME SSD running FW E8FK11.T
  nvmet-tcp: remove superflous check on request sgl
  Added QUIRKs for ADATA XPG SX8200 Pro 512GB
  nvme-rdma: Fix max_hw_sectors calculation
  nvme: fix an error code in nvme_init_subsystem()
  nvme-pci: Save PCI state before putting drive into deepest state
  nvme-tcp: fix wrong stop condition in io_work
  ...
commit c4bd70e8c9
Linus Torvalds, 2019-10-04 09:56:51 -07:00
14 changed files with 218 additions and 132 deletions

@ -1992,10 +1992,14 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
/* bypass scheduler for flush rq */
blk_insert_flush(rq);
blk_mq_run_hw_queue(data.hctx, true);
} else if (plug && (q->nr_hw_queues == 1 || q->mq_ops->commit_rqs)) {
} else if (plug && (q->nr_hw_queues == 1 || q->mq_ops->commit_rqs ||
!blk_queue_nonrot(q))) {
/*
* Use plugging if we have a ->commit_rqs() hook as well, as
* we know the driver uses bd->last in a smart fashion.
*
* Use normal plugging if this disk is slow HDD, as sequential
* IO may benefit a lot from plug merging.
*/
unsigned int request_count = plug->rq_count;
struct request *last = NULL;
@ -2012,6 +2016,8 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
}
blk_add_rq_to_plug(plug, rq);
} else if (q->elevator) {
blk_mq_sched_insert_request(rq, false, true, true);
} else if (plug && !blk_queue_nomerges(q)) {
/*
* We do limited plugging. If the bio can be merged, do that.
@ -2035,8 +2041,8 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
blk_mq_try_issue_directly(data.hctx, same_queue_rq,
&cookie);
}
} else if ((q->nr_hw_queues > 1 && is_sync) || (!q->elevator &&
!data.hctx->dispatch_busy)) {
} else if ((q->nr_hw_queues > 1 && is_sync) ||
!data.hctx->dispatch_busy) {
blk_mq_try_issue_directly(data.hctx, rq, &cookie);
} else {
blk_mq_sched_insert_request(rq, false, true, true);


@ -129,7 +129,7 @@ static const u8 opaluid[][OPAL_UID_LENGTH] = {
{ 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x84, 0x01 },
/* tables */
[OPAL_TABLE_TABLE]
[OPAL_TABLE_TABLE] =
{ 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01 },
[OPAL_LOCKINGRANGE_GLOBAL] =
{ 0x00, 0x00, 0x08, 0x02, 0x00, 0x00, 0x00, 0x01 },
@ -372,8 +372,8 @@ static void check_geometry(struct opal_dev *dev, const void *data)
{
const struct d0_geometry_features *geo = data;
dev->align = geo->alignment_granularity;
dev->lowest_lba = geo->lowest_aligned_lba;
dev->align = be64_to_cpu(geo->alignment_granularity);
dev->lowest_lba = be64_to_cpu(geo->lowest_aligned_lba);
}
static int execute_step(struct opal_dev *dev,


@ -994,6 +994,16 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
blk_queue_write_cache(lo->lo_queue, true, false);
if (io_is_direct(lo->lo_backing_file) && inode->i_sb->s_bdev) {
/* In case of direct I/O, match underlying block size */
unsigned short bsize = bdev_logical_block_size(
inode->i_sb->s_bdev);
blk_queue_logical_block_size(lo->lo_queue, bsize);
blk_queue_physical_block_size(lo->lo_queue, bsize);
blk_queue_io_min(lo->lo_queue, bsize);
}
loop_update_rotational(lo);
loop_update_dio(lo);
set_capacity(lo->lo_disk, size);
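
To see the effect of the hunk above from userspace, a rough sketch (assuming the backing file lives on a block-device-backed filesystem, that /dev/loop7 is a free loop device, and that the caller has the privileges loop setup needs) is to open the file with O_DIRECT before LOOP_SET_FD and then read the loop device's logical block size back with BLKSSZGET; with this change it should match the underlying device instead of always reporting 512:

#define _GNU_SOURCE		/* for O_DIRECT */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>		/* BLKSSZGET */
#include <linux/loop.h>		/* LOOP_SET_FD */

int main(void)
{
	/* Assumed paths, for illustration only */
	int backing = open("/var/tmp/backing.img", O_RDWR | O_DIRECT);
	int loopfd = open("/dev/loop7", O_RDWR);
	int lbs = 0;

	if (backing < 0 || loopfd < 0) {
		perror("open");
		return 1;
	}

	/* Attach the O_DIRECT-opened file; loop_set_fd() runs at this point */
	if (ioctl(loopfd, LOOP_SET_FD, backing)) {
		perror("LOOP_SET_FD");
		return 1;
	}

	/* Query the logical block size the loop queue now advertises */
	if (ioctl(loopfd, BLKSSZGET, &lbs) == 0)
		printf("loop logical block size: %d\n", lbs);

	close(loopfd);
	close(backing);
	return 0;
}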


@ -102,10 +102,13 @@ static void nvme_set_queue_dying(struct nvme_ns *ns)
*/
if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
return;
revalidate_disk(ns->disk);
blk_set_queue_dying(ns->queue);
/* Forcibly unquiesce queues to avoid blocking dispatch */
blk_mq_unquiesce_queue(ns->queue);
/*
* Revalidate after unblocking dispatchers that may be holding bd_mutex
*/
revalidate_disk(ns->disk);
}
static void nvme_queue_scan(struct nvme_ctrl *ctrl)
@ -847,7 +850,7 @@ out:
static int nvme_submit_user_cmd(struct request_queue *q,
struct nvme_command *cmd, void __user *ubuffer,
unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
u32 meta_seed, u32 *result, unsigned timeout)
u32 meta_seed, u64 *result, unsigned timeout)
{
bool write = nvme_is_write(cmd);
struct nvme_ns *ns = q->queuedata;
@ -888,7 +891,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
else
ret = nvme_req(req)->status;
if (result)
*result = le32_to_cpu(nvme_req(req)->result.u32);
*result = le64_to_cpu(nvme_req(req)->result.u64);
if (meta && !ret && !write) {
if (copy_to_user(meta_buffer, meta, meta_len))
ret = -EFAULT;
@ -1335,6 +1338,54 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
struct nvme_command c;
unsigned timeout = 0;
u32 effects;
u64 result;
int status;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
return -EFAULT;
if (cmd.flags)
return -EINVAL;
memset(&c, 0, sizeof(c));
c.common.opcode = cmd.opcode;
c.common.flags = cmd.flags;
c.common.nsid = cpu_to_le32(cmd.nsid);
c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
c.common.cdw10 = cpu_to_le32(cmd.cdw10);
c.common.cdw11 = cpu_to_le32(cmd.cdw11);
c.common.cdw12 = cpu_to_le32(cmd.cdw12);
c.common.cdw13 = cpu_to_le32(cmd.cdw13);
c.common.cdw14 = cpu_to_le32(cmd.cdw14);
c.common.cdw15 = cpu_to_le32(cmd.cdw15);
if (cmd.timeout_ms)
timeout = msecs_to_jiffies(cmd.timeout_ms);
effects = nvme_passthru_start(ctrl, ns, cmd.opcode);
status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
(void __user *)(uintptr_t)cmd.addr, cmd.data_len,
(void __user *)(uintptr_t)cmd.metadata,
cmd.metadata_len, 0, &result, timeout);
nvme_passthru_end(ctrl, effects);
if (status >= 0) {
if (put_user(result, &ucmd->result))
return -EFAULT;
}
return status;
}
static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
struct nvme_passthru_cmd64 __user *ucmd)
{
struct nvme_passthru_cmd64 cmd;
struct nvme_command c;
unsigned timeout = 0;
u32 effects;
int status;
if (!capable(CAP_SYS_ADMIN))
@ -1405,6 +1456,41 @@ static void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx)
srcu_read_unlock(&head->srcu, idx);
}
static bool is_ctrl_ioctl(unsigned int cmd)
{
if (cmd == NVME_IOCTL_ADMIN_CMD || cmd == NVME_IOCTL_ADMIN64_CMD)
return true;
if (is_sed_ioctl(cmd))
return true;
return false;
}
static int nvme_handle_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
void __user *argp,
struct nvme_ns_head *head,
int srcu_idx)
{
struct nvme_ctrl *ctrl = ns->ctrl;
int ret;
nvme_get_ctrl(ns->ctrl);
nvme_put_ns_from_disk(head, srcu_idx);
switch (cmd) {
case NVME_IOCTL_ADMIN_CMD:
ret = nvme_user_cmd(ctrl, NULL, argp);
break;
case NVME_IOCTL_ADMIN64_CMD:
ret = nvme_user_cmd64(ctrl, NULL, argp);
break;
default:
ret = sed_ioctl(ctrl->opal_dev, cmd, argp);
break;
}
nvme_put_ctrl(ctrl);
return ret;
}
static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
@ -1422,20 +1508,8 @@ static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
* separately and drop the ns SRCU reference early. This avoids a
* deadlock when deleting namespaces using the passthrough interface.
*/
if (cmd == NVME_IOCTL_ADMIN_CMD || is_sed_ioctl(cmd)) {
struct nvme_ctrl *ctrl = ns->ctrl;
nvme_get_ctrl(ns->ctrl);
nvme_put_ns_from_disk(head, srcu_idx);
if (cmd == NVME_IOCTL_ADMIN_CMD)
ret = nvme_user_cmd(ctrl, NULL, argp);
else
ret = sed_ioctl(ctrl->opal_dev, cmd, argp);
nvme_put_ctrl(ctrl);
return ret;
}
if (is_ctrl_ioctl(cmd))
return nvme_handle_ctrl_ioctl(ns, cmd, argp, head, srcu_idx);
switch (cmd) {
case NVME_IOCTL_ID:
@ -1448,6 +1522,9 @@ static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
case NVME_IOCTL_SUBMIT_IO:
ret = nvme_submit_io(ns, argp);
break;
case NVME_IOCTL_IO64_CMD:
ret = nvme_user_cmd64(ns->ctrl, ns, argp);
break;
default:
if (ns->ndev)
ret = nvme_nvm_ioctl(ns, cmd, arg);
@ -2289,6 +2366,16 @@ static const struct nvme_core_quirk_entry core_quirks[] = {
.vid = 0x14a4,
.fr = "22301111",
.quirks = NVME_QUIRK_SIMPLE_SUSPEND,
},
{
/*
* This Kingston E8FK11.T firmware version has no interrupt
* after resume with actions related to suspend to idle
* https://bugzilla.kernel.org/show_bug.cgi?id=204887
*/
.vid = 0x2646,
.fr = "E8FK11.T",
.quirks = NVME_QUIRK_SIMPLE_SUSPEND,
}
};
@ -2540,8 +2627,9 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
list_add_tail(&subsys->entry, &nvme_subsystems);
}
if (sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj,
dev_name(ctrl->device))) {
ret = sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj,
dev_name(ctrl->device));
if (ret) {
dev_err(ctrl->device,
"failed to create sysfs link from subsystem.\n");
goto out_put_subsystem;
@ -2838,6 +2926,8 @@ static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
switch (cmd) {
case NVME_IOCTL_ADMIN_CMD:
return nvme_user_cmd(ctrl, NULL, argp);
case NVME_IOCTL_ADMIN64_CMD:
return nvme_user_cmd64(ctrl, NULL, argp);
case NVME_IOCTL_IO_CMD:
return nvme_dev_user_cmd(ctrl, argp);
case NVME_IOCTL_RESET:
@ -3045,6 +3135,8 @@ static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
nvme_show_int_function(cntlid);
nvme_show_int_function(numa_node);
nvme_show_int_function(queue_count);
nvme_show_int_function(sqsize);
static ssize_t nvme_sysfs_delete(struct device *dev,
struct device_attribute *attr, const char *buf,
@ -3125,6 +3217,8 @@ static struct attribute *nvme_dev_attrs[] = {
&dev_attr_address.attr,
&dev_attr_state.attr,
&dev_attr_numa_node.attr,
&dev_attr_queue_count.attr,
&dev_attr_sqsize.attr,
NULL
};


@ -221,6 +221,7 @@ struct nvme_ctrl {
u16 oacs;
u16 nssa;
u16 nr_streams;
u16 sqsize;
u32 max_namespaces;
atomic_t abort_limit;
u8 vwc;
@ -269,7 +270,6 @@ struct nvme_ctrl {
u16 hmmaxd;
/* Fabrics only */
u16 sqsize;
u32 ioccsz;
u32 iorcsz;
u16 icdoff;


@ -2946,11 +2946,21 @@ static int nvme_suspend(struct device *dev)
if (ret < 0)
goto unfreeze;
/*
* A saved state prevents pci pm from generically controlling the
* device's power. If we're using protocol specific settings, we don't
* want pci interfering.
*/
pci_save_state(pdev);
ret = nvme_set_power_state(ctrl, ctrl->npss);
if (ret < 0)
goto unfreeze;
if (ret) {
/* discard the saved state */
pci_load_saved_state(pdev, NULL);
/*
* Clearing npss forces a controller reset on resume. The
* correct value will be rediscovered then.
@ -2958,14 +2968,7 @@ static int nvme_suspend(struct device *dev)
nvme_dev_disable(ndev, true);
ctrl->npss = 0;
ret = 0;
goto unfreeze;
}
/*
* A saved state prevents pci pm from generically controlling the
* device's power. If we're using protocol specific settings, we don't
* want pci interfering.
*/
pci_save_state(pdev);
unfreeze:
nvme_unfreeze(ctrl);
return ret;
@ -3090,6 +3093,9 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_LIGHTNVM, },
{ PCI_DEVICE(0x10ec, 0x5762), /* ADATA SX6000LNP */
.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
{ PCI_DEVICE(0x1cc1, 0x8201), /* ADATA SX8200PNP 512GB */
.driver_data = NVME_QUIRK_NO_DEEPEST_PS |
NVME_QUIRK_IGNORE_DEV_SUBNQN, },
{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) },
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },


@ -427,7 +427,7 @@ static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
static int nvme_rdma_get_max_fr_pages(struct ib_device *ibdev)
{
return min_t(u32, NVME_RDMA_MAX_SEGMENTS,
ibdev->attrs.max_fast_reg_page_list_len);
ibdev->attrs.max_fast_reg_page_list_len - 1);
}
static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
@ -437,7 +437,7 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
const int cq_factor = send_wr_factor + 1; /* + RECV */
int comp_vector, idx = nvme_rdma_queue_idx(queue);
enum ib_poll_context poll_ctx;
int ret;
int ret, pages_per_mr;
queue->device = nvme_rdma_find_get_device(queue->cm_id);
if (!queue->device) {
@ -479,10 +479,16 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
goto out_destroy_qp;
}
/*
* Currently we don't use SG_GAPS MR's so if the first entry is
* misaligned we'll end up using two entries for a single data page,
* so one additional entry is required.
*/
pages_per_mr = nvme_rdma_get_max_fr_pages(ibdev) + 1;
ret = ib_mr_pool_init(queue->qp, &queue->qp->rdma_mrs,
queue->queue_size,
IB_MR_TYPE_MEM_REG,
nvme_rdma_get_max_fr_pages(ibdev), 0);
pages_per_mr, 0);
if (ret) {
dev_err(queue->ctrl->ctrl.device,
"failed to initialize MR pool sized %d for QID %d\n",
@ -614,7 +620,8 @@ static int nvme_rdma_start_queue(struct nvme_rdma_ctrl *ctrl, int idx)
if (!ret) {
set_bit(NVME_RDMA_Q_LIVE, &queue->flags);
} else {
__nvme_rdma_stop_queue(queue);
if (test_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags))
__nvme_rdma_stop_queue(queue);
dev_info(ctrl->ctrl.device,
"failed to connect queue: %d ret=%d\n", idx, ret);
}
@ -820,8 +827,8 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
if (error)
goto out_stop_queue;
ctrl->ctrl.max_hw_sectors =
(ctrl->max_fr_pages - 1) << (ilog2(SZ_4K) - 9);
ctrl->ctrl.max_segments = ctrl->max_fr_pages;
ctrl->ctrl.max_hw_sectors = ctrl->max_fr_pages << (ilog2(SZ_4K) - 9);
blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);


@ -1042,7 +1042,7 @@ static void nvme_tcp_io_work(struct work_struct *w)
{
struct nvme_tcp_queue *queue =
container_of(w, struct nvme_tcp_queue, io_work);
unsigned long start = jiffies + msecs_to_jiffies(1);
unsigned long deadline = jiffies + msecs_to_jiffies(1);
do {
bool pending = false;
@ -1067,7 +1067,7 @@ static void nvme_tcp_io_work(struct work_struct *w)
if (!pending)
return;
} while (time_after(jiffies, start)); /* quota is exhausted */
} while (!time_after(jiffies, deadline)); /* quota is exhausted */
queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
}


@ -11,10 +11,10 @@
void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
{
const struct queue_limits *ql = &bdev_get_queue(bdev)->limits;
/* Number of physical blocks per logical block. */
const u32 ppl = ql->physical_block_size / ql->logical_block_size;
/* Physical blocks per logical block, 0's based. */
const __le16 ppl0b = to0based(ppl);
/* Number of logical blocks per physical block. */
const u32 lpp = ql->physical_block_size / ql->logical_block_size;
/* Logical blocks per physical block, 0's based. */
const __le16 lpp0b = to0based(lpp);
/*
* For NVMe 1.2 and later, bit 1 indicates that the fields NAWUN,
@ -25,9 +25,9 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
* field from the identify controller data structure should be used.
*/
id->nsfeat |= 1 << 1;
id->nawun = ppl0b;
id->nawupf = ppl0b;
id->nacwu = ppl0b;
id->nawun = lpp0b;
id->nawupf = lpp0b;
id->nacwu = lpp0b;
/*
* Bit 4 indicates that the fields NPWG, NPWA, NPDG, NPDA, and
@ -36,7 +36,7 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
*/
id->nsfeat |= 1 << 4;
/* NPWG = Namespace Preferred Write Granularity. 0's based */
id->npwg = ppl0b;
id->npwg = lpp0b;
/* NPWA = Namespace Preferred Write Alignment. 0's based */
id->npwa = id->npwg;
/* NPDG = Namespace Preferred Deallocate Granularity. 0's based */


@ -348,8 +348,7 @@ static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
return 0;
err:
if (cmd->req.sg_cnt)
sgl_free(cmd->req.sg);
sgl_free(cmd->req.sg);
return NVME_SC_INTERNAL;
}
@ -554,8 +553,7 @@ static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd)
if (queue->nvme_sq.sqhd_disabled) {
kfree(cmd->iov);
if (cmd->req.sg_cnt)
sgl_free(cmd->req.sg);
sgl_free(cmd->req.sg);
}
return 1;
@ -586,8 +584,7 @@ static int nvmet_try_send_response(struct nvmet_tcp_cmd *cmd,
return -EAGAIN;
kfree(cmd->iov);
if (cmd->req.sg_cnt)
sgl_free(cmd->req.sg);
sgl_free(cmd->req.sg);
cmd->queue->snd_cmd = NULL;
nvmet_tcp_put_cmd(cmd);
return 1;
@ -1310,8 +1307,7 @@ static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd)
nvmet_req_uninit(&cmd->req);
nvmet_tcp_unmap_pdu_iovec(cmd);
kfree(cmd->iov);
if (cmd->req.sg_cnt)
sgl_free(cmd->req.sg);
sgl_free(cmd->req.sg);
}
static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)


@ -1553,8 +1553,8 @@ static int dasd_eckd_read_vol_info(struct dasd_device *device)
if (rc == 0) {
memcpy(&private->vsq, vsq, sizeof(*vsq));
} else {
dev_warn(&device->cdev->dev,
"Reading the volume storage information failed with rc=%d\n", rc);
DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
"Reading the volume storage information failed with rc=%d", rc);
}
if (useglobal)
@ -1737,8 +1737,8 @@ static int dasd_eckd_read_ext_pool_info(struct dasd_device *device)
if (rc == 0) {
dasd_eckd_cpy_ext_pool_data(device, lcq);
} else {
dev_warn(&device->cdev->dev,
"Reading the logical configuration failed with rc=%d\n", rc);
DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
"Reading the logical configuration failed with rc=%d", rc);
}
dasd_sfree_request(cqr, cqr->memdev);
@ -2020,14 +2020,10 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
dasd_eckd_read_features(device);
/* Read Volume Information */
rc = dasd_eckd_read_vol_info(device);
if (rc)
goto out_err3;
dasd_eckd_read_vol_info(device);
/* Read Extent Pool Information */
rc = dasd_eckd_read_ext_pool_info(device);
if (rc)
goto out_err3;
dasd_eckd_read_ext_pool_info(device);
/* Read Device Characteristics */
rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
@ -2059,9 +2055,6 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
if (readonly)
set_bit(DASD_FLAG_DEVICE_RO, &device->flags);
if (dasd_eckd_is_ese(device))
dasd_set_feature(device->cdev, DASD_FEATURE_DISCARD, 1);
dev_info(&device->cdev->dev, "New DASD %04X/%02X (CU %04X/%02X) "
"with %d cylinders, %d heads, %d sectors%s\n",
private->rdc_data.dev_type,
@ -3695,14 +3688,6 @@ static int dasd_eckd_release_space(struct dasd_device *device,
return -EINVAL;
}
static struct dasd_ccw_req *
dasd_eckd_build_cp_discard(struct dasd_device *device, struct dasd_block *block,
struct request *req, sector_t first_trk,
sector_t last_trk)
{
return dasd_eckd_dso_ras(device, block, req, first_trk, last_trk, 1);
}
static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
struct dasd_device *startdev,
struct dasd_block *block,
@ -4447,10 +4432,6 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
cmdwtd = private->features.feature[12] & 0x40;
use_prefix = private->features.feature[8] & 0x01;
if (req_op(req) == REQ_OP_DISCARD)
return dasd_eckd_build_cp_discard(startdev, block, req,
first_trk, last_trk);
cqr = NULL;
if (cdlspecial || dasd_page_cache) {
/* do nothing, just fall through to the cmd mode single case */
@ -4729,14 +4710,12 @@ static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base,
struct dasd_block *block,
struct request *req)
{
struct dasd_device *startdev = NULL;
struct dasd_eckd_private *private;
struct dasd_ccw_req *cqr;
struct dasd_device *startdev;
unsigned long flags;
struct dasd_ccw_req *cqr;
/* Discard requests can only be processed on base devices */
if (req_op(req) != REQ_OP_DISCARD)
startdev = dasd_alias_get_start_dev(base);
startdev = dasd_alias_get_start_dev(base);
if (!startdev)
startdev = base;
private = startdev->private;
@ -5663,14 +5642,10 @@ static int dasd_eckd_restore_device(struct dasd_device *device)
dasd_eckd_read_features(device);
/* Read Volume Information */
rc = dasd_eckd_read_vol_info(device);
if (rc)
goto out_err2;
dasd_eckd_read_vol_info(device);
/* Read Extent Pool Information */
rc = dasd_eckd_read_ext_pool_info(device);
if (rc)
goto out_err2;
dasd_eckd_read_ext_pool_info(device);
/* Read Device Characteristics */
rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
@ -6521,20 +6496,8 @@ static void dasd_eckd_setup_blk_queue(struct dasd_block *block)
unsigned int logical_block_size = block->bp_block;
struct request_queue *q = block->request_queue;
struct dasd_device *device = block->base;
struct dasd_eckd_private *private;
unsigned int max_discard_sectors;
unsigned int max_bytes;
unsigned int ext_bytes; /* Extent Size in Bytes */
int recs_per_trk;
int trks_per_cyl;
int ext_limit;
int ext_size; /* Extent Size in Cylinders */
int max;
private = device->private;
trks_per_cyl = private->rdc_data.trk_per_cyl;
recs_per_trk = recs_per_track(&private->rdc_data, 0, logical_block_size);
if (device->features & DASD_FEATURE_USERAW) {
/*
* the max_blocks value for raw_track access is 256
@ -6555,28 +6518,6 @@ static void dasd_eckd_setup_blk_queue(struct dasd_block *block)
/* With page sized segments each segment can be translated into one idaw/tidaw */
blk_queue_max_segment_size(q, PAGE_SIZE);
blk_queue_segment_boundary(q, PAGE_SIZE - 1);
if (dasd_eckd_is_ese(device)) {
/*
* Depending on the extent size, up to UINT_MAX bytes can be
* accepted. However, neither DASD_ECKD_RAS_EXTS_MAX nor the
* device limits should be exceeded.
*/
ext_size = dasd_eckd_ext_size(device);
ext_limit = min(private->real_cyl / ext_size, DASD_ECKD_RAS_EXTS_MAX);
ext_bytes = ext_size * trks_per_cyl * recs_per_trk *
logical_block_size;
max_bytes = UINT_MAX - (UINT_MAX % ext_bytes);
if (max_bytes / ext_bytes > ext_limit)
max_bytes = ext_bytes * ext_limit;
max_discard_sectors = max_bytes / 512;
blk_queue_max_discard_sectors(q, max_discard_sectors);
blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
q->limits.discard_granularity = ext_bytes;
q->limits.discard_alignment = ext_bytes;
}
}
static struct ccw_driver dasd_eckd_driver = {


@ -1892,15 +1892,15 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
unsigned count, req_dist, tail_index;
struct io_ring_ctx *ctx = req->ctx;
struct list_head *entry;
struct timespec ts;
struct timespec64 ts;
if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->timeout_flags ||
sqe->len != 1)
return -EINVAL;
if (copy_from_user(&ts, (void __user *) (unsigned long) sqe->addr,
sizeof(ts)))
if (get_timespec64(&ts, u64_to_user_ptr(sqe->addr)))
return -EFAULT;
/*
@ -1934,7 +1934,7 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
hrtimer_init(&req->timeout.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
req->timeout.timer.function = io_timeout_fn;
hrtimer_start(&req->timeout.timer, timespec_to_ktime(ts),
hrtimer_start(&req->timeout.timer, timespec64_to_ktime(ts),
HRTIMER_MODE_REL);
return 0;
}
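
For the userspace side of the timeout ABI above, here is a minimal sketch using liburing, which is an assumption of convenience and not part of this merge; its io_uring_prep_timeout() helper just stores a struct __kernel_timespec pointer in sqe->addr and 1 in sqe->len, matching what io_timeout() now expects on both 32-bit and 64-bit userland:

#include <stdio.h>
#include <liburing.h>

int main(void)
{
	struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;

	if (io_uring_queue_init(4, &ring, 0))
		return 1;

	/* Queue a timeout that fires after roughly one second */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_timeout(sqe, &ts, 0, 0);
	io_uring_submit(&ring);

	if (!io_uring_wait_cqe(&ring, &cqe)) {
		/* An expired timeout completes with -ETIME in cqe->res */
		printf("timeout cqe res = %d\n", cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_queue_exit(&ring);
	return 0;
}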


@ -45,6 +45,27 @@ struct nvme_passthru_cmd {
__u32 result;
};
struct nvme_passthru_cmd64 {
__u8 opcode;
__u8 flags;
__u16 rsvd1;
__u32 nsid;
__u32 cdw2;
__u32 cdw3;
__u64 metadata;
__u64 addr;
__u32 metadata_len;
__u32 data_len;
__u32 cdw10;
__u32 cdw11;
__u32 cdw12;
__u32 cdw13;
__u32 cdw14;
__u32 cdw15;
__u32 timeout_ms;
__u64 result;
};
#define nvme_admin_cmd nvme_passthru_cmd
#define NVME_IOCTL_ID _IO('N', 0x40)
@ -54,5 +75,7 @@ struct nvme_passthru_cmd {
#define NVME_IOCTL_RESET _IO('N', 0x44)
#define NVME_IOCTL_SUBSYS_RESET _IO('N', 0x45)
#define NVME_IOCTL_RESCAN _IO('N', 0x46)
#define NVME_IOCTL_ADMIN64_CMD _IOWR('N', 0x47, struct nvme_passthru_cmd64)
#define NVME_IOCTL_IO64_CMD _IOWR('N', 0x48, struct nvme_passthru_cmd64)
#endif /* _UAPI_LINUX_NVME_IOCTL_H */
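
The nvme_passthru_cmd64 layout and the two new ioctl numbers above are the whole userspace contract, so a minimal sketch of driving NVME_IOCTL_ADMIN64_CMD could look like the following; the /dev/nvme0 path and the Identify Controller command (opcode 0x06, CNS=1 in cdw10) are illustrative assumptions, not part of this merge:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/nvme_ioctl.h>

int main(void)
{
	struct nvme_passthru_cmd64 cmd;
	void *data;
	int fd, ret;

	if (posix_memalign(&data, 4096, 4096))
		return 1;

	fd = open("/dev/nvme0", O_RDONLY);	/* controller char device, assumed */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = 0x06;			/* Identify */
	cmd.nsid = 0;
	cmd.addr = (__u64)(uintptr_t)data;
	cmd.data_len = 4096;
	cmd.cdw10 = 1;				/* CNS = Identify Controller */

	ret = ioctl(fd, NVME_IOCTL_ADMIN64_CMD, &cmd);
	if (ret < 0)
		perror("NVME_IOCTL_ADMIN64_CMD");
	else
		/* cmd.result now carries the full 64-bit completion result */
		printf("status %d, result 0x%llx\n", ret,
		       (unsigned long long)cmd.result);

	close(fd);
	free(data);
	return 0;
}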


@ -35,6 +35,9 @@
*/
#ifndef _UAPI_LINUX_PG_H
#define _UAPI_LINUX_PG_H
#define PG_MAGIC 'P'
#define PG_RESET 'Z'
#define PG_COMMAND 'C'
@ -61,4 +64,4 @@ struct pg_read_hdr {
};
/* end of pg.h */
#endif /* _UAPI_LINUX_PG_H */