nvme: Fix PRP Offset Invalid

When large writes take place, I saw a Samsung EVO 970+ return a status
value of 0x13, PRP Offset Invalid.  I tracked this down to improper
handling of PRP entries: the blocks the PRP entries are placed in must
not cross a page boundary and therefore should be allocated on page
boundaries.  This is how the Linux kernel driver works.

With this patch, the PRP pool is allocated on a page boundary and, apart
from the very first allocation, the pool size is a multiple of the page
size.  Each page can hold (4096 / 8) - 1 entries, since the last entry
must point to the next page in the pool.
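
To make the per-page arithmetic above concrete, here is a small
standalone sketch (not part of the patch); the 4 KiB page size and the
1024-entry example are illustrative assumptions:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int page_size = 4096;			/* assumed NVMe page size */
	unsigned int prps_per_page = (page_size >> 3) - 1;	/* 511 data slots per page */
	unsigned int nprps = 1024;			/* example PRP entry count */
	unsigned int num_pages = DIV_ROUND_UP(nprps, prps_per_page);

	/* 1024 entries fit in 3 pool pages: 511 + 511 + 2 */
	printf("prps_per_page=%u num_pages=%u pool_bytes=%u\n",
	       prps_per_page, num_pages, num_pages * page_size);

	return 0;
}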

Signed-off-by: Aaron Williams <awilliams@marvell.com>
Reviewed-by: Bin Meng <bmeng.cn@gmail.com>

Author:    Aaron Williams <awilliams@marvell.com>
Date:      2019-08-22 20:37:26 -07:00
Committer: Tom Rini
Parent:    4ebeb4c559
Commit:    b21dcebfa6

@@ -73,6 +73,9 @@ static int nvme_setup_prps(struct nvme_dev *dev, u64 *prp2,
 	u64 *prp_pool;
 	int length = total_len;
 	int i, nprps;
+	u32 prps_per_page = (page_size >> 3) - 1;
+	u32 num_pages;
+
 	length -= (page_size - offset);
 
 	if (length <= 0) {
@@ -89,15 +92,20 @@ static int nvme_setup_prps(struct nvme_dev *dev, u64 *prp2,
 	}
 
 	nprps = DIV_ROUND_UP(length, page_size);
+	num_pages = DIV_ROUND_UP(nprps, prps_per_page);
 
 	if (nprps > dev->prp_entry_num) {
 		free(dev->prp_pool);
-		dev->prp_pool = malloc(nprps << 3);
+		/*
+		 * Always increase in increments of pages.  It doesn't waste
+		 * much memory and reduces the number of allocations.
+		 */
+		dev->prp_pool = memalign(page_size, num_pages * page_size);
 		if (!dev->prp_pool) {
 			printf("Error: malloc prp_pool fail\n");
 			return -ENOMEM;
 		}
-		dev->prp_entry_num = nprps;
+		dev->prp_entry_num = prps_per_page * num_pages;
 	}
 
 	prp_pool = dev->prp_pool;
@@ -788,14 +796,6 @@ static int nvme_probe(struct udevice *udev)
 	}
 	memset(ndev->queues, 0, NVME_Q_NUM * sizeof(struct nvme_queue *));
 
-	ndev->prp_pool = malloc(MAX_PRP_POOL);
-	if (!ndev->prp_pool) {
-		ret = -ENOMEM;
-		printf("Error: %s: Out of memory!\n", udev->name);
-		goto free_nvme;
-	}
-	ndev->prp_entry_num = MAX_PRP_POOL >> 3;
-
 	ndev->cap = nvme_readq(&ndev->bar->cap);
 	ndev->q_depth = min_t(int, NVME_CAP_MQES(ndev->cap) + 1, NVME_Q_DEPTH);
 	ndev->db_stride = 1 << NVME_CAP_STRIDE(ndev->cap);
@@ -805,6 +805,15 @@ static int nvme_probe(struct udevice *udev)
 	if (ret)
 		goto free_queue;
 
+	/* Allocate after the page size is known */
+	ndev->prp_pool = memalign(ndev->page_size, MAX_PRP_POOL);
+	if (!ndev->prp_pool) {
+		ret = -ENOMEM;
+		printf("Error: %s: Out of memory!\n", udev->name);
+		goto free_nvme;
+	}
+	ndev->prp_entry_num = MAX_PRP_POOL >> 3;
+
 	ret = nvme_setup_io_queues(ndev);
 	if (ret)
 		goto free_queue;
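
For context, the pool consumer in nvme_setup_prps() (unchanged by this
patch) walks the pool roughly as sketched below.  This is a simplified
illustration, not the exact driver code (endianness conversion and cache
maintenance are omitted): the last 8-byte slot of every pool page is
reused as a pointer to the following pool page, so each page-sized chunk
of the pool must start on a page boundary, which is what the switch from
malloc() to memalign() guarantees.

#include <stdint.h>

static void fill_prp_list(uint64_t *prp_pool, uint64_t dma_addr,
			  int nprps, uint32_t page_size)
{
	int i = 0;

	while (nprps) {
		if (i == (int)((page_size >> 3) - 1)) {
			/* Last slot: chain to the next pool page */
			prp_pool[i] = (uint64_t)(uintptr_t)prp_pool + page_size;
			prp_pool = (uint64_t *)((uint8_t *)prp_pool + page_size);
			i = 0;
		}
		prp_pool[i++] = dma_addr;	/* address of one data page */
		dma_addr += page_size;
		nprps--;
	}
}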