author      Hannes Reinecke <hare@suse.de>      2021-04-16 13:46:20 +0200
committer   Christoph Hellwig <hch@lst.de>      2021-04-21 19:13:15 +0200
commit      a70b81bd4d9d2d6c05cfe6ef2a10bccc2e04357a
tree        b8bf44fd9e9a4f1d7a8c2cc34d10743e33490af4 /drivers/nvme/host/core.c
parent      8f864c595bed20ef85fef3e7314212b73800d51d
nvme: sanitize KATO setting
According to the NVMe base spec, KATO commands should be sent at half of the Keep Alive Timeout interval, to properly account for round-trip times. As we now only ever send one KATO command per connection, we can simply use the recommended value.

This also fixes a potential issue where the request timeout for the KATO command does not match the value in the connect command, which might be causing spurious connection drops from the target.

Signed-off-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Christoph Hellwig <hch@lst.de>
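To make the fix concrete: ctrl->kato is specified in seconds and delayed-work timeouts in jiffies, so the scheduling delay drops from kato * HZ to kato * HZ / 2. A minimal userspace sketch of the arithmetic (illustrative only; the HZ and kato values below are arbitrary examples, not taken from this patch):

	/* Illustrative only -- not kernel code and not part of this patch. */
	#include <stdio.h>

	#define HZ 250			/* example jiffies-per-second rate */

	int main(void)
	{
		unsigned int kato = 10;	/* hypothetical KATO of 10 seconds */

		/* Before the patch: ka_work queued a full KATO interval out. */
		printf("old delay: %u jiffies (%.1fs)\n", kato * HZ, (double)kato);
		/* After the patch: half the interval, leaving headroom for RTT. */
		printf("new delay: %u jiffies (%.1fs)\n", kato * HZ / 2, kato / 2.0);
		return 0;
	}

With kato = 10 and HZ = 250 this prints 2500 jiffies (10.0s) versus 1250 jiffies (5.0s), matching the "half of the Keep Alive Timeout" recommendation quoted in the new comment.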
Diffstat (limited to 'drivers/nvme/host/core.c')
-rw-r--r--   drivers/nvme/host/core.c   17
1 file changed, 14 insertions(+), 3 deletions(-)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 40f08e6325ef..0cb097cd6a8e 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1109,6 +1109,17 @@ void nvme_execute_passthru_rq(struct request *rq)
 }
 EXPORT_SYMBOL_NS_GPL(nvme_execute_passthru_rq, NVME_TARGET_PASSTHRU);
 
+/*
+ * Recommended frequency for KATO commands per NVMe 1.4 section 7.12.1:
+ *
+ * The host should send Keep Alive commands at half of the Keep Alive Timeout
+ * accounting for transport roundtrip times [..].
+ */
+static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl)
+{
+	queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ / 2);
+}
+
 static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
 {
 	struct nvme_ctrl *ctrl = rq->end_io_data;
@@ -1131,7 +1142,7 @@ static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
 		startka = true;
 	spin_unlock_irqrestore(&ctrl->lock, flags);
 	if (startka)
-		queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
+		nvme_queue_keep_alive_work(ctrl);
 }
 
 static int nvme_keep_alive(struct nvme_ctrl *ctrl)
@@ -1161,7 +1172,7 @@ static void nvme_keep_alive_work(struct work_struct *work)
 		dev_dbg(ctrl->device,
 			"reschedule traffic based keep-alive timer\n");
 		ctrl->comp_seen = false;
-		queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
+		nvme_queue_keep_alive_work(ctrl);
 		return;
 	}
 
@@ -1178,7 +1189,7 @@ static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
 	if (unlikely(ctrl->kato == 0))
 		return;
 
-	queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
+	nvme_queue_keep_alive_work(ctrl);
 }
 
 void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)