Diffstat (limited to 'drivers/nvme/target/core.c')
 drivers/nvme/target/core.c | 68 +++++++++++++++++++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 53 insertions(+), 15 deletions(-)
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 3fc2b548b36f..bfaba79f971d 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -813,11 +813,43 @@ void nvmet_req_complete(struct nvmet_req *req, u16 status)
 }
 EXPORT_SYMBOL_GPL(nvmet_req_complete);
 
+void nvmet_cq_init(struct nvmet_cq *cq)
+{
+        refcount_set(&cq->ref, 1);
+}
+EXPORT_SYMBOL_GPL(nvmet_cq_init);
+
+bool nvmet_cq_get(struct nvmet_cq *cq)
+{
+        return refcount_inc_not_zero(&cq->ref);
+}
+EXPORT_SYMBOL_GPL(nvmet_cq_get);
+
+void nvmet_cq_put(struct nvmet_cq *cq)
+{
+        if (refcount_dec_and_test(&cq->ref))
+                nvmet_cq_destroy(cq);
+}
+EXPORT_SYMBOL_GPL(nvmet_cq_put);
+
 void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
                 u16 qid, u16 size)
 {
         cq->qid = qid;
         cq->size = size;
+
+        ctrl->cqs[qid] = cq;
+}
+
+void nvmet_cq_destroy(struct nvmet_cq *cq)
+{
+        struct nvmet_ctrl *ctrl = cq->ctrl;
+
+        if (ctrl) {
+                ctrl->cqs[cq->qid] = NULL;
+                nvmet_ctrl_put(cq->ctrl);
+                cq->ctrl = NULL;
+        }
 }
 
 void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
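
These helpers give each completion queue its own reference count: nvmet_cq_init() starts the count at 1, nvmet_cq_get() takes an extra reference only while the count is still non-zero, and the final nvmet_cq_put() calls nvmet_cq_destroy(), which clears the controller's cqs[] slot and drops the controller reference (cq->ctrl is set elsewhere in the series). A minimal sketch of the intended lifecycle; the example_* callers below are hypothetical and assume the declarations from drivers/nvme/target/nvmet.h:

/* Hypothetical callers illustrating the nvmet_cq refcount lifecycle. */
static int example_create_cq(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
                u16 qid, u16 size)
{
        nvmet_cq_init(cq);                      /* refcount starts at 1 */
        nvmet_cq_setup(ctrl, cq, qid, size);    /* publish in ctrl->cqs[] */
        return 0;
}

static int example_attach_sq(struct nvmet_cq *cq)
{
        /* An SQ posting completions to this CQ pins it first. */
        if (!nvmet_cq_get(cq))  /* fails once teardown has begun */
                return -EINVAL;
        return 0;
}

static void example_detach_sq(struct nvmet_cq *cq)
{
        nvmet_cq_put(cq);       /* drop the SQ's reference */
}

static void example_delete_cq(struct nvmet_cq *cq)
{
        /*
         * Drop the initial reference; if no SQ still holds the CQ,
         * this is the final put and nvmet_cq_destroy() runs.
         */
        nvmet_cq_put(cq);
}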
@@ -837,41 +869,39 @@ static void nvmet_confirm_sq(struct percpu_ref *ref)
         complete(&sq->confirm_done);
 }
 
-u16 nvmet_check_cqid(struct nvmet_ctrl *ctrl, u16 cqid)
+u16 nvmet_check_cqid(struct nvmet_ctrl *ctrl, u16 cqid, bool create)
 {
-        if (!ctrl->sqs)
+        if (!ctrl->cqs)
                 return NVME_SC_INTERNAL | NVME_STATUS_DNR;
 
         if (cqid > ctrl->subsys->max_qid)
                 return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
 
-        /*
-         * Note: For PCI controllers, the NVMe specifications allows multiple
-         * SQs to share a single CQ. However, we do not support this yet, so
-         * check that there is no SQ defined for a CQ. If one exist, then the
-         * CQ ID is invalid for creation as well as when the CQ is being
-         * deleted (as that would mean that the SQ was not deleted before the
-         * CQ).
-         */
-        if (ctrl->sqs[cqid])
+        if ((create && ctrl->cqs[cqid]) || (!create && !ctrl->cqs[cqid]))
                 return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
 
         return NVME_SC_SUCCESS;
 }
 
-u16 nvmet_check_io_cqid(struct nvmet_ctrl *ctrl, u16 cqid)
+u16 nvmet_check_io_cqid(struct nvmet_ctrl *ctrl, u16 cqid, bool create)
 {
         if (!cqid)
                 return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
-        return nvmet_check_cqid(ctrl, cqid);
+        return nvmet_check_cqid(ctrl, cqid, create);
 }
 
+bool nvmet_cq_in_use(struct nvmet_cq *cq)
+{
+        return refcount_read(&cq->ref) > 1;
+}
+EXPORT_SYMBOL_GPL(nvmet_cq_in_use);
+
 u16 nvmet_cq_create(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
                 u16 qid, u16 size)
 {
         u16 status;
 
-        status = nvmet_check_cqid(ctrl, qid);
+        status = nvmet_check_cqid(ctrl, qid, true);
         if (status != NVME_SC_SUCCESS)
                 return status;
 
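
The create flag makes nvmet_check_cqid() validate both directions of the CQ lifecycle: creation requires the cqs[] slot to be empty, while deletion or lookup requires it to be populated. nvmet_cq_in_use() complements this by reporting whether anything beyond the CQ's own initial reference is still holding it, i.e. whether an SQ is still attached. A small sketch of the two polarities; the example_* wrappers are hypothetical:

/* Hypothetical wrappers showing the two polarities of the create flag. */
static u16 example_validate_cq_creation(struct nvmet_ctrl *ctrl, u16 cqid)
{
        /* Creating: the slot must currently be empty. */
        return nvmet_check_io_cqid(ctrl, cqid, true);
}

static u16 example_validate_cq_deletion(struct nvmet_ctrl *ctrl, u16 cqid)
{
        /* Deleting or looking up: the slot must already hold a CQ. */
        return nvmet_check_io_cqid(ctrl, cqid, false);
}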
@@ -1619,12 +1649,17 @@ struct nvmet_ctrl *nvmet_alloc_ctrl(struct nvmet_alloc_ctrl_args *args)
         if (!ctrl->sqs)
                 goto out_free_changed_ns_list;
 
+        ctrl->cqs = kcalloc(subsys->max_qid + 1, sizeof(struct nvmet_cq *),
+                        GFP_KERNEL);
+        if (!ctrl->cqs)
+                goto out_free_sqs;
+
         ret = ida_alloc_range(&cntlid_ida,
                         subsys->cntlid_min, subsys->cntlid_max,
                         GFP_KERNEL);
         if (ret < 0) {
                 args->status = NVME_SC_CONNECT_CTRL_BUSY | NVME_STATUS_DNR;
-                goto out_free_sqs;
+                goto out_free_cqs;
         }
         ctrl->cntlid = ret;
 
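
Because ctrl->cqs is allocated between ctrl->sqs and the cntlid allocation, the unwind path must release it in reverse order of acquisition: an ida_alloc_range() failure now jumps to the new out_free_cqs label, which falls through to out_free_sqs (see the next hunk). A standalone sketch of the goto-unwind idiom, using plain calloc()/free() in place of the kernel helpers:

/* Standalone illustration of reverse-order goto unwinding (not nvmet code). */
#include <stdlib.h>

struct ctx {
        void **sqs;
        void **cqs;
        int id;
};

static int alloc_id(void)
{
        return 1;       /* stand-in for ida_alloc_range() */
}

static int ctx_init(struct ctx *c, size_t nr)
{
        c->sqs = calloc(nr, sizeof(*c->sqs));
        if (!c->sqs)
                goto out;
        c->cqs = calloc(nr, sizeof(*c->cqs));
        if (!c->cqs)
                goto out_free_sqs;
        c->id = alloc_id();
        if (c->id < 0)
                goto out_free_cqs;      /* newest resource released first */
        return 0;

out_free_cqs:
        free(c->cqs);
out_free_sqs:
        free(c->sqs);
out:
        return -1;
}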
@@ -1683,6 +1718,8 @@ init_pr_fail:
         mutex_unlock(&subsys->lock);
         nvmet_stop_keep_alive_timer(ctrl);
         ida_free(&cntlid_ida, ctrl->cntlid);
+out_free_cqs:
+        kfree(ctrl->cqs);
 out_free_sqs:
         kfree(ctrl->sqs);
 out_free_changed_ns_list:
@@ -1719,6 +1756,7 @@ static void nvmet_ctrl_free(struct kref *ref)
         nvmet_async_events_free(ctrl);
         kfree(ctrl->sqs);
+        kfree(ctrl->cqs);
         kfree(ctrl->changed_ns_list);
         kfree(ctrl);