Diffstat (limited to 'drivers/cxl/mem.c')
 drivers/cxl/mem.c | 148
 1 file changed, 24 insertions(+), 124 deletions(-)
diff --git a/drivers/cxl/mem.c b/drivers/cxl/mem.c
index 49a4b1c47299..c310f1fd3db0 100644
--- a/drivers/cxl/mem.c
+++ b/drivers/cxl/mem.c
@@ -24,27 +24,6 @@
* in higher level operations.
*/
-static int wait_for_media(struct cxl_memdev *cxlmd)
-{
- struct cxl_dev_state *cxlds = cxlmd->cxlds;
- struct cxl_endpoint_dvsec_info *info = &cxlds->info;
- int rc;
-
- if (!info->mem_enabled)
- return -EBUSY;
-
- rc = cxlds->wait_media_ready(cxlds);
- if (rc)
- return rc;
-
- /*
- * We know the device is active, and enabled, if any ranges are non-zero
- * we'll need to check later before adding the port since that owns the
- * HDM decoder registers.
- */
- return 0;
-}
-
static int create_endpoint(struct cxl_memdev *cxlmd,
struct cxl_port *parent_port)
{
@@ -67,72 +46,14 @@ static int create_endpoint(struct cxl_memdev *cxlmd,
return cxl_endpoint_autoremove(cxlmd, endpoint);
}
-/**
- * cxl_dvsec_decode_init() - Setup HDM decoding for the endpoint
- * @cxlds: Device state
- *
- * Additionally, enables global HDM decoding. Warning: don't call this outside
- * of probe. Once probe is complete, the port driver owns all access to the HDM
- * decoder registers.
- *
- * Returns: false if DVSEC Ranges are being used instead of HDM
- * decoders, or if it can not be determined if DVSEC Ranges are in use.
- * Otherwise, returns true.
- */
-__mock bool cxl_dvsec_decode_init(struct cxl_dev_state *cxlds)
+static void enable_suspend(void *data)
{
- struct cxl_endpoint_dvsec_info *info = &cxlds->info;
- struct cxl_register_map map;
- struct cxl_component_reg_map *cmap = &map.component_map;
- bool global_enable, do_hdm_init = false;
- void __iomem *crb;
- u32 global_ctrl;
-
- /* map hdm decoder */
- crb = ioremap(cxlds->component_reg_phys, CXL_COMPONENT_REG_BLOCK_SIZE);
- if (!crb) {
- dev_dbg(cxlds->dev, "Failed to map component registers\n");
- return false;
- }
-
- cxl_probe_component_regs(cxlds->dev, crb, cmap);
- if (!cmap->hdm_decoder.valid) {
- dev_dbg(cxlds->dev, "Invalid HDM decoder registers\n");
- goto out;
- }
-
- global_ctrl = readl(crb + cmap->hdm_decoder.offset +
- CXL_HDM_DECODER_CTRL_OFFSET);
- global_enable = global_ctrl & CXL_HDM_DECODER_ENABLE;
- if (!global_enable && info->ranges) {
- dev_dbg(cxlds->dev,
- "DVSEC ranges already programmed and HDM decoders not enabled.\n");
- goto out;
- }
-
- do_hdm_init = true;
-
- /*
- * Permanently (for this boot at least) opt the device into HDM
- * operation. Individual HDM decoders still need to be enabled after
- * this point.
- */
- if (!global_enable) {
- dev_dbg(cxlds->dev, "Enabling HDM decode\n");
- writel(global_ctrl | CXL_HDM_DECODER_ENABLE,
- crb + cmap->hdm_decoder.offset +
- CXL_HDM_DECODER_CTRL_OFFSET);
- }
-
-out:
- iounmap(crb);
- return do_hdm_init;
+ cxl_mem_active_dec();
}
static int cxl_mem_probe(struct device *dev)
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
- struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct cxl_port *parent_port;
int rc;
@@ -147,44 +68,6 @@ static int cxl_mem_probe(struct device *dev)
if (work_pending(&cxlmd->detach_work))
return -EBUSY;
- rc = wait_for_media(cxlmd);
- if (rc) {
- dev_err(dev, "Media not active (%d)\n", rc);
- return rc;
- }
-
- /*
- * If DVSEC ranges are being used instead of HDM decoder registers there
- * is no use in trying to manage those.
- */
- if (!cxl_dvsec_decode_init(cxlds)) {
- struct cxl_endpoint_dvsec_info *info = &cxlds->info;
- int i;
-
- /* */
- for (i = 0; i < 2; i++) {
- u64 base, size;
-
- /*
- * Give a nice warning to the user that BIOS has really
- * botched things for them if it didn't place DVSEC
- * ranges in the memory map.
- */
- base = info->dvsec_range[i].start;
- size = range_len(&info->dvsec_range[i]);
- if (size && !region_intersects(base, size,
- IORESOURCE_SYSTEM_RAM,
- IORES_DESC_NONE)) {
- dev_err(dev,
- "DVSEC range %#llx-%#llx must be reserved by BIOS, but isn't\n",
- base, base + size - 1);
- }
- }
- dev_err(dev,
- "Active DVSEC range registers in use. Will not bind.\n");
- return -EBUSY;
- }
-
rc = devm_cxl_enumerate_ports(cxlmd);
if (rc)
return rc;
@@ -195,19 +78,36 @@ static int cxl_mem_probe(struct device *dev)
return -ENXIO;
}
- cxl_device_lock(&parent_port->dev);
+ device_lock(&parent_port->dev);
if (!parent_port->dev.driver) {
dev_err(dev, "CXL port topology %s not enabled\n",
dev_name(&parent_port->dev));
rc = -ENXIO;
- goto out;
+ goto unlock;
}
rc = create_endpoint(cxlmd, parent_port);
-out:
- cxl_device_unlock(&parent_port->dev);
+unlock:
+ device_unlock(&parent_port->dev);
put_device(&parent_port->dev);
- return rc;
+ if (rc)
+ return rc;
+
+ /*
+ * The kernel may be operating out of CXL memory on this device,
+ * there is no spec defined way to determine whether this device
+ * preserves contents over suspend, and there is no simple way
+ * to arrange for the suspend image to avoid CXL memory which
+ * would setup a circular dependency between PCI resume and save
+ * state restoration.
+ *
+ * TODO: support suspend when all the regions this device is
+ * hosting are locked and covered by the system address map,
+ * i.e. platform firmware owns restoring the HDM configuration
+ * that it locked.
+ */
+ cxl_mem_active_inc();
+ return devm_add_action_or_reset(dev, enable_suspend, NULL);
}
static struct cxl_driver cxl_mem_driver = {
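
The new tail of cxl_mem_probe() pairs cxl_mem_active_inc() with a devm action so the count is dropped again on unbind, or on any probe failure after the increment. The sketch below is illustrative only and not part of this commit: the demo_* names and the atomic counter are hypothetical stand-ins for the real cxl_mem_active_inc()/cxl_mem_active_dec() helpers, reduced to a self-contained form.

/*
 * Illustrative sketch only (not part of this commit): a global count is
 * raised while the driver is bound, and a devm-managed action lowers it
 * automatically when the device is unbound or when probe unwinds after
 * the increment. The demo_* identifiers are hypothetical stand-ins for
 * cxl_mem_active_inc()/cxl_mem_active_dec().
 */
#include <linux/atomic.h>
#include <linux/device.h>

static atomic_t demo_active_count = ATOMIC_INIT(0);

static void demo_active_inc(void)
{
	atomic_inc(&demo_active_count);
}

static void demo_active_dec(void)
{
	atomic_dec(&demo_active_count);
}

/* devm action: runs on unbind, or if probe fails after registration */
static void demo_enable_suspend(void *data)
{
	demo_active_dec();
}

static int demo_probe(struct device *dev)
{
	/* Block suspend while this device is bound... */
	demo_active_inc();

	/* ...and guarantee the matching decrement without an explicit remove() */
	return devm_add_action_or_reset(dev, demo_enable_suspend, NULL);
}

Note that if devm_add_action_or_reset() cannot allocate the action it invokes the callback itself before returning an error, so the count cannot leak on the probe error path.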