-rw-r--r--  MAINTAINERS                        2
-rw-r--r--  drivers/acpi/apei/ghes.c          84
-rw-r--r--  drivers/cxl/Kconfig                1
-rw-r--r--  drivers/cxl/acpi.c                93
-rw-r--r--  drivers/cxl/core/core.h           14
-rw-r--r--  drivers/cxl/core/hdm.c            13
-rw-r--r--  drivers/cxl/core/mbox.c           48
-rw-r--r--  drivers/cxl/core/memdev.c         44
-rw-r--r--  drivers/cxl/core/region.c        177
-rw-r--r--  drivers/cxl/core/trace.c          91
-rw-r--r--  drivers/cxl/core/trace.h          50
-rw-r--r--  drivers/cxl/cxl.h                  7
-rw-r--r--  drivers/cxl/cxlmem.h               3
-rw-r--r--  drivers/cxl/pci.c                 71
-rw-r--r--  drivers/cxl/pmem.c                 2
-rw-r--r--  include/linux/cxl-event.h         39
-rw-r--r--  include/uapi/linux/cxl_mem.h       3
-rw-r--r--  tools/testing/cxl/test/mem.c      19
18 files changed, 527 insertions, 234 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index c4a192a73adb..c4e39b7cd472 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5407,7 +5407,7 @@ M: Dan Williams <dan.j.williams@intel.com>
L: linux-cxl@vger.kernel.org
S: Maintained
F: drivers/cxl/
-F: include/linux/cxl-einj.h
+F: include/linux/einj-cxl.h
F: include/linux/cxl-event.h
F: include/uapi/linux/cxl_mem.h
F: tools/testing/cxl/
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 512067cac170..623cc0cb4a65 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -26,6 +26,8 @@
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/cper.h>
+#include <linux/cleanup.h>
+#include <linux/cxl-event.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <linux/ratelimit.h>
@@ -33,6 +35,7 @@
#include <linux/irq_work.h>
#include <linux/llist.h>
#include <linux/genalloc.h>
+#include <linux/kfifo.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/aer.h>
@@ -673,6 +676,75 @@ static void ghes_defer_non_standard_event(struct acpi_hest_generic_data *gdata,
schedule_work(&entry->work);
}
+/* Room for 8 entries for each of the 4 event log queues */
+#define CXL_CPER_FIFO_DEPTH 32
+DEFINE_KFIFO(cxl_cper_fifo, struct cxl_cper_work_data, CXL_CPER_FIFO_DEPTH);
+
+/* Synchronize schedule_work() with cxl_cper_work changes */
+static DEFINE_SPINLOCK(cxl_cper_work_lock);
+struct work_struct *cxl_cper_work;
+
+static void cxl_cper_post_event(enum cxl_event_type event_type,
+ struct cxl_cper_event_rec *rec)
+{
+ struct cxl_cper_work_data wd;
+
+ if (rec->hdr.length <= sizeof(rec->hdr) ||
+ rec->hdr.length > sizeof(*rec)) {
+ pr_err(FW_WARN "CXL CPER Invalid section length (%u)\n",
+ rec->hdr.length);
+ return;
+ }
+
+ if (!(rec->hdr.validation_bits & CPER_CXL_COMP_EVENT_LOG_VALID)) {
+ pr_err(FW_WARN "CXL CPER invalid event\n");
+ return;
+ }
+
+ guard(spinlock_irqsave)(&cxl_cper_work_lock);
+
+ if (!cxl_cper_work)
+ return;
+
+ wd.event_type = event_type;
+ memcpy(&wd.rec, rec, sizeof(wd.rec));
+
+ if (!kfifo_put(&cxl_cper_fifo, wd)) {
+ pr_err_ratelimited("CXL CPER kfifo overflow\n");
+ return;
+ }
+
+ schedule_work(cxl_cper_work);
+}
+
+int cxl_cper_register_work(struct work_struct *work)
+{
+ if (cxl_cper_work)
+ return -EINVAL;
+
+ guard(spinlock)(&cxl_cper_work_lock);
+ cxl_cper_work = work;
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_cper_register_work, CXL);
+
+int cxl_cper_unregister_work(struct work_struct *work)
+{
+ if (cxl_cper_work != work)
+ return -EINVAL;
+
+ guard(spinlock)(&cxl_cper_work_lock);
+ cxl_cper_work = NULL;
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_cper_unregister_work, CXL);
+
+int cxl_cper_kfifo_get(struct cxl_cper_work_data *wd)
+{
+ return kfifo_get(&cxl_cper_fifo, wd);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_cper_kfifo_get, CXL);
+
static bool ghes_do_proc(struct ghes *ghes,
const struct acpi_hest_generic_status *estatus)
{
@@ -707,6 +779,18 @@ static bool ghes_do_proc(struct ghes *ghes,
}
else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) {
queued = ghes_handle_arm_hw_error(gdata, sev, sync);
+ } else if (guid_equal(sec_type, &CPER_SEC_CXL_GEN_MEDIA_GUID)) {
+ struct cxl_cper_event_rec *rec = acpi_hest_get_payload(gdata);
+
+ cxl_cper_post_event(CXL_CPER_EVENT_GEN_MEDIA, rec);
+ } else if (guid_equal(sec_type, &CPER_SEC_CXL_DRAM_GUID)) {
+ struct cxl_cper_event_rec *rec = acpi_hest_get_payload(gdata);
+
+ cxl_cper_post_event(CXL_CPER_EVENT_DRAM, rec);
+ } else if (guid_equal(sec_type, &CPER_SEC_CXL_MEM_MODULE_GUID)) {
+ struct cxl_cper_event_rec *rec = acpi_hest_get_payload(gdata);
+
+ cxl_cper_post_event(CXL_CPER_EVENT_MEM_MODULE, rec);
} else {
void *err = acpi_hest_get_payload(gdata);
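
The handoff above decouples the two ends: GHES may see a CXL CPER record in
IRQ-like contexts and cannot call into the CXL driver directly, so it validates
the record, buffers it in a fixed-depth kfifo, and pokes whatever work item the
driver registered. A minimal sketch of the same single-consumer pattern, with
illustrative names (not the kernel code above):

/*
 * demo_post() is the producer (IRQ-safe); demo_work_fn() drains the fifo
 * in process context once a consumer has registered its work item.
 */
#include <linux/cleanup.h>
#include <linux/kfifo.h>
#include <linux/printk.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct demo_event { int payload; };

static DEFINE_KFIFO(demo_fifo, struct demo_event, 32);
static DEFINE_SPINLOCK(demo_work_lock);
static struct work_struct *demo_work;	/* set by the consumer module */

static void demo_post(struct demo_event ev)
{
	guard(spinlock_irqsave)(&demo_work_lock);

	if (!demo_work)
		return;			/* no consumer registered */
	if (!kfifo_put(&demo_fifo, ev))
		return;			/* fifo full: drop, as ghes.c does */
	schedule_work(demo_work);
}

static void demo_work_fn(struct work_struct *work)
{
	struct demo_event ev;

	while (kfifo_get(&demo_fifo, &ev))
		pr_info("event %d\n", ev.payload);
}

A raw work_struct pointer plus spinlock is deliberately lighter than a notifier
chain here: ghes.c stays free of CXL symbols, and the single registered
consumer (cxl_pci) owns the work item and its lifetime.
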
diff --git a/drivers/cxl/Kconfig b/drivers/cxl/Kconfig
index 5f3c9c5529b9..99b5c25be079 100644
--- a/drivers/cxl/Kconfig
+++ b/drivers/cxl/Kconfig
@@ -6,6 +6,7 @@ menuconfig CXL_BUS
select FW_UPLOAD
select PCI_DOE
select FIRMWARE_TABLE
+ select NUMA_KEEP_MEMINFO if (NUMA && X86)
help
CXL is a bus that is electrically compatible with PCI Express, but
layers three protocols on that signalling (CXL.io, CXL.cache, and
diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c
index cb8c155a2c9b..571069863c62 100644
--- a/drivers/cxl/acpi.c
+++ b/drivers/cxl/acpi.c
@@ -316,28 +316,59 @@ static const struct cxl_root_ops acpi_root_ops = {
.qos_class = cxl_acpi_qos_class,
};
+static void del_cxl_resource(struct resource *res)
+{
+ if (!res)
+ return;
+ kfree(res->name);
+ kfree(res);
+}
+
+static struct resource *alloc_cxl_resource(resource_size_t base,
+ resource_size_t n, int id)
+{
+ struct resource *res __free(kfree) = kzalloc(sizeof(*res), GFP_KERNEL);
+
+ if (!res)
+ return NULL;
+
+ res->start = base;
+ res->end = base + n - 1;
+ res->flags = IORESOURCE_MEM;
+ res->name = kasprintf(GFP_KERNEL, "CXL Window %d", id);
+ if (!res->name)
+ return NULL;
+
+ return no_free_ptr(res);
+}
+
+static int add_or_reset_cxl_resource(struct resource *parent, struct resource *res)
+{
+ int rc = insert_resource(parent, res);
+
+ if (rc)
+ del_cxl_resource(res);
+ return rc;
+}
+
+DEFINE_FREE(put_cxlrd, struct cxl_root_decoder *,
+ if (!IS_ERR_OR_NULL(_T)) put_device(&_T->cxlsd.cxld.dev))
+DEFINE_FREE(del_cxl_resource, struct resource *, if (_T) del_cxl_resource(_T))
static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
struct cxl_cfmws_context *ctx)
{
int target_map[CXL_DECODER_MAX_INTERLEAVE];
struct cxl_port *root_port = ctx->root_port;
- struct resource *cxl_res = ctx->cxl_res;
struct cxl_cxims_context cxims_ctx;
- struct cxl_root_decoder *cxlrd;
struct device *dev = ctx->dev;
cxl_calc_hb_fn cxl_calc_hb;
struct cxl_decoder *cxld;
unsigned int ways, i, ig;
- struct resource *res;
int rc;
rc = cxl_acpi_cfmws_verify(dev, cfmws);
- if (rc) {
- dev_err(dev, "CFMWS range %#llx-%#llx not registered\n",
- cfmws->base_hpa,
- cfmws->base_hpa + cfmws->window_size - 1);
+ if (rc)
return rc;
- }
rc = eiw_to_ways(cfmws->interleave_ways, &ways);
if (rc)
@@ -348,29 +379,23 @@ static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
for (i = 0; i < ways; i++)
target_map[i] = cfmws->interleave_targets[i];
- res = kzalloc(sizeof(*res), GFP_KERNEL);
+ struct resource *res __free(del_cxl_resource) = alloc_cxl_resource(
+ cfmws->base_hpa, cfmws->window_size, ctx->id++);
if (!res)
return -ENOMEM;
- res->name = kasprintf(GFP_KERNEL, "CXL Window %d", ctx->id++);
- if (!res->name)
- goto err_name;
-
- res->start = cfmws->base_hpa;
- res->end = cfmws->base_hpa + cfmws->window_size - 1;
- res->flags = IORESOURCE_MEM;
-
/* add to the local resource tracking to establish a sort order */
- rc = insert_resource(cxl_res, res);
+ rc = add_or_reset_cxl_resource(ctx->cxl_res, no_free_ptr(res));
if (rc)
- goto err_insert;
+ return rc;
if (cfmws->interleave_arithmetic == ACPI_CEDT_CFMWS_ARITHMETIC_MODULO)
cxl_calc_hb = cxl_hb_modulo;
else
cxl_calc_hb = cxl_hb_xor;
- cxlrd = cxl_root_decoder_alloc(root_port, ways, cxl_calc_hb);
+ struct cxl_root_decoder *cxlrd __free(put_cxlrd) =
+ cxl_root_decoder_alloc(root_port, ways, cxl_calc_hb);
if (IS_ERR(cxlrd))
return PTR_ERR(cxlrd);
@@ -378,8 +403,8 @@ static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
cxld->flags = cfmws_to_decoder_flags(cfmws->restrictions);
cxld->target_type = CXL_DECODER_HOSTONLYMEM;
cxld->hpa_range = (struct range) {
- .start = res->start,
- .end = res->end,
+ .start = cfmws->base_hpa,
+ .end = cfmws->base_hpa + cfmws->window_size - 1,
};
cxld->interleave_ways = ways;
/*
@@ -399,11 +424,10 @@ static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
rc = acpi_table_parse_cedt(ACPI_CEDT_TYPE_CXIMS,
cxl_parse_cxims, &cxims_ctx);
if (rc < 0)
- goto err_xormap;
+ return rc;
if (!cxlrd->platform_data) {
dev_err(dev, "No CXIMS for HBIG %u\n", ig);
- rc = -EINVAL;
- goto err_xormap;
+ return -EINVAL;
}
}
}
@@ -411,18 +435,9 @@ static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
cxlrd->qos_class = cfmws->qtg_id;
rc = cxl_decoder_add(cxld, target_map);
-err_xormap:
if (rc)
- put_device(&cxld->dev);
- else
- rc = cxl_decoder_autoremove(dev, cxld);
- return rc;
-
-err_insert:
- kfree(res->name);
-err_name:
- kfree(res);
- return -ENOMEM;
+ return rc;
+ return cxl_root_decoder_autoremove(dev, no_free_ptr(cxlrd));
}
static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
@@ -683,12 +698,6 @@ static void cxl_acpi_lock_reset_class(void *dev)
device_lock_reset_class(dev);
}
-static void del_cxl_resource(struct resource *res)
-{
- kfree(res->name);
- kfree(res);
-}
-
static void cxl_set_public_resource(struct resource *priv, struct resource *pub)
{
priv->desc = (unsigned long) pub;
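
The acpi.c rewrite above replaces goto-based unwind with scope-based cleanup
from <linux/cleanup.h>: __free() attaches a destructor to a local variable,
no_free_ptr() disarms it when ownership moves out, and DEFINE_FREE() declares
new cleanup classes (put_cxlrd, del_cxl_resource). A compact sketch of the
idiom with made-up "widget" helpers, assuming nothing beyond cleanup.h itself:

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>

struct widget { char *name; };

static void widget_del(struct widget *w)
{
	kfree(w->name);
	kfree(w);
}
DEFINE_FREE(widget_del, struct widget *, if (_T) widget_del(_T))

static struct widget *widget_alloc(const char *name)
{
	struct widget *w __free(kfree) = kzalloc(sizeof(*w), GFP_KERNEL);

	if (!w)
		return NULL;
	w->name = kstrdup(name, GFP_KERNEL);
	if (!w->name)
		return NULL;		/* w is auto-freed on this path */
	return no_free_ptr(w);		/* success: disarm, caller owns w */
}

static int widget_use(void)
{
	struct widget *w __free(widget_del) = widget_alloc("demo");

	if (!w)
		return -ENOMEM;
	/* every return below this point runs widget_del(w) */
	return 0;
}

This mirrors alloc_cxl_resource() and __cxl_parse_cfmws() above: the error
paths need no labels, and each no_free_ptr() marks exactly where
responsibility for the object is handed to another owner (the resource tree,
devm).
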
diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h
index bc5a95665aa0..625394486459 100644
--- a/drivers/cxl/core/core.h
+++ b/drivers/cxl/core/core.h
@@ -27,7 +27,21 @@ void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled);
int cxl_region_init(void);
void cxl_region_exit(void);
int cxl_get_poison_by_endpoint(struct cxl_port *port);
+struct cxl_region *cxl_dpa_to_region(const struct cxl_memdev *cxlmd, u64 dpa);
+u64 cxl_trace_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd,
+ u64 dpa);
+
#else
+static inline u64
+cxl_trace_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd, u64 dpa)
+{
+ return ULLONG_MAX;
+}
+static inline
+struct cxl_region *cxl_dpa_to_region(const struct cxl_memdev *cxlmd, u64 dpa)
+{
+ return NULL;
+}
static inline int cxl_get_poison_by_endpoint(struct cxl_port *port)
{
return 0;
diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c
index 7d97790b893d..784843fa2a22 100644
--- a/drivers/cxl/core/hdm.c
+++ b/drivers/cxl/core/hdm.c
@@ -319,8 +319,8 @@ static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
else if (resource_contains(&cxlds->ram_res, res))
cxled->mode = CXL_DECODER_RAM;
else {
- dev_dbg(dev, "decoder%d.%d: %pr mixed\n", port->id,
- cxled->cxld.id, cxled->dpa_res);
+ dev_warn(dev, "decoder%d.%d: %pr mixed mode not supported\n",
+ port->id, cxled->cxld.id, cxled->dpa_res);
cxled->mode = CXL_DECODER_MIXED;
}
@@ -519,8 +519,7 @@ int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size)
if (size > avail) {
dev_dbg(dev, "%pa exceeds available %s capacity: %pa\n", &size,
- cxled->mode == CXL_DECODER_RAM ? "ram" : "pmem",
- &avail);
+ cxl_decoder_mode_name(cxled->mode), &avail);
rc = -ENOSPC;
goto out;
}
@@ -888,8 +887,12 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
}
rc = eig_to_granularity(FIELD_GET(CXL_HDM_DECODER0_CTRL_IG_MASK, ctrl),
&cxld->interleave_granularity);
- if (rc)
+ if (rc) {
+ dev_warn(&port->dev,
+ "decoder%d.%d: Invalid interleave granularity (ctrl: %#x)\n",
+ port->id, cxld->id, ctrl);
return rc;
+ }
dev_dbg(&port->dev, "decoder%d.%d: range: %#llx-%#llx iw: %d ig: %d\n",
port->id, cxld->id, cxld->hpa_range.start, cxld->hpa_range.end,
diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
index 65185c9fa001..2626f3fff201 100644
--- a/drivers/cxl/core/mbox.c
+++ b/drivers/cxl/core/mbox.c
@@ -56,6 +56,9 @@ static struct cxl_mem_command cxl_mem_commands[CXL_MEM_COMMAND_ID_MAX] = {
CXL_CMD(GET_LSA, 0x8, CXL_VARIABLE_PAYLOAD, 0),
CXL_CMD(GET_HEALTH_INFO, 0, 0x12, 0),
CXL_CMD(GET_LOG, 0x18, CXL_VARIABLE_PAYLOAD, CXL_CMD_FLAG_FORCE_ENABLE),
+ CXL_CMD(GET_LOG_CAPS, 0x10, 0x4, 0),
+ CXL_CMD(CLEAR_LOG, 0x10, 0, 0),
+ CXL_CMD(GET_SUP_LOG_SUBLIST, 0x2, CXL_VARIABLE_PAYLOAD, 0),
CXL_CMD(SET_PARTITION_INFO, 0x0a, 0, 0),
CXL_CMD(SET_LSA, CXL_VARIABLE_PAYLOAD, 0, 0),
CXL_CMD(GET_ALERT_CONFIG, 0, 0x10, 0),
@@ -331,6 +334,15 @@ static bool cxl_payload_from_user_allowed(u16 opcode, void *payload_in)
return false;
break;
}
+ case CXL_MBOX_OP_CLEAR_LOG: {
+ const uuid_t *uuid = (uuid_t *)payload_in;
+
+ /*
+ * Restrict the 'Clear log' action to only apply to
+ * Vendor debug logs.
+ */
+ return uuid_equal(uuid, &DEFINE_CXL_VENDOR_DEBUG_UUID);
+ }
default:
break;
}
@@ -842,14 +854,38 @@ void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
enum cxl_event_type event_type,
const uuid_t *uuid, union cxl_event *evt)
{
- if (event_type == CXL_CPER_EVENT_GEN_MEDIA)
- trace_cxl_general_media(cxlmd, type, &evt->gen_media);
- else if (event_type == CXL_CPER_EVENT_DRAM)
- trace_cxl_dram(cxlmd, type, &evt->dram);
- else if (event_type == CXL_CPER_EVENT_MEM_MODULE)
+ if (event_type == CXL_CPER_EVENT_MEM_MODULE) {
trace_cxl_memory_module(cxlmd, type, &evt->mem_module);
- else
+ return;
+ }
+ if (event_type == CXL_CPER_EVENT_GENERIC) {
trace_cxl_generic_event(cxlmd, type, uuid, &evt->generic);
+ return;
+ }
+
+ if (trace_cxl_general_media_enabled() || trace_cxl_dram_enabled()) {
+ u64 dpa, hpa = ULLONG_MAX;
+ struct cxl_region *cxlr;
+
+ /*
+ * These trace points are annotated with HPA and region
+ * translations. Take topology mutation locks and lookup
+ * { HPA, REGION } from { DPA, MEMDEV } in the event record.
+ */
+ guard(rwsem_read)(&cxl_region_rwsem);
+ guard(rwsem_read)(&cxl_dpa_rwsem);
+
+ dpa = le64_to_cpu(evt->common.phys_addr) & CXL_DPA_MASK;
+ cxlr = cxl_dpa_to_region(cxlmd, dpa);
+ if (cxlr)
+ hpa = cxl_trace_hpa(cxlr, cxlmd, dpa);
+
+ if (event_type == CXL_CPER_EVENT_GEN_MEDIA)
+ trace_cxl_general_media(cxlmd, type, cxlr, hpa,
+ &evt->gen_media);
+ else if (event_type == CXL_CPER_EVENT_DRAM)
+ trace_cxl_dram(cxlmd, type, cxlr, hpa, &evt->dram);
+ }
}
EXPORT_SYMBOL_NS_GPL(cxl_event_trace_record, CXL);
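
Note how cxl_event_trace_record() now takes cxl_region_rwsem/cxl_dpa_rwsem and
walks the topology only when the general-media or DRAM tracepoint is actually
enabled; trace_<event>_enabled() is a static-key test, so the common untraced
path stays nearly free. A sketch of that gating shape, assuming a hypothetical
TRACE_EVENT(demo, ...) that would generate trace_demo()/trace_demo_enabled()
(omitted here, so this fragment compiles only alongside such a definition):

#include <linux/cleanup.h>
#include <linux/rwsem.h>

static DECLARE_RWSEM(demo_topology_rwsem);

static void demo_record(u64 dpa)
{
	if (!trace_demo_enabled())
		return;		/* static branch: near-zero cost when off */

	/* Pay for locks and lookups only when someone is listening */
	guard(rwsem_read)(&demo_topology_rwsem);
	trace_demo(dpa, demo_dpa_to_hpa(dpa));	/* hypothetical helper */
}

guard(rwsem_read) releases the semaphore on every return path, which is what
lets the real function bail out of the region lookup without unlock labels.
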
diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
index d4e259f3a7e9..0277726afd04 100644
--- a/drivers/cxl/core/memdev.c
+++ b/drivers/cxl/core/memdev.c
@@ -251,50 +251,6 @@ int cxl_trigger_poison_list(struct cxl_memdev *cxlmd)
}
EXPORT_SYMBOL_NS_GPL(cxl_trigger_poison_list, CXL);
-struct cxl_dpa_to_region_context {
- struct cxl_region *cxlr;
- u64 dpa;
-};
-
-static int __cxl_dpa_to_region(struct device *dev, void *arg)
-{
- struct cxl_dpa_to_region_context *ctx = arg;
- struct cxl_endpoint_decoder *cxled;
- u64 dpa = ctx->dpa;
-
- if (!is_endpoint_decoder(dev))
- return 0;
-
- cxled = to_cxl_endpoint_decoder(dev);
- if (!cxled->dpa_res || !resource_size(cxled->dpa_res))
- return 0;
-
- if (dpa > cxled->dpa_res->end || dpa < cxled->dpa_res->start)
- return 0;
-
- dev_dbg(dev, "dpa:0x%llx mapped in region:%s\n", dpa,
- dev_name(&cxled->cxld.region->dev));
-
- ctx->cxlr = cxled->cxld.region;
-
- return 1;
-}
-
-static struct cxl_region *cxl_dpa_to_region(struct cxl_memdev *cxlmd, u64 dpa)
-{
- struct cxl_dpa_to_region_context ctx;
- struct cxl_port *port;
-
- ctx = (struct cxl_dpa_to_region_context) {
- .dpa = dpa,
- };
- port = cxlmd->endpoint;
- if (port && is_cxl_endpoint(port) && cxl_num_decoders_committed(port))
- device_for_each_child(&port->dev, &ctx, __cxl_dpa_to_region);
-
- return ctx.cxlr;
-}
-
static int cxl_validate_poison_dpa(struct cxl_memdev *cxlmd, u64 dpa)
{
struct cxl_dev_state *cxlds = cxlmd->cxlds;
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index 5c186e0a39b9..00a9f0eef8dd 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -2679,28 +2679,158 @@ int cxl_get_poison_by_endpoint(struct cxl_port *port)
return rc;
}
+struct cxl_dpa_to_region_context {
+ struct cxl_region *cxlr;
+ u64 dpa;
+};
+
+static int __cxl_dpa_to_region(struct device *dev, void *arg)
+{
+ struct cxl_dpa_to_region_context *ctx = arg;
+ struct cxl_endpoint_decoder *cxled;
+ u64 dpa = ctx->dpa;
+
+ if (!is_endpoint_decoder(dev))
+ return 0;
+
+ cxled = to_cxl_endpoint_decoder(dev);
+ if (!cxled->dpa_res || !resource_size(cxled->dpa_res))
+ return 0;
+
+ if (dpa > cxled->dpa_res->end || dpa < cxled->dpa_res->start)
+ return 0;
+
+ dev_dbg(dev, "dpa:0x%llx mapped in region:%s\n", dpa,
+ dev_name(&cxled->cxld.region->dev));
+
+ ctx->cxlr = cxled->cxld.region;
+
+ return 1;
+}
+
+struct cxl_region *cxl_dpa_to_region(const struct cxl_memdev *cxlmd, u64 dpa)
+{
+ struct cxl_dpa_to_region_context ctx;
+ struct cxl_port *port;
+
+ ctx = (struct cxl_dpa_to_region_context) {
+ .dpa = dpa,
+ };
+ port = cxlmd->endpoint;
+ if (port && is_cxl_endpoint(port) && cxl_num_decoders_committed(port))
+ device_for_each_child(&port->dev, &ctx, __cxl_dpa_to_region);
+
+ return ctx.cxlr;
+}
+
+static bool cxl_is_hpa_in_range(u64 hpa, struct cxl_region *cxlr, int pos)
+{
+ struct cxl_region_params *p = &cxlr->params;
+ int gran = p->interleave_granularity;
+ int ways = p->interleave_ways;
+ u64 offset;
+
+ /* Is the hpa within this region at all */
+ if (hpa < p->res->start || hpa > p->res->end) {
+ dev_dbg(&cxlr->dev,
+ "Addr trans fail: hpa 0x%llx not in region\n", hpa);
+ return false;
+ }
+
+ /* Is the hpa in an expected chunk for its pos(-ition) */
+ offset = hpa - p->res->start;
+ offset = do_div(offset, gran * ways);
+ if ((offset >= pos * gran) && (offset < (pos + 1) * gran))
+ return true;
+
+ dev_dbg(&cxlr->dev,
+ "Addr trans fail: hpa 0x%llx not in expected chunk\n", hpa);
+
+ return false;
+}
+
+static u64 cxl_dpa_to_hpa(u64 dpa, struct cxl_region *cxlr,
+ struct cxl_endpoint_decoder *cxled)
+{
+ u64 dpa_offset, hpa_offset, bits_upper, mask_upper, hpa;
+ struct cxl_region_params *p = &cxlr->params;
+ int pos = cxled->pos;
+ u16 eig = 0;
+ u8 eiw = 0;
+
+ ways_to_eiw(p->interleave_ways, &eiw);
+ granularity_to_eig(p->interleave_granularity, &eig);
+
+ /*
+ * The device position in the region interleave set was removed
+ * from the offset at HPA->DPA translation. To reconstruct the
+ * HPA, place the 'pos' in the offset.
+ *
+ * The placement of 'pos' in the HPA is determined by interleave
+ * ways and granularity and is defined in the CXL Spec 3.0 Section
+ * 8.2.4.19.13 Implementation Note: Device Decode Logic
+ */
+
+ /* Remove the dpa base */
+ dpa_offset = dpa - cxl_dpa_resource_start(cxled);
+
+ mask_upper = GENMASK_ULL(51, eig + 8);
+
+ if (eiw < 8) {
+ hpa_offset = (dpa_offset & mask_upper) << eiw;
+ hpa_offset |= pos << (eig + 8);
+ } else {
+ bits_upper = (dpa_offset & mask_upper) >> (eig + 8);
+ bits_upper = bits_upper * 3;
+ hpa_offset = ((bits_upper << (eiw - 8)) + pos) << (eig + 8);
+ }
+
+ /* The lower bits remain unchanged */
+ hpa_offset |= dpa_offset & GENMASK_ULL(eig + 7, 0);
+
+ /* Apply the hpa_offset to the region base address */
+ hpa = hpa_offset + p->res->start;
+
+ if (!cxl_is_hpa_in_range(hpa, cxlr, cxled->pos))
+ return ULLONG_MAX;
+
+ return hpa;
+}
+
+u64 cxl_trace_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd,
+ u64 dpa)
+{
+ struct cxl_region_params *p = &cxlr->params;
+ struct cxl_endpoint_decoder *cxled = NULL;
+
+ for (int i = 0; i < p->nr_targets; i++) {
+ cxled = p->targets[i];
+ if (cxlmd == cxled_to_memdev(cxled))
+ break;
+ }
+ if (!cxled || cxlmd != cxled_to_memdev(cxled))
+ return ULLONG_MAX;
+
+ return cxl_dpa_to_hpa(dpa, cxlr, cxled);
+}
+
static struct lock_class_key cxl_pmem_region_key;
-static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr)
+static int cxl_pmem_region_alloc(struct cxl_region *cxlr)
{
struct cxl_region_params *p = &cxlr->params;
struct cxl_nvdimm_bridge *cxl_nvb;
- struct cxl_pmem_region *cxlr_pmem;
struct device *dev;
int i;
- down_read(&cxl_region_rwsem);
- if (p->state != CXL_CONFIG_COMMIT) {
- cxlr_pmem = ERR_PTR(-ENXIO);
- goto out;
- }
+ guard(rwsem_read)(&cxl_region_rwsem);
+ if (p->state != CXL_CONFIG_COMMIT)
+ return -ENXIO;
- cxlr_pmem = kzalloc(struct_size(cxlr_pmem, mapping, p->nr_targets),
- GFP_KERNEL);
- if (!cxlr_pmem) {
- cxlr_pmem = ERR_PTR(-ENOMEM);
- goto out;
- }
+ struct cxl_pmem_region *cxlr_pmem __free(kfree) =
+ kzalloc(struct_size(cxlr_pmem, mapping, p->nr_targets), GFP_KERNEL);
+ if (!cxlr_pmem)
+ return -ENOMEM;
cxlr_pmem->hpa_range.start = p->res->start;
cxlr_pmem->hpa_range.end = p->res->end;
@@ -2718,10 +2848,8 @@ static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr)
*/
if (i == 0) {
cxl_nvb = cxl_find_nvdimm_bridge(cxlmd);
- if (!cxl_nvb) {
- cxlr_pmem = ERR_PTR(-ENODEV);
- goto out;
- }
+ if (!cxl_nvb)
+ return -ENODEV;
cxlr->cxl_nvb = cxl_nvb;
}
m->cxlmd = cxlmd;
@@ -2732,18 +2860,16 @@ static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr)
}
dev = &cxlr_pmem->dev;
- cxlr_pmem->cxlr = cxlr;
- cxlr->cxlr_pmem = cxlr_pmem;
device_initialize(dev);
lockdep_set_class(&dev->mutex, &cxl_pmem_region_key);
device_set_pm_not_required(dev);
dev->parent = &cxlr->dev;
dev->bus = &cxl_bus_type;
dev->type = &cxl_pmem_region_type;
-out:
- up_read(&cxl_region_rwsem);
+ cxlr_pmem->cxlr = cxlr;
+ cxlr->cxlr_pmem = no_free_ptr(cxlr_pmem);
- return cxlr_pmem;
+ return 0;
}
static void cxl_dax_region_release(struct device *dev)
@@ -2860,9 +2986,10 @@ static int devm_cxl_add_pmem_region(struct cxl_region *cxlr)
struct device *dev;
int rc;
- cxlr_pmem = cxl_pmem_region_alloc(cxlr);
- if (IS_ERR(cxlr_pmem))
- return PTR_ERR(cxlr_pmem);
+ rc = cxl_pmem_region_alloc(cxlr);
+ if (rc)
+ return rc;
+ cxlr_pmem = cxlr->cxlr_pmem;
cxl_nvb = cxlr->cxl_nvb;
dev = &cxlr_pmem->dev;
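
A quick numeric check of the eiw < 8 branch of cxl_dpa_to_hpa() above, as a
standalone program with assumed parameters (2-way interleave so eiw = 1,
256-byte granularity so eig = 0, device at position 1, region base 0):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int eiw = 1, eig = 0, pos = 1;
	uint64_t dpa_offset = 0x100;			/* device-local chunk 1 */
	uint64_t low_mask = (1ULL << (eig + 8)) - 1;	/* bits [eig+7:0] */
	uint64_t mask_upper = (~0ULL >> 12) & ~low_mask; /* bits [51:eig+8] */
	uint64_t hpa_offset;

	hpa_offset = (dpa_offset & mask_upper) << eiw;	/* stretch chunk index */
	hpa_offset |= (uint64_t)pos << (eig + 8);	/* re-insert position */
	hpa_offset |= dpa_offset & low_mask;		/* low bits unchanged */

	printf("hpa_offset = %#llx\n", (unsigned long long)hpa_offset);
	return 0;
}

This prints 0x300: HPA chunks alternate between the two devices, so the device
at position 1 owns chunks 1, 3, 5, ..., and its second local chunk (dpa_offset
0x100) reconstructs to global chunk 3. cxl_is_hpa_in_range() would then
confirm that 0x300 lands in position 1's expected chunk (offset 0x100 within
the 512-byte interleave stride).
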
diff --git a/drivers/cxl/core/trace.c b/drivers/cxl/core/trace.c
index d0403dc3c8ab..7f2a9dd0d0e3 100644
--- a/drivers/cxl/core/trace.c
+++ b/drivers/cxl/core/trace.c
@@ -6,94 +6,3 @@
#define CREATE_TRACE_POINTS
#include "trace.h"
-
-static bool cxl_is_hpa_in_range(u64 hpa, struct cxl_region *cxlr, int pos)
-{
- struct cxl_region_params *p = &cxlr->params;
- int gran = p->interleave_granularity;
- int ways = p->interleave_ways;
- u64 offset;
-
- /* Is the hpa within this region at all */
- if (hpa < p->res->start || hpa > p->res->end) {
- dev_dbg(&cxlr->dev,
- "Addr trans fail: hpa 0x%llx not in region\n", hpa);
- return false;
- }
-
- /* Is the hpa in an expected chunk for its pos(-ition) */
- offset = hpa - p->res->start;
- offset = do_div(offset, gran * ways);
- if ((offset >= pos * gran) && (offset < (pos + 1) * gran))
- return true;
-
- dev_dbg(&cxlr->dev,
- "Addr trans fail: hpa 0x%llx not in expected chunk\n", hpa);
-
- return false;
-}
-
-static u64 cxl_dpa_to_hpa(u64 dpa, struct cxl_region *cxlr,
- struct cxl_endpoint_decoder *cxled)
-{
- u64 dpa_offset, hpa_offset, bits_upper, mask_upper, hpa;
- struct cxl_region_params *p = &cxlr->params;
- int pos = cxled->pos;
- u16 eig = 0;
- u8 eiw = 0;
-
- ways_to_eiw(p->interleave_ways, &eiw);
- granularity_to_eig(p->interleave_granularity, &eig);
-
- /*
- * The device position in the region interleave set was removed
- * from the offset at HPA->DPA translation. To reconstruct the
- * HPA, place the 'pos' in the offset.
- *
- * The placement of 'pos' in the HPA is determined by interleave
- * ways and granularity and is defined in the CXL Spec 3.0 Section
- * 8.2.4.19.13 Implementation Note: Device Decode Logic
- */
-
- /* Remove the dpa base */
- dpa_offset = dpa - cxl_dpa_resource_start(cxled);
-
- mask_upper = GENMASK_ULL(51, eig + 8);
-
- if (eiw < 8) {
- hpa_offset = (dpa_offset & mask_upper) << eiw;
- hpa_offset |= pos << (eig + 8);
- } else {
- bits_upper = (dpa_offset & mask_upper) >> (eig + 8);
- bits_upper = bits_upper * 3;
- hpa_offset = ((bits_upper << (eiw - 8)) + pos) << (eig + 8);
- }
-
- /* The lower bits remain unchanged */
- hpa_offset |= dpa_offset & GENMASK_ULL(eig + 7, 0);
-
- /* Apply the hpa_offset to the region base address */
- hpa = hpa_offset + p->res->start;
-
- if (!cxl_is_hpa_in_range(hpa, cxlr, cxled->pos))
- return ULLONG_MAX;
-
- return hpa;
-}
-
-u64 cxl_trace_hpa(struct cxl_region *cxlr, struct cxl_memdev *cxlmd,
- u64 dpa)
-{
- struct cxl_region_params *p = &cxlr->params;
- struct cxl_endpoint_decoder *cxled = NULL;
-
- for (int i = 0; i < p->nr_targets; i++) {
- cxled = p->targets[i];
- if (cxlmd == cxled_to_memdev(cxled))
- break;
- }
- if (!cxled || cxlmd != cxled_to_memdev(cxled))
- return ULLONG_MAX;
-
- return cxl_dpa_to_hpa(dpa, cxlr, cxled);
-}
diff --git a/drivers/cxl/core/trace.h b/drivers/cxl/core/trace.h
index e5f13260fc52..07a0394b1d99 100644
--- a/drivers/cxl/core/trace.h
+++ b/drivers/cxl/core/trace.h
@@ -253,8 +253,8 @@ TRACE_EVENT(cxl_generic_event,
* DRAM Event Record
* CXL rev 3.0 section 8.2.9.2.1.2; Table 8-44
*/
-#define CXL_DPA_FLAGS_MASK 0x3F
-#define CXL_DPA_MASK (~CXL_DPA_FLAGS_MASK)
+#define CXL_DPA_FLAGS_MASK GENMASK(1, 0)
+#define CXL_DPA_MASK GENMASK_ULL(63, 6)
#define CXL_DPA_VOLATILE BIT(0)
#define CXL_DPA_NOT_REPAIRABLE BIT(1)
@@ -316,9 +316,9 @@ TRACE_EVENT(cxl_generic_event,
TRACE_EVENT(cxl_general_media,
TP_PROTO(const struct cxl_memdev *cxlmd, enum cxl_event_log_type log,
- struct cxl_event_gen_media *rec),
+ struct cxl_region *cxlr, u64 hpa, struct cxl_event_gen_media *rec),
- TP_ARGS(cxlmd, log, rec),
+ TP_ARGS(cxlmd, log, cxlr, hpa, rec),
TP_STRUCT__entry(
CXL_EVT_TP_entry
@@ -330,10 +330,13 @@ TRACE_EVENT(cxl_general_media,
__field(u8, channel)
__field(u32, device)
__array(u8, comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE)
- __field(u16, validity_flags)
/* Following are out of order to pack trace record */
+ __field(u64, hpa)
+ __field_struct(uuid_t, region_uuid)
+ __field(u16, validity_flags)
__field(u8, rank)
__field(u8, dpa_flags)
+ __string(region_name, cxlr ? dev_name(&cxlr->dev) : "")
),
TP_fast_assign(
@@ -354,18 +357,28 @@ TRACE_EVENT(cxl_general_media,
memcpy(__entry->comp_id, &rec->component_id,
CXL_EVENT_GEN_MED_COMP_ID_SIZE);
__entry->validity_flags = get_unaligned_le16(&rec->validity_flags);
+ __entry->hpa = hpa;
+ if (cxlr) {
+ __assign_str(region_name, dev_name(&cxlr->dev));
+ uuid_copy(&__entry->region_uuid, &cxlr->params.uuid);
+ } else {
+ __assign_str(region_name, "");
+ uuid_copy(&__entry->region_uuid, &uuid_null);
+ }
),
CXL_EVT_TP_printk("dpa=%llx dpa_flags='%s' " \
"descriptor='%s' type='%s' transaction_type='%s' channel=%u rank=%u " \
- "device=%x comp_id=%s validity_flags='%s'",
+ "device=%x comp_id=%s validity_flags='%s' " \
+ "hpa=%llx region=%s region_uuid=%pUb",
__entry->dpa, show_dpa_flags(__entry->dpa_flags),
show_event_desc_flags(__entry->descriptor),
show_mem_event_type(__entry->type),
show_trans_type(__entry->transaction_type),
__entry->channel, __entry->rank, __entry->device,
__print_hex(__entry->comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE),
- show_valid_flags(__entry->validity_flags)
+ show_valid_flags(__entry->validity_flags),
+ __entry->hpa, __get_str(region_name), &__entry->region_uuid
)
);
@@ -400,9 +413,9 @@ TRACE_EVENT(cxl_general_media,
TRACE_EVENT(cxl_dram,
TP_PROTO(const struct cxl_memdev *cxlmd, enum cxl_event_log_type log,
- struct cxl_event_dram *rec),
+ struct cxl_region *cxlr, u64 hpa, struct cxl_event_dram *rec),
- TP_ARGS(cxlmd, log, rec),
+ TP_ARGS(cxlmd, log, cxlr, hpa, rec),
TP_STRUCT__entry(
CXL_EVT_TP_entry
@@ -417,10 +430,13 @@ TRACE_EVENT(cxl_dram,
__field(u32, nibble_mask)
__field(u32, row)
__array(u8, cor_mask, CXL_EVENT_DER_CORRECTION_MASK_SIZE)
+ __field(u64, hpa)
+ __field_struct(uuid_t, region_uuid)
__field(u8, rank) /* Out of order to pack trace record */
__field(u8, bank_group) /* Out of order to pack trace record */
__field(u8, bank) /* Out of order to pack trace record */
__field(u8, dpa_flags) /* Out of order to pack trace record */
+ __string(region_name, cxlr ? dev_name(&cxlr->dev) : "")
),
TP_fast_assign(
@@ -444,12 +460,21 @@ TRACE_EVENT(cxl_dram,
__entry->column = get_unaligned_le16(rec->column);
memcpy(__entry->cor_mask, &rec->correction_mask,
CXL_EVENT_DER_CORRECTION_MASK_SIZE);
+ __entry->hpa = hpa;
+ if (cxlr) {
+ __assign_str(region_name, dev_name(&cxlr->dev));
+ uuid_copy(&__entry->region_uuid, &cxlr->params.uuid);
+ } else {
+ __assign_str(region_name, "");
+ uuid_copy(&__entry->region_uuid, &uuid_null);
+ }
),
CXL_EVT_TP_printk("dpa=%llx dpa_flags='%s' descriptor='%s' type='%s' " \
"transaction_type='%s' channel=%u rank=%u nibble_mask=%x " \
"bank_group=%u bank=%u row=%u column=%u cor_mask=%s " \
- "validity_flags='%s'",
+ "validity_flags='%s' " \
+ "hpa=%llx region=%s region_uuid=%pUb",
__entry->dpa, show_dpa_flags(__entry->dpa_flags),
show_event_desc_flags(__entry->descriptor),
show_mem_event_type(__entry->type),
@@ -458,7 +483,8 @@ TRACE_EVENT(cxl_dram,
__entry->bank_group, __entry->bank,
__entry->row, __entry->column,
__print_hex(__entry->cor_mask, CXL_EVENT_DER_CORRECTION_MASK_SIZE),
- show_dram_valid_flags(__entry->validity_flags)
+ show_dram_valid_flags(__entry->validity_flags),
+ __entry->hpa, __get_str(region_name), &__entry->region_uuid
)
);
@@ -642,8 +668,6 @@ TRACE_EVENT(cxl_memory_module,
#define cxl_poison_overflow(flags, time) \
(flags & CXL_POISON_FLAG_OVERFLOW ? le64_to_cpu(time) : 0)
-u64 cxl_trace_hpa(struct cxl_region *cxlr, struct cxl_memdev *memdev, u64 dpa);
-
TRACE_EVENT(cxl_poison,
TP_PROTO(struct cxl_memdev *cxlmd, struct cxl_region *cxlr,
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index 036d17db68e0..80f58b96dc1c 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -12,6 +12,8 @@
#include <linux/node.h>
#include <linux/io.h>
+extern const struct nvdimm_security_ops *cxl_security_ops;
+
/**
* DOC: cxl objects
*
@@ -779,6 +781,11 @@ int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map);
struct cxl_endpoint_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port);
int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map);
int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld);
+static inline int cxl_root_decoder_autoremove(struct device *host,
+ struct cxl_root_decoder *cxlrd)
+{
+ return cxl_decoder_autoremove(host, &cxlrd->cxlsd.cxld);
+}
int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint);
/**
diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
index 36cee9c30ceb..19aba81cdf13 100644
--- a/drivers/cxl/cxlmem.h
+++ b/drivers/cxl/cxlmem.h
@@ -527,6 +527,9 @@ enum cxl_opcode {
CXL_MBOX_OP_SET_TIMESTAMP = 0x0301,
CXL_MBOX_OP_GET_SUPPORTED_LOGS = 0x0400,
CXL_MBOX_OP_GET_LOG = 0x0401,
+ CXL_MBOX_OP_GET_LOG_CAPS = 0x0402,
+ CXL_MBOX_OP_CLEAR_LOG = 0x0403,
+ CXL_MBOX_OP_GET_SUP_LOG_SUBLIST = 0x0405,
CXL_MBOX_OP_IDENTIFY = 0x4000,
CXL_MBOX_OP_GET_PARTITION_INFO = 0x4100,
CXL_MBOX_OP_SET_PARTITION_INFO = 0x4101,
diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
index 2ff361e756d6..74876c9835e8 100644
--- a/drivers/cxl/pci.c
+++ b/drivers/cxl/pci.c
@@ -974,6 +974,75 @@ static struct pci_driver cxl_pci_driver = {
},
};
-module_pci_driver(cxl_pci_driver);
+#define CXL_EVENT_HDR_FLAGS_REC_SEVERITY GENMASK(1, 0)
+static void cxl_handle_cper_event(enum cxl_event_type ev_type,
+ struct cxl_cper_event_rec *rec)
+{
+ struct cper_cxl_event_devid *device_id = &rec->hdr.device_id;
+ struct pci_dev *pdev __free(pci_dev_put) = NULL;
+ enum cxl_event_log_type log_type;
+ struct cxl_dev_state *cxlds;
+ unsigned int devfn;
+ u32 hdr_flags;
+
+ pr_debug("CPER event %d for device %u:%u:%u.%u\n", ev_type,
+ device_id->segment_num, device_id->bus_num,
+ device_id->device_num, device_id->func_num);
+
+ devfn = PCI_DEVFN(device_id->device_num, device_id->func_num);
+ pdev = pci_get_domain_bus_and_slot(device_id->segment_num,
+ device_id->bus_num, devfn);
+ if (!pdev)
+ return;
+
+ guard(device)(&pdev->dev);
+ if (pdev->driver != &cxl_pci_driver)
+ return;
+
+ cxlds = pci_get_drvdata(pdev);
+ if (!cxlds)
+ return;
+
+ /* Fabricate a log type */
+ hdr_flags = get_unaligned_le24(rec->event.generic.hdr.flags);
+ log_type = FIELD_GET(CXL_EVENT_HDR_FLAGS_REC_SEVERITY, hdr_flags);
+
+ cxl_event_trace_record(cxlds->cxlmd, log_type, ev_type,
+ &uuid_null, &rec->event);
+}
+
+static void cxl_cper_work_fn(struct work_struct *work)
+{
+ struct cxl_cper_work_data wd;
+
+ while (cxl_cper_kfifo_get(&wd))
+ cxl_handle_cper_event(wd.event_type, &wd.rec);
+}
+static DECLARE_WORK(cxl_cper_work, cxl_cper_work_fn);
+
+static int __init cxl_pci_driver_init(void)
+{
+ int rc;
+
+ rc = pci_register_driver(&cxl_pci_driver);
+ if (rc)
+ return rc;
+
+ rc = cxl_cper_register_work(&cxl_cper_work);
+ if (rc)
+ pci_unregister_driver(&cxl_pci_driver);
+
+ return rc;
+}
+
+static void __exit cxl_pci_driver_exit(void)
+{
+ cxl_cper_unregister_work(&cxl_cper_work);
+ cancel_work_sync(&cxl_cper_work);
+ pci_unregister_driver(&cxl_pci_driver);
+}
+
+module_init(cxl_pci_driver_init);
+module_exit(cxl_pci_driver_exit);
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(CXL);
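
Two details in the pci.c rework above are worth calling out. First, exit
ordering: cxl_cper_unregister_work() stops new work from being queued,
cancel_work_sync() waits out anything in flight, and only then is the driver
unregistered, so the work function can never run against a dead driver.
Second, the "fabricate a log type" step: CPER records carry no CXL event log
type, so the record severity (the low two bits of a 3-byte little-endian flags
field) stands in for it. A small fragment of just that extraction, with
illustrative names:

#include <linux/bitfield.h>
#include <linux/types.h>
#include <asm/unaligned.h>

#define DEMO_HDR_FLAGS_REC_SEVERITY GENMASK(1, 0)

static int demo_severity(const u8 flags[3])
{
	u32 hdr_flags = get_unaligned_le24(flags);

	return FIELD_GET(DEMO_HDR_FLAGS_REC_SEVERITY, hdr_flags);
}
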
diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c
index 7cb8994f8809..2ecdaee63021 100644
--- a/drivers/cxl/pmem.c
+++ b/drivers/cxl/pmem.c
@@ -11,8 +11,6 @@
#include "cxlmem.h"
#include "cxl.h"
-extern const struct nvdimm_security_ops *cxl_security_ops;
-
static __read_mostly DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX);
static void clear_exclusive(void *mds)
diff --git a/include/linux/cxl-event.h b/include/linux/cxl-event.h
index 03fa6d50d46f..60b25020281f 100644
--- a/include/linux/cxl-event.h
+++ b/include/linux/cxl-event.h
@@ -3,6 +3,10 @@
#ifndef _LINUX_CXL_EVENT_H
#define _LINUX_CXL_EVENT_H
+#include <linux/types.h>
+#include <linux/uuid.h>
+#include <linux/workqueue_types.h>
+
/*
* Common Event Record Format
* CXL rev 3.0 section 8.2.9.2.1; Table 8-42
@@ -91,11 +95,21 @@ struct cxl_event_mem_module {
u8 reserved[0x3d];
} __packed;
+/*
+ * General Media or DRAM Event Common Fields
+ * - provides common access to phys_addr
+ */
+struct cxl_event_common {
+ struct cxl_event_record_hdr hdr;
+ __le64 phys_addr;
+} __packed;
+
union cxl_event {
struct cxl_event_generic generic;
struct cxl_event_gen_media gen_media;
struct cxl_event_dram dram;
struct cxl_event_mem_module mem_module;
+ struct cxl_event_common common;
} __packed;
/*
@@ -140,4 +154,29 @@ struct cxl_cper_event_rec {
union cxl_event event;
} __packed;
+struct cxl_cper_work_data {
+ enum cxl_event_type event_type;
+ struct cxl_cper_event_rec rec;
+};
+
+#ifdef CONFIG_ACPI_APEI_GHES
+int cxl_cper_register_work(struct work_struct *work);
+int cxl_cper_unregister_work(struct work_struct *work);
+int cxl_cper_kfifo_get(struct cxl_cper_work_data *wd);
+#else
+static inline int cxl_cper_register_work(struct work_struct *work)
+{
+ return 0;
+}
+
+static inline int cxl_cper_unregister_work(struct work_struct *work)
+{
+ return 0;
+}
+static inline int cxl_cper_kfifo_get(struct cxl_cper_work_data *wd)
+{
+ return 0;
+}
+#endif
+
#endif /* _LINUX_CXL_EVENT_H */
diff --git a/include/uapi/linux/cxl_mem.h b/include/uapi/linux/cxl_mem.h
index 42066f4eb890..c6c0fe27495d 100644
--- a/include/uapi/linux/cxl_mem.h
+++ b/include/uapi/linux/cxl_mem.h
@@ -47,6 +47,9 @@
___DEPRECATED(SCAN_MEDIA, "Scan Media"), \
___DEPRECATED(GET_SCAN_MEDIA, "Get Scan Media Results"), \
___C(GET_TIMESTAMP, "Get Timestamp"), \
+ ___C(GET_LOG_CAPS, "Get Log Capabilities"), \
+ ___C(CLEAR_LOG, "Clear Log"), \
+ ___C(GET_SUP_LOG_SUBLIST, "Get Supported Logs Sub-List"), \
___C(MAX, "invalid / last command")
#define ___C(a, b) CXL_MEM_COMMAND_ID_##a
diff --git a/tools/testing/cxl/test/mem.c b/tools/testing/cxl/test/mem.c
index 35ee41e435ab..6584443144de 100644
--- a/tools/testing/cxl/test/mem.c
+++ b/tools/testing/cxl/test/mem.c
@@ -127,7 +127,7 @@ static struct {
#define CXL_TEST_EVENT_CNT_MAX 15
/* Set a number of events to return at a time for simulation. */
-#define CXL_TEST_EVENT_CNT 3
+#define CXL_TEST_EVENT_RET_MAX 4
struct mock_event_log {
u16 clear_idx;
@@ -222,6 +222,12 @@ static void mes_add_event(struct mock_event_store *mes,
log->nr_events++;
}
+/*
+ * Vary the number of events returned to simulate events occurring while the
+ * logs are being read.
+ */
+static int ret_limit = 0;
+
static int mock_get_event(struct device *dev, struct cxl_mbox_cmd *cmd)
{
struct cxl_get_event_payload *pl;
@@ -233,14 +239,18 @@ static int mock_get_event(struct device *dev, struct cxl_mbox_cmd *cmd)
if (cmd->size_in != sizeof(log_type))
return -EINVAL;
- if (cmd->size_out < struct_size(pl, records, CXL_TEST_EVENT_CNT))
+ ret_limit = (ret_limit + 1) % CXL_TEST_EVENT_RET_MAX;
+ if (!ret_limit)
+ ret_limit = 1;
+
+ if (cmd->size_out < struct_size(pl, records, ret_limit))
return -EINVAL;
log_type = *((u8 *)cmd->payload_in);
if (log_type >= CXL_EVENT_TYPE_MAX)
return -EINVAL;
- memset(cmd->payload_out, 0, cmd->size_out);
+ memset(cmd->payload_out, 0, struct_size(pl, records, 0));
log = event_find_log(dev, log_type);
if (!log || event_log_empty(log))
@@ -248,7 +258,7 @@ static int mock_get_event(struct device *dev, struct cxl_mbox_cmd *cmd)
pl = cmd->payload_out;
- for (i = 0; i < CXL_TEST_EVENT_CNT && !event_log_empty(log); i++) {
+ for (i = 0; i < ret_limit && !event_log_empty(log); i++) {
memcpy(&pl->records[i], event_get_current(log),
sizeof(pl->records[i]));
pl->records[i].event.generic.hdr.handle =
@@ -256,6 +266,7 @@ static int mock_get_event(struct device *dev, struct cxl_mbox_cmd *cmd)
log->cur_idx++;
}
+ cmd->size_out = struct_size(pl, records, i);
pl->record_count = cpu_to_le16(i);
if (!event_log_empty(log))
pl->flags |= CXL_GET_EVENT_FLAG_MORE_RECORDS;
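
The mock's ret_limit cycling never yields zero: (ret_limit + 1) % 4 walks
1, 2, 3, 0, and the zero is bumped back to 1, so successive Get Event calls
return up to 1, 2, 3, 1, 2, 3, ... records. A standalone sketch of just that
counter logic:

#include <stdio.h>

#define CXL_TEST_EVENT_RET_MAX 4

int main(void)
{
	int ret_limit = 0;

	for (int call = 0; call < 8; call++) {
		ret_limit = (ret_limit + 1) % CXL_TEST_EVENT_RET_MAX;
		if (!ret_limit)
			ret_limit = 1;	/* never return zero records */
		printf("call %d: up to %d records\n", call, ret_limit);
	}
	return 0;
}
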