/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#ifndef __LINUX_ND_H__
#define __LINUX_ND_H__
#include <linux/fs.h>
#include <linux/ndctl.h>
#include <linux/device.h>
#include <linux/badblocks.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>

enum nvdimm_event {
	NVDIMM_REVALIDATE_POISON,
	NVDIMM_REVALIDATE_REGION,
};

enum nvdimm_claim_class {
	NVDIMM_CCLASS_NONE,
	NVDIMM_CCLASS_BTT,
	NVDIMM_CCLASS_BTT2,
	NVDIMM_CCLASS_PFN,
	NVDIMM_CCLASS_DAX,
	NVDIMM_CCLASS_UNKNOWN,
};

#define NVDIMM_EVENT_VAR(_id)  event_attr_##_id
#define NVDIMM_EVENT_PTR(_id)  (&event_attr_##_id.attr.attr)

#define NVDIMM_EVENT_ATTR(_name, _id)				\
	PMU_EVENT_ATTR(_name, NVDIMM_EVENT_VAR(_id), _id,	\
			nvdimm_events_sysfs_show)

/* Event attribute array index */
#define NVDIMM_PMU_FORMAT_ATTR	0
#define NVDIMM_PMU_EVENT_ATTR	1
#define NVDIMM_PMU_CPUMASK_ATTR	2
#define NVDIMM_PMU_NULL_ATTR	3
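
/*
 * Example (illustrative sketch, not part of this interface): a platform
 * PMU driver can declare its counters with NVDIMM_EVENT_ATTR() /
 * NVDIMM_EVENT_PTR() and place the resulting attribute groups at the
 * indices defined above when populating pmu::attr_groups.  The "foo_"
 * names and event ids below are hypothetical; the format and cpumask
 * groups are driver specific and not shown.
 *
 *	enum { FOO_MEDIA_READS = 0x10, FOO_MEDIA_WRITES = 0x11 };
 *
 *	NVDIMM_EVENT_ATTR(media_reads, FOO_MEDIA_READS);
 *	NVDIMM_EVENT_ATTR(media_writes, FOO_MEDIA_WRITES);
 *
 *	static struct attribute *foo_pmu_events_attr[] = {
 *		NVDIMM_EVENT_PTR(FOO_MEDIA_READS),
 *		NVDIMM_EVENT_PTR(FOO_MEDIA_WRITES),
 *		NULL,
 *	};
 *
 *	static const struct attribute_group foo_pmu_events_group = {
 *		.name = "events",
 *		.attrs = foo_pmu_events_attr,
 *	};
 *
 *	static const struct attribute_group *foo_pmu_attr_groups[] = {
 *		[NVDIMM_PMU_FORMAT_ATTR] = &foo_pmu_format_group,
 *		[NVDIMM_PMU_EVENT_ATTR] = &foo_pmu_events_group,
 *		[NVDIMM_PMU_CPUMASK_ATTR] = &foo_pmu_cpumask_group,
 *		[NVDIMM_PMU_NULL_ATTR] = NULL,
 *	};
 */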

/**
 * struct nvdimm_pmu - data structure for nvdimm perf driver
 * @pmu: pmu data structure for nvdimm performance stats.
 * @dev: nvdimm device pointer.
 * @cpu: designated cpu for counter access.
 * @node: node for cpu hotplug notifier link.
 * @cpuhp_state: state for cpu hotplug notification.
 * @arch_cpumask: cpumask to get designated cpu for counter access.
 */
struct nvdimm_pmu {
	struct pmu pmu;
	struct device *dev;
	int cpu;
	struct hlist_node node;
	enum cpuhp_state cpuhp_state;
	/* cpumask provided by arch/platform specific code */
	struct cpumask arch_cpumask;
};

extern ssize_t nvdimm_events_sysfs_show(struct device *dev,
					struct device_attribute *attr,
					char *page);

int register_nvdimm_pmu(struct nvdimm_pmu *nvdimm, struct platform_device *pdev);
void unregister_nvdimm_pmu(struct nvdimm_pmu *nd_pmu);
void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu);
int perf_pmu_register(struct pmu *pmu, const char *name, int type);
void perf_pmu_unregister(struct pmu *pmu);
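
/*
 * Example (illustrative sketch): a platform driver describes its counters
 * in a struct nvdimm_pmu and hands it to register_nvdimm_pmu() from its
 * probe path; the nvdimm core then handles perf registration and cpu
 * hotplug.  The "foo_" callbacks and attribute groups are hypothetical
 * driver-specific implementations (definitions not shown).
 *
 *	static int foo_nvdimm_pmu_init(struct platform_device *pdev)
 *	{
 *		struct nvdimm_pmu *nd_pmu;
 *		int rc;
 *
 *		nd_pmu = kzalloc(sizeof(*nd_pmu), GFP_KERNEL);
 *		if (!nd_pmu)
 *			return -ENOMEM;
 *
 *		nd_pmu->pmu.task_ctx_nr = perf_invalid_context;
 *		nd_pmu->pmu.name = dev_name(&pdev->dev);
 *		nd_pmu->pmu.event_init = foo_pmu_event_init;
 *		nd_pmu->pmu.add = foo_pmu_add;
 *		nd_pmu->pmu.del = foo_pmu_del;
 *		nd_pmu->pmu.read = foo_pmu_read;
 *		nd_pmu->pmu.attr_groups = foo_pmu_attr_groups;
 *		nd_pmu->pmu.capabilities = PERF_PMU_CAP_NO_INTERRUPT |
 *					   PERF_PMU_CAP_NO_EXCLUDE;
 *
 *		rc = register_nvdimm_pmu(nd_pmu, pdev);
 *		if (rc)
 *			kfree(nd_pmu);
 *		return rc;
 *	}
 *
 * On teardown the driver calls unregister_nvdimm_pmu(nd_pmu) and frees
 * the structure.
 */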

struct nd_device_driver {
	struct device_driver drv;
	unsigned long type;
	int (*probe)(struct device *dev);
	void (*remove)(struct device *dev);
	void (*shutdown)(struct device *dev);
	void (*notify)(struct device *dev, enum nvdimm_event event);
};

static inline struct nd_device_driver *to_nd_device_driver(
		struct device_driver *drv)
{
	return container_of(drv, struct nd_device_driver, drv);
}

/**
 * struct nd_namespace_common - core infrastructure of a namespace
 * @force_raw: ignore other personalities for the namespace (e.g. btt)
 * @dev: device model node
 * @claim: when set, another personality has taken ownership of the namespace
 * @claim_class: restrict claim type to a given class
 * @rw_bytes: access the raw namespace capacity with byte-aligned transfers
 */
struct nd_namespace_common {
	int force_raw;
	struct device dev;
	struct device *claim;
	enum nvdimm_claim_class claim_class;
	int (*rw_bytes)(struct nd_namespace_common *, resource_size_t offset,
			void *buf, size_t size, int rw, unsigned long flags);
};

static inline struct nd_namespace_common *to_ndns(struct device *dev)
{
	return container_of(dev, struct nd_namespace_common, dev);
}

/**
 * struct nd_namespace_io - device representation of a persistent memory range
 * @common: core namespace infrastructure; its embedded device is
 *	created by the nd region driver
 * @res: struct resource conversion of a NFIT SPA table
 * @size: cached resource_size(@res) for fast path size checks
 * @addr: virtual address to access the namespace range
 * @bb: badblocks list for the namespace range
 */
struct nd_namespace_io {
	struct nd_namespace_common common;
	struct resource res;
	resource_size_t size;
	void *addr;
	struct badblocks bb;
};

/**
 * struct nd_namespace_pmem - namespace device for dimm-backed interleaved memory
 * @nsio: device and system physical address range to drive
 * @lbasize: logical sector size for the namespace in block-device-mode
 * @alt_name: namespace name supplied in the dimm label
 * @uuid: namespace uuid supplied in the dimm label
 * @id: ida allocated id
 */
struct nd_namespace_pmem {
	struct nd_namespace_io nsio;
	unsigned long lbasize;
	char *alt_name;
	uuid_t *uuid;
	int id;
};

static inline struct nd_namespace_io *to_nd_namespace_io(const struct device *dev)
{
	return container_of(dev, struct nd_namespace_io, common.dev);
}

static inline struct nd_namespace_pmem *to_nd_namespace_pmem(const struct device *dev)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

	return container_of(nsio, struct nd_namespace_pmem, nsio);
}
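
/*
 * Example (illustrative sketch): the conversion helpers above are
 * typically used from an nd bus driver's ->probe() once the device is
 * known to be a pmem namespace.  "foo_ns_probe" is hypothetical and only
 * meant to show how the structures relate.
 *
 *	static int foo_ns_probe(struct device *dev)
 *	{
 *		struct nd_namespace_common *ndns = to_ndns(dev);
 *		struct nd_namespace_io *nsio = to_nd_namespace_io(dev);
 *		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
 *
 *		dev_dbg(dev, "uuid: %pUb size: %llu\n", nspm->uuid,
 *			(unsigned long long) nsio->size);
 *		return ndns->claim ? -EBUSY : 0;
 *	}
 */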

/**
 * nvdimm_read_bytes() - synchronously read bytes from an nvdimm namespace
 * @ndns: device to read
 * @offset: namespace-relative starting offset
 * @buf: buffer to fill
 * @size: transfer length
 * @flags: transfer flags passed through to the namespace ->rw_bytes() op
 *
 * @buf is up-to-date upon return from this routine.
 */
static inline int nvdimm_read_bytes(struct nd_namespace_common *ndns,
		resource_size_t offset, void *buf, size_t size,
		unsigned long flags)
{
	return ndns->rw_bytes(ndns, offset, buf, size, READ, flags);
}

/**
 * nvdimm_write_bytes() - synchronously write bytes to an nvdimm namespace
 * @ndns: device to write
 * @offset: namespace-relative starting offset
 * @buf: buffer to drain
 * @size: transfer length
 * @flags: transfer flags passed through to the namespace ->rw_bytes() op
 *
 * NVDIMM namespace disks do not implement sectors internally.  Depending on
 * the @ndns, the contents of @buf may be in cpu cache, platform buffers,
 * or on backing memory media upon return from this routine.  Flushing
 * to media is handled internally by the @ndns driver, if at all.
 */
static inline int nvdimm_write_bytes(struct nd_namespace_common *ndns,
		resource_size_t offset, void *buf, size_t size,
		unsigned long flags)
{
	return ndns->rw_bytes(ndns, offset, buf, size, WRITE, flags);
}
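
/*
 * Example (illustrative sketch): personalities that claim a namespace
 * (btt, pfn, dax) use the byte-granularity accessors above to manage
 * their on-media metadata.  The "foo_sb" layout and the use of offset 0
 * and flags 0 here are hypothetical.
 *
 *	struct foo_sb {
 *		u8 signature[16];
 *		__le64 flags;
 *	};
 *
 *	static int foo_read_sb(struct nd_namespace_common *ndns,
 *			struct foo_sb *sb)
 *	{
 *		return nvdimm_read_bytes(ndns, 0, sb, sizeof(*sb), 0);
 *	}
 *
 *	static int foo_write_sb(struct nd_namespace_common *ndns,
 *			struct foo_sb *sb)
 *	{
 *		return nvdimm_write_bytes(ndns, 0, sb, sizeof(*sb), 0);
 *	}
 */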

#define MODULE_ALIAS_ND_DEVICE(type) \
	MODULE_ALIAS("nd:t" __stringify(type) "*")
#define ND_DEVICE_MODALIAS_FMT "nd:t%d"

struct nd_region;
void nvdimm_region_notify(struct nd_region *nd_region, enum nvdimm_event event);
int __must_check __nd_driver_register(struct nd_device_driver *nd_drv,
		struct module *module, const char *mod_name);
static inline void nd_driver_unregister(struct nd_device_driver *drv)
{
	driver_unregister(&drv->drv);
}
#define nd_driver_register(driver) \
	__nd_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
#define module_nd_driver(driver) \
	module_driver(driver, nd_driver_register, nd_driver_unregister)
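
/*
 * Example (illustrative sketch): a minimal nd bus driver binds to the
 * device types selected by @type and is registered with
 * module_nd_driver().  The "foo_" functions and the choice of
 * ND_DRIVER_NAMESPACE_PMEM / ND_DEVICE_NAMESPACE_PMEM are hypothetical.
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		return 0;
 *	}
 *
 *	static void foo_remove(struct device *dev)
 *	{
 *	}
 *
 *	static void foo_notify(struct device *dev, enum nvdimm_event event)
 *	{
 *		if (event == NVDIMM_REVALIDATE_POISON)
 *			dev_dbg(dev, "revalidate poison\n");
 *	}
 *
 *	static struct nd_device_driver foo_driver = {
 *		.probe = foo_probe,
 *		.remove = foo_remove,
 *		.notify = foo_notify,
 *		.drv = {
 *			.name = "foo",
 *		},
 *		.type = ND_DRIVER_NAMESPACE_PMEM,
 *	};
 *	module_nd_driver(foo_driver);
 *	MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);
 */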
#endif /* __LINUX_ND_H__ */